% (non-LaTeX dataset-header artifact removed)
\begin{document}
\title{Weighted Orthogonal Polynomials-Based Generalization of Wirtinger-Type Integral Inequalities for Delayed Continuous-Time Systems\thanks{This work was supported in part by the National Natural Science Foundation of China (11371006 and 61203005), the Natural Science Foundation of Heilong\-jiang Province
(QC2013C068, F201326 and A201416),
the Fund of Heilongjiang Education Committee (12541\-603),
and the Postdoctoral Science-research
Developmental Foundation of Heilongjiang Province (LBH-Q12130).}
\slugger{simax}{xxxx}{xx}{x}{x--x}
\begin{abstract}
In the past three years, many researchers have proven and/or employed some Wirtinger-type integral inequalities
to establish less conservative stability criteria for delayed continu\-ous-time systems.
In this present paper, we will investigate weighted orthogonal polynomials-based integral inequalities
which are a generalization of the existing Jensen's inequalities and Wirtinger-type integral inequalities.
\end{abstract}
\begin{keywords}
Wirtinger-type integral inequalities (WTIIs); delayed continuous-time systems; weighted orthogonal polynomials (WOPs).
\end{keywords}
\begin{AMS}
15A45, 34K38, 35A23
\end{AMS}
\pagestyle{myheadings}
\thispagestyle{plain}
\markboth{X., Y. Y. Han, Y. T. Wang, C. Gong}{WOPs-Based Generalization of Wirtinger-Type Integral Inequalities}
\section{Introduction}\label{section-1}
Time delays are inherent in many natural processes and systems,
for example, spread of infectious diseases and epidemics \cite{wang2014global},
population dynamics systems \cite{JMAA-2015-415},
neural networks \cite{CNSNS-2013-1246,JFI-2013-966},
vehicle active suspension \cite{JSSMS-C-2014-1206}, and
biological and chemical systems \cite{N-2014-105,Zhang.Wu.Zou-IEEE-T-CBB}.
Since time delays are generally regarded as one of the main sources of instability and poor performance \cite{N-2015-199,Int.j.RNC-2011-preprint}, the stability analysis issue of time-delay systems is important and has received considerable attention
(see \cite{IEEE-T-CBB-2015-398,A-2015-189,JFI-2015-1378,NCA-2013,NA-RWA-2012-2188} and the references therein).
Most of the results on stability analysis of delayed con\-ti\-nu\-ous-time
systems are obtained by the
Lyapunov-Krasovskii functional (LKF) approach \cite{Gu.Kharitonov.Chen(2003)}.
A key step of the LKF approach is how to construct LKF and to bound its derivative.
It is well-known that an indispensable part of an LKF is some integral terms like
\begin{eqnarray}\label{15aug261}
\mathcal{I}_m(w_t):=\int_{a}^b(s-a)^mw_t^\mathrm{T}(s)Rw_t(s)\mathrm{d}s,\ t\ge 0,\nonumber
\end{eqnarray}
where $w_t:[a,b]\rightarrow \mathbb{R}^n$ is defined by $w_t(s)=w(t+s)$ for all $s\in [a,b]$, $w:[0,+\infty)\rightarrow \mathbb{R}^n$ is a continuous function, $R$ is a real symmetric positive definite matrix, and $m$ is a nonnegative integer.
It is clear that $\mathcal{I}_0(w_t)=\int_a^bw_t^\mathrm{T}(s)Rw_t(s)\mathrm{d}s$ and
\begin{eqnarray}\label{15sep11222}
\mathcal{I}_m(w_t)=m!\!\!\int_{a}^b\!\int_{\theta_1}^b\!\!\cdots\!\!\int_{\theta_m}^b\!\!w_t^\mathrm{T}(s)Rw_t(s)\mathrm{d}s\mathrm{d}\theta_m\cdots\mathrm{d}\theta_{1}\nonumber
\end{eqnarray}
for $m\ge 1$.
Since
\begin{eqnarray}\label{15sep081}
\frac{\mathrm{d}}{\mathrm{d}t}\mathcal{I}_0(w_t)=w_t^\mathrm{T}(b)Rw_t(b)-w_t^\mathrm{T}(a)Rw_t(a)\nonumber
\end{eqnarray}
and
\begin{eqnarray}\label{15aug263}
\frac{\mathrm{d}}{\mathrm{d}t}\mathcal{I}_m(w_t)\hspace*{-.5mm}=\hspace*{-.5mm}(b\hspace*{-.5mm}-\hspace*{-.5mm}a)^mw_t^\mathrm{T}\hspace*{-.5mm}(b)Rw_t\hspace*{-.5mm}(b)\hspace*{-.5mm}-\hspace*{-.5mm}m\mathcal{I}_{m-1}\hspace*{-.5mm}(w_t),\ m\hspace*{-.5mm}\ge \hspace*{-.5mm}1,\nonumber
\end{eqnarray}
the conservativeness of resulting stability criterion relies mainly on the lower bound to $\mathcal{I}_{m-1}(w_t)$ for $m\ge 1$. Usually, the so-called Jensen's inequalities (JIs) \cite{Gu(2000),IJRNC-2009-1364,AMC-2013-714} are applied to bound $\mathcal{I}_k(w_t)$ for any nonnegative integer $k$.
Recently, some new integral inequalities, especially Wir\-ti\-nger-type integral inequalities (WTIIs), have been proposed to improve Jensen's inequalities (i.e., to give more accurate lower bounds of $\mathcal{I}_m(w_t)$ or $\mathcal{I}_m(\dot{w}_t)$) (see \cite{Gu(2000),IJRNC-2009-1364,AMC-2013-714,A-2013-2860,ECC-2014-448,CDC-2013-946,SCL-2015-1,ECC-2013,
A-2015-204,JFI-2015stability,IEEE-T-AC-2015-free,A-2015-189,CNSNS-2013-1246,DDNS-2013-793686,JFI-2015-1378,JFI-2015enhanced,IFAC-2012,NN-2014-57,IET-CTA-2014-1054,IEEE-T-CBB-2015-398,IFAC-2014} and the references therein).
It is shown by Gyurkovics \cite{A-2015-44} that the lower bound of $\mathcal{I}_0(\dot{w}_t)$ given in \cite{A-2013-2860}
is more accurate than ones in \cite{CNSNS-2013-1246,DDNS-2013-793686}, while the estimations to $\mathcal{I}_0(\dot{w}_t)$ obtained in \cite{A-2013-2860,IEEE-T-AC-2015-free} are equivalent.
In this present paper, we aim at reducing the conservativeness of the LKF approach by investigating new integral inequalities based on weighted orthogonal polynomials (WOPs), which contain those JIs and WTIIs mentioned above as special cases.
This paper is organized as follows: In Section \ref{15sep10-section-1}, we will first introduce a class of WOPs, and thereby investigate WOPs-based integral inequalities. Discussions of the relation between the WOPs-based integral inequalities and the JIs and WTIIs in
\cite{Gu(2000),IJRNC-2009-1364,AMC-2013-714,A-2013-2860,ECC-2014-448,CDC-2013-946,SCL-2015-1,ECC-2013,
A-2015-204,JFI-2015stability,IEEE-T-AC-2015-free,A-2015-189,CNSNS-2013-1246,DDNS-2013-793686,JFI-2015-1378,JFI-2015enhanced,IFAC-2012}
will be presented in Section~\ref{15sep11-section-1}.
We will conclude the results of this paper in Section \ref{15jun06-section-1}.
{\bf\emph{Notations:}} The notations used throughout this paper are fairly standard.
Let $\mathbb{R}^{n\times m}$ be the set of all $n\times m$ matrices over the real number field $\mathbb{R}$.
For a matrix $X\in\mathbb{R}^{n\times n}$, the symbols $X^{-1}$ and $X^\mathrm{T}$ denote the inverse and transpose of $X$, respectively.
Set $\mathbb{R}^n=\mathbb{R}^{n\times 1}$ and $X^{-\mathrm{T}}=(X^{-1})^\mathrm{T}$.
The \emph{Kronecker product}, $A\otimes B$, of two matrices $A=[a_{ij}]\in\mathbb{R}^{m\times n}$ and $B\in\mathbb{R}^{p\times q}$ is the $mp\times nq$ block matrix:
\begin{eqnarray}\label{15sep111}
\begin{bmatrix}
a_{11} \mathbf{B} & \cdots & a_{1n}\mathbf{B} \\
\vdots & \ddots & \vdots \\
a_{m1} \mathbf{B} & \cdots & a_{mn} \mathbf{B}
\end{bmatrix}.\nonumber
\end{eqnarray}
Denote by $\mathrm{diag}(\cdots)$ and $\mathrm{col}(\cdots)$ the (block) diagonal matrix and column matrix formed by the elements in brackets, respectively.
\section{WOPs-based integral inequalities}\label{15sep10-section-1}
In this section we will investi\-gate novel WOPs-based integral inequalities, which is a generalization of many JIs and WTIIs in literature.
\subsection{WOPs}
If $p(s)=\sum_{k=0}^{\mathcal{N}}a_ks^k$ and $a_{\mathcal{N}}\not= 0$, then we say $p(s)$ is a polynomial of degree $\mathcal{N}$.
Let $\mathbb{R}[s]_{\mathcal{N}}$ denote the linear space of polynomials with real coefficients of degree not exceeding $\mathcal{N}$.
Set $f_k(s)=(s-a)^k$, $k=0,1,2,\dots,\mathcal{N}$.
Then $\{f_k(s)\}_{k=0}^\mathcal{N}$ is a basis of $\mathbb{R}[s]_{\mathcal{N}}$.
For an arbitrary but fixed nonnegative integer $m$,
define an inner product, $(\cdot,\cdot)_{m}$, on $\mathbb{R}[s]_{\mathcal{N}}$ by
\begin{eqnarray}\label{15aug161}
(p(s),q(s))_{m}=\int_{a}^b(s-a)^mp(s)q(s)\mathrm{d}s
\end{eqnarray}
for any $p(s),q(s)\in \mathbb{R}[s]_{\mathcal{N}}$.
Let $\{p_{km}(s)\}_{k=0}^\mathcal{N}$ be the orthogonal basis of $\mathbb{R}[s]_\mathcal{N}$ which is obtained by applying the Gram-Schmidt orthogonalization process to the basis $\{f_k(s)\}_{k=0}^\mathcal{N}$, that is,
\begin{eqnarray}\label{l062}
p_{0m}(s)&\hspace*{-1mm}=&\hspace*{-1mm}f_0(s),\nonumber\\
p_{im}(s)&\hspace*{-1mm}=&\hspace*{-1mm}f_i(s)-\sum_{j=0}^{i-1}\frac{g_{ijm}}{\chi_{jm}}p_{jm}(s),\ i=1,2,\dots,\mathcal{N},
\end{eqnarray}
where
\begin{eqnarray}\label{l072}
g_{ijm}=(f_i(s),p_{jm}(s))_m,\ \chi_{jm}=(p_{jm}(s),p_{jm}(s))_m.
\end{eqnarray}
Then $\{p_{km}(s)\}_{k=0}^\mathcal{N}$ are WOPs with the weight function $(s-a)^m$.
Furthermore, (\ref{l062}) can be written as the following matrix form:
\begin{eqnarray}\label{l083}
F_\mathcal{N}(s)=G_{\mathcal{N}m}P_{\mathcal{N}m}(s)
\end{eqnarray}
with
\begin{eqnarray}\label{l084}
P_{\mathcal{N}m}(s)&=&\mathrm{col}(p_{0m}(s),p_{1m}(s),\dots,p_{\mathcal{N}m}(s)),\nonumber
\end{eqnarray}
\begin{eqnarray}\label{15sep082}
F_{\mathcal{N}}(s)&=&\mathrm{col}(f_0(s),f_1(s),\dots,f_{\mathcal{N}}(s)),\nonumber
\end{eqnarray}
where $G_{\mathcal{N}m}$ is the $(\mathcal{N}+1)\times (\mathcal{N}+1)$ unit lower triangular matrix with the $(i,j)$-th entry equal to $\frac{g_{i-1,j-1,m}}{\chi_{j-1,m}}$ for any $i>j$.
\subsection{WOPs-based integral inequalities}
To prove WOPs-based integral inequalities, the following property on Kronecker product of matrices is required.
\begin{lemma}\label{15sep11-lemma-1}\cite{Horn(1985)09051182}
If $A$, $B$, $C$ and $D$ are matrices of appropriate sizes, then $(A \otimes B)(C \otimes D) = (AC) \otimes (BD)$.
\end{lemma}
Based on the previous preparation, now we can investigate the following WOPs-based integral inequalities which give lower bounds of $\mathcal{I}_m(w_t)$.
\begin{theorem}\label{15sep08-theorem-1}
For given integers $\mathcal{N}\ge 0$ and $m\ge 0$, a symmetric positive definite matrix $R\in \mathbb{R}^{n\times n}$, and a continuous function $w_t:[a,b]\rightarrow \mathbb{R}^n$, the following inequality holds:
\begin{eqnarray}\label{15aug0427}
\mathcal{I}_m(w_t)\ge
F_{\mathcal{N}m}^\mathrm{T}(w_t)(\Xi_{\mathcal{N}m} \otimes R)F_{\mathcal{N}m}(w_t)
\end{eqnarray}
with
\begin{eqnarray}\label{15aug0426}
F_{\mathcal{N}m}(w_t)=\int_{a}^b(s-a)^m(F_\mathcal{N}(s)\otimes w_t(s))\mathrm{d}s,
\end{eqnarray}
\begin{eqnarray}\label{15aug0428}
\Xi_{\mathcal{N}m}=G_{\mathcal{N}m}^{-\mathrm{T}}\Lambda_{\mathcal{N}m}^{-1}G_{\mathcal{N}m}^{-1},
\end{eqnarray}
\begin{eqnarray}\label{15aug0414}
\Lambda_{\mathcal{N}m}=\mathrm{diag}(\chi_{0m},\chi_{1m},\chi_{2m},\dots,\chi_{\mathcal{N}m}),
\end{eqnarray}
and $F_\mathcal{N}(s)$, $\chi_{km}$ and $G_{\mathcal{N}m}$ are defined as previously.
\end{theorem}
\begin{proof}
Set
\begin{eqnarray}\label{15aug0420}
z(s)=w_t(s)-\sum_{k=0}^\mathcal{N}\chi_{km}^{-1} p_{km}(s)\pi_{km}(w_t)\nonumber
\end{eqnarray}
with
\begin{eqnarray}\label{15aug0413}
\pi_{km}(w_t)=\int_{a}^b(s-a)^mp_{km}(s)w_t(s)\mathrm{d}s.
\end{eqnarray}
Then it follows from (\ref{15aug161}), (\ref{l072}) and the orthogonality of $\{p_{km}(s)\}_{k=0}^\mathcal{N}$ under the weight function $(s-a)^m$ that
\begin{eqnarray}\label{15aug0422}
\mathcal{I}_m(z)=\mathcal{I}_m(w_t)-\sum_{k=0}^\mathcal{N}\chi_{km}^{-1}\pi_{km}^\mathrm{T}(w_t)R\pi_{km}(w_t).\nonumber
\end{eqnarray}
This, together with $\mathcal{I}_m(z)\ge 0$, implies that
\begin{eqnarray}\label{15aug0423}
\mathcal{I}_m(w_t)&\ge&\sum_{k=0}^\mathcal{N}\chi_{km}^{-1}\pi_{km}^\mathrm{T}(w_t)R\pi_{km}(w_t)\nonumber\\
&=&\Pi_{\mathcal{N}m}^\mathrm{T}(w_t)(\Lambda_{\mathcal{N}m}^{-1}\otimes R)\Pi_{\mathcal{N}m}(w_t),
\end{eqnarray}
where
\begin{eqnarray}\label{15aug0412}
\Pi_{\mathcal{N}m}(w_t)=\mathrm{col}(\pi_{0m}(w_t),\pi_{1m}(w_t),\dots,\pi_{\mathcal{N}m}(w_t)). \nonumber
\end{eqnarray}
Since $G_{\mathcal{N}m}$ is a unit lower triangular matrix, it follows from (\ref{l083}), (\ref{15aug0426}) and (\ref{15aug0413}) that
\begin{eqnarray}\label{15aug0424}
\Pi_{\mathcal{N}m}(w_t)
&=&
\int_{a}^b(s-a)^m(P_{\mathcal{N}m}(s)\otimes w_t(s))\mathrm{d}s\nonumber\\
&=&\int_{a}^b(s-a)^m(G_{\mathcal{N}m}^{-1}F_\mathcal{N}(s)\otimes w_t(s))\mathrm{d}s\nonumber\\
&=&(G_{\mathcal{N}m}^{-1}\otimes I_n)F_{\mathcal{N}m}(w_t).\nonumber
\end{eqnarray}
This, together with (\ref{15aug0423}) and Lemma \ref{15sep11-lemma-1}, completes the proof.
\end{proof}
Since the inequality (\ref{15aug0427}) is obtained by using the WOPs (\ref{l062}), we will refer to (\ref{15aug0427}) as \emph{WOPs-based integral inequalities}.
\section{Discussions}\label{15sep11-section-1}
In this section we will discuss the relation between the WOPs-based integral inequalities in Theorem \ref{15sep08-theorem-1} and the JIs and WTIIs in
\cite{Gu(2000),IJRNC-2009-1364,AMC-2013-714,A-2013-2860,ECC-2014-448,CDC-2013-946,SCL-2015-1,ECC-2013,
A-2015-204,JFI-2015stability,IEEE-T-AC-2015-free,A-2015-189,CNSNS-2013-1246,DDNS-2013-793686,JFI-2015-1378,JFI-2015enhanced,IFAC-2012}.
When $(\mathcal{N},m)=(2,0)$, by employing the symbolic operations of MATLAB, one can easily check that
\begin{eqnarray}\label{15sep083}
F_2(s)=\begin{bmatrix}
1\\
s-a\\
(s-a)^2
\end{bmatrix},\
G_{20}^{-1}=\begin{bmatrix}
1& 0& 0\\
\frac{a-b}{2}& 1& 0\\
\frac{(b-a)^2}{6}& a-b& 1
\end{bmatrix},\nonumber
\end{eqnarray}
\begin{eqnarray}\label{15sep084}
\Lambda_{20}=\frac{1}{b-a}\mathrm{diag}(1,\frac{12}{(b-a)^2},\frac{180}{(b-a)^4}),\nonumber
\end{eqnarray}
and hence
\begin{eqnarray}\label{15sep085}
F_{20}(w_t)=\begin{bmatrix}
\int_{a}^bw_t(s)\mathrm{d}s\\
\int_{a}^b\int_{\alpha}^bw_t(s)\mathrm{d}s\mathrm{d}\alpha\\
2\int_{a}^b\int_{\beta}^b\int_{\alpha}^bw_t(s)\mathrm{d}s\mathrm{d}\alpha\mathrm{d}\beta\nonumber
\end{bmatrix},
\end{eqnarray}
\begin{eqnarray}\label{15sep086}
\Xi_{20}(R)=\frac{1}{b-a}\left(\delta_1R\delta_1^\mathrm{T}+3\delta_2R\delta_2^\mathrm{T}+5\delta_3R\delta_3^\mathrm{T}\right),\nonumber
\end{eqnarray}
where
\begin{eqnarray}\label{15sep087}
\delta_1=\begin{bmatrix}
1\\
0\\
0
\end{bmatrix},\ \delta_2=\begin{bmatrix}
1\\
\frac{2}{a-b}\\
0
\end{bmatrix},\ \delta_3=\begin{bmatrix}
1\\
\frac{6}{a-b}\\
\frac{6}{(b-a)^2}
\end{bmatrix}.\nonumber
\end{eqnarray}
This, together with Theorem \ref{15sep08-theorem-1}, yields the following result.
\begin{corollary}\label{15sep08-corollary-1}
When $(\mathcal{N},m)=(2,0)$, the inequality (\ref{15aug0427}) turns into \cite[(13)]{JFI-2015-1378}, that is,
\begin{eqnarray}\label{15sep088}
\mathcal{I}_0(w_t)\ge
\frac{1}{b-a}\left(\Omega_0^\mathrm{T}R\Omega_0+3\Omega_1^\mathrm{T}R\Omega_1+5\Omega_2^\mathrm{T}R\Omega_2\right),
\end{eqnarray}
where
\begin{eqnarray}\label{15sep089}
\Omega_0&=&\int_{a}^bw_t(s)\mathrm{d}s,\nonumber\\
\Omega_1&=&\int_{a}^bw_t(s)\mathrm{d}s-\frac{2}{b-a}\int_{a}^b\int_{\alpha}^bw_t(s)\mathrm{d}s\mathrm{d}\alpha,\nonumber\\
\Omega_2&=&\int_{a}^bw_t(s)\mathrm{d}s-\frac{6}{b-a}\int_{a}^b\int_{\alpha}^bw_t(s)\mathrm{d}s\mathrm{d}\alpha\nonumber\\
&&+\frac{12}{(b-a)^2}\int_{a}^b\int_{\beta}^b\int_{\alpha}^bw_t(s)\mathrm{d}s\mathrm{d}\alpha\mathrm{d}\beta.\nonumber
\end{eqnarray}
\end{corollary}
Similar to Corollary \ref{15sep08-corollary-1}, the following several corollaries can be easily derived from Theorem \ref{15sep08-theorem-1}.
\begin{corollary}\label{15sep08-corollary-2}
When $(\mathcal{N},m)=(1,0)$, the inequality (\ref{15aug0427}) turns into the so-called Wirtinger-based integral inequality \cite[(8)]{JFI-2015-1378}, that is,
\begin{eqnarray}\label{15sep0810}
\mathcal{I}_0(w_t)\ge
\frac{1}{b-a}\left(\Omega_0^\mathrm{T}R\Omega_0+3\Omega_1^\mathrm{T}R\Omega_1\right),
\end{eqnarray}
where $\Omega_0$ and $\Omega_1$ are defined as in Corollary \ref{15sep08-corollary-1}.
\end{corollary}
\begin{corollary}\label{15sep08-corollary-3}
When $\mathcal{N}=0$, the inequality (\ref{15aug0427}) turns into the celebrated Jensen's inequalities
(see \cite{Gu(2000)} and \cite{IJRNC-2009-1364} for the cases $m=0$ and $m=1$, respectively; and \cite[Lemma 1]{AMC-2013-714} for the special case $(a,b)=(-d,0)$), that is,
\begin{eqnarray}\label{15aug262}
&&\int_{a}^b\!\int_{\theta_1}^b\!\!\cdots\!\!\int_{\theta_m}^b\!\!w_t^\mathrm{T}(s)Rw_t(s)\mathrm{d}s\mathrm{d}\theta_m\cdots\mathrm{d}\theta_{1}\nonumber\\
&\ge&
\frac{(m+1)!}{(b-a)^{m+1}}\tilde{\Omega}_{m}^\mathrm{T}R\tilde{\Omega}_{m},
\end{eqnarray}
where
\begin{eqnarray}\label{15sep091}
\tilde{\Omega}_{m}=\int_{a}^b\!\int_{\theta_1}^b\!\!\cdots\!\!\int_{\theta_m}^b\!\!w_t(s)\mathrm{d}s\mathrm{d}\theta_m\cdots\mathrm{d}\theta_{1}.
\end{eqnarray}
\end{corollary}
\begin{corollary}\label{15sep12-corollary-1}
When $\mathcal{N}=1$, the inequality (\ref{15aug0427}) turns into
\begin{eqnarray}\label{15sep121}
&&\int_{a}^b\!\int_{\theta_1}^b\!\!\cdots\!\!\int_{\theta_m}^b\!\!w_t^\mathrm{T}(s)Rw_t(s)\mathrm{d}s\mathrm{d}\theta_m\cdots\mathrm{d}\theta_{1}\nonumber\\
&\ge&
\frac{(m+1)!}{(b-a)^{m+1}}\left(\tilde{\Omega}_{m}^\mathrm{T}R\tilde{\Omega}_{m}
+(m+3)(m+1)\Sigma_m^\mathrm{T}R\Sigma_m\right),\nonumber
\end{eqnarray}
where
\begin{eqnarray}\label{15sep131}
\Sigma_m=\tilde{\Omega}_{m}-\frac{m+2}{b-a}\tilde{\Omega}_{m+1},\nonumber
\end{eqnarray}
and $\tilde{\Omega}_m$ and $\tilde{\Omega}_{m+1}$ are defined as in Corollary \ref{15sep08-corollary-3}.
\end{corollary}
\begin{corollary}\label{15sep08-corollary-4}
When $(\mathcal{N},m)=(1,1)$, the inequality (\ref{15aug0427}) turns into \cite[(16)]{JFI-2015-1378}, that is,
\begin{eqnarray}\label{15sep0812}
\mathcal{I}_1(w_t)\ge
\frac{2}{(b-a)^2}\left(\Omega_3^\mathrm{T}R\Omega_3+8\Omega_4^\mathrm{T}R\Omega_4\right),
\end{eqnarray}
where
\begin{eqnarray}\label{15sep0813}
\Omega_3&=&\int_{a}^b\hspace*{-1mm}\int_{\alpha}^b\hspace*{-1mm}w_t(s)\mathrm{d}s\mathrm{d}\alpha,\nonumber\\
\Omega_4&=&\int_{a}^b\hspace*{-1mm}\int_{\alpha}^b\hspace*{-1mm}w_t(s)\mathrm{d}s\mathrm{d}\alpha-\frac{3}{b-a}\int_{a}^b\hspace*{-1mm}\int_{\beta}^b\hspace*{-1mm}\int_{\alpha}^b\hspace*{-1mm}w_t(s)\mathrm{d}s\mathrm{d}\alpha\mathrm{d}\beta.\nonumber
\end{eqnarray}
\end{corollary}
\begin{corollary}\label{15sep10-corollary-3}
When $m=0$ and $(a,b)=(-h,0)$, the inequality (\ref{15aug0427}) turns into the so-called Bessel--Legendre inequality \cite[Lemma 3]{SCL-2015-1} (i.e., \cite[Lemma 3]{ECC-2014-448}), that is,
\begin{eqnarray}\label{15sep102}
\mathcal{I}_0(w_t)\ge \frac{1}{h}\sum_{k=0}^\mathcal{N}(2k+1)\hat{\Omega}_k^\mathrm{T}R\hat{\Omega}_k,
\end{eqnarray}
where $\hat{\Omega}_k=\int_{-h}^0L_k(s)w_t(s)\mathrm{d}s$, and $\{L_k(s)\}_{k=0}^\mathcal{N}$ are the Legendre orthogonal polynomials defined in \cite[Definition 1]{SCL-2015-1}.
\end{corollary}
If we replace $w_t$ by $\dot{w}_t$ in Corollaries \ref{15sep08-corollary-1}--\ref{15sep08-corollary-4}, then the following several results can be obtained.
\begin{corollary}\label{15sep09-corollary-1}
When $(\mathcal{N},m)=(2,0)$ and $w_t$ is replaced by $\dot{w}_t$, the inequality (\ref{15aug0427}) turns into \cite[(24)]{JFI-2015-1378}, that is,
\begin{eqnarray}\label{15sep098}
\mathcal{I}_0(\dot{w}_t)\ge
\frac{1}{b-a}\left(\Theta_0^\mathrm{T}R\Theta_0+3\Theta_1^\mathrm{T}R\Theta_1+5\Theta_2^\mathrm{T}R\Theta_2\right),
\end{eqnarray}
where
\begin{eqnarray}\label{15sep099}
\Theta_0&=&w_t(b)-w_t(a),\nonumber\\
\Theta_1&=&w_t(b)+w_t(a)-\frac{2}{b-a}\int_{a}^bw_t(s)\mathrm{d}s,\nonumber\\
\Theta_2&=&w_t(b)-w_t(a)+\frac{6}{b-a}\int_{a}^bw_t(s)\mathrm{d}s\nonumber\\
&&-\frac{12}{(b-a)^2}\int_{a}^b\int_{\alpha}^bw_t(s)\mathrm{d}s\mathrm{d}\alpha.\nonumber
\end{eqnarray}
\end{corollary}
\begin{corollary}\label{15sep09-corollary-2}
When $(\mathcal{N},m)=(1,0)$ and $w_t$ is replaced by $\dot{w}_t$, the inequality (\ref{15aug0427}) turns into \cite[Corollary 5]{A-2013-2860} (i.e., \cite[Lemma 2.1]{ECC-2013} and \cite[Lemma 2.1]{CDC-2013-946} or \cite[(23)]{JFI-2015-1378}), that is,
\begin{eqnarray}\label{15sep0910}
\mathcal{I}_0(\dot{w}_t)\ge
\frac{1}{b-a}\left(\Theta_0^\mathrm{T}R\Theta_0+3\Theta_1^\mathrm{T}R\Theta_1\right),
\end{eqnarray}
where $\Theta_0$ and $\Theta_1$ are defined as in Corollary \ref{15sep09-corollary-1}.
\end{corollary}
\begin{corollary}\label{15sep09-corollary-3}
When $\mathcal{N}=0$ and $w_t$ is replaced by $\dot{w}_t$, the inequality (\ref{15aug0427}) turns into the celebrated Jensen's inequalities (see \cite{Gu(2000)} and \cite{IJRNC-2009-1364} for the cases $m=0$ and $m=1$, respectively), that is,
\begin{eqnarray}\label{15sep093}
&&\int_{a}^b\!\int_{\theta_1}^b\!\!\cdots\!\!\int_{\theta_m}^b\!\!\dot{w}_t^\mathrm{T}(s)R\dot{w}_t(s)\mathrm{d}s\mathrm{d}\theta_m\cdots\mathrm{d}\theta_{1}\nonumber\\
&\ge&
\frac{(m+1)!}{(b-a)^{m+1}}\tilde{\Theta}_m^\mathrm{T}R\tilde{\Theta}_m,
\end{eqnarray}
where $\tilde{\Theta}_0=w_t(b)-w_t(a)$ and
\begin{eqnarray}\label{15sep092}
\tilde{\Theta}_m=\frac{(b-a)^m}{m!}w_t(b)-\tilde{\Omega}_{m-1}, m\ge 1.\nonumber
\end{eqnarray}
\end{corollary}
\begin{corollary}\label{15sep12-corollary-2}
When $\mathcal{N}=1$ and $w_t$ is replaced by $\dot{w}_t$, the inequality (\ref{15aug0427}) turns into
\begin{eqnarray}\label{15sep122}
&&\int_{a}^b\!\int_{\theta_1}^b\!\!\cdots\!\!\int_{\theta_m}^b\!\!\dot{w}_t^\mathrm{T}(s)R\dot{w}_t(s)\mathrm{d}s\mathrm{d}\theta_m\cdots\mathrm{d}\theta_{1}\nonumber\\
&\ge&
\frac{(m+1)!}{(b-a)^{m+1}}\left(\tilde{\Theta}_m^\mathrm{T}R\tilde{\Theta}_m
+(m+1)(m+3)\Psi_m^\mathrm{T}R\Psi_m\right),\nonumber
\end{eqnarray}
where
\begin{eqnarray}\label{15sep132}
\Psi_m=-\frac{(b-a)^m}{(m+1)!}w_t(b)-\tilde{\Theta}_{m-1}+\frac{m+2}{b-a}\tilde{\Theta}_{m},\nonumber
\end{eqnarray}
and $\tilde{\Theta}_m$ and $\tilde{\Theta}_{m-1}$ are defined as in Corollary \ref{15sep09-corollary-3}.
\end{corollary}
\begin{corollary}\label{15sep09-corollary-4}
When $(\mathcal{N},m)=(1,1)$ and $w_t$ is replaced by $\dot{w}_t$, the inequality (\ref{15aug0427}) turns into \cite[(25)]{JFI-2015-1378}, that is,
\begin{eqnarray}\label{15sep0912}
\mathcal{I}_1(\dot{w}_t)\ge
2\Theta_3^\mathrm{T}R\Theta_3+4\Theta_4^\mathrm{T}R\Theta_4,
\end{eqnarray}
where
\begin{eqnarray}\label{15sep0913}
\Theta_3&=&w_t(b)\hspace*{-1mm}-\hspace*{-1mm}\frac{1}{b-a}\int_{a}^bw_t(s)\mathrm{d}s,\nonumber\\
\Theta_4&=&w_t(b)\hspace*{-1mm}+\hspace*{-1mm}\frac{2}{b-a}\int_{a}^b\hspace*{-2mm}w_t(s)\mathrm{d}s\hspace*{-1mm}-\hspace*{-1mm}\frac{6}{(b-a)^2}\int_{a}^b\hspace*{-2mm}\int_{\alpha}^b\hspace*{-2mm}w_t(s)\mathrm{d}s\mathrm{d}\alpha.\nonumber
\end{eqnarray}
\end{corollary}
\begin{remark}\label{15sep10-remark-4}
Based on (\ref{15sep0810}), Park \textit{et al.} \cite[Corollary 1]{A-2015-204} derived
\begin{eqnarray}\label{15sep0914}
\mathcal{I}_1(w_t)\ge
\frac{2}{(b-a)^2}\left(\Omega_3^\mathrm{T}R\Omega_3+2\Omega_4^\mathrm{T}R\Omega_4\right).
\end{eqnarray}
Clearly, the inequality (\ref{15sep0812}) is more accurate than (\ref{15sep0914}).
\end{remark}
\begin{remark}\label{15sep10-remark-2}
It has been proven by Gyurkovics in \cite[Corollary 9]{A-2015-44} that Corollary \ref{15sep09-corollary-2} is equivalent to \cite[Lemma 4]{IEEE-T-AC-2015-free} in term of establishing stability criteria for delayed continuous-time systems. By a similar approach, we can show that Corollary \ref{15sep09-corollary-1} is equivalent to \cite[Lemma 1]{A-2015-189}.
However, unlike \cite[Lemma 4]{IEEE-T-AC-2015-free} and \cite[Lemma 1]{A-2015-189}, no free-weighting matrix is involved in Corollaries \ref{15sep09-corollary-1} and \ref{15sep09-corollary-2}.
\end{remark}
\begin{remark}\label{15sep10-remark-1}
It has been proven by Gyurkovics in \cite[Theorem 6]{A-2015-44} that the inequality in \cite[Lemma 2.4]{CNSNS-2013-1246} (i.e., \cite[(12)]{DDNS-2013-793686}) is more conservative than (\ref{15sep0910}). By a similar approach, one can prove that the inequality \cite[(13)]{DDNS-2013-793686} is more conservative than (\ref{15sep0912}).
\end{remark}
\begin{remark}
The inequalities in Corollaries \ref{15sep12-corollary-1} and \ref{15sep12-corollary-2} are more accurate than ones in \cite[Lemmas 5 and 6]{JFI-2015stability}, respectively,
since the coefficients of the second item on the right-hand side of the inequalities in \cite[Lemmas 5 and 6]{JFI-2015stability} is $\frac{m!(m+3)}{(b-a)^{m+1}}$ which is smaller than
$\frac{m!(m+1)^2(m+3)}{(b-a)^{m+1}}$ in Corollaries \ref{15sep12-corollary-1} and \ref{15sep12-corollary-2}.
\end{remark}
\begin{remark}\label{15sep11-remark-1}
Note that Corollary \ref{15sep09-corollary-2} refines the inequality proposed
in \cite[Lemma 5]{IFAC-2012}, in which the second term of the right-hand side is $\frac{\pi^2}{4}\Theta_1^\mathrm{T}R\Theta_1$ which is less than or equal to $3\Theta_1^\mathrm{T}R\Theta_1$. So,
Corollary \ref{15sep09-corollary-2} is less conservative than \cite[Lemma 5]{IFAC-2012}.
\end{remark}
\begin{remark}\label{15sep10-remark-3}
If $(s-a)^k$ is replaced by $(b-s)^k$ for all positive integer $k$ throughout this paper, then we can obtain new WOPs-based integral inequalities like (\ref{15aug0427}), which is a generalization of
\cite[Corollary 4]{A-2013-2860}, \cite[(3.1) and (3.8)]{JFI-2015enhanced} and \cite[(18) and (26)]{JFI-2015-1378}.
\end{remark}
Corollaries \ref{15sep08-corollary-1}--\ref{15sep09-corollary-4} imply that Theorem \ref{15sep08-theorem-1} contains the corresponding results of \cite{Gu(2000),IJRNC-2009-1364,AMC-2013-714,JFI-2015-1378,A-2013-2860,ECC-2014-448,CDC-2013-946,SCL-2015-1,ECC-2013} as special cases,
while Remarks \ref{15sep10-remark-4}--\ref{15sep10-remark-3} present that Theorem \ref{15sep08-theorem-1} improves the corresponding results of \cite{A-2015-204,JFI-2015stability,IEEE-T-AC-2015-free,A-2015-189,CNSNS-2013-1246,DDNS-2013-793686,JFI-2015-1378,JFI-2015enhanced,IFAC-2012}.
Therefore, Theorem \ref{15sep08-theorem-1} is a generalization of the results in these works.
\section{Conclusion}\label{15jun06-section-1}
In this paper, we have provided WOPs-based integral inequalities
which encompass and/or improve the corresponding inequalities in \cite{Gu(2000),IJRNC-2009-1364,AMC-2013-714,A-2013-2860,ECC-2014-448,CDC-2013-946,SCL-2015-1,ECC-2013,
A-2015-204,JFI-2015stability,IEEE-T-AC-2015-free,A-2015-189,CNSNS-2013-1246,DDNS-2013-793686,JFI-2015-1378,JFI-2015enhanced,IFAC-2012}.
From these works, it is clear that the WOPs-based integral inequalities obtained in this paper have potential applications in establishing less conservative stability criteria for delayed continuous-time systems. This will be pursued in our future work.
\end{document}
\begin{document}
\title{$F$-purity deforms in $\mathbb{Q}$-Gorenstein rings}
\author[Thomas Polstra]{Thomas Polstra}
\address{Department of Mathematics, University of Alabama, Tuscaloosa, AL 35487}
\thanks{Polstra was supported in part by NSF Grant DMS \#2101890 and by a grant from the Simons Foundation, Grant \#814268, MSRI.}
\email{[email protected]}
\urladdr{\url{https://thomaspolstra.github.io/}}
\author[Austyn Simpson]{Austyn Simpson}
\thanks{Simpson was supported by NSF Grant DMS \#2202890.}
\address{Department of Mathematics, University of Michigan, Ann Arbor, MI 48109}
\email{[email protected]}
\begin{abstract}
We show that $F$-purity deforms in local $\mathbb{Q}$-Gorenstein rings of prime characteristic $p>0$. Furthermore, we show that $F$-purity is $\mathfrak{m}$-adically stable in local Cohen-Macaulay $\mathbb{Q}$-Gorenstein rings.
\end{abstract}
\maketitle
\section{Introduction}
Let $(R,\mathfrak{m},k)$ be a Noetherian local ring of prime characteristic $p>0$, and let $F:R\rightarrow R$ be the Frobenius endomorphism $r\mapsto r^p$. This article is concerned with \emph{$F$-pure} rings, i.e. rings for which the Frobenius is a pure morphism. The notion of $F$-purity is a central pillar in the vast and expanding web of Frobenius singularities emerging from the celebrated works of Hochster-Roberts and Hochster-Huneke on rings of invariants \cite{HR74,HR76} and tight closure \cite{HH90}, respectively. Some of these so-called $F$-singularities include $F$-regular, $F$-rational, and $F$-injective which all are defined (with the foundational work of Kunz \cite{Kun69} as motivation) by some weakening of the flatness of Frobenius. Although fascinating from a purely commutative algebraic viewpoint, these classes of mild singularities have garnered widespread attention in the past few decades due to their connections to singularities in complex birational geometry \cite{BST17,HW02,Sch09b,Smi97,ST17} and (recently) to those in mixed characteristic \cite{MS21,MSTWW22,BMPSTWW20}.
A natural question to consider of any class of singularities is whether they deform. Recall that a property $\mathcal{P}$ of local rings is said to \emph{deform} if given a local ring $(R,\mathfrak{m},k)$ and a non-zero-divisor $f\in\mathfrak{m}$ such that $R/(f)$ is $\mathcal{P}$, $R$ is $\mathcal{P}$ too. Deformation holds for many familiar local properties\footnote{see \cite[Table 2]{Mur22} for a summary} and can be viewed as a weak version of \emph{inversion of adjunction}.
The study of this phenomenon in the realm of $F$-singularities has a long history dating back to \cite{Fed83} wherein Fedder initiated the exploration of whether $F$-injectivity deforms, a question which remains open as of the present article. It was known early on that all four $F$-singularities deform if the ambient ring is Gorenstein \cite{Fed83,HH94}, but outside of this setting the situation is much more delicate (see Table \ref{summary-table} for a summary). The primary subtlety is that if one does not impose $\mathbb{Q}$-Gorenstein assumptions on the ambient ring, then there are counterexamples (due to Fedder \cite{Fed83} and Singh \cite{Sin99}) which show that $F$-purity (resp. $F$-regularity) do not deform. This obstruction has a counterpart in the characteristic zero minimal model program, namely that log canonical and log terminal singularities (the supposed analogues of $F$-purity and $F$-regularity respectively, evidenced by \cite{Har98, HW02, Her16, Tak13}) do not deform without additional $\mathbb{Q}$-Gorenstein hypotheses (see \cite[Example 9.1.7 \& Remark 9.1.15]{Ish18}). The main contribution of the present article is a complete resolution to the question of whether $F$-purity deforms in $F$-finite rings, which we answer affirmatively in the $\mathbb{Q}$-Gorenstein scenario.
\begin{Theoremx} (Theorem \ref{Theorem F-purity deforms})
\label{Main theorem F-purity}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite $\mathbb{Q}$-Gorenstein ring of prime characteristic $p>0$. Suppose that $f\in \mathfrak{m}$ is a non-zero-divisor such that $R/(f)$ is Gorenstein in codimension $1$, $(S_2)$, and $F$-pure. Then $R$ is $F$-pure.
\end{Theoremx}
The analogous statement regarding the deformation of $F$-regularity was obtained in \cite{AKM98} over two decades ago via tight closure techniques which by present standards are considered classical. Experts were aware that special cases of Theorem \ref{Main theorem F-purity} were achievable if the $\mathbb{Q}$-Gorenstein index of $R$ is coprime to the characteristic (see \cite[Remark 4.10(1)]{HW02} and \cite[Proposition 7.2]{Sch09a}), but the index-free version of the conjecture remained elusive until now.
We wish to emphasize that the history of this problem mirrors that of the deformation and inversion of adjunction problems for log terminal and log canonical singularities in the characteristic zero MMP in two ways: the failure already described above outside of the $\mathbb{Q}$-Gorenstein scenario, and in the jump in difficulty between the two methods. Log terminal singularities were seemingly known to satisfy inversion of adjunction in the program's infancy (see e.g. \cite[Theorem 5.50]{KM98}). By contrast, the analogous conjecture for log canonicity vexed researchers for several decades before Kawakita's solution in \cite{Kaw07}. The parallels that one may draw in the prime characteristic world thus unsurprisingly point to a higher degree of subtlety in the deformation problem for $F$-purity than that of $F$-regularity, which we believe is visible from our methods in Section \ref{Section F-purity deforms and is stable}. We employ a novel strategy involving cyclic covers to remove $p$-torsion of the canonical module, and we remark that this trick cannot be used to obtain a more general inversion of adjunction statement for $F$-pure pairs.
Shifting gears, a related problem that we conclude the article with is that of $\mathfrak{m}$-adic stability for $F$-purity. Following \cite{DSS20}, a property $\mathcal{P}$ of local rings is said to be \emph{$\mathfrak{m}$-adically stable} if given a local ring $(R,\mathfrak{m},k)$ and a non-zero-divisor $f\in \mathfrak{m}$ such that $R/(f)$ is $\mathcal{P}$, there exists an integer $N>0$ so that for every $\epsilon\in\mathfrak{m}^N$, $R/(f+\epsilon)$ is also $\mathcal{P}$. This notion of stability is related to both deformation and to finite determinacy, has early roots in \cite{Eis74,Hir65,Sam56}, and has sparked a flurry of recent activity \cite{DSS20,Dua22,MQS20,PS18,QT21}. Provided that $\mathcal{P}$ descends along faithfully flat maps and that $\mathcal{P}$ passes from $R$ to $R[x]_{(\mathfrak{m},x)}$ then stability of $\mathcal{P}$ implies deformation of $\mathcal{P}$ \cite[Theorem 2.4]{DSS20}. \emph{Op. cit.} initiates the study of $\mathfrak{m}$-adic stability for $F$-singularities, and it is shown that stability holds (or fails) in many of the same instances that deformation does (see Table \ref{summary-table}). We bolster this program by showing that $F$-purity is $\mathfrak{m}$-adically stable in $\mathbb{Q}$-Gorenstein \emph{Cohen-Macaulay} rings.
\begin{Theoremx} (Theorem \ref{Theorem F-purity deforms Q-Gorenstein index p^e_0})
Let $(R,\mathfrak{m},k)$ be a local $F$-finite Cohen-Macaulay $\mathbb{Q}$-Gorenstein ring of prime characteristic $p>0$. Suppose that $f\in \mathfrak{m}$ is a non-zero-divisor such that $R/(f)$ is Gorenstein in codimension $1$, and $F$-pure. Then there exists $N>0$ so that $R/(f+\epsilon)$ is $F$-pure for every $\epsilon\in\mathfrak{m}^N$.
\end{Theoremx}
\subsection{Notation, Conventions, and Organization of the Paper}
All rings in this article are commutative with unit and Noetherian. Typically, our interests lie in the development of the theory of prime characteristic rings, especially in Section~\ref{Section F-purity deforms and is stable}. If $R$ is of prime characteristic $p>0$, then we denote by $F:R\to R$ the Frobenius endomorphism. If $I\subseteq R$ is an ideal, for each $e\in\mathbb{N}$ we denote by $I^{[p^e]}$ the ideal $\langle r^{p^e}\mid r\in I\rangle$. For each $e\in\mathbb{N}$ and $R$-module $M$ we let $F^e_*M$ denote the $R$-module obtained from $M$ via restriction of scalars along $F^e$. That is, $F^e_*M$ agrees with $M$ as an abelian group, and given $r\in R$ and $m\in M$ we have $r\cdot F^e_*(m)=F^e_*(r^{p^e} m)$ where $F^e_*(m)$ is the element of $F^e_*M$ corresponding to $m$. Oftentimes we assume that $R$ is \emph{$F$-finite}, i.e. if $M$ is a finitely generated $R$-module, so too is $F^e_* M$ for each $e\in\mathbb{N}$.
We often assume that a local ring is the homomorphic image of a regular local ring, an assumption that is satisfied by every $F$-finite local ring, \cite[Remark~13.6]{Gab04}. If $R\cong S/I$ where $S$ is a regular local ring then $\operatorname{Ext}^{\operatorname{ht}(I)}_S(R,S)$ is a choice of canonical module of $R$. We caution the reader that we do \emph{not} require $\mathbb{Q}$-Gorenstein rings to be Cohen-Macaulay or normal; see Definition \ref{Definition Q-gor} for a precise definition.
In the study of $\mathfrak{m}$-adic stability, we occasionally use the shorthand ``$R/(f+\epsilon)$ satisfies property $\mathcal{P}$ for all $\epsilon\in\mathfrak{m}^{N\gg 0}$'' to mean that there exists an integer $N>0$ so that $R/(f+\epsilon)$ is $\mathcal{P}$ for all $\epsilon\in\mathfrak{m}^N$.
\section{Preliminary Results}
\label{Section Preliminary}
\subsection{Generalized divisors, divisorial ideals, and cyclic covers}
A ring $R$ is $(G_1)$ if $R$ is Gorenstein in codimension $1$. Suppose that $(R,\mathfrak{m},k)$ is a local $(G_1)$ ring satisfying Serre's condition $(S_2)$, and let $L$ denote the total ring of fractions of $R$. Following \cite{Har94} there is a well-defined notion of linear equivalence on the collection of divisors which are Cartier in codimension $1$, and hence there is a well-defined additive structure on such divisors up to linear equivalence. If $D$ is a Weil divisor which is Cartier in codimension $1$ then we let $R(D)$ denote the corresponding divisorial ideal of $R$, i.e.
\[
R(D)=\{x\in L\setminus\{0\}\mid \mbox{div}(x)+D\geq 0\}\cup\{0\}.
\]
In other words, $R(D)$ denotes the global sections of $\mathcal{O}_{\operatorname{Spec} R}(D)$. We assume that $R$ admits a dualizing complex and thus admits a canonical module $\omega_R$. Under our assumptions we have that $\omega_R\cong R(K_X)$ for a choice of \operatorname{e_{HK}}mph{canonical divisor} $K_X$ on $X=\operatorname{Spec}(R)$ which is Cartier in codimension $1$ (see \cite[Proposition 2.8]{Har94}).
\begin{definition}\label{Definition Q-gor}
A ring $R$ is said to be \emph{$\mathbb{Q}$-Gorenstein} if $R$ has a canonical module and
\begin{enumerate}
\item $R$ is $(G_1)$ and $(S_2)$ with choice of canonical divisor $K_X$;
\item there exists an integer $n$ so that $nK_X$ is Cartier.
\end{enumerate} The least integer $n$ so that $nK_X$ is Cartier is referred to as the \emph{$\mathbb{Q}$-Gorenstein index of $R$}. The $\mathbb{Q}$-Gorenstein index is independent of choice of canonical divisor as $K_X$ is Cartier in codimension $1$, i.e. $R$ is $(G_1)$.
\end{definition}
If $D\leq 0$ is an anti-effective divisor, Cartier in codimension $1$, then $I=R(D)\subseteq R$ is an ideal of pure height $1$. Conversely, if $I\subseteq R$ is an ideal of pure height $1$, principal in codimension $1$, then $I=R(D)$ for some anti-effective divisor $D$ which is Cartier in codimension $1$. For every natural number $N\geq 1$ the divisorial ideal $R(ND)$ agrees with $I^{(N)}$, the $N$th symbolic power of $I$.
Suppose that $D$ is a $\mathbb{Q}$-Cartier (i.e. torsion, since $R$ is local) divisor of index $n$ which is Cartier in codimension $1$. Suppose that $R(nD)=R\cdot u$. The cyclic cover of $R$ corresponding to $D$ is the finite $R$-algebra
\[
R\to R_D:= \bigoplus_{i=0}^{n-1}R(-iD)t^{-i}\cong \bigoplus_{i=0}^{\infty}R(-iD)t^{-i}/(u^{-1}t^{-n}-1).
\]
The map $R\to R_D$ is a finite $R$-module homomorphism and the ring $R_D$ decomposes into a direct sum of $(S_2)$ $R$-modules, hence $R_D$ is $(S_2)$. Furthermore, we can explicitly describe the canonical module of $R_D$ as
\[
\omega_{R_D}\cong \operatorname{Hom}_R(R_D, R(K_X))\cong \bigoplus_{i=0}^{n-1}R(K_X+iD)t^i.
\]
The above computation commutes with localization and hence $R_D$ is Gorenstein in codimension $1$. Indeed, if $P$ is a height $1$ prime ideal of $R_D$ then $\mathfrak{p}=P \cap R$ is a height $1$ prime of $R$ and $R_D\otimes_R R_\mathfrak{p} \cong \omega_{R_D}\otimes_R R_\mathfrak{p}$. Localizing further we find that $(\omega_{R_D})_{P}\cong (R_D)_P$. In what follows, we denote $(-)^\vee:=\operatorname{Hom}_{R_D}(-,R_D)$.
The following lemma is well-known to experts. We sketch a proof for sake of completeness.
\begin{lemma}
\label{Lemma cyclic cover changing index}
Let $(R,\mathfrak{m},k)$ be a local $\mathbb{Q}$-Gorenstein ring with canonical divisor $K_X$ on $X=\operatorname{Spec}(R)$. Suppose that $R$ is of $\mathbb{Q}$-Gorenstein composite index $m\cdot n$ and $D:=mK_X$. Then the cyclic cover $R_D$ corresponding to the $\mathbb{Q}$-Cartier divisor $D$ is $\mathbb{Q}$-Gorenstein of index $m$.
\end{lemma}
\begin{proof}
We identify $\omega_{R_D}$ as $\bigoplus_{i=0}^{n-1}R(K_X+iD)t^i$ as above. Notice that after relabeling indices we have
\[
\omega_{R_D}\cong \bigoplus_{i=0}^{n-1}R(K_X+iD)t^i\cong \bigoplus_{i=0}^{n-1}R(K_X-(m-i)D)t^{-(m-i)}\cong (R(K_X)R_D)^{\vee\vee}
\]
where $R(K_X)R_D$ is the extension of $R(K_X)$ to the cyclic cover $R_D$ and $(R(K_X)R_D)^{\vee \vee}$ is the reflexification of $R(K_X)R_D$. Therefore if $K_Y$ is a choice of canonical divisor of $Y=\operatorname{Spec}(R_D)$ and $\pi: Y\to X$ is the associated map of affine schemes then $K_Y\sim \pi^*K_X$, where $\pi^*K_X$ is the divisor on $Y$ such that $R_D(\pi^*K_X)=(R(K_X)R_D)^{\vee\vee}$. Therefore $mK_Y\sim \pi^*mK_X=\pi^*D$, and $\pi^*D\sim 0$ as $(R(D)R_D)^{\vee\vee}$ is isomorphic to a shift of $R_D$.
If $0<m'<m$ then $m'K_Y\sim\pi^*m'K_X$ is not principal. Else, $R_D$ is isomorphic to a shift of $(R(m'K_Y)R_D)^{\vee\vee}$. If this is the case, then $(m'-im)K_X\sim 0$ for some $0\leq i<n$. This is a contradiction as $1\leq -(m'-im)<mn$ and we assumed the index of $K_X$ to be $mn$.
\end{proof}
In Section~\ref{Section F-purity deforms and is stable} we will need to understand the behavior of divisorial ideals under base change. In particular, we will need to know that the base change of a divisorial ideal remains a divisorial ideal under suitable hypotheses. The following lemma will be a key tool in accomplishing this.
\begin{lemma}
\label{Lemma base change of divisorial ideal}
Let $(R,\mathfrak{m},k)$ be a local ring which is $(S_2)$ and $(G_1)$. Suppose that $J\subsetneq R$ is a canonical ideal of $R$. Let $f\in \mathfrak{m}$ be a non-zero-divisor so that $f$ is regular on $R/J$ and $R/(f)$ is $(S_2)$ and $(G_1)$. If for each $\mathfrak{p}\in V(f)$ we have that $\operatorname{depth}(J^{(n)}R_\mathfrak{p})\geq \min \{\operatorname{ht}(\mathfrak{p}),3\}$, (i.e. $J^{(n)}R_\mathfrak{p}$ is $(S_3)$ for each $\mathfrak{p}\in V(f)$), then $J^{(n)}/fJ^{(n)}\cong \left((J,f)/(f)\right)^{(n)}$. In particular, the isomorphism holds when $J^{(n)}$ is Cohen-Macaulay.
\end{lemma}
\begin{proof}
Consider the short exact sequence
\[
0\to J^{(n)}\to R\to R/J^{(n)}\to 0.
\]
Since $f$ is regular on $R$ and $R/J$ we have that $f$ is regular on $R/J^{(n)}$ for all $n$ and $\operatorname{Tor}_1(R/(f),R/J^{(n)})\cong H_1(f;R/J^{(n)})=0$, hence there is a short exact sequence
\[
0\to \frac{J^{(n)}}{fJ^{(n)}}\to \frac{R}{(f)}\to \frac{R}{(J^{(n)},f)}\to 0,
\]
and therefore $J^{(n)}/fJ^{(n)}\cong (J^{(n)},f)/(f)$.
We notice that the ideals $(J^{(n)},f)/(f)$ and $\left((J,f)/(f)\right)^{(n)}$ agree at codimension $1$ points of $\operatorname{Spec}(R/(f))$ by the assumption that $R/(f)$ is $(G_1)$. Moreover, our assumptions imply that $(J^{(n)},f)/(f)$ is an $(S_2)$ $R/(f)$-module, which together with the previous sentence tells us that $(J^{(n)},f)/(f)\cong \left((J,f)/(f)\right)^{(n)}$ as claimed.
\end{proof}
\subsection{\texorpdfstring{$\mathbb{Q}$}{ℚ}-Gorenstein rings and the \texorpdfstring{$(G_1)$}{G\_1} property}
The following result shows that being Gorenstein in codimension $1$ deforms and is $\mathfrak{m}$-adically stable. The proof uses a standard trick involving the Krull intersection theorem which we use elsewhere in this article with details omitted.
\begin{proposition}
\label{Proposition Gorenstein in codimension 1 is stable}
Let $(R,\mathfrak{m},k)$ be a local equidimensional and catenary ring which admits a canonical module. Suppose that $\dim R\geq 2$. If $f\in \mathfrak{m}$ is a non-zero-divisor and $R/(f)$ is $(G_1)$, then $R$ is $(G_1)$. If $R$ is further assumed to be excellent, then the rings $R/(f+\epsilon)$ are $(G_1)$ for all $\epsilon\in\mathfrak{m}^{N\gg 0}$.
\end{proposition}
\begin{proof}
Suppose that $\mathfrak{p}$ is a height $1$ prime. Then the ideal $(\mathfrak{p},f)$ has height no more than $2$ since we are assuming $R$ is equidimensional and catenary and hence we can choose a height $2$ prime $\mathfrak{q}\in V((\mathfrak{p},f))$. Then $R_\mathfrak{q}/(f)R_\mathfrak{q}$ is Gorenstein by assumption. The property of being Gorenstein deforms \cite[\href{https://stacks.math.columbia.edu/tag/0BJJ}{Tag 0BJJ}]{stacks-project} and therefore $R_\mathfrak{q}$ is Gorenstein. Localizing further we find that $R_\mathfrak{p}$ is Gorenstein as well, hence $R$ is $(G_1)$.
The non-Gorenstein locus of an excellent local ring is a closed subset (e.g. by \cite[Theorem 24.6]{Mat86}). Therefore since the ring $R$ is Gorenstein in codimension $1$, the height $2$ primes in the non-Gorenstein locus of $R$ form a finite set, say $\{\mathfrak{p}_1,\dots, \mathfrak{p}_t\}$. Furthermore, $f\notin\mathfrak{p}_i$ for all $i$ by the assumption that $R/(f)$ is $(G_1)$. Moreover, $\bigcap_N(\mathfrak{m}^N+\mathfrak{p}_i)=\mathfrak{p}_i$ for all $i$ by Krull's intersection theorem, so there exists $N_i> 0$ so that $f\notin\mathfrak{m}^{N_i}+\mathfrak{p}_i$. Choosing $N\geq \max_i\{N_i\}$ we obtain that $f+\epsilon\notin\mathfrak{p}_i$ for all $i$ and all $\epsilon\in\mathfrak{m}^N$. Hence, $R/(f+\epsilon)$ is $(G_1)$ for all $\epsilon\in\mathfrak{m}^N$.
\end{proof}
\begin{lemma}
\label{lemma base change the canonical module}
Let $(R,\mathfrak{m},k)$ be a local ring and $f\in \mathfrak{m}$ a non-zero-divisor of $R$ such that $R/(f)$ is $(G_1)$ and $(S_2)$. Further suppose that $R$ has a canonical module. Let $J\subsetneq R$ be a canonical ideal of $R$ such that $f$ is regular on $R/J$. If $\left((J,f)/(f)\right)^{\operatorname{un}}$ denotes the intersection of the minimal primary components of the ideal $(J,f)/(f)$ of $R/(f)$ then $\left((J,f)/(f)\right)^{\operatorname{un}}\cong \omega_{R/(f)}$.
\end{lemma}
\begin{proof}
Consider the short exact sequence
\[
0\to R\xrightarrow{\cdot f}R\to R/(f)\to 0.
\]
Observe that $\omega_{R/(f)}\cong \operatorname{Ext}^1_R(R/(f),J)$ by \cite[Satz 5.12]{HZ71} and so there is an exact sequence
\[
0\to J\xrightarrow{\operatorname{cd}ot f}J\to \omega_{R/(f)}\to \operatorname{Ext}^1_S(R,J)
\]
and hence there is a left exact sequence
\[
0\to \frac{J}{fJ}\cong \frac{(J,f)}{(f)}\to \omega_{R/(f)}\to \operatorname{Ext}_S^{1}(R,J).
\]
By the assumption that $R/(f)$ is $(S_2)$, we know that $\operatorname{Ext}_S^{1}(R,J)$ is not supported at any height $1$ component of $R/(f)$. Hence $(J,f)/(f)\to \omega_{R/(f)}$ is an isomorphism at codimension $1$ points of $R/(f)$. By \cite[Proposition~1.11]{Har94} we have that if $(-)^\vee$ denotes $\operatorname{Hom}_R(-,R/(f))$ then
\[
\left(\frac{(J,f)}{(f)}\right)^{\vee\vee}\cong \omega_{R/(f)}.
\]
Therefore the lemma is proven as $\left((J,f)/(f)\right)^{\vee\vee}\cong \left((J,f)/(f)\right)^{\operatorname{un}}$.
\end{proof}
\begin{proposition}
\label{Proposition Q-Gorenstein index under base change}
Let $(R,\mathfrak{m},k)$ be an excellent equidimensional local $\mathbb{Q}$-Gorenstein ring of index $n$ satisfying Serre's condition $(S_2)$. Suppose that $f\in \mathfrak{m}$ is a non-zero-divisor and $R/(f)$ is $(G_1)$ and $(S_2)$. Then $R/(f)$ is $\mathbb{Q}$-Gorenstein of index dividing $n$. Moreover, we may choose canonical ideal $J\subsetneq R$ and element $a\in J$ so that
\begin{enumerate}
\item $J^{(n)}=(a)$ and $J/fJ\cong (J,f)/(f)$;
\item $\left((J,f)/(f)\right)^{\operatorname{un}}\cong \omega_{R/(f)}$ and $\left(\left((J,f)/(f)\right)^{\operatorname{un}}\right)^{(n)}=(a,f)/(f)$.
\end{enumerate}
\end{proposition}
\begin{proof}
By Proposition~\ref{Proposition Gorenstein in codimension 1 is stable} the ring $R$ is $(G_1)$. We first show the existence of a canonical ideal $J\subseteq R$ so that $((J,f)/(f))^{\operatorname{un}}\subseteq R/(f)$ is a canonical ideal of $R/(f)$. We will then show that $(((J,f)/(f))^{\operatorname{un}})^{(n)}$ is a principal ideal of $R/(f)$, i.e. the $\mathbb{Q}$-Gorenstein index of $R/(f)$ divides the $\mathbb{Q}$-Gorenstein index of $R$ as claimed.
Suppose that $\omega_R$ is a canonical module of $R$. Let $W$ be the complement of the union of the height $1$ components of $f$. Then $(\omega_R)_W\cong R_W$. Thus there exists $u\in \omega_R$ and ideal $J\subseteq R$ not contained in any height $1$ component of $(f)$, so that $\omega_R\cong J\cdot u$. Then $J\subseteq R$ is a canonical ideal of $R$. Moreover, since the components of $J$ are disjoint from the components of $(f)$ we may assume that $J$ is an ideal of pure height $1$ and $f$ is a regular element of $R/J$. As in the proof of Lemma~\ref{Lemma base change of divisorial ideal} we have that $J/fJ\cong (J,f)/(f)$ and we have by Lemma~\ref{lemma base change the canonical module} that $((J,f)/(f))^{\operatorname{un}}\cong \omega_{R/(f)}$.
Suppose that $J^{(n)}=(a)$. We claim that $(\left((J,f)/(f)\right)^{\operatorname{un}})^{(n)}=(a,f)/(f)$. Equivalently, we need to show that if $\mathfrak{p}\in V(f)$ with $\operatorname{ht}\mathfrak{p}=2$ then $\left((J^n,f)/(f)\right)R_\mathfrak{p}=\left( (a,f)/(f)\right) R_\mathfrak{p}$. We are assuming $R/(f)$ is $(G_1)$ and hence $R_\mathfrak{p}/(f)R_\mathfrak{p}$ is Gorenstein. The property of being Gorenstein deforms and therefore $R_\mathfrak{p}$ is Gorenstein. In particular, $J^NR_\mathfrak{p}=J^{(N)}R_\mathfrak{p}$ for every $N\in \mathbb{N}$ and so
\[
\left((J^n,f)/(f)\right)R_\mathfrak{p}=\left((J^{(n)},f)/(f)\right)R_\mathfrak{p}=\left( (a,f)/(f)\right) R_\mathfrak{p}
\]
as claimed. Therefore the index of $R/(f)$ divides the index of $R$.
\end{proof}
\subsection{Degeneracy ideals}
Our study of $\mathfrak{m}$-adic stability of $F$-purity (Theorem \ref{Theorem F-purity deforms Q-Gorenstein index p^e_0}) requires a careful analysis of the Frobenius degeneracy ideals of $R$ which is partially contained in this subsection. We suppose that $(R,\mathfrak{m},k)$ is a local $F$-finite ring of prime characteristic $p>0$. The $e$th Frobenius degeneracy ideal of $R$ is the ideal
\[
I_e(R):=\{c\in R\mid R\xrightarrow{1\mapsto F^e_*c}F^e_*R\mbox{ is not pure}\}.
\]
Frobenius degeneracy ideals were introduced by Yao in \cite{Yao06} and by Aberbach and Enescu in \cite{AE05} and play a prominent role in prime characteristic commutative algebra, especially in the study of $F$-regularity and $F$-purity. It is important for us to notice that a ring $R$ is $F$-pure if and only if $I_e(R)$ is a proper ideal for some (equivalently every) natural number $e\in\mathbb{N}$.
If $R$ is Cohen-Macaulay (e.g if $R$ is $F$-regular) the Frobenius degeneracy ideals $I_e(R)$ may be realized as certain colon ideals, and this viewpoint has been central to the study of the ``weak implies strong" conjecture and $F$-signature theory (see \cite{AP19, HL02, PS18, PT18, WY04} for example).
\begin{proposition}[{\cite[Lemma~6.2]{PT18}}]
\label{Proposition degeneracy ideals as colon ideals}
Let $(R,\mathfrak{m},k)$ be an $F$-finite local Cohen-Macaulay ring of prime characteristic $p>0$. Suppose that $R$ admits a canonical ideal $J\subsetneq R$, $0\neq x_1\in J$ a non-zero-divisor, $x_2,\ldots, x_d$ parameters on $R/(x_1)$, and suppose that $u$ generates the socle of the $0$-dimensional Gorenstein quotient $R/(J,x_2,\ldots,x_d)$. Then for each $e\in \mathbb{N}$ there exists $t_e\in\mathbb{N}$ so that for all $t\geq t_e$
\[
I_e(R)=(x_1^{t-1}J,x_2^t,\ldots,x_d^t)^{[p^e]}:_R (x_1\cdots x_d)^{(t-1)p^e}u^{p^e}.
\]
\end{proposition}
\begin{proof}
Under the listed assumptions, it follows that $R$ is approximately Gorenstein by \cite[page 53]{AL03} (see definition in \textit{op. cit.}). The statement then follows immediately from \cite[Lemma~6.2]{PT18}.
\end{proof}
The study of Frobenius splittings often requires carefully manipulating the colon ideals described above. We first discuss a powerful technique at our disposal in this vein when the parameter element $x_2$ is chosen so that $x_2$ multiplies the canonical ideal $J$ into a principal ideal contained in $J$. Such elements can be found provided $R$ is $(G_1)$. Choosing such a parameter element conveniently allows us to switch freely between bracket powers of the canonical ideal with symbolic powers thereof, which we record in more detail in the following lemma.
\begin{lemma}
\label{Lemma colon ideal in G_1 rings}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite Cohen-Macaulay $(G_1)$ ring of prime characteristic $p>0$ and of Krull dimension $d$ at least $2$. Suppose that $J\subsetneq R$ is a choice of canonical ideal of $R$ and $x_1\in J$ is a non-zero-divisor. Then there exists a parameter element $x_2$ on $R/(x_1)$ and element $0\neq a\in J$ such that $x_2J\subseteq (a)$. Moreover, for any choice of parameters $y_3,\ldots, y_d$ on $R/(x_1,x_2)$ and $e,N\in \mathbb{N}$ we have that
\begin{align*}
(J^{[p^e]},x_2^{Np^e},y_3,\ldots, y_d):_Rx_2^{(N-1)p^e}&=(J^{(p^e)},x_2^{Np^e},y_3,\ldots, y_d):_Rx_2^{(N-1)p^e}\\
& = (J^{(p^e)},x_2^{2p^e},y_3,\ldots, y_d):_Rx_2^{p^e}\\
&= (J^{[p^e]},x_2^{2p^e},y_3,\ldots, y_d):_Rx_2^{p^e}.
\end{align*}
\end{lemma}
\begin{proof}
We refer the reader to \cite[Lemma~6.7(i)]{PT18} where the first named author and Tucker record a proof of this lemma under the additional assumption that $R$ is a normal domain. We observe that the normality assumption is not necessary and one only needs that $J$ is principal in codimension $1$ for the methodology of \cite{PT18} to be applicable.
\end{proof}
Another tactic that we employ in the study of the colon ideals appearing in Proposition~\ref{Proposition degeneracy ideals as colon ideals} allows us to remove the $x_1$ term. The following lemma is well-known to experts, but we record a detailed proof for the reader's convenience.
\begin{lemma}
\label{Lemma Removing the x1}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite Cohen-Macaulay ring of prime characteristic $p>0$ and of Krull dimension $d$. Suppose that $J\subsetneq R$ is a choice of canonical ideal of $R$ and $x_1\in J$ is a non-zero-divisor. Then for any choice of parameters $y_2,\ldots,y_d$ of $R/(x_1)$ we have that
\[
((x_1^{t-1}J)^{[p^e]},y_2,\ldots,y_d):_R x_1^{(t-1)p^e}= (J^{[p^e]},y_2,\ldots,y_d).
\]
\end{lemma}
\begin{proof}
Clearly the ideal on the right-hand side of the claimed equality is contained in the left-hand side. Suppose that $r\in ((x_1^{t-1}J)^{[p^e]},y_2,\ldots,y_d):_R x_1^{(t-1)p^e}$, i.e.
\[
rx_1^{(t-1)p^e}\in ((x_1^{t-1}J)^{[p^e]},y_2,\ldots,y_d).
\]
Equivalently, there exists an element $j\in J^{[p^e]}$ such that
\[
(r-j)x_1^{(t-1)p^e}\in (y_2,\ldots,y_d).
\]
The element $x_1^{(t-1)p^e}$ is a non-zero-divisor on $R/(y_2,\ldots,y_d)$ and therefore
\[
r-j\in (y_2,\ldots,y_d)
\]
and therefore $r\in (J^{[p^e]},y_2,\ldots,y_d)$ as claimed.
\end{proof}
\begin{comment}
\begin{lemma}
\label{Lemma manipulating colon ideals with punctured spec assumptions}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite Cohen-Macaulay $(G_1)$ ring of prime characteristic $p>0$ and of Krull dimension $d$ at least $2$. Suppose that $J\subsetneq R$ is a choice of canonical ideal of $R$ and $x_1\in J$ is a non-zero-divisor. Suppose that $x_d\in R$ is a parameter element satisfying the following:
\begin{enumerate}
\item $x_d$ is a parameter element of $R/(x_1)$;
\item $x^\ell_d J^{(p^e)}\subseteq I\subseteq J^{(p^e)}$ where $\ell\in \mathbb{N}$, $I\subseteq R$ is an ideal, and $I$ is a Cohen-Macaulay $R$-module.
\end{enumerate}
Then for any choice of parameters $y_2,\ldots,y_{d-1}$ of $R/(x_1,x_d)$ and natural number $N\geq \ell$ we have that
\[
(J^{(p^e)},y_2,\ldots,y_{d-1},x_d^{Np^e}):_Rx_d^{(N-1)p^e}= (J^{(p^e)},y_2,\ldots,y_{d-1},x_d^{(\ell+1)p^e}):_Rx_d^{\ell p^e}
\]
\end{lemma}
\begin{proof}
It is clearly the scenario that
\[
(J^{(p^e)},y_2,\ldots,y_{d-1},x_d^{Np^e}):_Rx_d^{(N-1)p^e}\supseteq (J^{(p^e)},y_2,\ldots,y_{d-1},x_d^{(\ell+1)p^e}):_Rx_d^{\ell p^e}
\]
Before establishing the non-trivial containment we first observe that the ideal $(I,y_2,\ldots,y_{d-1})$ contains the ideal $(x_dx_1^{p^e},y_2,\ldots,y_{d-1})$. Furthermore, since both $x_d$ and $x_1^{p^e}$ are regular on $R/(y_2,\ldots,y_{d-1})$ we have that $(x_dx_1^{p^e},y_2,\ldots,y_{d-1})$ is a height $d-1$ ideal and hence $(I,y_2,\ldots,y_{d-1})$ is of height $d-1$. Even further, the following ideal is $\mathfrak{m}$-primary,
\[
(I,y_2,\ldots,y_{d-1})+(J,x_d),
\]
and therefore there must exist an element $b\in J$ so that $x_d+b$ avoids all minimal primes of $(I,y_2,\ldots,y_{d-1})$, i.e. the ideal $(I,y_2,\ldots,y_{d-1},x_d+b)$ is an $\mathfrak{m}$-primary ideal.
Suppose that $r\in (J^{(p^e)},y_2,\ldots,y_{d-1},x_d^{Np^e}):_Rx_d^{(N-1)p^e}$. Then there exists an $s\in R$ so that
\[
(r-sx_d^{p^e})x_d^{(N-1)p^e}\in (J^{(p^e)},y_2,\ldots,y_{d-1}).
\]
Because $b\in J$ we have that $b^{(N-1)p^e}\in J^{(p^e)}$ and therefore \[
(r-sx_d^{p^e})(x_d+b)^{(N-1)p^e}\in (J^{(p^e)},y_2,\ldots,y_{d-1}).
\]
If we multiply by $x_d^{\ell p^e}$, and utilize the assumption $x_d^\ell J^{(p^e)}\subseteq I$, we then find that
\[
(rx_d^{\ell p^e}-sx_d^{(\ell +1)p^e})(x_d+b)^{(N-1)p^e}\in (I,y_2,\ldots,y_{d-1}).
\]
Recall that we are also assuming $I$ is a Cohen-Macaulay $R$-module. It is then the case that $R/I$ is Cohen-Macaulay of dimension $d-1$ and the sequence $y_2,\ldots,y_{d-1},x_d+b$ is a regular sequence on $R/I$. Therefore
\[
(I,y_2,\ldots,y_{d-1}):_R(x_d+b)^{(N-1)p^e}=(I,y_2,\ldots,y_{d-1})
\]
and hence
\[
rx_d^{\ell p^e}-sx_d^{(\ell +1)p^e}\in (I,y_2,\ldots,y_{d-1}).
\]
In particular, we find that
\[
r\in (I,y_2,\ldots,y_{d-1},x_d^{(\ell +1)p^e}):_Rx_d^{\ell p^e}\subseteq (J^{(p^e)},y_2,\ldots,y_{d-1},x_d^{(\ell +1)p^e}):_Rx_d^{\ell p^e}
\]
as claimed.
\end{proof}
\end{comment}
\section{Deformation and stability of \texorpdfstring{$F$}{F}-purity}
\label{Section F-purity deforms and is stable}
\subsection{\texorpdfstring{$F$}{F}-purity deforms in \texorpdfstring{$\mathbb{Q}$}{ℚ}-Gorenstein rings}
Let us discuss our strategy for resolving the deformation of $F$-purity problem. Suppose that $(R,\mathfrak{m},k)$ is a $\mathbb{Q}$-Gorenstein $F$-finite local ring of prime characteristic $p>0$ and $f\in \mathfrak{m}$ is a non-zero-divisor such that $R/(f)$ is $(G_1)$, $(S_2)$, and $F$-pure. Suppose that $K_X$ is a choice of canonical divisor on $X=\operatorname{Spec}(R)$, $np^eK_X\sim 0$, $n$ is relatively prime to $p$, and $R(np^eK_X)=R\cdot u$. We let $D$ be the divisor $nK_X$ and $S=\bigoplus_{i=0}^{\infty }R(-iD)t^{-i}/(u^{-1}t^{-(p^e-1)}-1)$ be the cyclic cover of $R$ corresponding to the divisor $D$. Then the ring $R$ is a direct summand of $S$ and therefore if $S$ is $F$-pure then $R$ is $F$-pure. The ring $S$ is $\mathbb{Q}$-Gorenstein of index relatively prime to $p$, see Lemma~\ref{Lemma cyclic cover changing index}. Thus, if we are able to show $S/fS$ is $F$-pure, then we have reduced solving the deformation of $F$-purity problem to the scenario that the $\mathbb{Q}$-Gorenstein index is relatively prime to the characteristic. As discussed in the introduction, deformation of $F$-purity in this scenario was well-understood by experts before being recorded in the literature by Schwede, \cite[Proposition~7.2]{Sch09a}. Our strategy to show that $S/fS$ is $F$-pure is to show that $S/fS$ is a cyclic cover of $R/(f)$ and then utilize \cite[Proposition~4.20]{Car22} to conclude that $S/fS$ is $F$-pure.\footnote{Carvajal-Rojas makes the running assumption in \cite{Car22} that all rings are essentially of finite type over an algebraically closed field. Furthermore, it is assumed in \cite[Proposition~4.20]{Car22} that $R$ is normal. However, the proof of \cite[Proposition~4.20]{Car22} works verbatim under the milder assumptions that $(R,\mathfrak{m},k)$ is any $F$-finite local ring which is $(G_1)$, $(S_2)$, and $D$ is a divisor which is Cartier in codimension $1$. 
We also refer the reader to \cite[Chapter~5]{MP21} for an independent treatment of these results.}
For the sake of convenience, we record a proof that $F$-purity deforms in $\mathbb{Q}$-Gorenstein rings whose index is relatively prime to the characteristic. We do this since the reader might wish to avoid the extra technicalities of \cite{Sch09a} where the deformation of $F$-purity along a Weil divisor $D$ of a pair $(R,\Delta)$ is considered.
\begin{proposition}
\label{Proposition F-purity deforms if index not divisible by p}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite ring of prime characteristic $p>0$. Suppose that $R$ is $\mathbb{Q}$-Gorenstein of index relatively prime to $p$ and $f\in \mathfrak{m}$ is a non-zero-divisor such that $R/(f)$ is $(G_1)$, $(S_2)$, and $F$-pure. Then $R$ is $F$-pure.
\operatorname{e_{HK}}nd{proposition}
\begin{proof}
Because the index of $K_X$ is relatively prime to $p$ there exists an $e$ so that $(1-p^e)K_X\sim 0$. To show that $R$ is $F$-pure we will show that every $R$-linear map $F^e_*R/(f)\xrightarrow{\varphi} R/(f)$ can be lifted to a map $\Phi:F^e_*R\to R$. That is there exists $\Phi: F^e_*R\to R$ so that the following diagram is commutative:
\begin{equation*}
\begin{tikzcd}
F^e_*R\arrow[r,dashed,"\Phi"]\arrow[d] & R\arrow[d] \\
F^e_*R/(f)\arrow[r,"\varphi"] & R/(f).
\end{tikzcd}
\end{equation*}
To do this we will consider a canonical map $\Psi:\operatorname{Hom}_R(F^e_*R,R)\to \operatorname{Hom}_{R/(f)}(F^e_*R/(f),R/(f))$, described below, and show that $\Psi$ is onto.
The map $\Psi$ factors as $\Psi_2\circ \Psi_1$ where
\[
\Psi_1:\operatorname{Hom}_R(F^e_*R,R)\to \operatorname{Hom}_R(F^e_*R,R/(f))\cong \operatorname{Hom}_{R/(f)}(F^e_*R/(f^{p^e}),R/(f))
\]
is the map obtained by applying $\operatorname{Hom}_R(F^e_*R,-)$ to $R\to R/(f)$ and
\[
\Psi_2:\operatorname{Hom}_{R/(f)}(F^e_*R/(f^{p^e}),R/(f))\to \operatorname{Hom}_{R/(f)}(F^e_*R/(f),R/(f))
\]
is the map obtained by applying $\operatorname{Hom}_R(-,R/(f))$ to $F^e_*R/(f)\xrightarrow{\cdot F^e_*f^{p^{e}-1}}F^e_*R/(f^{p^e})$.
First suppose that $R$ is Gorenstein and consider the short exact sequence
\[
0\to R\xrightarrow{\cdot f} R\to R/(f)\to 0.
\]
Then $\operatorname{Ext}^1_R(F^e_*R,R)=0$ since $F^e_*R$ is Cohen-Macaulay and $R\cong R(K_X)$ is the canonical module of $R$, \cite[Theorem~3.3.10]{BH93}. Therefore the natural map $\Psi_1:\operatorname{Hom}_R(F^e_*R,R)\to \operatorname{Hom}_{R/(f)}(F^e_*R/(f),R/(f))$ is indeed onto under the Gorenstein hypothesis. To see that $\Psi_2$ is onto under the Gorenstein hypothesis we begin by considering the short exact sequence
\[
0\to F^e_*R/(f)\xrightarrow {\cdot F^e_*f^{p^e-1}}F^e_*R/(f^{p^e})\to F^e_*R/(f^{p^e-1})\to 0.
\]
Then $\operatorname{Ext}^1_{R/(f)}(F^e_*R/(f^{p^e-1}), R/(f))=0$ since $R/(f)$ is Gorenstein and therefore the map $\Psi_2$ is onto and it follows that $\Psi=\Psi_2\circ \Psi_1$ is the composition of two onto maps, provided $R$ is Gorenstein.
Now we show $\operatorname{Hom}_R(F^e_*R,R)\to \operatorname{Hom}_{R/(f)}(F^e_*R/(f), R/(f))$ is onto under the milder hypothesis that $R$ is $\mathbb{Q}$-Gorenstein of index relatively prime to $p$. Recall that we choose $e$ large enough so that $(1-p^e)K_X\sim 0$. Using the fact that $R$ is $(G_1)$ and reflexifying, we find that the module $\operatorname{Hom}_R(F^e_*R,R)$ can be identified with $F^e_*R$ via
\begin{align*}
\operatorname{Hom}_R(F^e_*R,R)&\cong \operatorname{Hom}_R(F^e_*R\otimes R(K_X), R(K_X))\cong \operatorname{Hom}_R(F^e_*R(p^eK_X), R(K_X))\\ &\cong F^e_*\operatorname{Hom}_R(R(p^eK_X), R(K_X))\cong F^e_*R((1-p^e)K_X)\cong F^e_*R.
\end{align*}
The ring $R/(f)$ is $\mathbb{Q}$-Gorenstein of index which divides the index of $R$ by Proposition~\ref{Proposition Q-Gorenstein index under base change}. Therefore it is also the case that $\operatorname{Hom}_{R/(f)}(F^e_*R/(f),R/(f))\cong F^e_*R/(f)$. In particular, when viewed as an $F^e_*R/(f)$-module, the image of $\operatorname{Hom}_R(F^e_*R,R)\to \operatorname{Hom}_{R/(f)}(F^e_*R/(f),R/(f))$ is cyclic and therefore $(S_2)$. We can then check that the image agrees with the entire module at the codimension $1$ points of $\operatorname{Spec}(R/(f))$ by \cite[Theorem~1.12]{Har94}. We are assuming $R/(f)$ is $(G_1)$ and so the surjectivity of the desired map follows by the surjectivity of the map in the Gorenstein scenario.
\end{proof}
Suppose that $(R,\mathfrak{m},k)$ is a $(G_1)$ and $(S_2)$ local ring. If $D$ is a divisor and $f\in \mathfrak{m}$ a non-zero-divisor of $R$ such that $R/(f)$ is $(G_1)$ and $(S_2)$ and $R(D)/fR(D)$ is an $R/(f)$-module which is principal in codimension $1$, then we let $D|_{V(f)}$ denote a choice of divisor of $R/(f)$ so that $R/(f)(D|_{V(f)})$ is isomorphic to the reflexification $(R(D)/fR(D))^{\operatorname{un}}:=\operatorname{Hom}_{R/(f)}(\operatorname{Hom}_{R/(f)}(R(D)/fR(D), R/(f)),R/(f))$ of $R(D)/fR(D)$. The following lemma provides a criterion for when the cyclic cover of a torsion divisor base changes to a cyclic cover of $R/(f)$.
\begin{lemma}
\label{Lemma p torsion cyclic cover and deformation}
Let $(R,\mathfrak{m},k)$ be a $(G_1)$ and $(S_2)$ local $F$-finite ring of prime characteristic $p>0$. Suppose that $D$ is a torsion divisor of index $N$ and $R(ND)=R\cdot u$. Suppose that $f\in \mathfrak{m}$ is a non-zero-divisor such that $R/(f)$ is $(G_1)$ and $(S_2)$. For each $1\leq i\leq N-1$ suppose that $R(-iD)/fR(-iD)$ is an $(S_2)$ $R/(f)$-module which is principal in codimension $1$. If $S=\bigoplus_{i=0}^\infty R(-iD)t^{-i}/(u^{-1}t^{-N}-1)$ is the cyclic cover of $R$ associated with $D$ then $S/fS$ is the cyclic cover of $R/(f)$ with respect to $D|_{V(f)}$.
\end{lemma}
\begin{proof}
We must verify two things:
\begin{enumerate}
\item for each $1\leq i\leq N-1$ the ideal $R(-iD)/fR(-iD)$ is isomorphic to $R/(f)(-i D|_{V(f)})$;\label{lemma-cyclic-1}
\item the index of $D|_{V(f)}$ is $N$.\label{lemma-cyclic-2}
\end{enumerate}
Our assumptions allow us to check (\ref{lemma-cyclic-1}) at the height $1$ primes of $\operatorname{Spec}(R/(f))$. By assumption, each $R(-iD)/fR(-iD)$ is principal at codimension $1$ points of $\operatorname{Spec}(R/(f))$. Hence $R(-iD)$ is principal at height $2$ points of $\operatorname{Spec}(R)$ containing $f$ and so $R(-iD)$ agrees with the $i$th ordinary power ideal $R(-D)^i$ at such points of $\operatorname{Spec}(R)$. Therefore $R(-iD)/fR(-iD)$ is indeed isomorphic to $R/(f)(-i D|_{V(f)})$ as claimed.
For (\ref{lemma-cyclic-2}) we first notice that $R(-ND)\cong R$ and so $R(-ND)/fR(-ND)$ is an $(S_2)$ and principal in codimension $1$ module of $R/(f)$. Therefore by the above, $R(-ND)/fR(-ND)=R/(f)(-ND|_{V(f)})\cong R/(f)$ and the index of $D|_{V(f)}$ cannot exceed the index of $D$. However, if there were an $1\leq i\leq N-1$ so that $-iD|_{V(f)}\sim 0$ then $R/(f)(-iD|_{V(f)})\cong R(-iD)/fR(-iD)\cong R/(f)$. By Nakayama's Lemma the module $R(-iD)$ is a principal module of $R$ and the index of $D$ would be strictly less than $N$, a contradiction to our initial assumptions.
\end{proof}
If $(R,\mathfrak{m},k)$ is a local strongly $F$-regular ring and $D$ is a torsion divisor, then $R(D)$ is a direct summand of $F^e_*R$ for some $e\in \mathbb{N}$, see \cite[Proof of Proposition~2.6]{Mar22}. It will likely not be the case that every torsion divisorial ideal in an $F$-pure local ring is a direct summand of $F^e_*R$ for some $e\in \mathbb{N}$, but the following lemma points out that the divisorial ideals of index $p$ to a power are a direct summand of $F^e_*R$ for some $e\in \mathbb{N}$, cf. Lemma~\ref{Lemma peth symbolic power of canonical is a direct summand} below.
\begin{lemma}
\label{Lemma p torsion divisors are direct summands}
Let $(R,\mathfrak{m},k)$ be a local $(G_1)$ and $(S_2)$ $F$-finite and $F$-pure ring of prime characteristic $p>0$. If $D$ is a torsion divisor of index $p^e$ then $R(D)$ is a direct summand of $F^e_*R$.
\end{lemma}
\begin{proof}
The $e$th iterate of the Frobenius map $R\to F^e_*R$ splits as an $R$-linear map. If we tensor with $R(D)$ and reflexify we find that $R(D)$ is a direct summand of $F^e_*R(p^eD)\cong F^e_*R$.
\end{proof}
\begin{theorem}
\label{Theorem F-purity deforms}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite ring of prime characteristic $p>0$. Suppose that $R$ is $\mathbb{Q}$-Gorenstein and $f\in \mathfrak{m}$ is a non-zero-divisor such that $R/(f)$ is $(G_1)$, $(S_2)$, and $F$-pure. Then $R$ is $F$-pure.
\end{theorem}
\begin{proof} If $\dim(R)\leq 2$ then $R/(f)$ being $(G_1)$ implies that $R$ is Gorenstein. The property of being $F$-pure is equivalent to being $F$-injective in Gorenstein rings and $F$-injectivity is known to deform in Cohen-Macaulay rings, see \cite[Lemma~3.3 and Theorem~3.4]{Fed83}. Thus we may assume that $\dim(R)\geq 3$, and by induction we may assume that $R$ is $F$-pure at every non-maximal prime of $\operatorname{Spec}(R)$ containing the element $f$. Let $K_X$ be a choice of canonical divisor of $X=\operatorname{Spec}(R)$. Suppose that $K_X$ has index $np^e$ where $n$ is relatively prime to $p$. The divisor $D=nK_X$ has index $p^e$ and we suppose that $R(p^eD)=R\cdot u$. Let $S=\bigoplus_{i=0}^{\infty}R(-iD)t^{-i}/(u^{-1}t^{-p^e}-1)$ be the cyclic cover of $R$ corresponding to the divisor $D$. The work that follows will allow us to utilize Lemma~\ref{Lemma p torsion cyclic cover and deformation} and establish that $S/fS$ is a cyclic cover of $R/(f)$.
If $\mathfrak{p}\in \operatorname{Spec}(R)\setminus\{\mathfrak{m}\}$ and $f\in \mathfrak{p}$ then $R(-iD)_\mathfrak{p}$ is a direct summand of $F^e_*R_\mathfrak{p}$ by Lemma~\ref{Lemma p torsion divisors are direct summands}. Hence $\operatorname{depth}(R(-iD)_\mathfrak{p})\geq \min\{\operatorname{ht} (\mathfrak{p}),3\}$ for all primes $\mathfrak{p}\in\operatorname{Spec}(R)\setminus\{\mathfrak{m}\}$ containing $f$. Therefore the quotients $R(-iD)/fR(-iD)$ are $(S_2)$ $R/(f)$-modules on the punctured spectrum of $R/(f)$. In particular, if $C_i$ denotes the cokernel of
\[
R(-iD)/fR(-iD)\subseteq \left(R(-iD)/fR(-iD)\right)^{un}\cong R/(f)(-iD|_{V(f)})
\]
then $C_i$ is a finite length $R$-module and $H^i_\mathfrak{m}(R(-iD)/fR(-iD))\cong H^i_\mathfrak{m}\left((R/(f))(-iD|_{V(f)})\right)$ for all $i\geq 2$. We aim to show that $R(-iD)/fR(-iD)$ is an $(S_2)$ $R/(f)$-module for each $1\leq i\leq p^e-1$. The work above reduces this problem to showing $\operatorname{depth}(R(-iD)/fR(-iD))\geq 2$.
Consider the following commutative diagram whose horizontal arrows are $R$-linear and whose vertical arrows in the top square are $p^e$-linear:\footnote{By definition, a $p^e$-linear map of $R$-modules $N\to M$ is the same as an $R$-linear map $N\to F^e_*M$.}
\begin{equation*}
\begin{tikzcd} \displaystyle
\frac{R(-iD)}{fR(-iD)}\arrow[r] \arrow[d,"F^e"] & \displaystyle \frac{R}{(f)}\left(-iD|_{V(f)}\right)\arrow[d,"F^e"]\\ \displaystyle
\frac{R(-ip^eD)}{fR(-ip^eD)}\arrow[r]\arrow[d,"\cong"] & \displaystyle \frac{R}{(f)}\left(-ip^eD|_{V(f)}\right)\arrow[d,"\cong"]\\ \displaystyle
\frac{R}{(f)}\arrow[r] & \displaystyle \frac{R}{(f)}
\end{tikzcd}
\end{equation*}
Recall that $R(p^eD)\cong R$, so by Lemma~\ref{Lemma p torsion cyclic cover and deformation} the middle horizontal map of the above commutative diagram is an isomorphism. By Lemma~\ref{Lemma p torsion divisors are direct summands} the right most vertical map is a split map. It follows that for all $i\geq 2$ the $p^e$-linear maps of local cohomology modules
\[
H^i_\mathfrak{m}(R(-iD)/fR(-iD))\to H^i_\mathfrak{m}(R(-ip^eD)/fR(-ip^eD))
\]
is a split map of abelian groups. In particular, the above $p^e$-linear maps on local cohomology modules are injective.
Now we consider the following commutative diagram:
\begin{equation*}
\begin{tikzcd}
0 \arrow[r]& R(-iD) \arrow[r,"\cdot f"]& R(-iD) \arrow[r]\arrow[d,"F^e"] & \displaystyle \frac{R(-iD)}{fR(-iD)}\arrow[r]\arrow[d,"F^e"] & 0 \\
\, & \, & R(-ip^eD) \arrow[r]\arrow[d,"\cong"] & \displaystyle \frac{R(-ip^eD)}{fR(-ip^eD)}\arrow[d,"\cong"] & \, \\
\, & \, & R \arrow[r] & \displaystyle \frac{R}{(f)} & \,
\end{tikzcd}
\end{equation*}
The top row of the above diagram is a short exact sequence of $R$-modules and the composition of the vertical maps are $p^e$-linear. There is an induced commutative diagram of local cohomology modules whose top row is exact:
\begin{equation*}
\begin{tikzcd}
0 \arrow[r] & \displaystyle H^1_\mathfrak{m}\left(\frac{R(-iD)}{fR(-iD)}\right)\arrow[r] & H^2_\mathfrak{m}(R(-iD))\arrow[r,"\cdot f"] & H^2_\mathfrak{m}(R(-iD))\arrow[r,"\pi"]\arrow[d,"F^e"] & \displaystyle H^2_\mathfrak{m}\left(\frac{R(-iD)}{fR(-iD)}\right) \arrow[d,"F^e"] \\
\, & \, & \, & H^2_\mathfrak{m}(R) \arrow[r]\arrow[d,"\cong"] & \displaystyle H^2_\mathfrak{m}\left(\frac{R}{(f)}\right) \\
\, & \, & \, & 0
\end{tikzcd}
\end{equation*}
We first remark that $H^2_\mathfrak{m}(R)$ is indeed the $0$-module. We are assuming $R/(f)$ is $(S_2)$ and $f$ is a non-zero-divisor, hence $R$ has depth at least $3$ and $H^2_\mathfrak{m}(R)=0$. The rightmost $p^e$-linear map is injective. Therefore the map $\pi$ is the $0$-map and $H^2_\mathfrak{m}(R(-iD))=fH^2_\mathfrak{m}(R(-iD))$. Because $R$ is excellent and $R(-iD)$ is $(S_2)$, the completed module $\widehat{R(-iD)}$ is an $(S_2)$ $\widehat{R}$-module and therefore $H^2_\mathfrak{m}(R(-iD))$ has finite length.\footnote{If $M\otimes_R \widehat{R}$ is an $(S_2)$ $\widehat{R}$-module then the Matlis dual of $H^2_\mathfrak{m}(M)$ is $\widehat{\operatorname{Ext}^{d-1}_R(M,R(K_X))}$. If we localize at a non-maximal prime $\mathfrak{p}$ then the $R_\mathfrak{p}$-Matlis dual of $\widehat{\operatorname{Ext}^{d-1}_R(M,R(K_X))}_\mathfrak{p}$ is $H^{\operatorname{ht}(\mathfrak{p})-(d-1)}_\mathfrak{m}(M_\mathfrak{p})=0$ as $\operatorname{ht}(\mathfrak{p})-(d-1)\leq 1$ and $\widehat{M}_\mathfrak{p}$ has depth at least $2$. Therefore the finitely generated module $\operatorname{Ext}^{d-1}_R(M,R(K_X))$ is Artinian and so its Matlis dual, $H^2_\mathfrak{m}(M)$, is Noetherian.} By Nakayama's Lemma we have that $H^2_\mathfrak{m}(R(-iD))=0$, therefore $H^1_\mathfrak{m}(R(-iD)/fR(-iD))=0$, and the module $R(-iD)/fR(-iD)$ is an $(S_2)$ $R/(f)$-module for each $1\leq i\leq p^e-1$.
Suppose that $R(p^eD)=R\cdot u$ and let $S=\bigoplus_{i=0}^\infty R(-iD)t^{-i}/(u^{-1}t^{-p^e}-1)$ be the cyclic cover of $R$ corresponding to $D$. The ring $R$ is $F$-pure if and only if $S$ is $F$-pure by \cite[Proposition~4.20]{Car22}. Moreover, the ring $S$ is $\mathbb{Q}$-Gorenstein of index $n$, a number relatively prime to $p$, see Lemma~\ref{Lemma cyclic cover changing index}. Thus to verify $S$ is $F$-pure it suffices to check $S/fS$ is $F$-pure by Proposition~\ref{Proposition F-purity deforms if index not divisible by p}. We are assuming that $R/(f)$ is $F$-pure and the work above allows us to utilize Lemma~\ref{Lemma p torsion cyclic cover and deformation} and claim that $S/fS$ is the cyclic cover of $R/(f)$ corresponding to the divisor $D|_{V(f)}$. Therefore $S/fS$ is $F$-pure by a second application of \cite[Proposition~4.20]{Car22} and we conclude that $R$ is $F$-pure.
\end{proof}
\subsection{\texorpdfstring{$F$}{F}-purity is \texorpdfstring{$\mathfrak{m}$}{m}-adically stable in Cohen-Macaulay \texorpdfstring{$\mathbb{Q}$}{ℚ}-Gorenstein rings}
\label{Section F-pure}
Our strategy to show $F$-purity is $\mathfrak{m}$-adically stable in a Cohen-Macaulay $\mathbb{Q}$-Gorenstein ring $(R,\mathfrak{m},k)$ is different than our strategy for deformation. We study the Frobenius degeneracy ideals of $R/(f+\epsilon)$ and compare them to the Frobenius degeneracy ideals of the quotient $R/(f)$. In light of Proposition~\ref{Proposition degeneracy ideals as colon ideals} and Lemma~\ref{Lemma colon ideal in G_1 rings} it is advantageous for there to be some natural number $e$ so that if $J\subsetneq R$ is a canonical ideal of $R$ then $J^{(p^e)}$ is a Cohen-Macaulay $R$-module.
\begin{lemma}
\label{Lemma peth symbolic power of canonical is a direct summand}
Let $(R,\mathfrak{m},k)$ be a $\mathbb{Q}$-Gorenstein $F$-pure local ring of prime characteristic $p>0$. Then there exist integers $e_0,e\in \mathbb{N}$ with $e\geq 1$ so that $R(p^eK_X)$ is a direct summand of $F^{e_0}_*R(K_X)$. In particular, if $R$ is Cohen-Macaulay then there exists an $e\geq 1$ so that $R(p^eK_X)$ is Cohen-Macaulay.
\end{lemma}
\begin{proof}
Since $R$ is $F$-pure we have that for every integer $e\geq 1$ that $F^{e}_*R\cong R\oplus M$ has a free summand. Applying $\operatorname{Hom}_R(-,R(K_X))$ we find that $\operatorname{Hom}_R(F^{e}_*R,R(K_X))\cong F^{e}_*R(K_X)$ and $\operatorname{Hom}_R(R\oplus M, R(K_X))\cong R(K_X)\oplus \operatorname{Hom}_R(M,R(K_X))$. Therefore $\operatorname{Hom}_R(F^{e}_*R,R(K_X))$ has an $R(K_X)$-summand for every $e\geq 1$. Suppose that the $\mathbb{Q}$-Gorenstein index of $R$ is $np^{e_0}$ where $p$ does not divide $n$. We choose $e\geq 1$ so that $n$ divides $p^{e}-1$. If we apply $-\otimes_R R((p^{e}-1)K_X)$ and then reflexify the direct sum decomposition $F^{e_0}_*R(K_X)\cong R(K_X)\oplus M'$ we find that
\[
F^{e_0}_*R(K_X+p^{e_0}(p^{e}-1)K_X)\cong F^{e_0}_*R(K_X)\cong R(p^{e}K_X)\oplus M''.
\]
\end{proof}
\begin{theorem}
\label{Theorem F-purity deforms Q-Gorenstein index p^e_0}
Let $(R,\mathfrak{m},k)$ be a local $F$-finite $\mathbb{Q}$-Gorenstein Cohen-Macaulay ring of prime characteristic $p>0$. Suppose that $f\in \mathfrak{m}$ is a non-zero-divisor and $R/(f)$ is $(G_1)$ and $F$-pure. Then $R/(f+\epsilon)$ is $F$-pure for all $\epsilon\in\mathfrak{m}^{N\gg 0}$.
\end{theorem}
\begin{proof}
The ring $R$ is $F$-pure by Theorem~\ref{Theorem F-purity deforms}. By Lemma~\ref{Lemma peth symbolic power of canonical is a direct summand} there exists an $e\in\mathbb{N}$ so that $R(p^eK_X)$ is a Cohen-Macaulay $R$-module.
We aim to show that the $e$th Frobenius degeneracy ideals $I_e(R/(f+\epsilon))$ of $R/(f+\epsilon)$ are proper ideals for all $\epsilon\in\mathfrak{m}^{N\gg 0}$. If the dimension of $R$ is no more than $2$ then $R/(f)$ being $(G_1)$ implies that $R$ is Gorenstein. In the Gorenstein setting, $F$-purity and $F$-injectivity are equivalent, and the latter is known to deform and be $\mathfrak{m}$-adically stable in Cohen-Macaulay rings, see \cite[Lemma~3.3 and Theorem~3.4]{Fed83} and \cite[Corollary 4.9]{DSS20}. Thus we may assume $R$ is of dimension $d+1\geq 3$.
We may select a canonical ideal $J\subsetneq R$ so that $J/fJ\cong (J,f)/(f)$ is the canonical ideal of $R/(f)$, see Lemma~\ref{lemma base change the canonical module}. By Proposition~\ref{Proposition Gorenstein in codimension 1 is stable} we know that the rings $R$ and $R/(f+\epsilon)$ are $(G_1)$ for all $\epsilon\in\mathfrak{m}^{N\gg 0}$. Furthermore, the rings $R/(f+\epsilon)$ are $\mathbb{Q}$-Gorenstein of index dividing the $\mathbb{Q}$-Gorenstein index of $R$ for all $\epsilon\in\mathfrak{m}^{N\gg0 }$, see Proposition~\ref{Proposition Q-Gorenstein index under base change}. Also note that for all $\epsilon\in \mathfrak{m}^{N\gg0}$ we have that $f+\epsilon$ avoids all components of $J$ by the Krull intersection theorem (see also the proof of Proposition \ref{Proposition Gorenstein in codimension 1 is stable}). Therefore $J^{(n)}/(f+\epsilon)J^{(n)}\cong (J^{(n)}, f+\epsilon)/(f+\epsilon)$.
Since $J^{(p^e)}$ is Cohen-Macaulay, it follows that for all $\epsilon\in \mathfrak{m}^{N\gg 0}$ that $(J^{(p^e)}, f+\epsilon)/(f+\epsilon)$ is a Cohen-Macaulay $R/(f+\epsilon)$-module. By Lemma~\ref{Lemma base change of divisorial ideal} $(J^{(p^e)}, f+\epsilon)/(f+\epsilon)$ is an unmixed ideal of height $1$ and
\[
\frac{(J^{(p^e)}, f+\epsilon)}{(f+\epsilon)}=\left(\frac{(J,f+\epsilon)}{(f+\epsilon)}\right)^{(p^e)}.
\]
To ease notation we write $R_\epsilon$ to denote the quotient $R/(f+\epsilon)$. The above tells us that $J^{(p^e)}R_\epsilon=(JR_\epsilon)^{(p^e)}$ for all $\epsilon\in\mathfrak{m}^{N\gg 0}$. Choose a non-zero-divisor $x_1\in J$ on $R$ and $R/(f)$ and let $W$ denote the multiplicatively closed set given by the complement of the union of the minimal primes of the unmixed ideal $(x_1,f)$. Since $R/(f)$ is $(G_1)$ the localized ideal $\left((J,f)/(f)\right)R_W$ is principal. Hence there exists a parameter element $x_2$ on $R/(x_1,f)$ and $a\in J$ so that $x_2J\subseteq (a)\subseteq J$.
We extend $x_2$ to a full parameter sequence $x_2,x_3,\ldots ,x_d$ on $R/(x_1,f)$ and choose a socle generator $u\in R$ on $R/(J,x_2,\ldots,x_d,f)$. Observe that for all $t\gg 0$ and all $\epsilon\in\mathfrak{m}^{N\gg 0}$,
\begin{align}
I_e(R_\epsilon)=(x_1^{t-1}JR_\epsilon,x_2^t,\ldots,x_d^t)^{[p^e]}:_{R_\epsilon}(x_1\cdots x_d)^{(t-1)p^e}u^{p^e}\label{thm-stability-1}\\
=(JR_\epsilon,x_2^t,\ldots,x_d^t)^{[p^e]}:_{R_\epsilon}(x_2\cdots x_d)^{(t-1)p^e}u^{p^e}\label{thm-stability-2}\\
=((JR_\epsilon)^{(p^e)},x_2^{2p^e},x_3^{tp^e},\ldots,x_d^{tp^e}):_{R_\epsilon}(x_3\cdots x_d)^{(t-1)p^e}(x_2u)^{p^e}\label{thm-stability-3}\\
=((JR_\epsilon)^{(p^e)},x_2^{p^e},x_3^{p^e},\ldots,x_d^{p^e}):_{R_\epsilon}u^{p^e}\label{thm-stability-4}\\
=(JR_\epsilon,x^2_2,x_3,\ldots,x_d)^{[p^e]}:_{R_\epsilon}(x_2u)^{p^e}\label{thm-stability-5}
\end{align}
where (\ref{thm-stability-1}) follows from Proposition~\ref{Proposition degeneracy ideals as colon ideals}, (\ref{thm-stability-2}) follows from Lemma~\ref{Lemma Removing the x1}, and both (\ref{thm-stability-3}) and (\ref{thm-stability-5}) follow from Lemma~\ref{Lemma colon ideal in G_1 rings}. To see (\ref{thm-stability-4}), notice that since $J^{(p^e)}$ is a Cohen-Macaulay $R$-module, and consequently $(JR_\epsilon)^{(p^e)}$ are Cohen-Macaulay $R_\epsilon$-modules, the quotient $R/J^{(p^e)}$ is Cohen-Macaulay of dimension $d$ and the quotients $R_\epsilon/(JR_\epsilon)^{(p^e)}$ are Cohen-Macaulay of dimension $d-1$.
Observe now that for all $\epsilon\in\mathfrak{m}^{N\gg 0}$
\[
I_e(R_\epsilon)=\frac{((J,x^2_2,x_3,\ldots,x_d)^{[p^e]},f+\epsilon):_{R}(x_2u)^{p^e}}{(f+\epsilon)}.
\]
Thus $R/(f)$ is $F$-pure if and only if the following colon ideal is a proper ideal of $R$:
\[
((J,x^2_2,x_3,\ldots,x_d)^{[p^e]},f):_{R}(x_2u)^{p^e}.
\]
Observe that if $\epsilon\in (J,x^2_2,x_3,\ldots,x_d,f)$ then
\[
((J,x^2_2,x_3,\ldots,x_d)^{[p^e]},f+\epsilon):_{R}(x_2u)^{p^e}=((J,x^2_2,x_3,\ldots,x_d)^{[p^e]},f):_{R}(x_2u)^{p^e}.
\]
Therefore $I_e(R_\epsilon)$ is a proper ideal of $R_\epsilon$ for all $\epsilon\in\mathfrak{m}^{N\gg 0}$, i.e. $R_\epsilon$ is $F$-pure and the property of being $F$-pure is indeed $\mathfrak{m}$-adically stable.
\end{proof}
\begin{table}[h!]
\tiny
\centering
\caption{Summary of deformation and $\mathfrak{m}$-adic stability of $F$-singularities for $F$-finite local rings $(R,\mathfrak{m})$}\label{summary-table}
\begin{threeparttable}
\begin{tabular}{c|cc }
$\mathcal{P}$ & $\mathcal{P}$ deforms? & $\mathcal{P}$ is $\mathfrak{m}$-adically stable? \\
\hline
$F$-injective& \textbf{open}; Yes\tnotex{t:fi}\hspace{.1cm} \cite[Thm. 3.4(1)]{Fed83}\tnotex{t:cm} & Yes \cite[Cor. 4.9]{DSS20}\tnotex{t:cm}\\
$F$-pure & No in general \cite{Fed83}; Yes (Theorem \ref{Theorem F-purity deforms})\tnotex{t:qg}&No in general \cite[Thm. 5.3]{DSS20}; Yes (Theorem \ref{Theorem F-purity deforms Q-Gorenstein index p^e_0})\tnotex{t:qg}\hspace{.45cm}\textsuperscript{,}\tnotex{t:cm}\\
$F$-rational & Yes \cite[Thm. 4.2(h)]{HH94}& Yes \cite[Cor. 3.9]{DSS20}\\
Strongly $F$-regular&No in general \cite[Thm. 1.1]{Sin99}; Yes \cite[3.3.2]{Mac96}\tnotex{t:a}\hspace{.1cm}+\cite{AKM98}\tnotex{t:qg}\hspace{.5cm} & No in general \cite[5.3]{DSS20}; Yes \cite[Thm. 5.11]{DSS20}\tnotex{t:qgps}\hspace{.6cm}\textsuperscript{,}\\
$F$-nilpotent &No \cite[Ex. 2.8(2)]{ST17}&No \cite[Ex. 2.8(2)]{ST17}+\cite[Thm. 2.4]{DSS20}+\cite[Thm. 5.5]{KMPS19}\\
$F$-anti-nilpotent&Yes \cite[Thm. 4.2(i)]{MQ18}&\textbf{open}\\
$F$-full &Yes \cite[Thm. 4.2(ii)]{MQ18}& \textbf{open}\\
\hline
\end{tabular}
\begin{tablenotes}\tiny
\item[1] \label{t:fi} See also \cite[Theorem 3.7 \& Corollary 4.7]{HMS14}, \cite[Theorem 5.11]{MQ18} and \cite{DSM22}\\
\item[2] \label{t:a} See also \cite[2.2.4]{Abe02}\\
\item[$\mathbb{Q}$-Gor] \label{t:qg} $R$ is $\mathbb{Q}$-Gorenstein
\item[$\mathbb{Q}$-Gor$^\circ$] \label{t:qgps} $R$ is $\mathbb{Q}$-Gorenstein on the punctured spectrum
\item[C-M] \label{t:cm}$R$ is Cohen-Macaulay
\item[Gor] \label{t:g} $R$ is Gorenstein
\end{tablenotes}
\end{threeparttable}
\end{table}
\end{document}
\begin{document}
\title{A multiplicatively symmetrized version of the Chung-Diaconis-Graham
random process}
\author{Martin Hildebrand
\footnote{Department of Mathematics and Statistics,
University at Albany, State University of New York, Albany, NY 12222.
{\tt [email protected]}}}
\maketitle
\begin{abstract}
This paper considers random processes of the form $X_{n+1}=a_nX_n+b_n\pmod p$
where $p$ is
odd, $X_0=0$, $(a_0,b_0), (a_1,b_1), (a_2,b_2),...$ are i.i.d., and $a_n$
and $b_n$ are independent with $P(a_n=2)=P(a_n=(p+1)/2)=1/2$ and
$P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$. This can be viewed
as a multiplicatively symmetrized version of a random process of
Chung, Diaconis, and Graham. This paper shows that order $(\log p)^2$
steps suffice
for $X_n$ to be close to uniformly distributed on the integers mod $p$ for
all odd $p$ while order $(\log p)^2$ steps are necessary for $X_n$ to
be close to uniformly distributed on the integers mod $p$.
\end{abstract}
\section{Introduction}
Chung, Diaconis, and Graham~\cite{cdg} considered random processes of
the form $X_{n+1}=2X_n+b_n\pmod p$ where $p$ is odd, $X_0=0$, and
$b_0, b_1, b_2,...$ are i.i.d. with $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$.
They showed that order $(\log p)\log(\log p)$ steps suffice to make
$X_n$ close to uniformly distributed on the integers mod $p$.
Diaconis~\cite{diaconis} asked about random processes of the form
$X_{n+1}=a_nX_n+b_n \pmod p$ where $p$ is odd, $X_0=0$, and $(a_0,b_0),
(a_1,b_1), (a_2,b_2),...$ are i.i.d. with $a_n$ and $b_n$ being
independent, $P(a_n=2)=P(a_n=(p+1)/2)=1/2$ and $P(b_n=1)=P(b_n=-1)=1/2$.
In his Ph.D. thesis, the author~\cite{mvhphd} showed that order
$(\log p)^2$ steps suffice to make $X_n$ close to uniformly distributed
on the integers mod $p$ and that order $(\log p)^2$ steps
are necessary to
make $X_n$ close to uniformly distributed on the integers mod $p$.
The techniques used there can be readily adapted if the distribution
is changed so that $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$; in this case,
these techniques show that order $((\log p)(\log(\log p)))^2$ steps
suffice to make $X_n$ close to uniformly distributed on the integers mod
$p$ for all odd integers $p$ and order $(\log p)^2$ steps suffice for
almost all odd integers $p$
while order $(\log p)^2$ steps are necessary to make $X_n$ close
to uniformly distributed on the integers mod $p$.
This paper shows that this result can be improved to show that
order $(\log p)^2$ steps suffice to make $X_n$ close to uniformly
distributed on the integers mod $p$ for all odd integers $p$.
\section{Some Background, Notation, and Main Result}
We let the integers mod $p$ be denoted by ${\mathbb Z}/p{\mathbb Z}$. We may denote
elements of this group by $0, 1,..., p-1$
instead of $0+p{\mathbb Z}, 1+p{\mathbb Z},...,(p-1)+p{\mathbb Z}$.
A probability $P$ on the integers mod $p$ satisfies $P(s)\ge 0$ for
$s\in{\mathbb Z}/p{\mathbb Z}$ and $\sum_{s\in{\mathbb Z}/p{\mathbb Z}}P(s)=1$.
We use the variation distance to measure how far a probability
$P$ on ${\mathbb Z}/p{\mathbb Z}$ is from the uniform distribution on
${\mathbb Z}/p{\mathbb Z}$. This distance is given by
\[
\|P-U\|=\frac{1}{2}\sum_{s\in{\mathbb Z}/p{\mathbb Z}}\left|P(s)-\frac{1}{p}\right|
=\max_{A\subset{\mathbb Z}/p{\mathbb Z}}|P(A)-U(A)|
\]
where $P(A)=\sum_{s\in A}P(s)$ and $U(A)=\sum_{s\in A}1/p=|A|/p$.
Note that $\|P-U\|\le 1$ for all probabilities $P$ on ${\mathbb Z}/p{\mathbb Z}$.
\begin{proposition}
\label{probmixture}
If $P=p_1P_1+p_2P_2+...+p_mP_m$ where $p_1, p_2,..., p_m$ are positive
real numbers summing to $1$, then
\[
\|P-U\|\le\sum_{i=1}^mp_i\|P_i-U\|.
\]
\end{proposition}
This proposition can be readily shown using the triangle inequality.
If $P$ is a probability on ${\mathbb Z}/p{\mathbb Z}$, define the Fourier transform
\[\hat P(k)=\sum_{j=0}^{p-1}P(j)e^{2\pi ijk/p}\]
for $k=0, 1,..., p-1$.
The Upper Bound Lemma of Diaconis and Shahshahani (see, for example,
Diaconis~\cite{diaconis}, p. 24) implies
\[
\|P-U\|^2\le\frac{1}{4}\sum_{k=1}^{p-1}|\hat P(k)|^2.
\]
The main theorem is
\begin{theorem}
\label{mainthm}
Suppose $X_0=0$ and $p$ is an odd integer greater than $1$.
Let $X_{n+1}=a_nX_n+b_n \pmod p$ where $(a_0,b_0), (a_1,b_1), (a_2,b_2),...$
are i.i.d. such that $a_n$ and $b_n$ are independent,
$P(a_n=2)=P(a_n=(p+1)/2)=1/2$, and $P(b_n=1)=P(b_n=0)=P(b_n=-1)=1/3$.
Let $P_n(j)=P(X_n=j)$ for $j\in{\mathbb Z}/p{\mathbb Z}$. Let $\epsilon>0$ be given.
For some $c>0$, if $n>c(\log p)^2$, then $\|P_n-U\|<\epsilon$.
\end{theorem}
\section{Beginnings of the argument}
Observe that
\begin{eqnarray*}
X_0&=&0\\
X_1&=&b_0\\
X_2&=&a_1b_0+b_1\\
X_3&=&a_2a_1b_0+a_2b_1+b_2\\
&&...\\
X_n&=&a_{n-1}...a_2a_1b_0+a_{n-1}...a_2b_1+...+a_{n-1}b_{n-2}+b_{n-1}
\end{eqnarray*}
We shall focus on the distribution of $X_n$ given values for
$a_1, a_2,..., a_{n-1}$. In the case where $a_{n-1}=2$, $a_{n-2}=(p+1)/2$,
$a_{n-3}=2$, $a_{n-4}=(p+1)/2$, etc., then
\[
X_n=2(b_{n-2}+b_{n-4}+...)+(b_{n-1}+b_{n-3}+...) \pmod p .
\]
If $n=c(\log p)^2$, then $X_n$ lies between $-(3/2)c(\log p)^2$ and
$(3/2)c(\log p)^2$ and, for large enough $p$, will not be close to
uniformly distributed on the integers mod $p$. In the case where
$a_{n-1}=2$, $a_{n-2}=2$, $a_{n-3}=2$, ..., $a_0=2$, then results of
Chung, Diaconis, and Graham~\cite{cdg} show that order $(\log p)\log(\log p)$
steps suffice to make $X_n$ close to uniformly distributed on the integers
mod $p$, and so order $(\log p)^2$ steps suffice as well.
Let $P_n(a_{n-1}, a_{n-2},...,a_1)(s)=P(a_{n-1}...a_1b_0+a_{n-1}...a_2b_1+...+a_{n-1}b_{n-2}+b_{n-1}=s \pmod p )$ where $b_0, b_1,...,b_{n-1}$ are i.i.d.
uniform on $\{1,0,-1\}$.
We shall show
\begin{theorem}
\label{indiviudalcases}
Let $\epsilon>0$ be given.
There exists a constant $c>0$ such that if $n>c(\log p)^2$, then
\[
\|P_n(a_{n-1}, a_{n-2},..., a_1)-U\|<\epsilon/2
\]
except for a set $A$ of values $(a_{n-1}, a_{n-2},..., a_1)$ in
$\{2,(p+1)/2\}^{n-1}$ where $|A|<(\epsilon/2)2^{n-1}$. ($\{2,(p+1)/2\}^{n-1}$
is the set of $(n-1)$-tuples with entries in $\{2,(p+1)/2\}$.)
\end{theorem}
By Proposition~\ref{probmixture}, Theorem~\ref{indiviudalcases}
implies Theorem~\ref{mainthm}.
\section{Random Walk on the Exponent}
\label{rwexp}
Suppose $a_0, a_1, a_2,...$ are i.i.d. with $P(a_1=2)=P(a_1=(p+1)/2)=1/2$. In
the integers mod $p$, one can view $(p+1)/2$ as $2^{-1}$, the multiplicative
inverse of $2$. So $1, a_{n-1}, a_{n-1}a_{n-2}, a_{n-1}a_{n-2}a_{n-3},...$ can
be viewed as $2^{w_0}, 2^{w_1}, 2^{w_2}, 2^{w_3},...$ where $w_0=0$ and
$w_{j+1}-w_j$ are i.i.d. for $j=0, 1, 2,...$ with
$P(w_{j+1}-w_j=1)=P(w_{j+1}-w_j=-1)=1/2$.
Let $M_j=\max\{w_0, w_1,..., w_j\}$ and $m_j=\min\{w_0, w_1,..., w_j\}$.
By Theorem 1 of Section III.7 of Feller~\cite{feller},
$P(M_j=\ell)=p_{j,\ell}+p_{j,\ell+1}$ where
$p_{j,\ell}={j \choose (j+\ell)/2}2^{-j}$ where the binomial
coefficient is $0$ unless $(j+\ell)/2$ is an integer between $0$ and $j$,
inclusive. Thus by Central Limit Theorem considerations, for some constant
$c_1>0$, if $\epsilon_1>0$ and $j=\lceil c_1(\log p)^2\rceil$, then
$P(M_j\le 0.5\log_2p)<\epsilon_1/4$ for sufficiently large $p$, and,
by symmetry,
$P(-m_j\le 0.5\log_2p)<\epsilon_1/4$ for sufficiently large $p$.
Also by Central Limit Theorem considerations, for some constant $c_2>0$,
$P(M_j\ge (c_2/2)\log_2p)<\epsilon_1/4$ and
$P(-m_j\ge (c_2/2)\log_2p)<\epsilon_1/4$ for sufficiently large $p$.
So if
$j=\lceil c_1(\log p)^2\rceil$, $P(\log_2p<M_j-m_j<c_2\log_2p)>1-\epsilon_1$
for sufficiently large $p$. If this event does not hold, then
$(a_{n-1},a_{n-2},...,a_1)$ might be in the set $A$.
Exercise III.10 of Feller~\cite{feller} gives
\[
z_{r,2n}=\frac{1}{2^{2n-r}}{2n-r\choose n}
\]
where $z_{r,2n}$ is the probability of exactly $r$ returns to the origin
in the first $2n$ steps of the symmetric nearest neighbor random walk on
the integers. Observe
\[
z_{0,2n}=\frac{1}{2^{2n}}{2n\choose n}\sim \frac{1}{\sqrt{\pi n}},
\]
which is approximately a multiple of $1/\log p$ if $n$ is approximately
a multiple of $(\log p)^2$.
Observe that if $r\ge 0$, then
\begin{eqnarray*}
\frac{z_{r+1,2n}}{z_{r,2n}}&=&\frac{1/2^{2n-r-1}}{1/2^{2n-r}}
\frac{{2n-r-1\choose n}}{{2n-r\choose n}}\\
&=&2\frac{n-r}{2n-r}\\
&\le &1.
\end{eqnarray*}
Thus $z_{r+1,2n}\le z_{r,2n}$.
For $k\in[m_j,M_j]$ with $j=\lceil c_1(\log p)^2\rceil$,
let $R(k)$ be the number of $i$
such that $w_i=k$ where $0<i-\min_i\{w_i=k\}\le(\log p)^2$. Observe that
$P(R(k)\le f(p))\le c_3(f(p)+1)/\log p$ for some positive constant
$c_3$.
For some positive constant $c_4$,
observe that
$E(|\{k:R(k)\le f(p), m_j\le k\le M_j\}|\ | \log_2p<M_j-m_j<c_2(\log_2p))
\le c_4(f(p)+1)$. Thus by Markov's inequality,
$P(|\{k:R(k)\le f(p), m_j\le k\le M_j\}|\ge c_5(f(p)+1)| \log_2p<M_j-m_j<
c_2(\log_2p))\le c_4/c_5$.
\section{Fourier transform argument}
Let $\tilde P_n(a_{n-1}, a_{n-2},..., a_1)(s)=P(2^n(a_{n-1}a_{n-2}...a_1b_0+
a_{n-1}a_{n-2}...a_2b_1+...+a_{n-1}b_{n-2}+b_{n-1})=s \pmod p )$
where $b_0, b_1,..., b_{n-1}$ are i.i.d. uniform on $\{1, 0, -1\}$.
Observe $\|\tilde P_n(a_{n-1}, a_{n-2},..., a_1)-U\|=
\|P_n(a_{n-1}, a_{n-2},..., a_1)-U\|$ since $p$ is odd. Note that all
powers of $2$ in $2^na_{n-1}a_{n-2}...a_1$, $2^na_{n-1}a_{n-2}...a_2$, ...,
$2^na_{n-1}$, $2^n$ are nonnegative.
The Upper Bound Lemma implies
\begin{eqnarray*}
\|\tilde P_n(a_{n-1}, a_{n-2},..., a_1)-U\|&\le&\frac{1}{4}\sum_{m=1}^{p-1}
\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)
\right)^{2R(\ell-n)}\\
&\times &\prod_{r=j+1}^{n-1}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{n+w_r}m/p)
\right)^2.
\end{eqnarray*}
Note that the first product term is for times up to $j$ and the second product
term is for times after $j$. Recall $j=\lceil c_1(\log p)^2\rceil$.
Note that
\[
\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)}
\le
\cases{9^{-R(\ell-n)}&if $1/4\le \{2^{\ell}m/p\}<3/4$\cr
1&otherwise}
\]
and
\[
\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{n+w_r}m/p)\right)^2
\le
\cases{1/9&if $1/4\le \{2^{n+w_r}m/p\}<3/4$\cr
1&otherwise}
\]
where $\{x\}$ is the fractional part of $x$.
Assume
$|\{k:R(k)\le {c_6} \log(\log p), m_j\le k\le M_j\}|<c_5(\log(\log p)+1)$
where $c_5$ is such that $c_4c_6/c_5<\epsilon_2$ where $\epsilon_2>0$
is given and $j=\lceil c_1(\log p)^2\rceil$
and
$|\{k:R(k)<(\log(\log p))^{2.1}, m_j\le k\le M_j\}|<(\log(\log p))^{2.5}$.
Also assume $\log_2p<M_j-m_j<c_2(\log_2p)$.
If these assumptions don't hold, then
$(a_{n-1},a_{n-2},...,a_1)$ might be in the set $A$.
We shall consider various cases for $m$.
{\underbar {Case 1}}: $m$ is such that for some $\ell\in[n+m_j,n+M_j]$,
$1/4\le\{2^{\ell}m/p\}<3/4$ and $R(\ell-n)>(\log(\log p))^{2.1}$.
Let $S_1$ be the set of such $m$ in $1, 2,..., p-1$. Then, by
arguments similar to those in Chung, Diaconis, and Graham~\cite{cdg}
\[
\sum_{m\in S_1}\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}
\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)}<\epsilon.
\]
Details appear in Section~\ref{fouriervalues}.
{\underbar {Case 2}}: $m\notin S_1$ and for $b$ values
of $\ell\in[n+m_j,n+M_j]$, $1/4\le\{2^{\ell}m/p\}<3/4$ and
${c_6} \log(\log p)<R(\ell-n)\le(\log(\log p))^{2.1}.$ Let $S_{2,b}$ be the
set of such $m$ in $1, 2,..., p-1$.
Let's consider the binary expansion of $m/p$; in particular, consider the
positions $n+m_j+1$ through $n+M_j+1$. If $1/4\le\{2^{\ell}m/p\}<3/4$, then
there is an ``alternation'' between positions $(\ell+1)$ and $(\ell+2)$,
i.e. there is a $1$ followed by a $0$ or a $0$ followed by a $1$. We say
an alternation follows position $\ell$ if there is an alternation between
positions $\ell+1$ and $\ell+2$. Alternations
will start following $b$ of no more than $(\log(\log p))^{2.5}$ positions $\ell$
where ${c_6} \log(\log p)<R(\ell-n)<(\log(\log p))^{2.1}$, and alternations
may or may not start following each of no more than
$c_5(\log(\log p)+1)$ positions
$\ell$ with $R(\ell-n)\le {c_6} \log(\log p)$.
No other alternations may occur. Place $n+m_j+1$ may be either $0$ or $1$.
Places $n+m_j+1$ through $n+M_j+1$ of the
binary expansion of $m/p$ are unique for each $m$ in $\{1,2,...,p-1\}$
since $M_j-m_j>\log_2p$ by an observation similar to
the blocks in the
argument of Chung, Diaconis, and Graham~\cite{cdg} being unique. So
\begin{eqnarray*}
|S_{2,b}|&\le &2\cdot 2^{c_5(\log(\log p)+1)}{\lfloor(\log(\log p))^{2.5}\rfloor
\choose b}\\
&\le &2\cdot 2^{c_5(\log(\log p)+1)}(\log(\log p))^{2.5b}
\end{eqnarray*}
If $m\in S_{2,b}$, then
\[
\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)
\right)^{2R(\ell-n)}
\le
(1/9)^{b{c_6} \log(\log p)}.
\]
So
\begin{eqnarray*}
&&\sum_{m\in S_{2,b}}\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+
\frac{2}{3}\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)}
\\
&\le &
2\cdot 2^{c_5(\log (\log p)+1)}((\log(\log p))^{2.5}
(1/9)^{{c_6} \log(\log p)})^b
\end{eqnarray*}
Note that for large enough $p$, $(\log(\log p))^{2.5}(1/9)^{{c_6}
\log(\log p)}<1/2$.
Also observe for $b\ge b_{\min}$ where $b_{\min}$ is a value depending on
$c_5$ and ${c_6}$,
\[
2^{c_5(\log (\log p)+1)}((\log(\log p))^{2.5}(1/9)^{{c_6}
\log(\log p)})^b\rightarrow 0
\]
as $p\rightarrow\infty$.
Thus
\[
\sum_{b=b_{\min}}^{\infty}
2^{c_5(\log(\log p)+1)}((\log(\log p))^{2.5}
(1/9)^{{c_6} \log(\log p)})^b\rightarrow 0
\]
and
\[
\sum_{b=b_{\min}}^{\infty}\sum_{m\in S_{2,b}}\prod_{\ell=n+m_j}^{n+M_j}
\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)\right)^{2R(\ell-n)}
\rightarrow 0.
\]
So all we need to consider are $m\in S_{2,b}$ where $b<b_{\min}$.
To consider such $m$, we shall look at further steps in the Fourier transform.
We shall use the following lemma.
\begin{lemma}
Let $\epsilon^{\prime}>0$ be given. Let $d$ be a positive number. For some
constant $c_7>0$, except with probability no more than $\epsilon^{\prime}$,
\[
\max_{\ell=d+1}^{d+\lfloor c_7(\log p)^2\rfloor}w_{\ell}-\min_{\ell=d+1}^{d+\lfloor c_7(\log p)^2\rfloor}
w_{\ell}>2\log_2p.
\]
If this inequality holds, then, given $m\in\{1, 2,..., p-1\}$, $1/4\le
\{2^{\ell}m/p\}<3/4$ for some $\ell\in\{d+1, d+2,..., d+\lfloor c_7(\log p)^2
\rfloor\}$. With probability at least
$1-(\log(\log p))^{2.5}/\log p$,
\[
|\{h:\ell+1\le h\le\ell+(\log p)^2, w_{\ell}=w_h\}|>(\log(\log p))^{2.1}.
\]
\end{lemma}
{\it Proof:} Similar to reasoning in section~\ref{rwexp}, the
existence of $c_7$ follows by Central Limit Theorem
considerations and Theorem 1 of Section III.7 of Feller~\cite{feller}.
The existence of such $\ell$ follows since for each positive integer
$k$, at least one of
$\{2^km/p\}$, $\{2^{k+1}m/p\}$,...,$\{2^{k+\lfloor 2\log_2p\rfloor-1}m/p\}$
lies in $[1/4,3/4)$. The result
on $|\{h:\ell+1\le h\le\ell+(\log p)^2, w_{\ell}=w_h\}|$
follows similarly to the earlier argument
that $P(R(k)\le f(p))\le c_3(f(p)+1)/\log p$.
$\Box$
Suppose $n_{before}$ is the number of $m$ being considered, i.e.\ those that need further
Fourier transform terms before going an additional $\lfloor c_7(\log p)^2\rfloor
+\lfloor(\log p)^2\rfloor$ terms. Afterwards, we will need to continue to
consider only $m$ such that $\ell$ in the lemma exists and
$|\{h:\ell+1\le h\le \ell+(\log p)^2, w_{\ell}=w_h\}|<(\log(\log p))^{2.1}$;
otherwise we have sufficient additional terms in the Fourier transform; see
Section~\ref{fouriervalues}.
Except for at most $(\epsilon^{\prime}+o(1))2^{n-1}$ $(n-1)$-tuples in $A$,
$n_{after}\le n_{before}(\log(\log p))^{2.5}/\log p$ where $n_{after}$ is the
number of $m$ still being considered after going the additional
$\lfloor c_7(\log p)^2\rfloor+\lfloor(\log p)^2\rfloor$ steps.
Repeating this a fixed number $f$
times will give $n_{after}<1$, i.e. $n_{after}=0$ except for at most
$f(\epsilon^{\prime}+o(1))2^{n-1}$ $(n-1)$-tuples in $A$.
\section{Bounding the Fourier transform sums}
\label{fouriervalues}
Some of the ideas in this section, for example ``alternations'', come from
Chung, Diaconis, and Graham~\cite{cdg}.
Suppose $m\in S_1$. If
\[
g(x)=\cases{1/9&if $1/4\le\{x\}<3/4$ \cr 1&otherwise,}
\]
then
\begin{eqnarray*}
\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)
\right)^{2R(\ell-n)}
&\le&
\prod_{\ell=n+m_j}^{n+M_j}(g(2^{\ell}m/p))^{R(\ell-n)}\\
&\le&(1/9)^{{c_6} \log(\log p)A(B_m)}
\end{eqnarray*}
where $A(B_m)$ is the number of ``alternations'' in the first $M_j-m_j$
positions of the binary expansion of $\{2^{n+m_j}m/p\}$.
An alternation in the binary expansion
$.\alpha_1\alpha_2\alpha_3...$ occurs when $\alpha_i\ne\alpha_{i+1}$. There
will be an alternation in the first $\lceil\log_2p\rceil$ positions of the
binary expansion of $\{2^{n+m_j}m/p\}$ if $m\in\{1, 2,..., p-1\}$, and for
different $m\in\{1, 2,..., p-1\}$, the first $\lceil \log_2p\rceil$
positions of the binary expansion of $\{2^{n+m_j}m/p\}$ will differ.
The inequality ending $<(1/9)^{{c_6} \log(\log p)A(B_m)}$ occurs since
for some $\ell\in[n+m_j,n+M_j]$ with $1/4\le\{2^{\ell}m/p\}<3/4$,
$R(\ell-n)\ge(\log(\log p))^{2.1}$ and the $R(\ell-n)$ powers of $1/9$ also
cover all
$c_5(\log(\log p)+1)$ terms of the form $(1/9)^{R(\ell-n)}$
with $\ell$ such that
$R(\ell-n)\le{c_6}\log(\log p)$ if $p$ is large enough.
Observe
\begin{eqnarray*}
\sum_{m\in S_1}(1/9)^{{c_6} \log(\log p)A(B_m)}&\le&
\sum_{m=1}^{p-1}(1/9)^{{c_6} \log(\log p)A(B_m)}\\
&\le&
2\sum_{s=1}^{M_j-m_j}{M_j-m_j\choose s}(1/9)^{{c_6} \log(\log p)s}\\
&\le&
2\sum_{s=1}^{M_j-m_j}(M_j-m_j)^s(1/9)^{{c_6} \log(\log p)s}\\
&\rightarrow&0
\end{eqnarray*}
as $p\rightarrow\infty$ if $\log_2p<M_j-m_j<c_2\log_2p$ and ${c_6}$
is large enough.
Now suppose $m\in S_{2,0}$ and for some $\ell$ with
$1/4\le\{2^{\ell}m/p\}<3/4$ where $\ell<n-(\log p)^2$ and
$|\{h:\ell+1\le h\le\ell+(\log p)^2,w_{\ell}=w_h\}|\ge(\log(\log p))^{2.1}$, then
\begin{eqnarray*}
&&
\prod_{\ell=n+m_j}^{n+M_j}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{\ell}m/p)
\right)^{2R(\ell-n)}
\\
&&
\times
\prod_{r=j+1}^{n-1}\left(\frac{1}{3}+\frac{2}{3}\cos(2\pi 2^{n+w_r}m/p)\right)^2
\\
&\le& (1/9)^{{c_6} \log(\log p) A(B_m)}.
\end{eqnarray*}
In other words, the powers of $1/9$ for these
values of $h$ cover all $c_5(\log(\log p)+1)$
terms of the form $(1/9)^{R(\ell-n)}$ with $\ell$ such that
$R(\ell-n)\le {c_6}\log(\log p)$
if $p$ is large enough. By reasoning similar to the sum involving $m\in S_1$,
\[
\sum_{m\in S_{2,0}}(1/9)^{{c_6} \log(\log p) A(B_m)}
\rightarrow 0
\]
as $p\rightarrow\infty$.
\section{Lower Bound}
The argument for the lower bound is more straightforward and is based upon
\cite{mvhphd}.
\begin{theorem}
\label{lowerbound}
Suppose $X_n$, $a_n$, $b_n$, and $p$ are as in Theorem~\ref{mainthm}. Let
$\epsilon>0$ be given. For some $c>0$, if $n<c(\log p)^2$ for large
enough $p$, then $\|P_n-U\|>1-\epsilon$.
\end{theorem}
{\it Proof:}
Let $m_j$ and $M_j$ be as in Section~\ref{rwexp}. For some $c>0$,
if $n=\lfloor c(\log p)^2\rfloor$, then
$P(m_j\le -0.25\log_2p)<\epsilon/3$ and $P(M_j\ge 0.25\log_2p)<\epsilon/3$.
If $m_j>-0.25\log_2p$ and $M_j<0.25\log_2p$, then
$2^{\lceil -0.25\log_2p\rceil}X_n$ lies in the interval
$[-{\sqrt p}c(\log p)^2, {\sqrt p}c(\log p)^2]$, and so
$\|P_n-U\|\ge(1-2\epsilon/3)-(2{\sqrt p}c(\log p)^2+1)/p>1-\epsilon$ for
sufficiently large $p$.
\section{Discussion of Generalizations for $a_n$}
One can ask if the results generalize to the case where $a$ is a fixed
integer greater than $1$, $(a,p)=1$, and $P(a_n=a)=P(a_n=a^{-1})=1/2$.
The results indeed should generalize.
Chapter 3 of Hildebrand~\cite{mvhphd} gives a result if $P(a_n=a)=1$.
This result gives an upper bound similar to the original
Chung-Diaconis-Graham result with $P(a_n=2)=1$ and involves an $a$-ary
expansion along with a generalization of alternations in a Fourier
transform argument. The random walk on the exponent should work with
powers of $a$ instead of powers of $2$. The Fourier transform
argument may consider the interval $[1/a^2,1-1/a^2)$ instead of
$[1/4,3/4)$. The constant $1/9$ may be replaced by another constant
less than $1$. One needs to be careful with the size of the analogue of
$S_{2,b}$.
Also Breuillard and Varj\'u~\cite{bv} consider the Chung-Diaconis-Graham
process with $P(a_n=a)=1$ where $a$ is not fixed. One might explore
cases where $P(a_n=a)=P(a_n=a^{-1})=1/2$ where $a$ is not fixed but does
have a multiplicative inverse in the integers mod $p$.
\section{Questions for Further Study}
Eberhard and Varj\'u~\cite{ev} prove and locate
a cut-off phenomenon for most odd integers $p$ in the original
Chung-Diaconis-Graham random process. However, the diffusive
nature of the random walk on the exponent suggests that a cut-off phenomenon
might not appear in the multiplicatively symmetrized version. Exploring
this question more rigorously is a problem for further study.
The Chung-Diaconis-Graham random process can be extended to multiple
dimensions. Klyachko~\cite{klyachko} considers random processes of the
form $X_{N+1}=A_NX_N+B_N \pmod p$ where $X_N$ is a random vector in
$({\mathbb Z}/p{\mathbb Z})\times({\mathbb Z}/p{\mathbb Z})$ and $A_N$ is a fixed $2\times 2$
matrix with some conditions. Perhaps techniques
in this paper could be combined with
Klyachko's result to get a result for the
case where $A_N$ is a fixed $2\times 2$ matrix
or its inverse with probability $1/2$ each.
\section{Acknowledgment}
The author would like to thank the referee for some suggestions.
This is
a preprint of an article published in {\it Journal of Theoretical Probability}.
The final authenticated version is available online at
{\tt https://doi.org/10.1007/s10959-021-01088-3}.
\end{document} |
\begin{document}
\title{Parameterized Study of \stree on Unit Disk Graphs}
\begin{abstract}
We study the {\sc Steiner Tree}\xspace problem on unit disk graphs.
Given an $n$-vertex unit disk graph $G$, a subset $R\subseteq V(G)$ of $t$ vertices and a positive integer $k$, the objective is to decide if there exists a tree $T$ in $G$ that spans over all vertices of $R$ and uses at most $k$ vertices from $V\setminus R$.
The vertices of $R$ are referred to as \emph{terminals} and the vertices of $V(G)\setminus R$ as \emph{Steiner} vertices. First, we show that the problem is \textsf{NP}-hard.
Next, we prove that the {\sc Steiner Tree}\xspace problem on unit disk graphs can be solved in $n^{O(\sqrt{t+k})}$ time. We also show that the {\sc Steiner Tree}\xspace problem on unit disk graphs parameterized by $k$ has an FPT algorithm with running time $2^{O(k)}n^{O(1)}$. In fact, the algorithms are designed for a more general class of graphs, called clique-grid graphs~\cite{fomin2019finding}. We mention that the algorithmic results can be made to work for {\sc Steiner Tree}\xspace on disk graphs with bounded aspect ratio. Finally, we prove that {\sc Steiner Tree}\xspace on disk graphs parameterized by $k$ is W[1]-hard.
\end{abstract}
and a subset $R\subseteq V(G)$ of vertices, a Steiner tree is an acyclic subgraph of $G$
spanning all vertices of $R$. The vertices of $R$ are usually referred to as \emph{terminals} and the vertices of $V(G)\setminus R$ as \emph{Steiner} vertices.
The {\sc Minimum Steiner Tree} problem is to find a Steiner tree $T$ such that the total weight of $E(T)$ is minimized. The decision version of this is the {\sc Steiner Tree}\xspace problem, where given a graph $G$, a subset $R\subseteq V(G)$ of vertices and a positive integer $k$, the objective is to determine if there exists a Steiner tree $T$ in $G$ for the \emph{terminal} set $R$ such that the number of \emph{Steiner} vertices in $T$ is at most $k$. The {\sc Steiner Tree}\xspace problem is one of Karp's classic \textsf{NP}-complete\xspace problems \cite{k-racp-72}; moreover, that makes the optimization problem \textsf{NP}-hard.
A special case of the {\sc Minimum Steiner Tree} problem is the \textsc{Metric Steiner Tree} problem. Given a complete graph $G=(V,E)$, each vertex corresponds to a point in a metric space, and for each edge $e\in E$ the weight $w(e)$ corresponds to the distances in the space. In other words, the edge weights satisfy the triangle inequality. It is well known that, given an instance of the non-metric Steiner tree problem,
it is possible to transform it in polynomial time into an equivalent instance of the \textsc{Metric Steiner Tree} problem. Moreover, this transformation preserves the approximation factor \cite{vazirani2013approximation}.
The {\sc Euclidean Steiner Tree} problem or {\sc Geometric Steiner Tree} problem takes as input $n$ points in the plane. The objective is to connect them by lines of minimum total length in such a way that any two points may be interconnected by line segments either directly or via other points and line segments. The {\sc Minimum Steiner Tree} problem is \textsf{NP}\xspaceH even in Euclidean or Rectilinear metrics \cite{garey1977rectilinear}.
Arora \cite{arora1998polynomial} showed that the {\sc Euclidean Steiner Tree}
and {\sc Rectilinear Steiner Tree} problems can be efficiently approximated arbitrarily close to the optimal. Several approximation schemes have been proposed over the years on {\sc Minimum Steiner Tree} for graphs with arbitrary weights \cite{berman1994improved, borchers1997thek, karpinski1997new, promel2000new}.
Although the Euclidean version admits a PTAS,
it is known that the \textsc{Metric Steiner Tree} problem is APX-complete. There is a polynomial-time algorithm that approximates the minimum Steiner tree to within a factor of $\ln(4)+\epsilon \approx 1.386$ \cite{chlebikova2008steiner}; however, approximating within a factor $\frac{96}{95} \approx 1.0105$ is \textsf{NP}\xspaceH \cite{berman20091}.
The decision version, {\sc Steiner Tree}\xspace is well-studied in parameterized complexity. A well-studied parameter for the {\sc Steiner Tree}\xspace is the number of terminals $t = |R|$.
It is known that the {\sc Steiner Tree}\xspace is FPT for this parameter
due to the classical result of Dreyfus and Wagner \cite{dreyfus1971steiner}. Fuchs et al.~\cite{FuchsKMRRW07} and Nederlof~\cite{Nederlof13} gave alternative algorithms for {\sc Steiner Tree}\xspace parameterized by $t$ with running times that are not comparable with the Dreyfus and Wagner algorithm. On the other hand, {\sc Steiner Tree}\xspace parameterized by the number of Steiner vertices $k$ is W[2]-hard~\cite{downey2012parameterized}.
Hence, the focus has been on designing parameterized algorithms for graph subclasses like planar graphs \cite{jones2013parameterized}, $d$-degenerate graphs \cite{suchy2017extending}, etc.
In \cite{dvovrak2017parameterized}, Dvo\v{r}\'{a}k et al. designed an efficient parameterized approximation scheme (EPAS) for the {\sc Steiner Tree}\xspace parameterized by $k$ \footnote{For any $\epsilon>0$ computes a $(1+\epsilon)$ approximation in time $f(p,\epsilon)\times n^{O(1)}$ for a computable function $f$ independent of $n$.}.
In this paper, we study the {\sc Steiner Tree}\xspace problem on unit disk graphs when the parameter is the number of Steiner vertices $k$. Unit disk graphs are the geometric intersection graphs of unit circles in the plane. That is, given $n$ unit circles in the plane, we have a graph $G$ where each vertex corresponds to a circle such that there is an edge between two vertices when the corresponding circles intersect. Unit disk graphs have been widely studied in computational geometry and graph algorithms due to their usefulness in many real-world problems, e.g., optimal facility location \cite{wang1988study}, wireless and sensor networks; see \cite{hale1980frequency, kammerlander1984c}. These led to the study of many \textsf{NP}-complete\xspace problems on unit disk graphs; see \cite{clark1991unit, dumitrescu2011minimum}.
There are some works on variants of {\sc Minimum Steiner Tree} on unit disk graphs in the approximation paradigm. Li et al. \cite{li2009ptas} studied node-weighted Steiner trees on unit disk graphs, and presented a PTAS when the given set of vertices is $c$-local. Moreover, they used this to solve the node-weighted connected dominating set problem in unit disk graphs and obtained a
$(5+\epsilon)$-approximation algorithm. In \cite{biniazfull}, Biniaz et al. studied the {\sc Full Steiner Tree}\footnote{A full Steiner tree is a Steiner tree which has all the terminal vertices as its leaves} problem on unit disk graphs. They presented a $20$-approximation algorithm for this problem, and for $\lambda$-precise graphs gave a $(10+\frac{1}{\lambda})$-approximation algorithm where $\lambda$ is the length of the longest edge. Although there have been a plethora of work on variants of the {\sc Minimum Steiner Tree} problem on unit disk graphs in approximation algorithms, hardly anything is known in parameterized complexity for the decision version.
In this regard, we refer to the work of Marx et al.~\cite{marx2018subexponential}
who investigated the parameterized complexity of the {\sc Minimum Steiner Tree} problem on planar graphs, where the number of terminals ($k$) is regarded as the parameter. They have designed an $n^{O(\sqrt{k})}$-time exact algorithm,
and showed that this problem on planar graphs
cannot be solved in time $2^{o(k)}\cdot n^{O(1)}$, assuming ETH. However, these results do not directly apply on unit disk graphs as unit disk graphs can contain very large cliques, but, then planar graphs contains arbitrarily large stars. Recently, Berg et al.~\cite{de2018framework} showed that the {\sc Steiner Tree}\xspace problem can be solved in $2^{O(n^{1-\frac{1}{d}})}$ time on intersection graphs of $d$-dimensional similarly-sized fat objects, for some $d \in \mathbb{Z}_{+}$.
More often than not, the geometric intersection graph families such as unit disk graphs, unit square intersection graphs, rectangle intersection graphs, provide additional geometric structure that helps to generate algorithms. In this paper, our objective is to understand parameterized tractability landscape of the {\sc Steiner Tree}\xspace problem on unit disk graphs.
\paragraph*{Our Results.}
First in Section~\ref{sec:nph}, we show that {\sc Steiner Tree}\xspace on unit disk graphs is \textsf{NP}\xspaceH. Then, in Section~\ref{sec:subexp}, we design a subexponential algorithm for the {\sc Steiner Tree}\xspace problem on unit disk graphs parameterized by the number of terminals $t$ and the number of Steiner vertices $k$.
\begin{restatable}
{theorem}{subexp}\label{thm:subexp-UDG}
{\sc Steiner Tree}\xspace on unit disk graphs can be solved in $n^{O(\sqrt{t+k})}$ time.
\end{restatable}
The approach to design this subexponential algorithm is very similar to that used in~\cite{fomin2019finding}. First, we apply a Baker-like shifting strategy to create a family $\mathcal{F}$ of instances (of {\sc Exact Steiner Tree}\xspace, which is a variant of {\sc Steiner Tree}\xspace) such that if the input instance $(G,R,t,k)$ is a yes-instance then there is at least one constructed instance in $\mathcal{F}$ that is a yes-instance of {\sc Exact Steiner Tree}\xspace. On the other hand, if $(G,R,t,k)$ is a no-instance of {\sc Steiner Tree}\xspace, then no instance of $\mathcal{F}$ is a yes-instance of {\sc Exact Steiner Tree}\xspace. With the knowledge that the answer is preserved in the family $\mathcal{F}$, we design a dynamic programming subroutine to solve {\sc Exact Steiner Tree}\xspace on each of the constructed instances of $\mathcal{F}$.
Next, in Section~\ref{sec:FPT}, we show that the {\sc Steiner Tree}\xspace on unit disk graphs has an FPT algorithm when parameterized by $k$.
\begin{restatable}
{theorem}{FPT}\label{thm:FPT-UDG}
{\sc Steiner Tree}\xspace on unit disk graphs can be solved in $2^{O(k)}n^{O(1)}$ time.
\end{restatable}
Here, we show that solving the {\sc Steiner Tree}\xspace problem on an instance $(G,R,t,k)$ is equivalent to solving the problem on an instance $(G',R',t',k)$ where the graph $G'$ is obtained by contracting all connected components of $G[R]$. Although $G'$ loses all geometric properties, we show that the number of terminals in $R'$ is only dependent on $k$. This essentially changes the problem to running the Dreyfus-Wagner algorithm on $(G',R',t',k)$.
Both the results in Theorem~\ref{thm:subexp-UDG} and \ref{thm:FPT-UDG} are shown to work for a superclass of graphs, called clique-grid graphs. We would like to remark that the algorithms can also be made to work for disk graphs with constant aspect ratio.
Finally, in contrast, in Section~\ref{sec:whard} we prove that the {\sc Steiner Tree}\xspace problem for disk graphs is W[1]-hard, parameterized by the number of Steiner vertices $k$. The {\sc Steiner Tree}\xspace problem is known to be W[2]-hard on general graphs~\cite{downey2012parameterized}.
However, it is not clear how to use that reduction for disk graphs. We show a reduction of our problem from {\sc Grid Tiling} with $\ge$~\cite{cygan2015parameterized},
ruling out the possibility of a $f(k)n^{o(k)}$ time algorithm for any function $f$, assuming ETH.
\begin{restatable}
{theorem}{whardness}\label{w1-hard}
\label{whard}
The {\sc Steiner Tree}\xspace problem on disk graphs is W[1]-hard, parameterized by the number of Steiner vertices $k$.
\end{restatable}
The set $\{1,2,\ldots,n\}$ is denoted as $[n]$. For a graph $G$, and a subset $V' \subseteq V(G)$, $G[V']$ denotes the subgraph induced on $V'$. The {\sc Exact Steiner Tree}\xspace problem takes as input a graph $G$, a terminal set $R$ with $t$ terminals and a positive integer $k$. The aim is to determine whether there is a Steiner tree $T$ in $G$ for $R$ that has exactly $k$ Steiner vertices. A Steiner tree with at most $k$ Steiner vertices is called a $k$-Steiner tree while one with exactly $k$ Steiner vertices is called an exact $k$-Steiner tree. Note that if $T$ is an exact $k$-Steiner tree then $\vert V(T) \vert = t+k$. When the {\sc Steiner Tree}\xspace or {\sc Exact Steiner Tree}\xspace problem is restricted to taking input graphs only from a graph class $\mathcal{G}$, then these variants are referred to as {\sc Steiner Tree}\xspace on $\mathcal{G}$ and {\sc Exact Steiner Tree}\xspace on $\mathcal{G}$, respectively.
\begin{observation}\label{obs:exact}
A tree $T$ is a $k$-Steiner tree for an instance $(G,R,t,k)$ if and only if $T$ is an exact $k'$-Steiner tree for the instance $(G,R,t,k')$ of {\sc Exact Steiner Tree}\xspace for some $k' \leq k$.
\end{observation}
\begin{definition}~\cite{fomin2019finding}\label{def:clique-grid}
A graph $G$ is a clique-grid graph if there is a pair $p,p'\in \mathbb{N}$ and a function $f: V(G) \rightarrow [p]\times [p']$ such that the following conditions hold:
\begin{enumerate}
\item For all $(i,j) \in [p] \times [p']$, $f^{-1}(i,j)$ is a clique in $G$.
\item For all $uv \in E(G)$, if $f(u) = (i,j)$ and $f(v) = (i',j')$ then $\vert i-i'\vert \leq 2$ and $\vert j-j' \vert \leq 2$.
\end{enumerate}
Such a function $f$ is called a representation of the graph $G$.
\end{definition}
Unit disk graphs are clique-grid graphs~\cite{fomin2019finding}. Next, we define a representation of a clique-grid graph called a cell graph.
\begin{definition}~\cite{fomin2019finding}\label{def:cell-graph}
Given a clique-grid graph $G$ with representation $f:V(G) \rightarrow [p]\times [p']$, the cell graph ${\sf cell}(G)$ is defined as follows:
\begin{itemize}
\item $V({\sf cell}(G)) = \{v_{ij} \vert i \in [p], j \in [p'], f^{-1}(i,j)\neq \emptyset\}$,
\item $E({\sf cell}(G)) = \{v_{ij}v_{i'j'} \vert (i,j) \neq (i',j'), \exists u \in f^{-1}(i,j) \mbox{ and } \exists v \in f^{-1}(i',j') \mbox{ such that } uv\in E(G)\}$.
\end{itemize}
\end{definition}
For each vertex $v_{ij}\in V({\sf cell}(G))$, the pair $(i,j)$ is also called a cell of $G$ and by definition corresponds to a non-empty clique of $G$. A vertex $v\in V(G)$ is said to be in the cell $(i,j)$ if $f(v) = (i,j)$. The neighbour of a cell $\mathcal{C} = (i,j)$ in a cell $\mathcal{C}' = (i',j') \neq \mathcal{C}$ are $\{v \in V(G) \vert f(v) = (i',j'), \exists u \mbox{ such that } f(u) = (i,j) \mbox{ and } uv \in E(G)\}$.
Let $G$ be a graph. A {\em path decomposition} of a graph $G$ is a pair $\mathcal{T} = (P,\beta: V(P) \rightarrow 2^{V(G)})$, where
$P$ is a path where every node $p\in V(P)$
is assigned a subset $\beta(p)\subseteq V(G)$, called a bag, such that
the following conditions hold: (i) ${\bf i}gcup_{p\in V(P)}{\beta(p)}=V(G)$, (ii) for every edge $xy\in E(G)$ there is a $p\in V(P)$ such that $\{x,y\}\subseteq \beta(p)$, and (iii) for any $v\in V(G)$ the subgraph of $P$ induced by the set $\{p\mid v\in \beta(p)\}$ is connected. A path decomposition will also be denoted as a sequence of bags $\{\beta(p_1),\beta(p_2),\ldots,\beta(p_q)\}$ where $P = p_1p_2\ldots p_q$. The {\em width} of a path decomposition is $\max_{p\in V(P)} |\beta(p)| -1$. The {\em pathwidth} of $G$ is the minimum width over all path decompositions of $G$ and is denoted by ${\sf pw}(G)$. Given a path decomposition of a graph $G$, we say it is rooted at exactly one of the two degree one vertices of the underlying path.
\begin{definition}~\cite{fomin2019finding}\label{def:cell-pathdecomp}
A path decomposition $\mathcal{T} = (P,\beta)$ of a clique-grid graph $G$ with representation $f:V(G) \rightarrow [p] \times [p']$ is a nice $\ell$-clique path decomposition ($\ell$-NCPD) if for the root $r$ of $P$, $\beta(r) = \emptyset$ and for each $v \in V(P)$ the following hold:
\begin{enumerate}
\item There are at most $\ell$ cells $\{(i_1,j_1),(i_2,j_2),\ldots,(i_\ell,j_\ell)\}$ such that $\beta(v) = {\bf i}gcup_{p=1}^{\ell} f^{-1}(i_p,j_p)$,
\item The node $v$ is one of the following types: (i) Leaf node where $\beta(v) = \emptyset$, (ii) Forget node where $v$ has exactly one child $u$ and there is a cell $(i,j) \in [p] \times [p']$ such that $f^{-1}(i,j) \subseteq \beta(u)$ and $\beta(v) = \beta(u) \setminus f^{-1}(i,j)$, (iii) Introduce node where $v$ has exactly one child $u$ and there is a cell $(i,j) \in [p] \times [p']$ such that $f^{-1}(i,j) \subseteq \beta(v)$ and $\beta(u) = \beta(v) \setminus f^{-1}(i,j)$,
\end{enumerate}
\end{definition}
See Figure~\ref{l-ncpd} for an example of an NCPD. A path decomposition for a clique-grid graph $G$ with representation $f$ where only property $1$ of Definition~\ref{def:cell-pathdecomp} is true for a positive number $\ell$ is referred to as an $\ell$-CPD.
\begin{figure}
\caption{An illustration of nice $2$-clique path decomposition.}
\label{l-ncpd}
\end{figure}
\section{Introduction}\label{sec:intro}
Given a graph $G$ with a weight function $w:E(G)\rightarrow \mathbb{R}^+$
and a subset $R\subseteq V(G)$ of vertices, a Steiner tree is an acyclic subgraph of $G$
spanning all vertices of $R$. The vertices of $R$ are usually referred to as \emph{terminals} and the vertices of $V(G)\setminus R$ as \emph{Steiner} vertices.
The {\sc Minimum Steiner Tree} problem is to find a Steiner tree $T$ such that the total weight of $E(T)$ is minimized. The decision version of this is the {\sc Steiner Tree}\xspace problem, where given a graph $G$, a subset $R\subseteq V(G)$ of vertices and a positive integer $k$, the objective is to determine if there exists a Steiner tree $T$ in $G$ for the \emph{terminal} set $R$ such that the number of \emph{Steiner} vertices in $T$ is at most $k$. The {\sc Steiner Tree}\xspace problem is one of Karp's classic \textsf{NP}-complete\xspace problems \cite{k-racp-72}; moreover, that makes the optimization problem \textsf{NP}-hard.
A special case of the {\sc Minimum Steiner Tree} problem is the \textsc{Metric Steiner Tree} problem. Given a complete graph $G=(V,E)$, each vertex corresponds to a point in a metric space, and for each edge $e\in E$ the weight $w(e)$ corresponds to the distances in the space. In other words, the edge weights satisfy the triangle inequality. It is well known that, given an instance of the non-metric Steiner tree problem,
it is possible to transform it in polynomial time into an equivalent instance of the \textsc{Metric Steiner Tree} problem. Moreover, this transformation preserves the approximation factor \cite{vazirani2013approximation}.
The {\sc Euclidean Steiner Tree} problem or {\sc Geometric Steiner Tree} problem takes as input $n$ points in the plane. The objective is to connect them by lines of minimum total length in such a way that any two points may be interconnected by line segments either directly or via other points and line segments. The {\sc Minimum Steiner Tree} problem is \textsf{NP}\xspaceH even in Euclidean or Rectilinear metrics \cite{garey1977rectilinear}.
Arora \cite{arora1998polynomial} showed that the {\sc Euclidean Steiner Tree}
and {\sc Rectilinear Steiner Tree} problems can be efficiently approximated arbitrarily close to the optimal. Several approximation schemes have been proposed over the years on {\sc Minimum Steiner Tree} for graphs with arbitrary weights \cite{berman1994improved, borchers1997thek, karpinski1997new, promel2000new}.
Although the Euclidean version admits a PTAS,
it is known that the \textsc{Metric Steiner Tree} problem is APX-complete. There is a polynomial-time algorithm that approximates the minimum Steiner tree to within a factor of $\ln(4)+\epsilon \approx 1.386$ \cite{chlebikova2008steiner}; however, approximating within a factor $\frac{96}{95} \approx 1.0105$ is \textsf{NP}-hard \cite{berman20091}.
The decision version, {\sc Steiner Tree}\xspace is well-studied in parameterized complexity. A well-studied parameter for the {\sc Steiner Tree}\xspace is the number of terminals $t = |R|$.
It is known that the {\sc Steiner Tree}\xspace is FPT for this parameter
due to the classical result of Dreyfus and Wagner \cite{dreyfus1971steiner}. Fuchs et al.~\cite{FuchsKMRRW07} and Nederlof~\cite{Nederlof13} gave alternative algorithms for {\sc Steiner Tree}\xspace parameterized by $t$ with running times that are not comparable with the Dreyfus and Wagner algorithm. On the other hand, {\sc Steiner Tree}\xspace parameterized by the number of Steiner vertices $k$ is W[2]-hard~\cite{downey2012parameterized}.
Hence, the focus has been on designing parameterized algorithms for graph subclasses like planar graphs \cite{jones2013parameterized}, $d$-degenerate graphs \cite{suchy2017extending}, etc.
In \cite{dvovrak2017parameterized}, Dvo\v{r}\'{a}k et al. designed an efficient parameterized approximation scheme (EPAS) for the {\sc Steiner Tree}\xspace parameterized by $k$\footnote{For any $\epsilon>0$, it computes a $(1+\epsilon)$-approximation in time $f(k,\epsilon)\times n^{O(1)}$ for a computable function $f$ independent of $n$.}.
In this paper, we study the {\sc Steiner Tree}\xspace problem on unit disk graphs when the parameter is the number of Steiner vertices $k$. Unit disk graphs are the geometric intersection graphs of unit circles in the plane. That is, given $n$ unit circles in the plane, we have a graph $G$ where each vertex corresponds to a circle such that there is an edge between two vertices when the corresponding circles intersect. Unit disk graphs have been widely studied in computational geometry and graph algorithms due to their usefulness in many real-world problems, e.g., optimal facility location \cite{wang1988study}, wireless and sensor networks; see \cite{hale1980frequency, kammerlander1984c}. These led to the study of many \textsf{NP}-complete\xspace problems on unit disk graphs; see \cite{clark1991unit, dumitrescu2011minimum}.
There are some works on variants of {\sc Minimum Steiner Tree} on unit disk graphs in the approximation paradigm. Li et al. \cite{li2009ptas} studied node-weighted Steiner trees on unit disk graphs, and presented a PTAS when the given set of vertices is $c$-local. Moreover, they used this to solve the node-weighted connected dominating set problem in unit disk graphs and obtained a
$(5+\epsilon)$-approximation algorithm. In \cite{biniazfull}, Biniaz et al. studied the {\sc Full Steiner Tree}\footnote{A full Steiner tree is a Steiner tree which has all the terminal vertices as its leaves} problem on unit disk graphs. They presented a $20$-approximation algorithm for this problem, and for $\lambda$-precise graphs gave a $(10+\frac{1}{\lambda})$-approximation algorithm where $\lambda$ is the length of the longest edge. Although there have been a plethora of work on variants of the {\sc Minimum Steiner Tree} problem on unit disk graphs in approximation algorithms, hardly anything is known in parameterized complexity for the decision version.
In this regard, we refer to the work of Marx et al.~\cite{marx2018subexponential}
who investigated the parameterized complexity of the {\sc Minimum Steiner Tree} problem on planar graphs, where the number of terminals ($k$) is regarded as the parameter. They have designed an $n^{O(\sqrt{k})}$-time exact algorithm,
and showed that this problem on planar graphs
cannot be solved in time $2^{o(k)}\cdot n^{O(1)}$, assuming ETH. However, these results do not directly apply to unit disk graphs, as unit disk graphs can contain very large cliques, whereas planar graphs can contain arbitrarily large stars. Recently, de Berg et al.~\cite{de2018framework} showed that the {\sc Steiner Tree}\xspace problem can be solved in $2^{O(n^{1-\frac{1}{d}})}$ time on intersection graphs of $d$-dimensional similarly-sized fat objects, for some $d \in \mathbb{Z}_{+}$.
More often than not, the geometric intersection graph families such as unit disk graphs, unit square intersection graphs, rectangle intersection graphs, provide additional geometric structure that helps to generate algorithms. In this paper, our objective is to understand parameterized tractability landscape of the {\sc Steiner Tree}\xspace problem on unit disk graphs.
\paragraph*{Our Results.}
First in Section~\ref{sec:nph}, we show that {\sc Steiner Tree}\xspace on unit disk graphs is \textsf{NP}-hard. Then, in Section~\ref{sec:subexp}, we design a subexponential algorithm for the {\sc Steiner Tree}\xspace problem on unit disk graphs parameterized by the number of terminals $t$ and the number of Steiner vertices $k$.
\begin{restatable}
{theorem}{subexp}\label{thm:subexp-UDG}
{\sc Steiner Tree}\xspace on unit disk graphs can be solved in $n^{O(\sqrt{t+k})}$ time.
\end{restatable}
The approach to design this subexponential algorithm is very similar to that used in~\cite{fomin2019finding}. First, we apply a Baker-like shifting strategy to create a family $\mathcal{F}$ of instances (of {\sc Exact Steiner Tree}\xspace, which is a variant of {\sc Steiner Tree}\xspace) such that if the input instance $(G,R,t,k)$ is a yes-instance then there is at least one constructed instance in $\mathcal{F}$ that is a yes-instance of {\sc Exact Steiner Tree}\xspace. On the other hand, if $(G,R,t,k)$ is a no-instance of {\sc Steiner Tree}\xspace, then no instance of $\mathcal{F}$ is a yes-instance of {\sc Exact Steiner Tree}\xspace. With the knowledge that the answer is preserved in the family $\mathcal{F}$, we design a dynamic programming subroutine to solve {\sc Exact Steiner Tree}\xspace on each of the constructed instances of $\mathcal{F}$.
Next, in Section~\ref{sec:FPT}, we show that the {\sc Steiner Tree}\xspace on unit disk graphs has an FPT algorithm when parameterized by $k$.
\begin{restatable}
{theorem}{FPT}\label{thm:FPT-UDG}
{\sc Steiner Tree}\xspace on unit disk graphs can be solved in $2^{O(k)}n^{O(1)}$ time.
\end{restatable}
Here, we show that solving the {\sc Steiner Tree}\xspace problem on an instance $(G,R,t,k)$ is equivalent to solving the problem on an instance $(G',R',t',k)$ where the graph $G'$ is obtained by contracting all connected components of $G[R]$. Although $G'$ loses all geometric properties, we show that the number of terminals in $R'$ is only dependent on $k$. This essentially changes the problem to running the Dreyfus-Wagner algorithm on $(G',R',t',k)$.
Both the results in Theorem~\ref{thm:subexp-UDG} and \ref{thm:FPT-UDG} are shown to work for a superclass of graphs, called clique-grid graphs. We would like to remark that the algorithms can also be made to work for disk graphs with constant aspect ratio.
Finally, in contrast, in Section~\ref{sec:whard} we prove that the {\sc Steiner Tree}\xspace problem for disk graphs is W[1]-hard, parameterized by the number of Steiner vertices $k$. The {\sc Steiner Tree}\xspace problem is known to be W[2]-hard on general graphs~\cite{downey2012parameterized}.
However, it is not clear how to use that reduction for disk graphs. We show a reduction of our problem from {\sc Grid Tiling} with $\ge$~\cite{cygan2015parameterized},
ruling out the possibility of an $f(k)n^{o(k)}$-time algorithm for any function $f$, assuming ETH.
\begin{restatable}
{theorem}{whardness}\label{w1-hard}
\label{whard}
The {\sc Steiner Tree}\xspace problem on disk graphs is W[1]-hard, parameterized by the number of Steiner vertices $k$.
\end{restatable}
\section{Preliminaries}\label{sec:prelims}
The set $\{1,2,\ldots,n\}$ is denoted as $[n]$. For a graph $G$, and a subset $V' \subseteq V(G)$, $G[V']$ denotes the subgraph induced on $V'$. The {\sc Exact Steiner Tree}\xspace problem takes as input a graph $G$, a terminal set $R$ with $t$ terminals and a positive integer $k$. The aim is to determine whether there is a Steiner tree $T$ in $G$ for $R$ that has exactly $k$ Steiner vertices. A Steiner tree with at most $k$ Steiner vertices is called a $k$-Steiner tree while one with exactly $k$ Steiner vertices is called an exact $k$-Steiner tree. Note that if $T$ is an exact $k$-Steiner tree then $\vert V(T) \vert = t+k$. When the {\sc Steiner Tree}\xspace or {\sc Exact Steiner Tree}\xspace problem is restricted to taking input graphs only from a graph class $\mathcal{G}$, then these variants are referred to as {\sc Steiner Tree}\xspace on $\mathcal{G}$ and {\sc Exact Steiner Tree}\xspace on $\mathcal{G}$, respectively.
\begin{observation}\label{obs:exact}
A tree $T$ is a $k$-Steiner tree for an instance $(G,R,t,k)$ if and only if $T$ is an exact $k'$-Steiner tree for the instance $(G,R,t,k')$ of {\sc Exact Steiner Tree}\xspace for some $k' \leq k$.
\end{observation}
\begin{definition}~\cite{fomin2019finding}\label{def:clique-grid}
A graph $G$ is a clique-grid graph if there is a pair $p,p'\in \mathbb{N}$ and a function $f: V(G) \rightarrow [p]\times [p']$ such that the following conditions hold:
\begin{enumerate}
\item For all $(i,j) \in [p] \times [p']$, $f^{-1}(i,j)$ is a clique in $G$.
\item For all $uv \in E(G)$, if $f(u) = (i,j)$ and $f(v) = (i',j')$ then $\vert i-i'\vert \leq 2$ and $\vert j-j' \vert \leq 2$.
\end{enumerate}
Such a function $f$ is called a representation of the graph $G$.
\end{definition}
Unit disk graphs are clique-grid graphs~\cite{fomin2019finding}. Next, we define a representation of a clique-grid graph called a cell graph.
\begin{definition}~\cite{fomin2019finding}\label{def:cell-graph}
Given a clique-grid graph $G$ with representation $f:V(G) \rightarrow [p]\times [p']$, the cell graph ${\sf cell}(G)$ is defined as follows:
\begin{itemize}
\item $V({\sf cell}(G)) = \{v_{ij} \vert i \in [p], j \in [p'], f^{-1}(i,j)\neq \emptyset\}$,
\item $E({\sf cell}(G)) = \{v_{ij}v_{i'j'} \vert (i,j) \neq (i',j'), \exists u \in f^{-1}(i,j) \mbox{ and } \exists v \in f^{-1}(i',j') \mbox{ such that } uv\in E(G)\}$.
\end{itemize}
\end{definition}
For each vertex $v_{ij}\in V({\sf cell}(G))$, the pair $(i,j)$ is also called a cell of $G$ and by definition corresponds to a non-empty clique of $G$. A vertex $v\in V(G)$ is said to be in the cell $(i,j)$ if $f(v) = (i,j)$. The neighbours of a cell $\mathcal{C} = (i,j)$ in a cell $\mathcal{C}' = (i',j') \neq \mathcal{C}$ are $\{v \in V(G) \vert f(v) = (i',j'), \exists u \mbox{ such that } f(u) = (i,j) \mbox{ and } uv \in E(G)\}$.
Let $G$ be a graph. A {\em path decomposition} of a graph $G$ is a pair $\mathcal{T} = (P,\beta: V(P) \rightarrow 2^{V(G)})$, where
$P$ is a path where every node $p\in V(P)$
is assigned a subset $\beta(p)\subseteq V(G)$, called a bag, such that
the following conditions hold: (i) $\bigcup_{p\in V(P)}{\beta(p)}=V(G)$, (ii) for every edge $xy\in E(G)$ there is a $p\in V(P)$ such that $\{x,y\}\subseteq \beta(p)$, and (iii) for any $v\in V(G)$ the subgraph of $P$ induced by the set $\{p\mid v\in \beta(p)\}$ is connected. A path decomposition will also be denoted as a sequence of bags $\{\beta(p_1),\beta(p_2),\ldots,\beta(p_q)\}$ where $P = p_1p_2\ldots p_q$. The {\em width} of a path decomposition is $\max_{p\in V(P)} |\beta(p)| -1$. The {\em pathwidth} of $G$ is the minimum width over all path decompositions of $G$ and is denoted by ${\sf pw}(G)$. Given a path decomposition of a graph $G$, we say it is rooted at exactly one of the two degree one vertices of the underlying path.
\begin{definition}~\cite{fomin2019finding}\label{def:cell-pathdecomp}
A path decomposition $\mathcal{T} = (P,\beta)$ of a clique-grid graph $G$ with representation $f:V(G) \rightarrow [p] \times [p']$ is a nice $\ell$-clique path decomposition ($\ell$-NCPD) if for the root $r$ of $P$, $\beta(r) = \emptyset$ and for each $v \in V(P)$ the following hold:
\begin{enumerate}
\item There are at most $\ell$ cells $\{(i_1,j_1),(i_2,j_2),\ldots,(i_\ell,j_\ell)\}$ such that $\beta(v) = \bigcup_{p=1}^{\ell} f^{-1}(i_p,j_p)$,
\item The node $v$ is one of the following types: (i) Leaf node where $\beta(v) = \emptyset$, (ii) Forget node where $v$ has exactly one child $u$ and there is a cell $(i,j) \in [p] \times [p']$ such that $f^{-1}(i,j) \subseteq \beta(u)$ and $\beta(v) = \beta(u) \setminus f^{-1}(i,j)$, (iii) Introduce node where $v$ has exactly one child $u$ and there is a cell $(i,j) \in [p] \times [p']$ such that $f^{-1}(i,j) \subseteq \beta(v)$ and $\beta(u) = \beta(v) \setminus f^{-1}(i,j)$,
\end{enumerate}
\end{definition}
See Figure~\ref{l-ncpd} for an example of an NCPD. A path decomposition for a clique-grid graph $G$ with representation $f$ where only property $1$ of Definition~\ref{def:cell-pathdecomp} is true for a positive number $\ell$ is referred to as an $\ell$-CPD.
\begin{figure}
\caption{An illustration of nice $2$-clique path decomposition.}
\label{l-ncpd}
\end{figure}
\section{NP-Hardness of {\sc Steiner Tree}\xspace on Unit Disk Graphs}\label{sec:nph}
In this section, we consider the {\sc Steiner Tree}\xspace problem on unit disk graphs and prove that this problem is \textsf{NP}-hard.
We show a reduction from {\sc Connected Vertex Cover} in planar graphs with maximum degree $4$. The reduction is very similar to that in~\cite{abu2015euclidean}.
\begin{theorem}
\label{nphard}
The {\sc Steiner Tree}\xspace problem on unit disk graphs is \textsf{NP}-hard.
\end{theorem}
\begin{proof}
We show a reduction from the {\sc Connected Vertex Cover} in planar graphs with maximum degree $4$ problem, which is known to be \textsf{NP}-hard \cite{garey1977rectilinear}.
Given a planar graph $G$ with maximum degree $4$ and an integer $k$, the {\sc Connected Vertex Cover} problem asks to find if there exists a vertex cover $D$ for $G$ such that the subgraph induced by $D$ is connected and $|D|\le k$.
We adopt the proof of Abu-Affash \cite{abu2015euclidean}, where it was shown that the $k$-{\sc Bottleneck Full Steiner Tree} problem is \textsf{NP}-hard.
We make this reduction compatible for unit disk graphs.
Given a planar graph $G$ with maximum degree $4$ and an integer $k$,
we construct, in polynomial time, a unit disk graph $G_{\mathcal{C}}$ on a set $\mathcal{C}$ of unit disks, where $V(G_{\mathcal{C}})=\mathcal{C}$ is divided into two sets of unit disks $R$ and $S$, the terminal and Steiner disks, respectively.
Let $V(G)=\{v_1,v_2,\ldots,v_n\}$ and let $E(G)=\{e_1,e_2,\ldots,e_m\}$. Then, we compute an integer $k'$ such that $G$ has a connected vertex cover $D$ of size at most $k$ if and only if there exists a {\sc Steiner Tree}\xspace with at most $k'$ Steiner vertices of $G_{\mathcal{C}}$.
As an intermediate step we build a rectangular grid graph $G'$.
First, we embed $G$ on a rectangular grid, with distance at least $8$ between adjacent vertices. Each vertex $v_i\in V(G)$ corresponds to a grid vertex, and each edge $e=v_iv_j\in E(G)$ corresponds to a rectilinear path comprised of some horizontal and vertical grid segments with endpoints corresponding to $v_i$ and $v_j$. Let $V(G')=\{v'_1,\ldots,v'_n\}$ be the grid points corresponding to the vertices of $V(G)$, and let $E(G')=\{p_{e_1},\ldots,p_{e_m}\}$
be the set of paths corresponding to the edges of $E(G)$.
Moreover, these paths are pairwise disjoint; see Figure~\ref{np1}(b).
This embedding can be done in $O(n)$ time and the size of the grid is at most $n-2$ by $n-2$; see \cite{schnyder1990embedding}.
Next, we construct a unit disk graph $G_{\mathcal{C}}$ from $G'$.
First, we replace each grid vertex $v'_i\in V(G')$ by a unit disk. Let $C=\{c_1,\ldots,c_n\}$ be the set of unit disks centered at the grid points corresponding to the vertices of $V(G')$.
For the sake of explanation we call these disks grid point disks. At this point, the unit disk graph is not connected due to the edge length which we have taken between any two adjacent vertices in the grid graph. In fact this length ensures that there are no undesirable paths other than the ones in $G$. Next, we place two sets of disks on each path $p_{e_i}\in E(G')$. Let $|p_{e_i}|$ be the total length of the grid segments of $p_{e_i}$.
We place two Steiner disks on $p_{e_i}$, such that each one of them is adjacent to a grid point disk corresponding to $p_{e_i}$ and the distance between their centers is exactly $2$. Next, we place $(|p_{e_i}|-6)/2$ many terminal disks on $p_{e_i}$ such that the distance between any two adjacent centers is exactly $2$.
See Figure~\ref{np1}(c) for detailed explanation.
Let $s(e_i)$ be the set of Steiner disks and
$t(e_i)$ be the set of terminal disks placed on $p_{e_i}$. The terminal set $R=\underset{e_i\in E(G')}{\bigcup} t(e_i)$; the Steiner set $S=C\cup \underset{e_i\in E(G')}{\bigcup} s(e_i)$.
$V(G_{\mathcal{C}})=R\cup S$ and $G_{\mathcal{C}}$ is the intersection graph induced by $V(G_{\mathcal{C}})$. Finally, we set $k'=m+2k-1$.
Observe that, for any path $p_{e_i}$, the terminal set $t(e_i)$ itself forms a Steiner tree without any Steiner disks. However, in order to connect that tree to the rest of the solution we need at least one of the Steiner disks from $s(e_i)$. This completes the construction.
\begin{figure}
\caption{(a) A planar graph $G$ of maximum degree $4$,
(b) the intermediate rectilinear embedding $G'$ of $G$,
(c) the unit disk graph $G_{\mathcal{C}}$ constructed from $G'$.}
\label{np1}
\end{figure}
In the forward direction, suppose $G$ has a connected vertex cover $D$ of size at most $k$. We construct a Steiner tree of $R$ in the following manner.
For each edge $e_i$, we simply take the terminal path induced by $t(e_i)$. Now, let $T_S$ be any spanning tree of the subgraph of $G$ induced by $D$, containing $|D|-1$ edges. The existence of such a spanning tree is ensured since $D$ is a connected vertex cover of $G$. For each edge $e=v_iv_j\in T_S$ we connect the corresponding disks $c_i,c_j$ by two Steiner red disks adjacent to them.
Then, for each edge $e=v_iv_j\in G\setminus T_S$ we select one endpoint that is in $D$ (say $v_i$) and connect $c_i$ to the tree by its adjacent disk.
The constructed tree is a Steiner tree of $R$ consisting of $|D|+2(|D|-1)+(m-(|D|-1)) = 2|D|+m-1$ Steiner disks, which is at most $m+2k-1$.
Conversely, suppose there exists a Steiner tree $T$ of $R$ with at most $k'$ Steiner disks. Let $D\subseteq C$ be the set of vertices of $C$ that appear in $T$,
and let $T'$ be the subtree of $T$ spanning over $D$.
For each subset $t(e_i)\subseteq R$, let $T_{e_i}$ be the subtree of $T$ spanning the vertices in $t(e_i)$. By the above construction, $T_{e_i}$ does not require any Steiner disk. Moreover, it is easy to see that in any valid solution $T_{e_i}$ must be connected to at least one disk of $D$.
This implies that the set of vertices in $G$ corresponding to the vertices in $D$ is a connected vertex cover of $G$.
Moreover, a tree $T_{e_i}$, which is also a subtree of $T$, is connected to $D$ via two Steiner disks of $s(e_i)$. Therefore, $T$ contains $|D|+2(|D|-1)+(m-(|D|-1))$ many Steiner disks. We started with the tree $T$ with at most $k'=m+2k-1$ many Steiner disks. This completes the proof.
\end{proof}
\section{Subexponential Exact Algorithm for {\sc Steiner Tree}\xspace on Unit Disk Graphs}\label{sec:subexp}
In this section, we prove Theorem~\ref{thm:subexp-UDG} by designing a sub-exponential algorithm for the {\sc Steiner Tree}\xspace problem on unit disk graphs parameterized by $t+k$, where $t$ is the number of terminals and $k$ is an upper bound on the number of Steiner vertices.
In fact, our aim for this section is to design a subexponential algorithm for {\sc Steiner Tree}\xspace on clique-grid graphs and as unit disk graphs are clique-grid graphs~\cite{fomin2019finding}, this would imply the algorithm proposed in Theorem~\ref{thm:subexp-UDG}.
\begin{lemma}\label{lem:subexp-CGG}
The {\sc Steiner Tree}\xspace problem on clique-grid graphs can be solved in $n^{O(\sqrt{t+k})}$ time.
\end{lemma}
For the rest of the section, we concentrate on proving Lemma~\ref{lem:subexp-CGG}. Informally, we first apply a Baker-like shifting strategy to create a family $\mathcal{F}$ of instances of {\sc Exact Steiner Tree}\xspace that preserves the answer for the input instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace: if $(G,R,t,k)$ is a yes-instance then there is at least one constructed instance in $\mathcal{F}$ that is a yes-instance of {\sc Exact Steiner Tree}\xspace; if $(G,R,t,k)$ is a no-instance of {\sc Steiner Tree}\xspace then all instances of $\mathcal{F}$ are no-instances of {\sc Exact Steiner Tree}\xspace. As a second step, we design a dynamic programming subroutine to solve {\sc Exact Steiner Tree}\xspace on each of the constructed instances of $\mathcal{F}$, which is enough to solve the {\sc Steiner Tree}\xspace problem on $(G,R,t,k)$.
Before we describe the subexponential algorithm, we state some properties of Steiner trees in clique-grid graphs.
\begin{observation}\label{obs:cell-cell-edge}
Consider a $k$-Steiner tree $T$ for a clique-grid graph $G$ with representation $f$, such that the set $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised over all $k$-Steiner trees for $G$. Let $\mathcal{C} = (i,j)$ be a cell of $G$. Then there are at most $24$ edges with one endpoint in $\mathcal{C}$ and the other endpoint in another cell.
\end{observation}
\begin{proof}
We claim that in the $k$-Steiner tree where the set $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised, there can be at most one neighbour of $\mathcal{C}$ in each cell $\mathcal{C}'\neq \mathcal{C}$. Suppose that $\mathcal{C}'$ is a cell that contains at least two neighbours of $\mathcal{C}$. Let two such neighbours be $u',v'$. Note that $u'v'$ is an edge in $E(G)$. Let $u,v$ (may be the same) be the neighbours of $u',v'$, respectively in $\mathcal{C}$. Note that $uv$ is an edge in $E(G)$. Thus adding the edge $u'v'$ and removing the edge $uu'$ results in a connected graph containing all the terminals. The spanning tree of this connected graph has strictly less number of edges with endpoints in different cells, which is a contradiction to the choice of $T$.
By the definition of clique-grid graphs, $\vert i-i'\vert , \vert j-j'\vert \leq 2$. Thus, when we fix a cell $\mathcal{C}$ there are at most $24$ cells that can have neighbours of vertices in $\mathcal{C}$. Putting everything together, for the $k$-Steiner tree $T$ where the set $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised, $\vert \{v \vert f(v) \neq (i,j), \exists u$ such that $f(u) = (i,j), uv \in E(G) \} \vert \leq 24$.
\end{proof}
\begin{observation}\label{obs:cell-bd}
Suppose there is a $k$-Steiner tree for a clique-grid graph $G$, and let $T$ be a $k$-Steiner tree where the set $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised. Moreover, amongst $k$-Steiner trees where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised, $T$ has minimum number of Steiner points. Then, in $T$ the number of Steiner vertices per cell is at most $24$.
\end{observation}
\begin{proof}
For the sake of contradiction, let $\mathcal{C}=(i,j)$ be a cell such that $\vert f^{-1}(i,j) \cap V(T) \vert \geq 24 + 1$. Then by Observation~\ref{obs:cell-cell-edge}, there is at least one Steiner vertex $v \in f^{-1}(i,j) \cap V(T)$ such that it does not have any neighbours in $T \setminus f^{-1}(i,j)$. Consider the subgraph $T \setminus \{v\}$. Since the vertices of $f^{-1}(i,j)$ induce a clique, $T \setminus \{v\}$ is still a connected subgraph that contains all the terminals and strictly less number of Steiner vertices. Thus, a spanning tree of this connected subgraph contradicts the choice of $T$.
\end{proof}
Consider a $k$-Steiner tree $T$ for an instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices is minimised. By Observation~\ref{obs:exact}, $T$ is an exact $k'$-Steiner tree for the instance $(G,R,t,k')$ of {\sc Exact Steiner Tree}\xspace for some $k' \leq k$. Next, we define a {\em good family of instances} that preserve the answer for $(G,R,t,k)$ of {\sc Steiner Tree}\xspace.
\begin{definition}\label{def:good-fam}
For an instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace on clique-grid graphs where $G$ has representation $f$, a good family of instances $\mathcal{F}$ has the following properties:
\begin{enumerate}
\item For each instance $(H,R,t,k')$ in the family, the input graph $H$ is an induced subgraph of $G$ that contains all vertices in $R$ and $k'\leq k$. Note that $H$ is also a clique-grid graph where $f\vert_{V(H)}$ is a representation.
\item $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace if and only if there exists an instance $(H,R,t,k')\in \mathcal{F}$ which is a yes-instance of {\sc Exact Steiner Tree}\xspace.
\item For any instance $(H,R,t,k') \in \mathcal{F}$, $H$ has a $7\sqrt{t+k}$-NCPD.
\end{enumerate}
\end{definition}
We show that given an instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace on clique-grid graphs, a good family of instances can be enumerated in subexponential time.
\begin{lemma}\label{lem:compute-good-fam}
Given an instance $(G,R,t,k)$ for {\sc Steiner Tree}\xspace on clique-grid graphs with $G$ represented by $f$, a good family of instances $\mathcal{F}$ can be computed in $n^{O(\sqrt{t+k})}$ time.
\end{lemma}
\begin{proof}
Let $T$ be a $k$-Steiner tree for $G$. In particular, $T$ is an exact $k'$-Steiner tree for some $k' \leq k$ and $V(T) = t+k' \leq t+k$. First, we employ a Baker-like technique similar to~\cite{fomin2019finding} (please refer to Figure~\ref{sub-exp-partition}). Note that if $G$ has $n$ vertices and has representation $f: V(G) \rightarrow [p]\times [p']$, then $p,p' \leq n$. Thus, $f$ represents $G$ on the $n \times n$ grid. First we define a column of the $n\times n$ grid. For any $j\in [n]$ the set of cells $\{(i, j) | i \in [n]\}$ is called a column. There are $n$ columns for the $n\times n$ grid. We partition the $n$ columns of the $n \times n$ grid with $n/2$ blocks of two consecutive columns and label them from the set of labels $[\sqrt{t+k}]$. Formally, each set of consecutive columns $\{2i-1,2i\}$, where $i \in [n/2]$ is labelled with $i \mbox{ mod } \sqrt{t+k}$. Thus, all the two consecutive columns $\{2i-1,2i\}$ are labelled with $i \mbox{ mod } \sqrt{t+k}$.
Recall that an exact $k'$-Steiner tree $T$ has at most $t+k$ vertices. Applying the pigeonhole principle, there is a label $\ell \in \{1, 2, \ldots ,\sqrt{t+k}\}$ such that the number of vertices from $V(T)$ which are in columns labelled $\ell$ is at most $\sqrt{t+k}$. As we do not know this $k'$-Steiner tree $T$, we guess the Steiner vertices of $V(T)$ which are in the columns labelled $\ell$. The number of potential guesses is bounded by $n^{O(\sqrt{t+k})}$. Suppose $Y'$ is the set of guessed Steiner vertices of $V(T)$ which are in the columns labelled by $\ell$. Then we delete all the non-terminal vertices in columns labelled $\ell$, except the vertices of $Y'$ . Let $S$ be the set of deleted non-terminal vertices. Let $Y_{R}$ be the set of terminal vertices that are in columns labelled by $\ell$. Let $Y = Y' \cup Y_{R}$. Notice that by choice of label $\ell$, $\lvert Y\rvert \leq \sqrt{t+k}$. By Property $2$ of clique-grid graphs, $G\setminus (S\cup Y)$ is a disjoint union of clique-grid graphs each of which
is represented by a function with at most $2 \sqrt{t+k}$ columns. Formally, $G_1 = G[\bigcup_{j=1}^{2(\ell-1)} f^{-1}(*,j)]$ and $G_{i+1} = G[\bigcup_{j=i\cdot 2\ell +1}^{{\sf min}\{i \cdot2\ell +2\sqrt{t+k},n\}} f^{-1}(*,j)]$ for each $i \in \{1,\ldots,n/\sqrt{t+k}\}$. Each $G_i$ is a clique-grid graph with representation $f_i: V(G_i) \rightarrow [n]\times[2\sqrt{t+k}]$ defined as, $f_i(u) = (r, j)$, when $f(u) = (r, (i-1) 2\ell +j)$. Thus, by Property $2$ of Definition~\ref{def:clique-grid}, $G\setminus (S \cup Y) = G_1\uplus \ldots\uplus G_{n/\sqrt{t+k}}$.
\begin{figure}
\caption{An illustration of grid labelling. The blue disks are terminals, and the red and black disks are chosen Steiner vertices and not-chosen non-terminal vertices, respectively.}
\label{sub-exp-partition}
\end{figure}
\begin{claim}\label{clm:NCPD-graph}
The graph $G \setminus S$ has a $7\sqrt{t+k}$-NCPD.
\end{claim}
\begin{proof}
Suppose we are able to show that for each $i \in \{1,\ldots, n/\sqrt{t+k}\}$, $G_i$ has a $6\sqrt{t+k}$-CPD. This results in a $6\sqrt{t+k}$-CPD for $G \setminus (S \cup Y) = G_1\uplus \ldots\uplus G_{n/\sqrt{t+k}}$. Finally, note that $\vert Y \vert \leq \sqrt{t+k}$ and therefore the vertices of $Y$ can belong to at most $\sqrt{t+k}$ cells. We add $Y$ to all the bags in the $6\sqrt{t+k}$-CPD for $G \setminus (S \cup Y)$ to obtain a $7\sqrt{t+k}$-CPD for $G \setminus S$. We convert the $7\sqrt{t+k}$-CPD of $G \setminus S$ into an NCPD using the known algorithm of~\cite{Bodlaender96alinear}. Note that this results in a $7\sqrt{t+k}$-NCPD.
What is left to show is that for each $G_i$ there is a $6\sqrt{t+k}$-CPD. First, for each $G_i$, we give a path decomposition with the following sequence of bags: $\{X_1,X_2,\ldots, X_{n-2}\}$. This is done by defining each $X_i = f^{-1}(i,*) \cup f^{-1}(i+1,*) \cup f^{-1}(i+2,*)$. It is easy to check that this is a path decomposition of $G_i$. Note that since $G_i$ has at most $2\sqrt{t+k}$ columns, the number of cells contained in each $X_j, j \in [n-2]$ is at most $6\sqrt{t+k}$.
\end{proof}
Finally, notice that from the definition of the constructed instances keeping in mind potential $k$-Steiner trees, $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace if and only if there is an instance $(H,R,t,k') \in \mathcal{F}$ such that it is a yes-instance of {\sc Exact Steiner Tree}\xspace. Thus, accounting for guessing a label $\ell \in [\sqrt{t+k}]$ and the set $Y$ of Steiner vertices and terminal vertices of a potential solution Steiner tree that belong to columns labelled $\ell$, we obtain a good family of $n^{O(\sqrt{t+k})}$ instances for the given instance $(G,R,t,k)$.
\end{proof}
For the ease of our algorithm design, we make a slight modification of the NCPD for a constructed instance $(H,R,t,k') \in \mathcal{F}$: Upon fixing the label $\ell$ and a set $Y$ of terminal vertices and potential Steiner vertices in the columns labelled by $\ell$, we add the set $Y$ to all the bags of the resulting NCPD for $G \setminus S$. Therefore, no bag is empty after this modification. In particular the first and the last bags of the modified path decomposition contain only the set $Y$. Also notice that as $\vert Y \vert \leq \sqrt{t+k}$, the new path decomposition of $H$ is still an $O(\sqrt{t+k})$-CPD. We call this new path decomposition of $H$ a \emph{modified NCPD}. Now, we are ready to prove Lemma~\ref{lem:subexp-CGG}.
\begin{proof}[Proof of Lemma~\ref{lem:subexp-CGG}]
As a first step of the algorithm, by Lemma~\ref{lem:compute-good-fam} in $n^{O(\sqrt{t+k})}$ time we compute a good family of instances $\mathcal{F}$ for the given instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace on clique-grid graphs. From Definition~\ref{def:good-fam}(2), $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace if and only if there is an instance $(H,R,t,k') \in \mathcal{F}$ that is a yes-instance of {\sc Exact Steiner Tree}\xspace. Deriving from Definition~\ref{def:good-fam}(3), Lemma~\ref{lem:compute-good-fam} and the construction of a modified NCPD, for each instance $(H,R,t,k') \in \mathcal{F}$, there is a modified $O(\sqrt{t+k})$-NCPD for $H$, due to a guessed label $\ell$ and a guessed set $Y$ of non-terminal vertices from columns labelled by $\ell$ such that the following hold: (i) $\vert Y \vert \leq \sqrt{t+k}$, (ii) if $(H,R,t,k')$ is a yes-instance then there is an exact $k'$-Steiner tree $T$ such that all vertices of $Y$ are Steiner vertices in $T$. Let the modified NCPD using the set $Y$ have the sequence of bags $\{X_1,X_2,\ldots, X_q\}$. Recall that the definition of the modified NCPD ensures that $X_1 = X_q = Y$.
In the next step, our algorithm for {\sc Steiner Tree}\xspace considers every instance $(H,R,t,k') \in \mathcal{F}$ and checks if it is a yes-instance of {\sc Exact Steiner Tree}\xspace. By Definition~\ref{def:good-fam}(2), this is sufficient to determine if $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace.
For the rest of the proof we design a dynamic programming subroutine algorithm $\mathcal{A}$ for {\sc Exact Steiner Tree}\xspace that takes as input an instance $(H,R,t,k') \in \mathcal{F}$ and uses its modified $O(\sqrt{t+k})$-NCPD to determine whether it is a yes-instance of {\sc Exact Steiner Tree}\xspace. Suppose $(G,R,t,k)$ is a yes-instance and consider a $k$-Steiner tree $T$ for $(G,R,t,k)$ where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices in $T$ is minimised. Using Observation~\ref{obs:exact}, this is an exact $k'$-Steiner tree of $G$ for some $k' \leq k$. By the construction in Lemma~\ref{lem:compute-good-fam} note that there is an instance $(H,R,t,k') \in \mathcal{F}$ such that $T$ is an exact $k'$-Steiner tree for $(H,R,t,k')$. The aim of the dynamic programming algorithm is to correctly determine that this particular instance $(H,R,t,k')$ is a yes-instance. The algorithm $\mathcal{A}$ is designed in such a manner that for such a yes-instance $(H,R,t,k')$ the tree $T$ will be the potential solution Steiner tree that behaves as a certificate of correctness.
The states of the dynamic programming algorithm store information required to represent the partial solution Steiner tree, which is the potential solution Steiner tree restricted to the graph seen so far. The states are of the form $\mathcal{A}$[$\ell, Q, \mathcal{Q} =Q_1\uplus Q_2 \ldots \uplus Q_b, \mathcal{P} = P_1\uplus\ldots P_b,k''$] where:
\begin{itemize}
\item $\ell \in [q]$ denotes the index of the bag $X_\ell$ of the modified NCPD of $H$.
\item $Q \subseteq X_\ell \setminus R$ is a set of at most $24 \cdot 7$ non-terminal vertices. For each cell $\mathcal{C} = (i,j)$ that belongs to $X_\ell$, $\vert Q \cap f^{-1}(i,j) \vert \leq 24$.
\item $\mathcal{Q} = Q_1\uplus Q_2 \ldots \uplus Q_b$ is a partition of $Q$ with the property that for each cell $\mathcal{C} = (i,j)$, $Q \cap f^{-1}(i,j)$ is contained completely in exactly one part of $\mathcal{Q}$.
\item The partition $\mathcal{P}$ is over the vertex set $Q \cup (R \cap X_\ell)$. $Q \cap P_i = Q_i$. Also for each cell $\mathcal{C}$ in $X_\ell$, $\mathcal{C} \cap (Q \cup R)$ is completely contained in exactly one part of $\mathcal{P}$.
\item The value $k''$ represents the total number of Steiner vertices used so far in this partial solution Steiner tree. $\vert Q \vert \leq k''$ holds.
\end{itemize}
Essentially, let $T$ be an exact $k'$-Steiner tree for $(H,R,t,k')$ if it is a yes-instance. For $\ell \in [q]$, let $T_{\sf ptl}^\ell$ represent the partial solution Steiner tree when $T$ is restricted to $H[\bigcup_{j=1}^{\ell} X_j]$. The partition $\mathcal{P}$ represents the intersection of a component of $T_{\sf ptl}^{\ell}$ with $X_\ell$. The set $Q$ is the set of Steiner vertices of $T_{\sf ptl}^{\ell}$ in the bag $X_\ell$ and $\mathcal{Q}$ is the partition of $Q$ with respect to the components of $T_{\sf ptl}^{\ell}$. The number $k''$ denotes the total number of Steiner vertices in $T_{\sf ptl}^\ell$.
In order to show the correctness of $\mathcal{A}$ we need to maintain the following invariant throughout the algorithm:
(LHS) $\mathcal{A}[\ell,Q,\mathcal{Q}=Q_1 \uplus Q_2 \uplus \ldots \uplus Q_b,\mathcal{P} = P_1\uplus P_2 \uplus \ldots \uplus P_b,k'']=1$ if and only if (RHS) there is a forest $T'$ as a subgraph of $H[\bigcup_{j=1}^{\ell} X_j]$ with $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$, for each cell $\mathcal{C}$ the number of nonterminal vertices in $\mathcal{C} \cap T'$ is at most $24$, and $R \cap (\bigcup_{j=1}^{\ell} X_j) \subseteq V(T')$.
Suppose the algorithm invariant is true. This means that if $\mathcal{A}[q,Y, {Y},{Y},k']=1$ then there is an exact $k'$-Steiner tree for $(H,R,t,k')$. On the other hand, suppose $(G,R,t,k)$ is a yes-instance and has a $k$-Steiner tree $T$ where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices in $T$ is minimised. By Observation~\ref{obs:cell-bd}, the number of Steiner vertices of $T$ in each cell of $G$ is bounded by $24$. By Observation~\ref{obs:exact} and the construction in Lemma~\ref{lem:compute-good-fam} note that there is a subset $Y$ and an instance $(H,R,t,k') \in \mathcal{F}$ such that $T$ is an exact $k'$-Steiner tree for $(H,R,t,k')$ and $Y \subseteq V(T)$. Suppose the invariant of the algorithm is true. This means that if $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace then there is a $(H,R,t,k')$ for which $\mathcal{A}[q,Y,{Y},{Y},k']=1$.
Thus, proving the correctness of the algorithm $\mathcal{A}$ amounts to proving the correctness of the invariant of $\mathcal{A}$. We prove the correctness of the invariant by induction on $\ell$.
If $\ell =1$ then $X_\ell$ must be a {\bf leaf bag}. By definition of the modified NCPD, the bag contains $Y$.
$\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$ if $Q = Y$, $\mathcal{Q}$ is the partition of $Y$ into the connected components in $H[Y]$, $\mathcal{P} = \mathcal{Q}$, $k'' = \vert Y \vert$. In all other cases, $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 0$.
First, suppose $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$. Then as $X_1$ does not contain any terminal vertices, (RHS) trivially is true for the cases when $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$. On the other hand, suppose (RHS) is true for $\ell = 1$. Again considering the cases when $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$, (LHS) holds. So the invariant holds when $\ell =1$.
Now, we assume that $\ell >1$. Our induction hypothesis is that the invariant of the algorithm is true for all $1 \leq \ell' <\ell$. We show that the invariant is true for $\ell$. There can be two cases:
\subparagraph*{Case 1:}
$X_\ell$ is a {\bf forget bag} with exactly one child $X_{\ell-1}$ : Let $\mathcal{C}$ be the cell being forgotten in $X_\ell$. Consider $\mathcal{A}[\ell,Q, \mathcal{Q}=Q_1,\ldots Q_b,\mathcal{P}=P_1\ldots P_b,k'']$.
Let $Q' \subseteq X_{\ell-1} \setminus R$ such that $Q \subseteq Q'$ and $Q' \setminus Q$ consists of a set of at most $24$ non-terminal vertices from $\mathcal{C}$. Let $\mathcal{P}'=P'_1 \ldots P'_b$ be a partition of $(Q' \cup R) \cap X_{\ell-1}$ such that for each cell $\mathcal{C}'$ in $X_{\ell-1}$, $\mathcal{C}' \cap (Q' \cup R)$ is completely contained in exactly one part. Also, $P_i = P_i' \setminus \mathcal{C}$. Moreover, consider the part $P'_i$ such that $\mathcal{C} \cap (Q' \cup R) \subseteq P'_i$: $P'_i \setminus (\mathcal{C} \cap (Q' \cup R)) \neq \emptyset$. Let $\mathcal{Q}'$ be the partition of $Q'$ such that $Q' \cap P'_i = Q'_i$.
If $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k''] = 1$ then $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Otherwise, $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=0$.
Suppose (LHS) of the invariant is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$: $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. By definition, there is a $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k''] = 1$ for a $Q',\mathcal{Q}',\mathcal{P}'$ as described above. By induction hypothesis, (RHS) corresponding to $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k''] = 1$ holds. Thus, there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell-1} X_j] = H[\bigcup_{j=1}^{\ell} X_j]$ (by definition of a forget bag). By definition of $Q,\mathcal{Q},\mathcal{P}$, $T'$ is also a witness forest in $H[\bigcup_{j=1}^{\ell} X_j]$ and therefore (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
On the other hand, suppose (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$. Then there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell} X_j] = H[\bigcup_{j=1}^{\ell-1} X_j]$. Moreover, $T'$ has $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$ and $R \cap (\bigcup_{j=1}^{\ell} X_j) \subseteq V(T')$. Let $D_i \cap X_{\ell -1} =P'_i$, $(D_i \setminus R) \cap X_{\ell-1} = Q'_i$, $Q' = \bigcup_{j=1}^{b} Q'_i$. Note that the total number of non-terminal points in $T'$ is $k''$ and by definition of a forget node it is still true that $R \cap (\bigcup_{j=1}^{\ell-1} X_j) \subseteq V(T')$. By induction hypothesis, (LHS) is true for $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k'']$ and $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k'']=1$. By the description above, this implies that $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Therefore, (LHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
\subparagraph*{Case 2:}
$X_\ell$ is an {\bf introduce bag} with exactly one child $X_{\ell -1}$. Let $\mathcal{C}$ be the cell being introduced in $X_\ell$. Consider $\mathcal{A}[\ell,Q, \mathcal{Q}=Q_1,\ldots Q_b,\mathcal{P}=P_1\ldots P_b,k'']$. Without loss of generality, let $P_b$ contain all the vertices in $\mathcal{C} \cap (Q \cup R)$.
By definition of a state, $\vert \mathcal{C} \cap Q \vert \leq 24$. Let ${\sf St} = \mathcal{C} \cap Q$ and $Q' = Q \setminus {\sf St}$.
Let $\mathcal{P}' = P'_1\uplus P'_2 \uplus \ldots \uplus P'_b \uplus \ldots \uplus P'_{d}$ be a partition of $Q' \cup (R \cap X_{\ell-1})$ such that for $j < b$, $P_j = P'_j$, and $P_b = (\mathcal{C} \cap (Q \cup R)) \cup \bigcup_{j=b}^{d} P'_j$. Moreover, $\mathcal{C} \cap (Q \cup R)$ has a neighbour in each $P'_j$, $b \leq j \leq d$.
Let $\mathcal{Q}'$ be the partition of $Q'$ such that $Q' \cap P'_i = Q'_i$.
Let $k^* = k'' - \vert {\sf St} \vert$.
If $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*] = 1$ then $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Otherwise, $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=0$.
Suppose (LHS) of the invariant is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$: $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. By definition, there is a $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*] = 1$ for a $Q',\mathcal{Q}',\mathcal{P}'$ as described above. By induction hypothesis, (RHS) corresponding to $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*] = 1$ holds. Thus, there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell-1} X_j]$. By definition of $Q,\mathcal{Q},\mathcal{P}$, $H[V(T') \cup (\mathcal{C} \cap (Q \cup R))]$ is a connected graph. Consider a spanning tree of this connected graph. By definition of $k^*$, this spanning tree has all vertices of $R$ and exactly $k''$ non-terminal vertices. Therefore, this spanning tree is a witness forest in $H[\bigcup_{j=1}^{\ell} X_j]$ and therefore (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
On the other hand, suppose (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$. Then there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell} X_j]$. Moreover, $T'$ has $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$ and $R \cap (\bigcup_{j=1}^{\ell} X_j) \subseteq V(T')$. Without loss of generality, let $D_b$ contain $T' \cap \mathcal{C}$. Let $D'_1,D'_2,\ldots D'_b,\ldots, D'_d$ be the connected components of $T'$ restricted to $H[\bigcup_{j=1}^{\ell-1} X_j]$. Let $D'_i \cap X_{\ell -1} =P'_i$, $(D'_i \setminus R) \cap X_{\ell-1} = Q'_i$, $Q' = \bigcup_{j=1}^{d} Q'_i$. Note that the total number of non-terminal points in $T'$ is $k^* = k'' - \vert {\sf St} \vert $ and by definition of an introduce node it is true that $R \cap (\bigcup_{j=1}^{\ell-1} X_j) \subseteq V(T') \cap (\bigcup_{j=1}^{\ell-1} X_j)$. By induction hypothesis, (LHS) is true for $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*]$ and $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*]=1$. By the description above, this implies that $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Therefore, (LHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
Finally, we analyse the time complexity of the algorithm. First, the good family $\mathcal{F}$ is computed in $n^{O(\sqrt{t+k})}$ time as per Lemma~\ref{lem:compute-good-fam}, and the number of instances in the good family $\mathcal{F}$ is $n^{O(\sqrt{t+k})}$. For one such instance $(H,R,t,k')$ the possible states for the algorithm $\mathcal{A}$ are of the form $[\ell, Q,\mathcal{Q},\mathcal{P},k'']$. By definition, $\ell \leq n$, $k'' \leq k'$ and $Q = O(\sqrt{t+k})$. Again, by definition $\mathcal{P}$ is upper bounded by the number of partitions of cells contained in a bag of the modified NCPD of $(H,R,t,k')$. Thus, the number of possibilities of $\mathcal{P}$ is $(\sqrt{t+k})^{O(\sqrt{t+k})}$. Also by definition, $\mathcal{Q}$ is fixed once $Q$ and $\mathcal{P}$ are fixed. Therefore, the number of possible states is $n^{O(\sqrt{t+k})}$. From the description of $\mathcal{A}$, the computation of $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$ may look up the solution for $n^{O(\sqrt{t+k})}$ instances of the form $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*]$ and therefore takes $n^{O(\sqrt{t+k})}$ time. Thus, the total time for the dynamic programming is $n^{O(\sqrt{t+k})}$.
\end{proof}
\old{
\begin{proof}[Proof Sketch]
As a first step of the algorithm, by Lemma~\ref{lem:compute-good-fam} in $n^{O(\sqrt{t+k})}$ time we compute a good family of instances $\mathcal{F}$ for the given instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace on clique-grid graphs. From Definition~\ref{def:good-fam}(2), $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace if and only if there is an instance $(H,R,t,k') \in \mathcal{F}$ that is a yes-instance of {\sc Exact Steiner Tree}\xspace. Deriving from Definition~\ref{def:good-fam}(3), Lemma~\ref{lem:compute-good-fam} and the construction of a modified NCPD, for each instance $(H,R,t,k') \in \mathcal{F}$, there is a modified $O(\sqrt{t+k})$-NCPD for $H$, due to a guessed label $\ell$ and a guessed set $Y$ of non-terminal vertices from columns labelled by $\ell$ such that the following hold: (i) $\vert Y \vert \leq \sqrt{t+k}$, (ii) if $(H,R,t,k')$ is a yes-instance then there is an exact $k'$-Steiner tree $T$ such that all vertices of $Y$ are Steiner vertices in $T$. Let the modified NCPD using the set $Y$ have the sequence of bags $\{X_1,X_2,\ldots, X_q\}$. Recall that the definition of the modified NCPD ensures that $X_1 = X_q = Y$.
In the next step, our algorithm for {\sc Steiner Tree}\xspace considers every instance $(H,R,t,k') \in \mathcal{F}$ and checks if it is a yes-instance of {\sc Exact Steiner Tree}\xspace. By Definition~\ref{def:good-fam}(2), this is sufficient to determine if $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace.
For the rest of the proof we design a dynamic programming subroutine algorithm $\mathcal{A}$ for {\sc Exact Steiner Tree}\xspace that takes as input an instance $(H,R,t,k') \in \mathcal{F}$ and uses its modified $O(\sqrt{t+k})$-NCPD to determine whether it is a yes-instance of {\sc Exact Steiner Tree}\xspace. Suppose $(G,R,t,k)$ is a yes-instance and consider a $k$-Steiner tree $T$ for $(G,R,t,k)$ where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices in $T$ is minimised. Using Observation~\ref{obs:exact}, this is an exact $k'$-Steiner tree of $G$ for some $k' \leq k$. By the construction in Lemma~\ref{lem:compute-good-fam} note that there is an instance $(H,R,t,k') \in \mathcal{F}$ such that $T$ is an exact $k'$-Steiner tree for $(H,R,t,k')$. The aim of the dynamic programming algorithm is to correctly determine that this particular instance $(H,R,t,k')$ is a yes-instance. The algorithm $\mathcal{A}$ is designed in such a manner that for such a yes-instance $(H,R,t,k')$ the tree $T$ will be the potential solution Steiner tree that behaves as a certificate of correctness.
The states of the dynamic programming algorithm store information required to represent the partial solution Steiner tree, which is the potential solution Steiner tree restricted to the graph seen so far. The states are of the form $\mathcal{A}$[$\ell, Q, \mathcal{Q} =Q_1\uplus Q_2 \ldots \uplus Q_b, \mathcal{P} = P_1\uplus\ldots P_b,k''$] where:
\begin{itemize}
\item $\ell \in [q]$ denotes the index of the bag $X_\ell$ of the modified NCPD of $H$.
\item $Q \subseteq X_\ell \setminus R$ is a set of at most $$24$ \xspace \cdot $7$ \xspace$ non-terminal vertices. For each cell $\mathcal{C} = (i,j)$ that belongs to $X_\ell$, $\vert Q \cap f^{-1}(i,j) \vert \leq $24$ \xspace $.
\item $\mathcal{Q} = Q_1\uplus Q_2 \ldots \uplus Q_b$ is a partition of $Q$ with the property that for each cell $\mathcal{C} = (i,j)$, $Q \cap f^{-1}(i,j)$ is contained completely in exactly one part of $\mathcal{Q}$.
\item The partition $\mathcal{P}$ is over the vertex set $Q \cup (R \cap X_\ell)$. $Q \cap P_i = Q_i$. Also for each cell $\mathcal{C}$ in $X_\ell$, $\mathcal{C} \cap (Q \cup R)$ is completely contained in exactly one part of $\mathcal{P}$.
\item The value $k''$ represents the total number of Steiner vertices used so far in this partial solution Steiner tree. $\vert Q \vert \leq k''$ holds.
\end{itemize}
Essentially, let $T$ be an exact $k'$-Steiner tree for $(H,R,t,k')$ if it is a yes-instance. For $\ell \in [q]$, let $T_{\sf ptl}^\ell$ represent the partial solution Steiner tree when $T$ is restricted to $H[{\bf i}gcup_{j=1}^{\ell} X_j]$. The partition $\mathcal{P}$ represents the intersection of a component of $T_{\sf ptl}^{\ell}$ with $X_\ell$. The set $Q$ is the set of Steiner vertices of $T_{\sf ptl}^{\ell}$ in the bag $X_\ell$ and $\mathcal{Q}$ is the partition of $Q$ with respect to the components of $T_{\sf ptl}^{\ell}$. The number $k''$ denotes the total number of Steiner vertices in $T_{\sf ptl}^\ell$.
In order to show the correctness of $\mathcal{A}$ we need to maintain the following invariant throughout the algorithm:
(LHS) $\mathcal{A}[\ell,Q,\mathcal{Q}=Q_1 \uplus Q_2,\ldots Q_b,\mathcal{P} = P_1\uplus P_2 \uplus P_b,k']=1$ if and only if (RHS) there is a forest $T'$ as a subgraph of $H[{\bf i}gcup_{j=1}^{\ell}]$ with $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$, for each cell $\mathcal{C}$ the number of nonterminal vertices in $\mathcal{C} \cap T'$ is at most $$24$ \xspace$, and $R \cap ({\bf i}gcup_{j=1}^{\ell} X_j) \subseteq V(T')$.
Suppose the algorithm invariant is true. This means that if $\mathcal{A}[q,Y, {Y},{Y},k']=1$ then there is an exact $k'$-Steiner tree for $(H,R,t,k')$. On the other hand, suppose $(G,R,t,k)$ is a yes-instance and has a $k$-Steiner tree $T$ where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices in $T$ is minimised. By Observation~\ref{obs:cell-bd}, the number of Steiner vertices of $T$ in each cell of $G$ is bounded by $$24$ \xspace$. By Observation~\ref{obs:exact} and the construction in Lemma~\ref{lem:compute-good-fam} note that there is a subset $Y$ and an instance $(H,R,t,k') \in \mathcal{F}$ such that $T$ is an exact $k'$-Steiner tree for $(H,R,t,k')$ and $Y \subseteq V(T)$. Suppose the invariant of the algorithm is true. This means that if $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace then there is a $(H,R,t,k')$ for which $\mathcal{A}[q,Y,{Y},{Y},k']=1$.
Thus, proving the correctness of the algorithm $\mathcal{A}$ amounts to proving the correctness of the invariant of $\mathcal{A}$. We prove the correctness of the invariant by induction on $\ell$. Due to paucity of space we defer the full proof including the correctness of the algorithm invariant and the running time analysis to Appendix~\ref{secapp:subexp}.
\end{proof}
}
\section{FPT Algorithm for {\sc Steiner Tree}\xspace on Unit Disk Graphs}\label{sec:FPT}
In this section, we prove Theorem~\ref{thm:FPT-UDG}. We consider the {\sc Steiner Tree}\xspace problem on unit disk graphs and design an FPT algorithm parameterized by $k$, which is an upper bound on the number of Steiner vertices in the solution Steiner tree. Our algorithm is based on the idea that for an instance $(G,R,t,k)$, in order to determine the existence of a Steiner tree we can first find spanning trees for all components of $G[R]$ and extend these spanning trees to a required $k$-Steiner tree.
In fact, we prove our results for the superclass of clique-grid graphs. For an instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace on clique-grid graphs, where $G$ has $n$ vertices and $R\subseteq V(G)$ is the set of terminals, we prove the following result in the rest of this section.
\begin{lemma}\label{lem:FPT}
{\sc Steiner Tree}\xspace on clique-grid graphs has an FPT algorithm with running time $2^{O(k)}n^{O(1)}$.
\end{lemma}
First, we prove some properties of Steiner trees for unit disk graphs. Consider the induced subgraph $G[R]$. Let $C_1,C_2,\ldots,C_q$ be the connected components in $G[R]$. For each $C_i$, $i \in [q]$, let $T_i$ be a spanning tree of $C_i$.
\begin{observation}\label{obs:terminal-comps}
Let $G$ be a clique-grid graph with the terminal set $R$. Let $C_1,C_2,\ldots,C_q$ be the connected components of $G[R]$, and for each $i \in [q]$ let $T_i$ be a spanning tree for each $C_i$. For any $k$, let $T'$ be a $k$-Steiner tree for $G$. Then there is a $k$-Steiner tree $T$ such that for each $i \in [q]$, $T_i$ is a subtree of $T$. Moreover, $q \leq 24k$.
\end{observation}
\begin{proof}
Consider the $k$-Steiner tree $T'$ and let $S = V(T') \setminus R$ be the set of Steiner vertices of $T'$. Note that in $G[R \cup S]$, $T'$ is a spanning tree and therefore $G[R \cup S]$ is a connected graph. Similarly, for each $i \in [q]$, $T_i$ is a subgraph of $G[R \cup S]$. Consider the subgraph $H = T' \cup \bigcup_{i\in[q]} T_i$. As $T'$ is a spanning tree, $T' \cup \bigcup_{i\in[q]} T_i$ is a connected graph. We consider an arbitrary ordering $\mathcal{O}$ of the edges in $E(H) \setminus (\bigcup_{i\in [q]} E(T_i))$. In this order we iteratively throw away an edge $e_j \in E(H) \setminus (\bigcup_{i\in [q]} E(T_i))$ if the resulting graph remains connected upon throwing $e_j$ away. Let $H'$ be the graph at the end of considering all the edges in the order $\mathcal{O}$. We prove that $H'$ must be a tree. Suppose for the sake of contradiction, there is a cycle $C$ as a subgraph of $H'$. As for each $i\in [q]$, $T_i$ is a tree and for each $i \neq i' \in [q]$, $V(T_i) \cap V(T_{i'}) = \emptyset$, there must be an edge from $E(H) \setminus (\bigcup_{i\in [q]} E(T_i))$ in $E(C)$. Consider the edge $e \in (E(H) \setminus (\bigcup_{i\in [q]} E(T_i))) \cap E(C)$ with the largest index according to $\mathcal{O}$. This edge was throwable as $C \setminus \{e\}$ ensured any connectivity due to $e$. Thus, there can be no cycle in $H'$ and it is a spanning tree of $V(H)$. This implies that $T=H'$ is a $k$-Steiner tree for $G$, $S$ being the set of at most $k$ Steiner vertices, such that for each $i \in [q]$, $T_i$ is a subtree of $T$.
Finally, we show that if a $k$-Steiner tree $T$ exists then $q \leq 24k$. Let $f$ be a representation of the clique-grid graph $G$. Note that for any cell $(a,b)$, $f^{-1}(a,b)$ is a clique. Therefore, there can be at most one component $C_i$ intersecting with a cell $(a,b)$. By property $(2)$ of Definition~\ref{def:clique-grid}, there are at most $24$ cells that can have neighbours of any vertex in $(a,b)$. Thus, for any Steiner vertex, there can be at most $24$ components of $G[R]$ it can have neighbours in. Putting everything together, if there are at most $k$ Steiner vertices that are used to connect the $q$ connected components of $G[R]$ and each Steiner vertex can have neighbours in at most $24$ components, then it must be that $q \leq 24k$.
\end{proof}
Henceforth, we wish to find a solution $k$-Steiner tree $T$ such that for each $i \in [q]$, $T_i$ is a subtree of $T$.
\begin{definition}\label{def:contract}
Let $G$ be a clique-grid graph with the terminal set $R$. Let $C_1,C_2,\ldots,C_q$ be the connected components of $G[R]$, and for each $i \in [q]$ let $T_i$ be a spanning tree for each $C_i$. Let $G^*$ be the following graph: $V(G^*) = V(G \setminus R) \cup R^*$ where $R^* = \{c_i \vert i \in [q]\}$, $E(G^*) = \{v_1v_2 \in E(G) \vert v_1,v_2 \in V(G) \setminus R\} \cup \{vc_i \vert v \in V(G) \setminus R, \exists u \in C_i \mbox{ s.t } vu\in E(G) \}$. $G^*$ is called the component contracted graph of $G$ and $\{c_i\vert i \in [q]\}$ is the set of terminals for $G^*$ (See Figure~\ref{FPT-contraction}).
\end{definition}
\begin{figure}
\caption{An illustration of the component contraction; (a) red disks are Steiners and blue disks are terminals; (b) red vertices are Steiner vertices and blue vertices are contracted terminal components.}
\label{FPT-contraction}
\end{figure}
Note that $G^*$ may no longer be a clique-grid graph. From the definition of a component contracted graph and Observation~\ref{obs:terminal-comps}, we have the following observation.
\begin{observation}\label{obs:contract}
Let $G$ be a clique-grid graph with the terminal set $R$. Let $C_1,C_2,\ldots,C_q$ be the connected components of $G[R]$, and for each $i \in [q]$ let $T_i$ be a spanning tree for each $C_i$. Let $G^*$ be the component contracted graph of $G$ using the $T_i$'s. Then $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace if and only if $q \leq 24k$ and $(G^*,R^*,q,k)$ is a yes-instance of {\sc Steiner Tree}\xspace.
\end{observation}
Now we are ready to design our FPT algorithm for {\sc Steiner Tree}\xspace on clique-grid graphs parameterized by $k$ and complete the proof of Lemma~\ref{lem:FPT}.
\begin{proof}[Proof of Lemma~\ref{lem:FPT}]
Let $(G,R,t,k)$ be an input instance of
$n$-vertex clique-grid graphs. Let $C_1,C_2,\ldots,C_q$ be the connected components of $G[R]$, and for each $i \in [q]$ let $T_i$ be a spanning tree for each $C_i$. Let $G^*$ be the component contracted graph of $G$ using the $T_i$'s. Let $R^* = \{c_i\vert i \in [q] \}$ be the terminal set of $G^*$. By Observation~\ref{obs:terminal-comps}, if $G$ is a yes-instance then it must be that $q \leq 24k$. If this is not the case, then we immediately output no.
From now on, we are in the case $q \leq 24k$. By Observation~\ref{obs:contract}, it is enough to determine whether $(G^*,R^*,q,k)$ is a yes-instance of {\sc Steiner Tree}\xspace. As noted earlier, $G^*$ may no longer be a clique-grid graph.
We run the Dreyfus-Wagner algorithm \cite{dreyfus1971steiner} which returns a minimum edge-weighted Steiner tree connecting $R^*$ in $G^*$. Since $G^*$ is unweighted, the returned solution Steiner tree $T$ has the minimum number of edges. Note that since $G^*$ is unweighted, a Steiner tree for $R^*$ minimizes the number of Steiner vertices if and only if it has minimum number of edges. The total number of Steiner vertices in $T$ is $\vert V(T) \vert - \vert R^*\vert$. If $\vert V(T) \vert - \vert R^*\vert \leq k$, then our algorithm returns that $(G^*,R^*,q,k)$ is a yes-instance of {\sc Steiner Tree}\xspace, and otherwise it returns no.
The construction of $G^*$ is done in polynomial time. Since $q \leq 24k$, the Dreyfus-Wagner algorithm runs in $2^{O(k)}n^{O(1)}$ time. Thus, our algorithm also has running time $2^{O(k)}n^{O(1)}$.
\end{proof}
\section{W[1]-Hardness for {\sc Steiner Tree}\xspace on Disk Graphs}\label{sec:whard}
In this section, we consider the {\sc Steiner Tree}\xspace problem on disk graphs and prove that this problem is W[1]-hard parameterized by the number of Steiner vertices $k$.
\whardness*
\begin{proof}
We prove Theorem~\ref{w1-hard} by giving a parameterized reduction from the \textsc{Grid Tiling with} $\ge$ problem which is known to be W[1]-hard\footnote{$k\times k$ \textsc{Grid Tiling with} $\ge$ problem is W[1]-hard, assuming ETH, cannot be solved in $f(k)n^{o(k)}$ for any function $f$} \cite{cygan2015parameterized}.
In the \textsc{Grid Tiling with} $\ge$ problem,
we are given an integer $n$, a $k\times k$ matrix for an integer $k$ and a set of pairs $S_{ij}\subseteq [n]\times [n]$ for each cell. The objective is to find, for each $1\le i,j\le k$, a value $s_{ij}\in S_{ij}$ such that if $s_{ij}=(a,b)$ and $s_{i+1,j}=(a',b')$ then $a\ge a'$; if $s_{ij}=(a,b)$ and $s_{i,j+1}=(a',b')$ then $b\ge b'$.
Let $I=(n,k,\mathcal{S})$ be an instance of the \textsc{Grid Tiling with} $\ge$.
We construct a set of unit disks $D$, that is divided into three sets of unit disks $D_1, D_2, D_3$; $D=D_1\uplus D_2\uplus D_3$.
Each disk in $D_1, D_2, D_3$ is of radius $1$, $\delta$ and $\kappa$, respectively. We will define the value of $\delta$ and $\kappa$ shortly.
The construction of the set $D=D_1\uplus D_2\uplus D_3$ will ensure that $D$ contains a {\sc Steiner Tree}\xspace with $k^2$ Steiner vertices if and only if $I$ is a yes instance of \textsc{Grid Tiling with} $\ge$. Let $\epsilon = 1/n^{10}$, and $\delta =\epsilon/4$. Here, we point out that the value of $\kappa, \epsilon$ are independent of each other.
First, we move the cells away from each other, such that the horizontal (resp. vertical) distance between the left columns (resp. top rows) of any two consecutive cells is $2+\epsilon$.
Let
$100\delta$ be the side length of each cell. Then, we introduce diagonal chains of terminal disks into $D_3$ of radius $\kappa=\sqrt{2}(2+\epsilon - 100\delta)/1000$ to connect the cells diagonally; see Figure~\ref{W-hard-connected}(a). For every $1\le x, y\le k$, and every $(a,b)\in S(x,y)\subseteq [n]\times [n]$,
we introduce into $D_1$ a disk of radius $1$ centered at $(2x+\epsilon x+\epsilon a, 2y+\epsilon y+\epsilon b)$.
Let $D[x,y]\subseteq D_1$ be the set of
disks introduced for a fixed $x$ and $y$, and notice that they mutually intersect each other.
Next, for $1\le x, y\le k$, we introduce into $D_2$, disks of radius $\delta$ between consecutive cells of coordinate $(2x+1+\epsilon x+\epsilon a, 2y+\epsilon y)$ (placed horizontally); and $(2x+\epsilon x, 2y+1+\epsilon y+\epsilon b)$ (placed vertically). For every cell $S[x,y]$,
we denote the top, bottom, left, right cluster of terminal disks of radius $\delta$ from $D_2$ by $L[x,y], R[x,y], T[x,y], B[x,y]$, respectively.
Moreover, for each cell $S[x,y]$, we introduce a disk of radius $\delta$
at a coordinate that is completely inside the rectangle bounding the centres of disks in $D[x,y]$. This is to enforce that at least one disk is chosen from each $D[x,y]$.
See Figure~\ref{W-hard-connected}(b) for an illustration.
\begin{figure}
\caption{(a) The schematic diagram of the cells, after adjusting the distance between adjacent cells which is $2+\epsilon$. The red disks inside each cells, are the coordinates where the center of the Steiner disk of radius~$1$ will be placed. The diagonal chains consisting of terminal disks of radius $\kappa$, are connecting the cells diagonally. (b) The small black dots inside each cell are extra terminals of radius $\delta$.
Consider a cell $S[x,y]$. The shaded grey disks are the potential disks and the shaded red disk is chosen in the solution from $D[x,y]$.}
\label{W-hard-connected}
\end{figure}
We proceed with the following observation.
Consider a disk $p$ that is centered at
$(2x+\epsilon x+\epsilon a, 2y+\epsilon y+\epsilon b)$ for some $(a,b)\in [n]\times [n]$. Now, consider a disk $q$ from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon a, 2y+\epsilon y)$. The distance between their centers is $\sqrt{1+\epsilon^2 b^2}$.
We need to show that this is less than $(1+\epsilon/4)$.
This is true because $1+\epsilon^2 b^2$ is less than $(1+\epsilon/4)^2$ as the value of $b$ goes to $n$, $\epsilon = 1/n^{10}$ and the value of $n$ is large. Hence, $q$ is covered by the disk $p$ from $S[x,y]$ centered at $(a,b)$. Next, consider a disk $q'$ from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon (a+1), 2y+\epsilon y)$. The distance between their centers is $\sqrt{(1+\epsilon)^2+\epsilon^2 b^2}$. We show that this value is bigger than $(1+\epsilon/4)$. This means $(1+\epsilon)^2+\epsilon^2 b^2$ is bigger than
$(1+\epsilon/4)^2$. As the value of $b$ goes to $n$, it is not hard to see the left side is bigger since $\epsilon=1/n^{10}$ and the value of $n$ is large.
Therefore, $q'$ is not covered by the disk $p$ from $S[x,y]$ centered at $(a,b)$. The same calculation holds for $L[x,y]$, $T[x,y]$
and $B[x,y]$.
In the forward direction, let the pairs $s[x,y]\in S[x,y]$ form a solution for instance $I$, and let $s[x,y]=(a[x,y],b[x,y])$.
For every $1\le x,y\le k$,
we select the disk $d[x,y]$ from $D_1$ of radius $1$ centered at
$(2x+\epsilon x+\epsilon a[x,y], 2y+\epsilon y+\epsilon b[x,y])$.
We have seen in the previous paragraph that this disk covers any disk from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon a[x,y], 2y+\epsilon y)$ but does not cover disks with coordinate $(2x+1+\epsilon x+\epsilon (a[x,y]+1), 2y+\epsilon y)$. Similarly, this holds for $L[x,y],T[x,y],B[x,y]$. Since the $s[x,y]$'s form a solution of $I$, we have $a[x,y]\ge a[x+1,y]$. Therefore, the disks $d[x,y]$ and $d[x+1,y]$ will cover all disks from $R[x,y]$. Similarly, we have
$b[x,y]\ge b[x,y+1]$ which implies that $d[x,y]$ and $d[x,y+1]$ will cover $T[x,y]$ and form a connected component with them. Now, the diagonal chains consisting of terminal disks of radius $\kappa$, which we have taken to join the cells (see Figure~\ref{W-hard-connected}(a)), ensure that all cells are connected. Moreover, we have shown that if the $s[x,y]$'s form a solution of instance $I$, then all terminals in $L[x,y], R[x,y], T[x,y], B[x,y]$ (for any $1\le x, y\le k$) are covered. Therefore, this will form a connected Steiner tree with $k^2$ many Steiner disks.
In the reverse direction,
let $D'\subseteq D_1$ be a set of $k^2$ Steiner disks that spans over all terminals in $D_2\cup D_3$. This is true when for every $1\le x,y\le k$, the set $D'$ contains a disk $d[x,y]\in D[x,y]$ that is centered at $(2x+\epsilon x+\epsilon a[x,y], 2y+\epsilon y+\epsilon b[x,y])$ for some $(a[x,y],b[x,y]) \in [n]\times [n]$. Indeed, we are required to choose one disk from $D[x,y]$ due to the reason that there is a terminal disk lying inside the rectangle bounding the centres of disks in $D[x,y]$. The claim is that $s[x,y]=(a[x,y],b[x,y])$'s form a solution of $I$. First of all, $d[x,y]\in D[x,y]$ implies that $s[x,y]\in S[x,y]$. Consider a cell $S[x,y]$. We have observed that it covers disk $q$ from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon a, 2y+\epsilon y)$, but a disk $q'$ from $R[x,y]$
centered at $(2x+1+\epsilon x+\epsilon (a+1), 2y+\epsilon y)$ is not covered.
This is true for $L[x,y], T[x,y], B[x,y]$. Hence, if all terminals points from inside $S[x,y]$'s and $L[x,y], R[x,y], T[x,y], B[x,y]$ are covered by $k^2$ many Steiner disks, it would imply that $a[x,y]\ge a[x+1,y]$ and $b[x,y]\ge b[x,y+1]$.
Therefore, $s[x,y]$'s form the solution for \textsc{Grid Tiling with} $\ge$ instance $I$. This completes the proof.
\end{proof}
\paragraph*{Conclusion}
In this paper we studied the parameterized complexity of {\sc Steiner Tree}\xspace on unit disk graphs and disk graphs under the parameterizations of $k$ and $t+k$. In future, we wish to explore tight bounds for the algorithms we have obtained and to probe into kernelization questions under these parameters. It would also be interesting to consider the minimum weight of a solution $k$-Steiner tree as a parameter. A variant of {\sc Steiner Tree}\xspace that usually is easier to study is {\sc Full Steiner Tree}. However, in the case of unit disk graphs this problem proved to be very resilient to all our algorithmic strategies. We wish to explore {\sc Full Steiner Tree} on unit disk graphs under natural and structural parameters in future works.
\bibliography{main}
\begin{figure}
\caption{An illustration of nice $2$-clique path decomposition.}
\label{l-ncpd}
\end{figure}
}
\old{
\begin{figure}
\caption{An illustration of the component contraction; (a) red disks are Steiners and blue disks are terminals; (b) red vertices are Steiner vertices and blue vertices are contracted terminal components.}
\label{FPT-contraction}
\end{figure}
}
\old{
\section{The NP-hardness proof of {\sc Steiner Tree}\xspace on unit disk graphs stated in Theorem~\ref{nphard}}\label{secapp:nphard}
\begin{proof}
We show a reduction from the {\sc Connected Vertex Cover} in planar graphs with maximum degree $4$ problem, which is known to be \textsf{NP}-hard \cite{garey1977rectilinear}.
Given a planar graph $G$ with maximum degree $4$ and an integer $k$, the {\sc Connected Vertex Cover} problem asks to find if there exists a vertex cover $D$ for $G$ such that the subgraph induced by $D$ is connected and $|D|\le k$.
We adopt the proof of Abu-Affash \cite{abu2015euclidean}, where it was shown that the $k$-{\sc Bottleneck Full Steiner Tree} problem is \textsf{NP}-hard.
We make this reduction compatible with unit disk graphs.
Given a planar graph $G$ with maximum degree $4$ and an integer $k$,
we construct a unit disk graph $G_{\mathcal{C}}$ where $V(G_{\mathcal{C}})=\mathcal{C}$ in polynomial time, where $V(G_{\mathcal{C}})$ is divided into two sets of unit disks $R$ and $S$, called terminal and Steiner disks, respectively.
Let $V(G)=\{v_1,v_2,\ldots,v_n\}$ and let $E(G)=\{e_1,e_2,\ldots,e_m\}$. Then, we compute an integer $k'$ such that $G$ has a connected vertex cover $D$ of size $k$ if and only if there exists a {\sc Steiner Tree}\xspace with at most $k'$ Steiner vertices of $G_{\mathcal{C}}$.
As an intermediate step we build a rectangular grid graph $G'$.
First, we embed $G$ on a rectangular grid, with distance at least $8$ between adjacent vertices. Each vertex $v_i\in V(G)$ corresponds to a grid vertex, and each edge $e=v_iv_j\in E(G)$ corresponds to a rectilinear path comprised of some horizontal and vertical grid segments with endpoints corresponding to $v_i$ and $v_j$. Let $V(G')=\{v'_1,\ldots,v'_n\}$ be the grid points corresponding to the vertices of $V(G)$, and let $E(G')=\{p_{e_1},\ldots,p_{e_m}\}$
be the set of paths corresponding to the edges of $E(G)$.
Moreover, these paths are pairwise disjoint; see Figure~\ref{np1}(b).
This embedding can be done in $O(n)$ time and the size of the grid is at most $n-2$ by $n-2$; see \cite{schnyder1990embedding}.
Next, we construct a unit disk graph $G_{\mathcal{C}}$ from $G'$.
First, we replace each grid vertex $v'_i\in V(G')$ by a unit disk. Let $C=\{c_1,\ldots,c_n\}$ be the set of unit disks centered at the grid points corresponding to the vertices of $V(G')$.
For the sake of explanation we call these disks grid point disks. At this point, the unit disk graph is not connected due to the edge length which we have taken between any two adjacent vertices in the grid graph. In fact this length ensures that there are no undesirable paths other than the ones in $G$. Next, we place two sets of disks on each path $p_{e_i}\in E(G')$. Let $|p_{e_i}|$ be the total length of the grid segments of $p_{e_i}$.
We place two Steiner disks on $p_{e_i}$, such that each one of them is adjacent to a grid point disk corresponding to $p_{e_i}$ and the distance between their centers is exactly $2$. Next, we place $(|p_{e_i}|-6)/2$ many terminal disks on $p_{e_i}$ such that the distance between any two adjacent centers is exactly $2$.
See Figure~\ref{np1}(c) for detailed explanation.
Let $s(e_i)$ be the set of Steiner disks and
$t(e_i)$ be the set of terminal disks placed on $p_{e_i}$. The terminal set $R=\underset{e_i\in E(G')}{\bigcup} t(e_i)$; the Steiner set $S=C\cup \underset{e_i\in E(G')}{\bigcup} s(e_i)$.
$V(G_{\mathcal{C}})=R\cup S$ and $G_{\mathcal{C}}$ is the intersection graph induced by $V(G_{\mathcal{C}})$. Finally, we set $k'=m+2k-1$.
Observe that, for any path $p_{e_i}$, the terminal set $t(e_i)$ itself forms a Steiner tree without any Steiner disks. However, in order to connect that tree to the rest of the graph we need at least one of the Steiner disks from $s(e_i)$. This completes the construction.
\begin{figure}
\caption{(a) A planar graph $G$ of maximum degree $4$,
(b) the intermediate rectilinear embedding $G'$ of $G$,
(c) the unit disk graph $G_{\mathcal{C}}$ constructed from $G'$.}
\label{np1}
\end{figure}
In the forward direction, suppose $G$ has a connected vertex cover $D$ of size at most $k$. We construct a Steiner tree of $R$ in the following manner.
For each edge $e_i$, we simply take the terminal path induced by $t(e_i)$. Now, let $T_S$ be any spanning tree of the subgraph of $G$ induced by $D$, containing $|D|-1$ edges. The existence of such a spanning tree is ensured since $D$ is a connected vertex cover of $G$. For each edge $e=v_iv_j\in T_S$ we connect the corresponding disks $c_i,c_j$ by two Steiner red disks adjacent to them.
Then, for each edge $e=v_iv_j\in G\setminus T_S$ we select one endpoint that is in $D$ (say $v_i$) and connect $c_i$ to the tree by its adjacent disk.
The constructed tree is a Steiner tree of $R$ consisting of $|D|+2(|D|-1)+(m-(|D|-1))$ Steiner disks, which is at most $m+2k-1$ since $|D|\le k$.
Conversely, let there exists a Steiner tree $T$ of $R$ with at most $k'$ Steiner disks. Let $D\subseteq C$ be the set of vertices that appear in $T$,
and let $T'$ be the subtree of $T$ spanning over $D$.
For each subset $t(e_i)\subseteq R$, let $T_{e_i}$ be the subtree of $T$ spanning the vertices in $t(e_i)$. By the above construction, $T_{e_i}$ does not require any Steiner disk. Moreover, it is easy to see that in any valid solution $T_{e_i}$ must be connected to at least one element of $D$.
This implies that the set of vertices in $G$ corresponding to the vertices in $D$ is a connected vertex cover of $G$.
Moreover a tree $T_{e_i}$, which is also a subtree of $T$, is connected to $D$ via two Steiner disks of $s(e_i)$. Therefore, $T$ contains $|D|+2(|D|-1)+(m-(|D|-1))$ many Steiner disks. We started with the tree $T$ with at most $k'=m+2k-1$ many Steiner disks. This completes the proof.
\end{proof}
}
\old{
\section{Missing Proofs of Section~\ref{sec:subexp}}\label{secapp:subexp}
\begin{proof}[Proof of Observation~\ref{obs:cell-cell-edge}]
We claim that in the $k$-Steiner tree where the set $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised, there can be at most one neighbour of $\mathcal{C}$ in each cell $\mathcal{C}'\neq \mathcal{C}$. Suppose that $\mathcal{C}'$ is a cell that contains at least two neighbours of $\mathcal{C}$. Let two such neighbours be $u',v'$. Note that $u'v'$ is an edge in $E(G)$. Let $u,v$ (which may be the same) be the neighbours of $u',v'$, respectively, in $\mathcal{C}$. Note that $uv$ is an edge in $E(G)$. Thus adding the edge $u'v'$ and removing the edge $uu'$ results in a connected graph containing all the terminals. The spanning tree of this connected graph has a strictly smaller number of edges with endpoints in different cells, which is a contradiction to the choice of $T$.
By the definition of clique-grid graphs, $\vert i-i'\vert , \vert j-j'\vert \leq 2$. Thus, when we fix a cell $\mathcal{C}$ there are at most $24$ cells that can have neighbours of vertices in $\mathcal{C}$. Putting everything together, for the $k$-Steiner tree $T$ where the set $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised, $\vert \{v \vert f(v) \neq (i,j), \exists u$ such that $f(u) = (i,j), uv \in E(G) \} \vert \leq 24$.
\end{proof}
}
\old{
\begin{proof}[Proof of Observation~\ref{obs:cell-bd}]
For the sake of contradiction, let $\mathcal{C}=(i,j)$ be a cell such that $\vert f^{-1}(i,j) \cap V(T) \vert \geq 24+1$. Then by Observation~\ref{obs:cell-cell-edge}, there is at least one Steiner vertex $v \in f^{-1}(i,j) \cap V(T)$ such that it does not have any neighbours in $T \setminus f^{-1}(i,j)$. Consider the subgraph $T \setminus \{v\}$. Since the vertices of $f^{-1}(i,j)$ induce a clique, $T \setminus \{v\}$ is still a connected subgraph that contains all the terminals and a strictly smaller number of Steiner vertices. Thus, a spanning tree of this connected subgraph contradicts the choice of $T$.
\end{proof}
}
\old{
\begin{proof}[Proof of Claim~\ref{clm:NCPD-graph}]
Suppose we are able to show that for each $i \in \{1,\ldots, n/\sqrt{t+k}\}$ $G_i$ has a $$7$ \xspacetwo \sqrt{t+k}$-CPD. This results in a $$7$ \xspacetwo \sqrt{t+k}$-CPD for $G \setminus (S \cup Y) = G_1\uplus \ldots\uplus G_{n/\sqrt{t+k}}$. Finally, note that $\vert Y \vert \leq \sqrt{t+k}$ and therefore the vertices of $Y$ can belong to at most $\sqrt{t+k}$ cells. We add $Y$ to all the bags in the $$7$ \xspacetwo\sqrt{t+k}$-CPD for $G \setminus (S \cup Y)$ to obtain a $$7$ \xspace \sqrt{t+k}$-CPD for $G \setminus S$. We convert the $$7$ \xspace\sqrt{t+k}$-CPD of $G \setminus S$ into a NCPD using the known algorithm of~\cite{Bodlaender96alinear}. Note that this results in a $$7$ \xspace \sqrt{t+k}$-NCPD.
What is left to show is that for each $G_i$ there is a $$7$ \xspacetwo \sqrt{t+k}$-CPD. First, for each $G_i$, we give a path decomposition with the following sequence of bags: $\{X_1,X_2,\ldots, X_{n-2}\}$. This is done by defining each $X_i = f^{-1}(i,*) \cup f^{-1}(i+1,*) \cup f^{-1}(i+2,*)$. It is easy to check that this is a path decomposition of $G_i$. Note that since $G_i$ has at most $2\sqrt{t+k}$ columns, the number of cells contained in each $X_j, j \in [n-1]$ is at most $$7$ \xspacetwo \sqrt{t+k}$.
\end{proof}
}
\old{
\begin{proof}[Proof of Lemma~\ref{lem:subexp-CGG}]
As a first step of the algorithm, by Lemma~\ref{lem:compute-good-fam} in $n^{O(\sqrt{t+k})}$ time we compute a good family of instances $\mathcal{F}$ for the given instance $(G,R,t,k)$ of {\sc Steiner Tree}\xspace on clique-grid graphs. From Definition~\ref{def:good-fam}(2), $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace if and only if there is an instance $(H,R,t,k') \in \mathcal{F}$ that is a yes-instance of {\sc Exact Steiner Tree}\xspace. Deriving from Definition~\ref{def:good-fam}(3), Lemma~\ref{lem:compute-good-fam} and the construction of a modified NCPD, for each instance $(H,R,t,k') \in \mathcal{F}$, there is a modified $O(\sqrt{t+k})$-NCPD for $H$, due to a guessed label $\ell$ and a guessed set $Y$ of non-terminal vertices from columns labelled by $\ell$ such that the following hold: (i) $\vert Y \vert \leq \sqrt{t+k}$, (ii) if $(H,R,t,k')$ is a yes-instance then there is an exact $k'$-Steiner tree $T$ such that all vertices of $Y$ are Steiner vertices in $T$. Let the modified NCPD using the set $Y$ have the sequence of bags $\{X_1,X_2,\ldots, X_q\}$. Recall that the definition of the modified NCPD ensures that $X_1 = X_q = Y$.
In the next step, our algorithm for {\sc Steiner Tree}\xspace considers every instance $(H,R,t,k') \in \mathcal{F}$ and checks if it is a yes-instance of {\sc Exact Steiner Tree}\xspace. By Definition~\ref{def:good-fam}(2), this is sufficient to determine if $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace.
For the rest of the proof we design a dynamic programming subroutine algorithm $\mathcal{A}$ for {\sc Exact Steiner Tree}\xspace that takes as input an instance $(H,R,t,k') \in \mathcal{F}$ and uses its modified $O(\sqrt{t+k})$-NCPD to determine whether it is a yes-instance of {\sc Exact Steiner Tree}\xspace. Suppose $(G,R,t,k)$ is a yes-instance and consider a $k$-Steiner tree $T$ for $(G,R,t,k)$ where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices in $T$ is minimised. Using Observation~\ref{obs:exact}, this is an exact $k'$-Steiner tree of $G$ for some $k' \leq k$. By the construction in Lemma~\ref{lem:compute-good-fam} note that there is an instance $(H,R,t,k') \in \mathcal{F}$ such that $T$ is an exact $k'$-Steiner tree for $(H,R,t,k')$. The aim of the dynamic programming algorithm is to correctly determine that this particular instance $(H,R,t,k')$ is a yes-instance. The algorithm $\mathcal{A}$ is designed in such a manner that for such a yes-instance $(H,R,t,k')$ the tree $T$ will be the potential solution Steiner tree that behaves as a certificate of correctness.
The states of the dynamic programming algorithm store information required to represent the partial solution Steiner tree, which is the potential solution Steiner tree restricted to the graph seen so far. The states are of the form $\mathcal{A}$[$\ell, Q, \mathcal{Q} =Q_1\uplus Q_2 \ldots \uplus Q_b, \mathcal{P} = P_1\uplus\ldots P_b,k''$] where:
\begin{itemize}
\item $\ell \in [q]$ denotes the index of the bag $X_\ell$ of the modified NCPD of $H$.
\item $Q \subseteq X_\ell \setminus R$ is a set of at most $$24$ \xspace \cdot $7$ \xspace$ non-terminal vertices. For each cell $\mathcal{C} = (i,j)$ that belongs to $X_\ell$, $\vert Q \cap f^{-1}(i,j) \vert \leq $24$ \xspace $.
\item $\mathcal{Q} = Q_1\uplus Q_2 \ldots \uplus Q_b$ is a partition of $Q$ with the property that for each cell $\mathcal{C} = (i,j)$, $Q \cap f^{-1}(i,j)$ is contained completely in exactly one part of $\mathcal{Q}$.
\item The partition $\mathcal{P}$ is over the vertex set $Q \cup (R \cap X_\ell)$. $Q \cap P_i = Q_i$. Also for each cell $\mathcal{C}$ in $X_\ell$, $\mathcal{C} \cap (Q \cup R)$ is completely contained in exactly one part of $\mathcal{P}$.
\item The value $k''$ represents the total number of Steiner vertices used so far in this partial solution Steiner tree. $\vert Q \vert \leq k''$ holds.
\end{itemize}
Essentially, let $T$ be an exact $k'$-Steiner tree for $(H,R,t,k')$ if it is a yes-instance. For $\ell \in [q]$, let $T_{\sf ptl}^\ell$ represent the partial solution Steiner tree when $T$ is restricted to $H[\bigcup_{j=1}^{\ell} X_j]$. The partition $\mathcal{P}$ represents the intersection of a component of $T_{\sf ptl}^{\ell}$ with $X_\ell$. The set $Q$ is the set of Steiner vertices of $T_{\sf ptl}^{\ell}$ in the bag $X_\ell$ and $\mathcal{Q}$ is the partition of $Q$ with respect to the components of $T_{\sf ptl}^{\ell}$. The number $k''$ denotes the total number of Steiner vertices in $T_{\sf ptl}^\ell$.
In order to show the correctness of $\mathcal{A}$ we need to maintain the following invariant throughout the algorithm:
(LHS) $\mathcal{A}[\ell,Q,\mathcal{Q}=Q_1 \uplus Q_2 \uplus \ldots \uplus Q_b,\mathcal{P} = P_1\uplus P_2 \uplus \ldots \uplus P_b,k'']=1$ if and only if (RHS) there is a forest $T'$ as a subgraph of $H[\bigcup_{j=1}^{\ell} X_j]$ with $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$, for each cell $\mathcal{C}$ the number of nonterminal vertices in $\mathcal{C} \cap T'$ is at most $24$, and $R \cap (\bigcup_{j=1}^{\ell} X_j) \subseteq V(T')$.
Suppose the algorithm invariant is true. This means that if $\mathcal{A}[q,Y, {Y},{Y},k']=1$ then there is an exact $k'$-Steiner tree for $(H,R,t,k')$. On the other hand, suppose $(G,R,t,k)$ is a yes-instance and has a $k$-Steiner tree $T$ where $\{uv\in E(T) \vert f(u) \neq f(v)\}$ is minimised and then the number of Steiner vertices in $T$ is minimised. By Observation~\ref{obs:cell-bd}, the number of Steiner vertices of $T$ in each cell of $G$ is bounded by $24$. By Observation~\ref{obs:exact} and the construction in Lemma~\ref{lem:compute-good-fam} note that there is a subset $Y$ and an instance $(H,R,t,k') \in \mathcal{F}$ such that $T$ is an exact $k'$-Steiner tree for $(H,R,t,k')$ and $Y \subseteq V(T)$. Suppose the invariant of the algorithm is true. This means that if $(G,R,t,k)$ is a yes-instance of {\sc Steiner Tree}\xspace then there is a $(H,R,t,k')$ for which $\mathcal{A}[q,Y,{Y},{Y},k']=1$.
Thus, proving the correctness of the algorithm $\mathcal{A}$ amounts to proving the correctness of the invariant of $\mathcal{A}$. We prove the correctness of the invariant by induction on $\ell$.
If $\ell =1$ then $X_\ell$ must be a {\bf leaf bag}. By definition of the modified NCPD, the bag contains $Y$.
$\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$ if $Q = Y$, $\mathcal{Q}$ is the partition of $Y$ into the connected components in $H[Y]$, $\mathcal{P} = \mathcal{Q}$, $k'' = \vert Y \vert$. In all other cases, $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 0$.
First, suppose $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$. Then as $X_1$ does not contain any terminal vertices, (RHS) trivially is true for the cases when $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$. On the other hand, suppose (RHS) is true for $\ell = 1$. Again considering the cases when $\mathcal{A}[1,Q,\mathcal{Q},\mathcal{P},k''] = 1$, (LHS) holds. So the invariant holds when $\ell =1$.
Now, we assume that $\ell >1$. Our induction hypothesis is that the invariant of the algorithm is true for all $1 \leq \ell' <\ell$. We show that the invariant is true for $\ell$. There can be two cases:
\subparagraph*{Case 1:}
$X_\ell$ is a {\bf forget bag} with exactly one child $X_{\ell-1}$ : Let $\mathcal{C}$ be the cell being forgotten in $X_\ell$. Consider $\mathcal{A}[\ell,Q, \mathcal{Q}=Q_1,\ldots Q_b,\mathcal{P}=P_1\ldots P_b,k'']$.
Let $Q' \subseteq X_{\ell-1} \setminus R$ such that $Q \subseteq Q'$ and $Q' \setminus Q$ consists of a set of at most $24$ non-terminal vertices from $\mathcal{C}$. Let $\mathcal{P}'=P'_1 \ldots P'_b$ be a partition of $(Q' \cup R) \cap X_{\ell-1}$ such that for each cell $\mathcal{C}'$ in $X_{\ell-1}$, $\mathcal{C}' \cap (Q' \cup R)$ is completely contained in exactly one part. Also, $P_i = P_i' \setminus \mathcal{C}$. Moreover, consider the part $P'_i$ such that $\mathcal{C} \cap (Q' \cup R) \subseteq P'_i$: $P'_i \setminus (\mathcal{C} \cap (Q' \cup R)) \neq \emptyset$. Let $\mathcal{Q}'$ be the partition of $Q'$ such that $Q' \cap P'_i = Q'_i$.
If $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k''] = 1$ then $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Otherwise, $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=0$.
Suppose (LHS) of the invariant is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$: $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. By definition, there is a $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k''] = 1$ for a $Q',\mathcal{Q}',\mathcal{P}'$ as described above. By induction hypothesis, (RHS) corresponding to $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k''] = 1$ holds. Thus, there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell-1} X_j] = H[\bigcup_{j=1}^{\ell} X_j]$ (by definition of a forget bag). By definition of $Q,\mathcal{Q},\mathcal{P}$, $T'$ is also a witness forest in $H[\bigcup_{j=1}^{\ell} X_j]$ and therefore (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
On the other hand, suppose (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$. Then there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell} X_j] = H[\bigcup_{j=1}^{\ell-1} X_j]$. Moreover, $T'$ has $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$ and $R \cap (\bigcup_{j=1}^{\ell} X_j) \subseteq V(T')$. Let $D_i \cap X_{\ell -1} =P'_i$, $(D_i \setminus R) \cap X_{\ell-1} = Q'_i$, $Q' = \bigcup_{i=1}^{b} Q'_i$. Note that the total number of non-terminal points in $T'$ is $k''$ and by definition of a forget node it is still true that $R \cap (\bigcup_{j=1}^{\ell-1} X_j) \subseteq V(T')$. By induction hypothesis, (LHS) is true for $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k'']$ and $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k'']=1$. By the description above, this implies that $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Therefore, (LHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
\subparagraph*{Case 2:}
$X_\ell$ is an {\bf introduce bag} with exactly one child $X_{\ell -1}$. Let $\mathcal{C}$ be the cell being introduced in $X_\ell$. Consider $\mathcal{A}[\ell,Q, \mathcal{Q}=Q_1,\ldots Q_b,\mathcal{P}=P_1\ldots P_b,k'']$. Without loss of generality, let $P_b$ contain all the vertices in $\mathcal{C} \cap (Q \cup R)$.
By definition of a state, $\vert \mathcal{C} \cap Q \vert \leq 24$. Let ${\sf St} = \mathcal{C} \cap Q$ and $Q' = Q \setminus {\sf St}$.
Let $\mathcal{P}' = P'_1\uplus P'_2 \uplus \ldots \uplus P'_b \uplus \ldots \uplus P'_{d}$ be a partition of $Q' \cup (R \cap X_{\ell-1})$ such that for $j < b, P_j = P'_j$, and $P_b = (\mathcal{C} \cap (Q \cup R)) \cup \bigcup_{j=b}^{d} P'_j$. Moreover, $\mathcal{C} \cap (Q \cup R)$ has a neighbour in each $P'_j, b \leq j \leq d$.
Let $\mathcal{Q}'$ be the partition of $Q'$ such that $Q' \cap P'_i = Q'_i$.
Let $k^* = k'' - \vert {\sf St} \vert$.
If $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*] = 1$ then $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Otherwise, $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=0$.
Suppose (LHS) of the invariant is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$: $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. By definition, there is a $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*] = 1$ for a $Q',\mathcal{Q}',\mathcal{P}'$ as described above. By induction hypothesis, (RHS) corresponding to $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*] = 1$ holds. Thus, there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell-1} X_j]$. By definition of $Q,\mathcal{Q},\mathcal{P}$, $H[V(T') \cup (\mathcal{C} \cap (Q \cup R))]$ is a connected graph. Consider a spanning tree of this connected graph. By definition of $k^*$, this spanning tree has all vertices of $R$ and exactly $k''$ non-terminal vertices. Therefore, this spanning tree is a witness forest in $H[\bigcup_{j=1}^{\ell} X_j]$ and therefore (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
On the other hand, suppose (RHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$. Then there is a witness forest $T'$ in $H[\bigcup_{j=1}^{\ell} X_j]$. Moreover, $T'$ has $b$ connected components $D_1,\ldots,D_b$: $D_i \cap X_\ell = P_i$, $(D_i \setminus R) \cap X_\ell = Q_i$, the total number of non-terminal points in $T'$ is $k''$ and $R \cap (\bigcup_{j=1}^{\ell} X_j) \subseteq V(T')$. Without loss of generality, let $D_b$ contain $T' \cap \mathcal{C}$. Let $D'_1,D'_2,\ldots D'_b,\ldots, D'_d$ be the connected components of $T'$ restricted to $H[\bigcup_{j=1}^{\ell-1} X_j]$. Let $D'_i \cap X_{\ell -1} =P'_i$, $(D'_i \setminus R) \cap X_{\ell-1} = Q'_i$, $Q' = \bigcup_{i=1}^{d} Q'_i$. Note that the total number of non-terminal points in $T'$ is $k^* = k'' - \vert {\sf St} \vert $ and by definition of an introduce node it is true that $R \cap (\bigcup_{j=1}^{\ell-1} X_j) \subseteq V(T') \cap (\bigcup_{j=1}^{\ell-1} X_j)$. By induction hypothesis, (LHS) is true for $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*]$ and $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*]=1$. By the description above, this implies that $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']=1$. Therefore, (LHS) is true for $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$.
Finally, we analyse the time complexity of the algorithm. First, the good family $\mathcal{F}$ is computed in $n^{O(\sqrt{t+k})}$ time as per Lemma~\ref{lem:compute-good-fam}, and the number of instances in the good family $\mathcal{F}$ is $n^{O(\sqrt{t+k})}$. For one such instance $(H,R,t,k')$ the possible states for the algorithm $\mathcal{A}$ are of the form $[\ell, Q,\mathcal{Q},\mathcal{P},k'']$. By definition, $\ell \leq n$, $k'' \leq k'$ and $\vert Q \vert = O(\sqrt{t+k})$. Again, by definition $\mathcal{P}$ is upper bounded by the number of partitions of cells contained in a bag of the modified NCPD of $(H,R,t,k')$. Thus, the number of possibilities of $\mathcal{P}$ is $\sqrt{t+k}^{O(\sqrt{t+k})}$. Also by definition, $\mathcal{Q}$ is fixed once $Q$ and $\mathcal{P}$ are fixed. Therefore, the number of possible states is $n^{O(\sqrt{t+k})}$. From the description of $\mathcal{A}$, the computation of $\mathcal{A}[\ell,Q,\mathcal{Q},\mathcal{P},k'']$ may look up the solution for $n^{O(\sqrt{t+k})}$ instances of the form $\mathcal{A}[\ell-1,Q',\mathcal{Q}',\mathcal{P}',k^*]$ and therefore takes $n^{O(\sqrt{t+k})}$ time. Thus, the total time for the dynamic programming is $n^{O(\sqrt{t+k})}$.
\end{proof}}
\old{
\section{Missing Proofs of Section~\ref{sec:FPT}}\label{secapp:FPT}
\begin{proof}[Proof of Observation~\ref{obs:terminal-comps}]
Consider the $k$-Steiner tree $T$ and let $S = V(T) \setminus R$ be the set of Steiner vertices of $T$. Note that in $G[R \cup S]$, $T$ is a spanning tree and therefore $G[R \cup S]$ is a connected graph. Similarly, for each $i \in [q]$, $T_i$ is a subgraph of $G[R \cup S]$. Consider the subgraph $H = T' \cup \bigcup_{i\in[q]} T_i$. As $T'$ is a spanning tree, $T' \cup \bigcup_{i\in[q]} T_i$ is a connected graph. We consider an arbitrary ordering $\mathcal{O}$ of the edges in $E(H) \setminus (\bigcup_{i\in [q]} E(T_i))$. In this order we iteratively throw away an edge $e_j \in E(H) \setminus (\bigcup_{i\in [q]} E(T_i))$ if the resulting graph remains connected upon throwing $e_j$ away. Let $H'$ be the graph at the end of considering all the edges in the order $\mathcal{O}$. We prove that $H'$ must be a tree. Suppose for the sake of contradiction, there is a cycle $C$ as a subgraph of $H'$. As for each $i\in [q]$, $T_i$ is a tree and for each $i \neq i' \in [q]$, $V(T_i) \cap V(T_{i'}) = \emptyset$, there must be an edge from $E(H) \setminus (\bigcup_{i\in [q]} E(T_i))$ in $E(C)$. Consider the edge $e \in (E(H) \setminus (\bigcup_{i\in [q]} E(T_i))) \cap E(C)$ with the largest index according to $\mathcal{O}$. This edge was throwable as $C \setminus \{e\}$ preserves any connectivity provided by $e$. Thus, there can be no cycle in $H'$ and it is a spanning tree of $V(H)$. This implies that $T=H'$ is a $k$-Steiner tree for $G$, $S$ being the set of at most $k$ Steiner vertices, such that for each $i \in [q]$, $T_i$ is a subtree of $T$.
Finally, we show that if a $k$-Steiner tree $T$ exists then $q \leq 24k$. Let $f$ be a representation of the clique-grid graph $G$. Note that for any cell $(a,b)$ $f^{-1}(a,b)$ is a clique. Therefore, there can be at most one component $C_i$ intersecting with a cell $(a,b)$. By property $(2)$ of Definition~\ref{def:clique-grid}, there are at most $24$ cells that can have neighbours of any vertex in $(a,b)$. Thus, for any Steiner vertex, there can be at most $24$ components of $G[R]$ it can have neighbours in. Putting everything together, if there are at most $k$ Steiner vertices that are used to connect the $q$ connected components of $G[R]$ and each Steiner vertex can have neighbours in at most $24$ components, then it must be that $q \leq 24k$.
\end{proof}
}
\old{
\section{Missing Proofs of Section~\ref{sec:whard}}\label{secapp:whard}
\begin{proof}[Full Proof of Theorem~\ref{w1-hard}]
We prove Theorem~\ref{w1-hard} by giving a parameterized reduction from the \textsc{Grid Tiling with} $\ge$ problem, which is known to be W[1]-hard\footnote{The $k\times k$ \textsc{Grid Tiling with} $\ge$ problem is W[1]-hard and, assuming ETH, cannot be solved in $f(k)n^{o(k)}$ time for any function $f$.} \cite{cygan2015parameterized}.
In the \textsc{Grid Tiling with} $\ge$ problem,
we are given an integer $n$, a $k\times k$ matrix for an integer $k$, and a set of pairs $S_{ij}\subseteq [n]\times [n]$ for each cell. The objective is to find, for each $1\le i,j\le k$, a value $s_{ij}\in S_{ij}$ such that if $s_{ij}=(a,b)$ and $s_{i+1,j}=(a',b')$ then $a\ge a'$; and if $s_{ij}=(a,b)$ and $s_{i,j+1}=(a',b')$ then $b\ge b'$.
Let $I=(n,k,\mathcal{S})$ be an instance of the \textsc{Grid Tiling with} $\ge$.
We construct a set of unit disks $D$, that is divided into three sets of unit disks $D_1, D_2, D_3$; $D=D_1\uplus D_2\uplus D_3$.
Each disk in $D_1, D_2, D_3$ is of radius $1$, $\delta$ and $\kappa$, respectively. We will define the value of $\delta$ and $\kappa$ shortly.
The construction of the set $D=D_1\uplus D_2\uplus D_3$ will ensure that $D$ contains a {\sc Steiner Tree}\xspace with $k^2$ Steiner vertices if and only if $I$ is a yes instance of \textsc{Grid Tiling with} $\ge$.
Let $\epsilon = 1/n^{10}$, and $\delta =\epsilon/4$. Here, we point out that the value of $\kappa, \epsilon$ are independent of each other.
First, we move the cells away from each other, such that the horizontal (resp. vertical) distance between the left columns (resp. top rows) of any two consecutive cells is $2+\epsilon$.
Let
$100\delta$ be the side length of each cell. Then, we introduce diagonal chains of terminal disks into $D_3$ of radius $\kappa=\sqrt{2}(2+\epsilon - 100\delta)/1000$ to connect the cells diagonally; see Figure~\ref{W-hard-connected}(a).
For every $1\le x, y\le k$, and every $(a,b)\in S(x,y)\subseteq [n]\times [n]$,
we introduce into $D_1$ a disk of radius $1$ centered at $(2x+\epsilon x+\epsilon a, 2y+\epsilon y+\epsilon b)$.
Let $D[x,y]\subseteq D_1$ be the set of
disks introduced for a fixed $x$ and $y$, and notice that they mutually intersect each other.
Next, for $1\le x, y\le k$, we introduce into $D_2$, disks of radius $\delta$ between consecutive cells of coordinate $(2x+1+\epsilon x+\epsilon a, 2y+\epsilon y)$ (placed horizontally); and $(2x+\epsilon x, 2y+1+\epsilon y+\epsilon b)$ (placed vertically). For every cell $S[x,y]$,
we denote the top, bottom, left, right cluster of terminal disks of radius $\delta$ from $D_2$ by $L[x,y], R[x,y], T[x,y], B[x,y]$, respectively.
Moreover, for each cell $S[x,y]$, we introduce a disk of radius $\delta$
at a coordinate that is completely inside the rectangle bounding the centres of disks in $D[x,y]$. This is to enforce that at least one disk is chosen from each $D[x,y]$.
See Figure~\ref{W-hard-connected}(b) for an illustration.
We proceed with the following observation.
Consider a disk $p$ that is centered at
$(2x+\epsilon x+\epsilon a, 2y+\epsilon y+\epsilon b)$ for some $(a,b)\in [n]\times [n]$. Now, consider a disk $q$ from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon a, 2y+\epsilon y)$. The distance between their centers is $\sqrt{1+\epsilon^2 b^2}$.
We need to show that this is less than $(1+\epsilon/4)$.
This is true because $1+\epsilon^2 b^2$ is less than $(1+\epsilon/4)^2$ even as the value of $b$ goes up to $n$, since $\epsilon = 1/n^{10}$ and the value of $n$ is large. Hence, $q$ is covered by the disk $p$ from $S[x,y]$ centered at $(a,b)$. Next, consider a disk $q'$ from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon (a+1), 2y+\epsilon y)$. The distance between their centers is $\sqrt{(1+\epsilon)^2+\epsilon^2 b^2}$. We show that this value is bigger than $(1+\epsilon/4)$. This means $(1+\epsilon)^2+\epsilon^2 b^2$ is bigger than
$(1+\epsilon/4)^2$. Even as the value of $b$ goes up to $n$, it is not hard to see that the left side is bigger, since $\epsilon=1/n^{10}$ and the value of $n$ is large.
Therefore, $q'$ is not covered by the disk $p$ from $S[x,y]$ centered at $(a,b)$. The same calculation holds for $L[x,y]$, $T[x,y]$
and $B[x,y]$.
In the forward direction, let the pairs $s[x,y]\in S[x,y]$ form a solution for instance $I$, and let $s[x,y]=(a[x,y],b[x,y])$.
For every $1\le x,y\le k$,
we select the disk $d[x,y]$ from $D_1$ of radius $1$ centered at
$(2x+\epsilon x+\epsilon a[x,y], 2y+\epsilon y+\epsilon b[x,y])$.
We have seen in the previous paragraph that this disk covers any disk from $R[x,y]$ with center $(2x+1+\epsilon x+\epsilon a[x,y], 2y+\epsilon y)$ but does not cover disks with coordinate $(2x+1+\epsilon x+\epsilon (a[x,y]+1), 2y+\epsilon y)$. Similarly, this holds for $L[x,y],T[x,y],B[x,y]$. Since the $s[x,y]$'s form a solution of $I$, we have $a[x,y]\ge a[x+1,y]$. Therefore, the disks $d[x,y]$ and $d[x+1,y]$ will cover all disks from $R[x,y]$. Similarly, we have
$b[x,y]\ge b[x,y+1]$ which implies that $d[x,y]$ and $d[x,y+1]$ will cover $T[x,y]$ and form a connected component with it. Now, the diagonal chains consisting of terminal disks of radius $\kappa$, which we have taken to join the cells (see Figure~\ref{W-hard-connected}(a)), ensure that all cells are connected. Moreover, we have shown that if the $s[x,y]$'s form a solution of instance $I$, then all terminals in $L[x,y], R[x,y], T[x,y], B[x,y]$ (for any $1\le x, y\le k$) are covered. Therefore, this will form a connected Steiner tree with $k^2$ many Steiner disks.
In the reverse direction,
let $D'\subseteq D_1$ be a set of $k^2$ Steiner disks that spans over all terminals in $D_2\cup D_3$. Then, for every $1\le x,y\le k$, the set $D'$ contains a disk $d[x,y]\in D[x,y]$ that is centered at $(2x+\epsilon x+\epsilon a[x,y], 2y+\epsilon y+\epsilon b[x,y])$ for some $(a[x,y],b[x,y]) \in [n]\times [n]$. Indeed, we are required to choose one disk from $D[x,y]$ because there is a terminal disk lying inside the rectangle bounding the centres of disks in $D[x,y]$. The claim is that the pairs $s[x,y]=(a[x,y],b[x,y])$ form a solution of $I$. First of all, $d[x,y]\in D[x,y]$ implies that $s[x,y]\in S[x,y]$. Consider a cell $S[x,y]$. We have observed that $d[x,y]$ covers the disk $q$ from $R[x,y]$ centered at $(2x+1+\epsilon x+\epsilon a, 2y+\epsilon y)$, but a disk $q'$ from $R[x,y]$
centered at $(2x+1+\epsilon x+\epsilon (a+1), 2y+\epsilon y)$ is not covered.
This is true for $L[x,y], T[x,y], B[x,y]$. Hence, if all terminal points inside the $S[x,y]$'s and $L[x,y], R[x,y], T[x,y], B[x,y]$ are covered by $k^2$ many Steiner disks, it would imply that $a[x,y]\ge a[x+1,y]$ and $b[x,y]\ge b[x,y+1]$.
Therefore, $s[x,y]$'s form the solution for \textsc{Grid Tiling with} $\ge$ instance $I$. This completes the proof.
\end{proof}}
\end{document}
\begin{document}
\author{Johann Gehringer and Xue-Mei Li \\Imperial College London \blfootnote { [email protected], [email protected]}}
\title{Functional limit theorems for the fractional Ornstein-Uhlenbeck process}
\maketitle
\begin{abstract}
We prove a functional limit theorem for vector-valued functionals of the fractional Ornstein-Uhlenbeck process, providing the foundation for the fluctuation theory of slow/fast systems driven by both long and short range dependent noise. The limit process has both Gaussian and non-Gaussian components.
The theorem holds for any $L^2$ functions, whereas for functions with stronger integrability properties the convergence is shown to hold in the H\"older topology,
the rough topology for processes in $C^{\frac 12+}$. This leads to a `rough creation' / `rough homogenization' theorem, by which we mean the weak convergence of a family of random smooth curves to a non-Markovian random process with non-differentiable sample paths. In particular, we obtain effective dynamics for the second order problem and for the kinetic fractional Brownian motion model.
\end{abstract}
{ \scriptsize {\it keywords:} passive tracer, fractional noise, multi-scale,
mixed functional central and non-central limit theorems, rough creation, rough homogenization, rough topology }
{\scriptsize \textit{MSC Subject classification:} 34F05, 60F05, 60F17, 60G18, 60G22, 60H05, 60H07, 60H10}
\setcounter{tocdepth}{2}
\tableofcontents
\section{Introduction}
The functional limit theorem we study here lays the foundation for the fluctuation problem for a slow/fast system
with the fast variable given by a family of non-strong mixing stochastic processes; this will be discussed in \cite{Gehringer-Li-tagged}, see
\cite{Gehringer-Li-homo} for the preliminary version.
Pivotal for obtaining effective dynamics for the slow moving particles in a fast turbulent environment are scaling limit theorems for the convergence of the following functionals
\begin{equation}
X^\epsilon:=\left( X^{1,\epsilon}, \dots, X^{N,\epsilon}\right), \qquad
X^{k,\epsilon}= \alpha_k (\epsilon)\int_0^{t} G_k(y^{\epsilon}_s)ds,
\end{equation}
with weak convergence in $\mathcal{C}^\alpha([0,T],\R)$, where $T$ is some finite fixed time horizon, $\alpha(\epsilon)$ a suitable scaling and $G_k : \R \to \R$.
If $y_t^\epsilon=y_{\frac t \epsilon}$ and $y_t$ is a strong mixing process, $\alpha(\epsilon)=\frac 1{ \sqrt \epsilon}$ and the limit is
a Markov process, for details see e.g. the book \cite{Komorowski-Landim-Olla} and the references therein.
For stochastic processes whose auto-correlation function does not decay sufficiently fast at infinity there is no reason to have the $\sqrt \epsilon$ scaling or to obtain a diffusive limit. Furthermore, the scaling limit and the
limit function may depend on the individual functions~$G_k$.
In this article we take $y_t^\epsilon$ to be the stationary and rescaled fractional Ornstein-Uhlenbeck process with Hurst parameter $H$, which, for $H> \frac 1 2$, exhibits long range dependence (LRD) and is not strong-mixing. Our interest for long range dependent / non-strong mixing noise comes from the time series data of the river Nile. In a study of water flows of the Nile river, Hurst and his colleagues \cite{Hurst} observed long range time dependence and found that the time dependence varies proportionally to $t^H$ where $H\sim 0.73$; by contrast, Brownian motions and stable processes have independent increments.
Fractional Brownian motions (fBM) were then proposed by Benoit Mandelbrot and John Van Ness \cite{Mandelbrot-VanNess} for modelling the Hurst phenomenon.
A fBM is a continuous mean zero Gaussian process with stationary increments and covariance ${\symb E}(B_t-B_s)^2=|t-s|^{2H}$, it is self-similar with similarity exponent $H$, and distinguished by the Gaussian property and stationary, but dependent increments. See e.g. \cite{Mishura} and the reference therein for stochastic calculus for fBM's.
Self-similar processes appeared also in mathematically rigorous descriptions of critical phenomena and in renormalisation theory. In \cite{Sinai}, Sinai constructed non-Gaussian self-similar fields;
while Dobrushin \cite{Dobrushin} studied self-similar fields subordinated to self-similar Gaussian fields (multiple Wiener integrals).
Those self-similar stochastic processes with stationary increments are a particularly interesting class. When normalized to begin at $0$, to have mean $0$ and to have variance $1$ at $t=1$, they necessarily have the covariance $\frac 12(t^{2H}+s^{2H}-|t-s|^{2H})$. Those of Gaussian variety are fBMs. Hermite processes are non-Gaussian self-similar processes with stationary increments and the above mentioned covariance.
They appeared as scaling limits of functionals of long range dependent Gaussian processes, see \cite{Rosenblatt}. Jona-Lasinio was also concerned with the construction of a systematic theory of limit distributions for sums of `strongly dependent' random variables for which the classical central limit theorems do not hold, \cite{Jona-Lasinio}, see also the book \cite{Embrechts-Maejima}.
Let us first consider the convergence of one single component, the scalar case.
The scaling constant $\alpha_k(\epsilon)$ depends on the function $G_k$, and is a reflection of the self-similarity exponents of the limiting process.
If $\alpha_k(\epsilon)=\frac 1 {\sqrt \epsilon}$, the limit of $X^{k, \epsilon}$ is a Wiener process and the functional central limit theorem is expected to hold. Let $\mu$ denote the centred and normalized Gaussian measure on $\R$ and $m_k$ denote the Hermite rank of a centred function $G_k \in L^2(\mu)$,
which is the smallest non zero term in its chaos expansion. Let $(H,m)\mapsto H^*(m)$ denote the function given by (\ref{H-star}), which decreases with $m$. Then, the relevant scaling constants are given as below:
\begin{equation}\label{beta}
\begin{aligned}
&\alpha\left(\epsilon,H^*(m_k)\right) = \left\{\begin{array}{cl}
\frac 1 {\sqrt{\epsilon}}, \, \quad &\text{ if } \, H^*(m_k)< \frac 1 2,\\
\frac 1 {\sqrt{ \epsilon \vert \ln\left( \epsilon \right) \vert}}, \, \quad &\text{ if } \, H^*(m_k)= \frac 1 2, \\
\epsilon^{H^*(m_k)-1}, \quad \, &\text{ if } \, H^*(m_k) > \frac 1 2.
\end{array}\right.
\end{aligned}
\end{equation}
See Lemma \ref{Integrals} for a preliminary computation indicating the scales. In the past the limit theorems for the non-Gaussian limits had been called non-central limit theorems, we use the terminology `functional central limit theorems' for all cases.
The intuition for this comes from its counter part for sequences. If $Y_n$ is a mean zero, stationary, and strong mixing sequence, such that $\sigma_n^2={\symb E} (\sum_{i=1}^n Y_i)^2\to \infty$, $ {\symb E}(\sum_{i=1}^n Y_i)^4=O(\sigma_n^4)$,
then $ \frac 1 {\sigma_n} \sum_{i=1}^n Y_i {\longrightarrow} N(0,1)$.
If $Y_n$ is not strong mixing, this CLT may fail.
Indeed, if $X_n$ is a stationary mean zero variance $1$ Gaussian sequence with auto-correlation $r(n)\sim c {n^{-\gamma}}$ for some $\gamma\in (0, 1)$ and $c\in \R$ (allowing for negative correlations), $G$ a centred function with Hermite rank $m\ge 1$, and $A(n)$ a sequence such that
$$\lim_{n\to \infty} \mathrm{var}\left(\frac 1 {A(n)} \sum_{k=1}^n G(X_k)\right) =1.$$
Then, $z_n=\frac 1 {A(n)} \sum_{k=1}^{[nt]} G(X_k)$ is expected to converge in finite dimensional distributions.
The scaling constant $A(n)$ is of the order $n^{1-\frac 12 \gamma m}$ in the long range dependent case, of order $\sqrt n$ in the short range dependent case, and of order $\sqrt {n \ln n}$ for the borderline case, see \cite{Breuer-Major,Bai-Taqqu}.
By long range dependence, we mean $\sum_{n=1}^\infty |r(n)|= + \infty$.
The limit process, $\lim_{n\to \infty} z_n$, is a Wiener process for fast decaying correlations, i.e.
in case $\gamma\in (\frac 1 m, 1)$, \cite{Breuer-Major}.
In the borderline case, $\gamma=\frac 1m$, the scaling limit is also a Wiener process.
However if $\gamma \in (0, \frac 1m)$ the correlations fail to decay sufficiently fast, the scaling limit is a Hermite process in
the $m$-th chaos, \cite{ Dobrushin, Bai-Taqqu}.
The first convergence to a non-Gaussian self-similar process was shown in \cite{Rosenblatt}, where the aim was to construct a non-strong mixing sequence of random variables; this was achieved by showing that the sequence has a non-Gaussian scaling limit, now known as the Rosenblatt process. In \cite{Bai-Taqqu} vector valued combinations of
short and long range dependent sums were studied, however, the limit of each component is assumed to be moment determinate (which can only happen when they are in the $L^2$ chaos expansion of order less or equal to $2$). This is due to a restriction in the asymptotic independence result in \cite{Nourdin-Rosinski}, which was extended in \cite{Nourdin-Nualart-Peccati}.
We return to the continuous functional limit theorems.
For the scalar case, the continuous version CLT for $\gamma \in ( \frac 1 m ,1 )$ was obtained in \cite{BenHariz}, the borderline case $\gamma = \frac 1 m$ in \cite{Buchmann-Ngai-bordercase}. These are shown for the convergence in finite dimensional distribution and for $G$ to be a centred $L^2$ function. They also obtained uniform convergences in the continuous topology for a restrictive class of functions $G$ (assuming sufficiently fast decay of the coefficients in the Wiener chaos expansion).
This was extended in \cite{Nourdin-Nualart-Zintout} to vector valued $X^\epsilon$, when each component of $X^\epsilon$ falls in the Brownian case, with convergence understood in the sense of finite dimensional distributions. The result in \cite{Nourdin-Nualart-Zintout} was improved in \cite{Campese-Nourdin-Nualart}, where the fast chaos decay restrictions on $G_k$, for $G_k \in L^p$ for $p>2$, are removed with techniques from Malliavin calculus.
In the continuous long range dependent case Taqqu, \cite{Taqqu}, obtained convergence in the continuous topology.
These results, although fragmented (in some regimes these are only known for scalar valued processes or only at the level of sequences), provide a fairly good picture of what is going on.
There exists however no vector valued functional limit theorem with joint convergence, when the scaling limit of the components are mixed, in this article we provide a complete description for the joint convergence of $\{X^{k,\epsilon}\}$ for $G_k \in L^2(\mu)$. We have a functional limit theorem for vector valued processes whose components may exhibit both short and long range dependence. For $G_k$ satisfying a stronger integrability condition, we can also show weak convergence in the $\mathcal{C}^\alpha([0,T],\R^d)$-topology and for each fixed time in $L^2$ for the low Hermite rank case, which already have interesting applications.
Furthermore, they are the basis for the convergence in a suitable rough topology, which due to the change of the nature of the problem will appear in \cite{Gehringer-Li-homo} where rough path theory is used to study slow/fast systems, leading to `rough creation' / `rough homogenization' in which the effective limit is not necessarily a Markov process.
{\it Application.} Consider the second order equation on $\R$:
$$\begin{aligned}
\dot x_t^\epsilon &= \epsilon^{H-1} f(x^{\epsilon}_t) y_t^\epsilon, \quad x_0^\epsilon=x_0\\
dy_t^\epsilon&=-\frac 1 \epsilon y_t^\epsilon dt + \frac {\sigma} {\epsilon^H} dB_t^H, \, \, \, \, \, \, \, \, y_0 \sim \epsilon^{-H} \sigma \int_{-\infty}^0 e^{\frac { t-s } {\epsilon} } dB^H_s.\end{aligned}$$
Taking $\epsilon \to 0$, does $x_t^\epsilon$ converge? In case $H=\frac 12$ and $f=1$, this is essentially the Kramers-Smoluchowski limit (this is also called the kinetic Brownian motion model). For $H\not =\frac 12$ and for $f=1$ this was shown in \cite{Boufoussi-Ciprian, Zhang-08,Al-Talibi-Hilbert} to converge to a fBM, see also \cite{Friz-Gassiat-Lyons} for the case with a magnetic field. Given $H>\frac 13$ and $f\in C_b^3$ (for any $H$ if $f=1$), we can show $x_t^\epsilon$ converges to the solution of the equation $\dot x_t=f(x_t) \;d B_t^H $ with initial value $x_0$ where the integral in the differential equation is interpreted as a Riemann-Stieltjes integral.
Furthermore we obtain the following bound in $C^{\gamma'}$ where $0<\gamma'<\gamma < H$:
$$\left\| |x_\cdot^\epsilon -x_\cdot|_{\mathcal{C}^{\gamma'}([0,T])} \; \right\|_{L^p} \lesssim T^{\gamma} \epsilon^{H-\gamma}.$$
This computation is straightforward, see Propositions \ref{th-example} and \ref{prop-linear-driver} for detail.
With the functional limit theorem below, Theorem A, we can conclude also the convergence of solutions of the equations, for $h \in \mathcal{C}^{2}_b(\R^d,\R^d)$ and $g \in \mathcal{C}_b(\R,\R)$,
$$\dot x_t^\epsilon= \alpha(\epsilon) f(x_t^\epsilon) \, G(y^\epsilon_t)+ h(x_t^\epsilon) \, g(y^\epsilon_t).$$
We show that
$ x_t^\epsilon$ converges in $\mathcal{C}^\gamma([0,T],\R)$ for $\gamma \in (0, H^*(m) \vee \frac 1 2 - \frac 1 p)$
either to the solution of the equation
$$d\bar x_t = c f(\bar x_t) \,dZ_t^{H^*(m),m}+\bar g\; h(\bar x_t),$$
where $Z_t^{H^*(m),m}$ is a Hermite process, or to the solution to the Stratonovich stochastic differential equation
$$d\bar x_t = c f(\bar x_t) \circ \,dW_t+\bar g\; h(\bar x_t) $$ where $W_t$ is a standard Wiener process (given enough integrability on $G$). Here $c$ is a specific constant (c.f.\ equation (\ref{c-square})) depending on $G$ arising from the homogenization procedure.
For the above we follow \cite{Campese-Nourdin-Nualart} and use Malliavin calculus to obtain suitable moment bounds on $\int_0^t G(y^{\epsilon}_s) ds$. These results appeared in the previous version of the current paper \cite{Gehringer-Li-homo}. Equations driven by fractional Brownian motions are also studied for the averaging regime, see \cite{Hairer-Li} and \cite{Fannjiang-Komorowski-2000}. A fluctuation theorem around the effective average was obtained \cite{bourguin2019typical}.
\subsection*{Main Results}
We denote our underlying probability space by $(\Omega, \mathcal{F},\P)$. Let $\mu$ denote the standard Gaussian distribution and we choose $\sigma$ such that the stationary scaled fOU process, to be defined below, satisfies $y^{\epsilon}_t \sim \mu$. Let $\{H_m, m\ge 0\}$ be the orthogonal Hermite polynomials on $L^2( \mu)$, such that they have leading coefficient $1$ and $L^2(\mu)$ norm $\sqrt {m!} $.
Given $G\in L^2(\mu)$, it possesses an expansion of the form
$
G(x)=\sum_{k=0}^\infty c_k H_k(x)$,
where $ c_k=\frac 1 {k!}\langle G, H_k\rangle_{L^2(\mu)}$. A function $G$ is centred if and only if $c_0=0$. The smallest $m$ with $c_m\not =0$ is called the Hermite rank of $G$.
In case the correlations of $y^{\epsilon}_t$ do not decay sufficiently fast the path integral $\alpha (\epsilon)\int_0^t G( y_s^\epsilon)ds$ ought to be approximated by that of the first term of its Wiener chaos expansion. By orthogonality of the $H_m$'s it is sufficient to study the asymptotics of $\alpha (\epsilon)\int_0^t H_m( y_s^\epsilon)ds$ to deduce $\alpha(\epsilon)$.
Although the solutions to the fOU equation converge exponentially fast to each other, their autocorrelation function decays only algebraically. The indicator for the behaviour of $\alpha(\epsilon) \int_0^t H_m(y_s^\epsilon) ds$ turns out to be
\begin{equation}\label{H-star}
H^*(m) = m(H-1)+1,
\end{equation}
and the self-similarity exponent of the limiting process is determined by $\alpha(\epsilon,H^*(m))$.
For large $m$, the limit will be a Wiener process, and otherwise the limit $Z_t$ should have the scaling property:
$\epsilon^{H^*(m)} Z_{\frac t\epsilon} \sim Z_t$. Indeed, $Z_t$ are the self-similar Hermite processes.
To state the functional limit theorem concisely, we make the following convention,
\begin{convention}\label{convention}
Given a collection of functions $(G_k \in L^2(\mu), k\le N)$, we will label the high rank ones first, so the first $n$ functions
satisfy $H^*(m_k) \le \frac 12$, where $n \geq 0$, and the remaining satisfy $H^*(m_k) > \frac 1 2$. \end{convention}
\begin{theorem*}\label{theorem-CLT}
Let $y^\epsilon$ be the stationary solution to the scaled fractional Ornstein-Uhlenbeck equation (\ref{fOU}) with standard Gaussian distribution $\mu$. Let $G_k:\R\to \R$ be centred functions in $L^2(\mu)$ with Hermite ranks $m_k$. Write
$$G_k= \sum_{l=m_k}^\infty c_{k,l} H_l, \qquad \alpha_k(\epsilon)=\alpha(H^*(m_k), m_k), \quad X^{k,\epsilon}= \alpha_k (\epsilon)\int_0^{t} G_k(y^{\epsilon}_s)ds.$$
Set $$ X^{W, \epsilon}=\left( X^{1,\epsilon}, \dots, X^{n,\epsilon}\right), \qquad
X^{Z, \epsilon}=\left( X^{n+1,\epsilon}, \dots, X^{N,\epsilon}\right).$$
Then, the following holds:
\begin{enumerate}
\item \begin{itemize}
\item [(a)]
There exist stochastic processes $X^W=( X^1, \dots, X^n)$ and $X^Z=(X^{n+1}, \dots, X^N)$ such that on every finite interval $[0,T]$,
$$(X^{W,\epsilon}, X^{Z,\epsilon}) \longrightarrow (X^W, X^Z),$$
in the sense of finite dimensional distributions.
Furthermore, for any $t >0$
$$\lim_{\epsilon \to 0} \|X^{Z,\epsilon}_t - X^Z_t\|_{L^2(\Omega)}=0.$$
\item [(b)] If furthermore each $G_k$ satisfies Assumption \ref{assumption-single-scale-not-continuous} below, the convergence is weakly in $\mathcal{C}^{\gamma}([0,T],\R^N)$ for every $\gamma < \frac 1 2 - \frac {1} {\min_{k \leq n } p_k}$, if there is at least one component converging to a Wiener process. Otherwise they converge in $\mathcal{C}^{\gamma}([0,T],\R^N)$ for every $\gamma < \min_{k>n} H^*(m_k) - \frac 1 {p_k} $.
\end{itemize}
\item
We now describe the limit $X= (X^W,X^Z)$.
\begin{enumerate}
\item [(1)] $X^W \in \R^n$ and $X^Z \in \R^{N-n}$ are independent.
\item [(2)] $ X^W = U \hat W_t$ where $ \hat W_t$ is a standard Wiener process and $U$ is a square root of
the matrix $(A^{i,j})$,
$$A^{i,j}=\int_0^{\infty} {\symb E}\left( G_i(y_s) G_j(y_0) \right) ds =
\sum_{q=m_i\vee m_j}^{\infty} c_{i,q}\; c_{j,q} \; (q!) \, \int_0^\infty \rho(r)^q\, dr$$
and $\rho(r)={\symb E} (y_ry_0)$. In other words, ${\symb E}\left( X^i_t X^j_s\right)= 2 (t \wedge s) A^{i,j}$ for $i,j\le n$.
\item [(3)] Let $Z_t^{H^*(m_k),m_k}$ be the Hermite processes, represented by (\ref{Hermite}).
Then,
$$ X^Z=(c_{n+1,m_{n+1}} Z_t^{n+1} , \dots, c_{N,m_{N}} Z_t^{N}),$$ where,
\begin{equation}\label{Hermite-2}
Z_t^{k}= \frac{m_k!}{K(H^*(m_k),m_k)} Z_t^{H^*(m_k),m_k}.
\end{equation}
We emphasize that the Wiener process $W_t$ defining the Hermite processes is the same for every $k$ (c.f. equation (\ref{Hermite})),
which is in addition independent of $\hat W_t$.
\end{enumerate}
\end{enumerate}
\end{theorem*}
\begin{assumption}[Functional Limit $\mathcal{C}^\gamma$ assumptions]
\label{assumption-single-scale-not-continuous}
Let $G_k\in L^{2}(\mu)$ with Hermite rank $m_k\ge~1$.
\begin{itemize}
\item { \it High rank case. } \quad If $H^*(m_k) \le \frac 1 2$, assume $G_k \in L^{p_k}(\mu)$ where $\frac 1 2 - \frac 1 {p_k} > \frac 1 3$ (i.e. $p_k >6$).
\item {\it Low rank case. } \quad If $H^*(m_k) > \frac 1 2$, assume $G_k \in L^{p_k}(\mu)$ where $ H^*(m_k) - \frac 1 {p_k} > \frac 1 2$.
\end{itemize}
\end{assumption}
\begin{remark}
\
The case $H=\frac 12$ is classical, and is not of interest here.
In this case the result is independent of the Hermite rank and the scaling is given by $\alpha(\epsilon)=\frac 1{\sqrt \epsilon}$, due to the exponential decay of correlations.
\end{remark}
An immediate application is the following rough homogenisation theorem for a toy model:
\begin{theorem*}
Let $H \in (\frac 1 3,1) \setminus \{ \frac 1 2 \}$, $f\in \mathcal{C}_b^3(\R^d, \R^d)$, $h \in \mathcal{C}^{2}_b(\R^d;\R^d)$, $G \in \mathcal{C}(\R,\R)$ satisfying Assumption \ref{assumption-single-scale-not-continuous} and $g \in \mathcal{C}_b(\R;\R)$.
Let $\alpha(\epsilon) = \alpha(\epsilon,H^*(m))$. Fix a finite time $T$ and consider
\begin{equation}\label{limit-eq}
\dot x_t^\epsilon =\alpha(\epsilon) f(x_t^\epsilon) G(y^{\epsilon}_t)+ h(x^{\epsilon}_t)g(y^{\epsilon}_t),
\qquad x_0^\epsilon=x_0.
\end{equation}
\begin{enumerate}
\item If
$H^*(m) > \frac 1 2$, $x_t^\epsilon$ converges weakly in $\mathcal{C}^{\gamma}([0,T],\R^d)$ to the solution to the Young differential equation
$d\bar x_t = c f(\bar x_t) \,dZ_t^{H^*(m),m} +\bar{g} h(\bar{x}_t) dt$ with initial value $x_0$ for $\gamma \in (0, H^*(m)- \frac 1 p)$.
\item If
$H^*(m) \leq \frac 1 2$,
$x_t^\epsilon$ converges weakly in $\mathcal{C}^\gamma([0,T],\R)$ to the solution of the Stratonovich stochastic differential equation
$d\bar x_t = c f(\bar x_t) \circ \,dW_t + \bar{g} h(\bar{x}_t) dt$ with $ \bar x_0=x_0$, where $\gamma \in(0, \frac 1 2- \frac 1 p)$.
\end{enumerate}
\end{theorem*}
We also take the liberty to point out an intermediate result on the joint convergence of stochastic processes in finite $L^2$ chaos, for it may be of service. The proof is a slight modification of results in \cite{Ustunel-Zakai,Nourdin-Nualart-Peccati}.
{\bf Proposition \ref{proposition-spit-independence}.}
Let $q_1 \leq q_2 \leq \dots \leq q_n \leq p_1 \leq p_2 \leq \dots \le p_m$. Let $f_i^\epsilon \in L^2(\R^{p_i})$, $
g_i^\epsilon \in L^2(\R^{q_i})$, $F^{\epsilon}=\left(I_{p_1}(f^{\epsilon}_1), \dots , I_{p_m}(f^{\epsilon}_m)\right)$ and $G^{\epsilon}=\left(I_{q_1}(g^{\epsilon}_1), \dots, I_{q_n}(g^{\epsilon}_n)\right)$, where $I_q$ denotes the Wiener integral of order $q$. Suppose that
for every $i,j$, and any $1 \leq r \leq q_i$:
$$ \Vert f^{\epsilon}_j \otimes_r g^{\epsilon}_i \Vert \to 0.$$
Then $F^\epsilon \to U$ and $G^{\epsilon} \to V$ weakly imply that $(F^{\epsilon},G^{\epsilon}) \to (U,V)$ jointly, where $U$ and $V$ are taken to be independent random variables.
\section{Preliminaries}\label{preliminary}
We take the Hermite polynomials of degree $m$ to be $$H_m(x) = (-1)^m e^{\frac {x^2} {2}} \frac {d^m} {dx^m} e^{-\frac {x^2} {2}}.$$ Thus, $H_0(x)=1$, $H_1(x)=x$.
Let $\hat H$ be the inverse of $H^*(m)=m(H-1)+1$: $$ \hat H(m)=\frac 1 m (H-1) + 1.$$
\subsection{Hermite processes}
The rank $1$ Hermite processes $Z^{H, 1}$ are fractional BMs, the formulation (\ref{Hermite}) below is exactly the
Mandelbrot-Van Ness representation for a fBM.
\begin{definition}\label{Hermite-processes}
Let $m\in \N$ with $\hat H(m)>\frac 12$. The class of {\it Hermite processes} of rank $m$ is given by the following mean-zero processes,
\begin{equation}\label{Hermite}
Z_t^{H,m}=\frac {K(H,m)} {m!} \int_{\R^m} \int_0^t \prod_{j=1}^m (s-\xi_j)_+^{ -(\frac 1 2 + \frac {1-H} {m})} \, ds \, d W({\xi_1}) \dots d W({\xi_m}),
\end{equation}
where the constant $K(H,m)$ is chosen so their variances are $1$ at $t=1$. \end{definition}
The integration in (\ref{Hermite}) is understood as a multiple Wiener-It\^o integral (over the region $\R^m$ without the diagonal). Note $\hat H(1)=H$.
By the properties of Wiener integrals, two Hermite processes $Z^{H, m}$ and $Z^{H',m'}$, defined by the same Wiener process, are uncorrelated if $m \not = m'$.
The Hermite processes have stationary increments and finite moments of all orders with covariance \begin{equation}
{\symb E}( Z_t^{H,m} Z_s^{H,m}) = \frac 1 2 ( t^{2H} + s^{2H} - \vert t-s \vert^{2H}).
\end{equation}
Therefore, using Kolmogorov's theorem \ref{Kolmogorov-theorem}, one can show that the Hermite processes $Z_t^{H,m}$ have sample paths of H\"older regularity up to $H$.
They are also self similar with exponent~$H$ which means
$ \lambda^H Z^{H,m} _{\frac \cdot\lambda} \sim Z^{H,m}_\cdot$.
\begin{remark}
In some literature, see e.g. \cite{Maejima-Ciprian} where further details on Hermite processes can also be found,
the Hermite processes are defined with a different exponent as below:
$$\tilde Z_t^{H,m} =\frac {K(H,m)} {m!} \int_{\R^m} \int_0^t \prod_{j=1}^m (s-\xi_j)_+^{ H- \frac 3 2} \, ds \, d W({\xi_1}) \dots d W({\xi_m}).$$
The two notions are related by
\begin{equation}Z_t^{H^*(m), m}=\tilde Z_t^{H,m}, \qquad Z_t^{H,m}=\tilde Z_t^{\hat H(m),m}.
\end{equation}
We refer to \cite{Pipiras-Taqqu-book, Samorodnitsky, Cheridito-Kawaguchi-Maejima} for detailed studies of fBM's which are used in this article.
\end{remark}
\subsection{Fractional Ornstein-Uhlenbeck processes}\label{OU-section}
Let us normalise the fractional Brownian motion, so that $B^H_0=0$ and ${\symb E}(B^H_1)^2=1$.
Disjoint increments of $B_t^H$ have a covariance of the form:
\begin{equation*}
{\symb E}(B_t-B_s)(B_u-B_v)=\frac 12 \left( |t-v|^{2H}+|s-u|^{2H}-|t-u|^{2H}-|s-v|^{2H} \right).
\end{equation*}
We define the stationary fractional Ornstein-Uhlenbeck processes to be
$y_t = \sigma \int_{-\infty}^t e^{-\sigma(t-s) } dB^H_s$,
where $B^H_t$ is a two-sided fractional BM, $\sigma$ is chosen such that $y_t$ is distributed as $\mu=N(0,1)$.
It is the solution of the following Langevin equation:
$$dy_t = - y_t dt + \sigma d B^H_t, \qquad y_0 = \sigma \int_{-\infty}^0 e^{ s } dB^H_s.$$
We take $y_t^\epsilon$, the fast or rescaled fOU, to be the stationary solution of
\begin{equation}\label{fOU}
dy_t^\epsilon = -\frac 1 \epsilon y_t^\epsilon\, dt + \frac { \sigma} {{\epsilon}^H}\, d B^H_t.
\end{equation}
Observe that $ y_\cdot ^\epsilon$ and $ y_{\frac \cdot \epsilon} $ have the same distributions,
$y^\epsilon_t=\frac \sigma {\epsilon^H}\int_{-\infty}^t e^{-\frac 1 \epsilon (t-s) } dB^H_s$. In particular, both $y_t$ and $y^{\epsilon}_t$ are H\"older continuous with H\"older exponents $\gamma \in (0,H)$.
Let us denote their correlation functions by $\rho$ and $\rho^\epsilon$:
$$\rho(s,t):={\symb E}(y_sy_t), \qquad \rho^{\epsilon}(s,t):= {\symb E}(y^{\epsilon}_sy^{\epsilon}_t).$$
Let $\rho(s)={\symb E} (y_0y_s)$ for $s\ge 0$ and extended to $\R$ by symmetry, then $\rho(s,t)=\rho(t-s)$ and similarly for $\rho^{\epsilon}$.
For $H>\frac 12$, the set of functions for which Wiener integrals are defined include $L^2$ functions and
so $\rho$ possesses an analytical expression.
Indeed, since
$${\symb E} (B^H_tB^H_s)=H(2H-1) \int_0^t\int_0^s |r_1-r_2|^{2H-2}dr_1dr_2,$$
we have
$$\frac{ \partial^2}{\partial t\partial s} {\symb E}(B^H_tB^H_s) =H(2H-1)|t-s|^{2H-2},$$ which is integrable, and therefore we may use the Wiener isometry to compute the covariances
\begin{equs}
{\symb E}(y_ty_s) =\sigma^2 H(2H-1) \int_{-\infty}^t\int_{-\infty}^s e^{-(s+t-r_1-r_2) } |r_1-r_2|^{2H-2} dr_1 dr_2.
\end{equs}
For $u>0$, we set
$$\rho(u)=\sigma^2 H(2H-1) \int_{-\infty}^{u}\int_{-\infty}^0 e^{-(u-r_1-r_2) } |r_1-r_2|^{2H-2} dr_1 dr_2.$$
Using this, the following correlation decay was shown in \cite{Cheridito-Kawaguchi-Maejima}.
\begin{lemma}
\label{correlation-lemma}
Let $H\in (0, \frac 12)\cup (\frac 12, 1)$. Then,
$\rho(s)=2\sigma^2 H(2H-1) s^{2H-2} +O(s^{2H-4})$ as $ s \to \infty$. In particular, for any $t\not =s$,
\begin{equation}\label{cor1}
|\rho(s,t)| \lesssim 1\wedge |t-s|^{2H-2}.
\end{equation}
\end{lemma}
Hence, $ \int_0^\infty \rho^m(s) ds$ is finite if and only if $ H^*(m)<\frac 12$, or $H=\frac 12$ and $m\in \N$, as in the latter the usual OU process admits exponential decay of correlations.
\begin{lemma}\label{Integrals}
Let $H\in (0,1) \setminus \{ \frac 1 2\}$, fix a finite time horizon $T$, then for $t \in [0,T]$ the following holds \emph{uniformly} for $\epsilon \in (0,\frac 1 2]$:
\begin{equation} \label{correlation-decay-2-1}
\left( \int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} \vert \rho(u,r) \vert^m\, dr \,du\right)^{\frac 12} \\
\lesssim
\left\{ \begin{array}{lc}
\sqrt {\frac t \epsilon \int_0^\infty \vert \rho(s) \vert^m \, ds}, \quad &\hbox {if} \quad H^*(m)<\frac 12,\\
\sqrt { (\frac t \epsilon) \vert \ln\left(\frac 1 \epsilon \right) \vert}, \quad &\hbox {if} \quad H^*(m)=\frac 12,\\
\left( \frac t \epsilon\right) ^{H^*(m)}, \quad &\hbox {if} \quad H^*(m)>\frac 12.
\end{array} \right.
\end{equation}
\begin{equation} \label{correlation-decay-2-2}
\left( \int_0^{t} \int_0^{t} \vert \rho^{\epsilon}(u,r) \vert^m\, dr \,du\right)^{\frac 12} \\
\lesssim
\left\{ \begin{array}{lc}
\sqrt { t \epsilon \int_0^\infty \vert \rho(s) \vert^m \, ds}, \quad &\hbox {if} \quad H^*(m)<\frac 12,\\
\sqrt { t \epsilon \vert \ln\left(\frac 1 \epsilon \right) \vert}, \quad &\hbox {if} \quad H^*(m)=\frac 12,\\
\left( \frac t \epsilon\right) ^{H^*(m)-1}, \quad &\hbox {if} \quad H^*(m)>\frac 12.
\end{array} \right.
\end{equation}
Note, if $H=\frac 12$, for any $m \in \N$, the bound is always $ \sqrt {\frac t \epsilon \int_0^\infty \rho^m(s) \, ds}$.
In particular,
\begin{equation}\label{integral-10}
t \int_0^{t} \vert \rho^{\epsilon}(s)\vert^m ds
\lesssim \frac { t^{ \left(2H^*(m) \vee 1\right)}} { \alpha \left( \epsilon, H^*(m)\right)^2}.
\end{equation}
\end{lemma}
\begin{proof}
We first observe that
\begin{equation}\label{correlation-decay-3}
\int_0^\infty \rho^m(s) ds <\infty \quad \Longleftrightarrow \quad H^*(m)<\frac 12\quad
\Longleftrightarrow \quad H<1 -\frac 1{2m}.
\end{equation}
By a change of variables and using estimate (\ref{cor1}) on the decay of the auto correlation function,
\begin{align*}
\int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} \vert \rho(\vert u-r \vert)\vert^m dr du
&=2 \frac t \epsilon \int_0^{\frac t \epsilon} \vert \rho(s)\vert^m ds\\
&\lesssim \left\{ \begin{array}{cl}
\frac t \epsilon \int_0^\infty \rho^m(s) ds, &\hbox{ if }H^*(m)<\frac 12,\\
\left( \frac t \epsilon \right)^{2H^*(m)}, &\hbox{ otherwise.}
\end{array} \right.
\end{align*}
For the case $H^*(m)=\frac 1 2$ we use
\begin{align*}
\int_0^{\frac t \epsilon} \vert \rho(s) \vert^m ds &\leq \int_0^{\frac T \epsilon} \vert \rho(s) \vert^m ds
\lesssim \int_0^{\frac T \epsilon} \left(1 \wedge \frac 1 s\right) ds \lesssim \big\vert \ln \left( \frac T \epsilon \right) \big\vert
\lesssim \big\vert \ln \left( \frac 1 \epsilon \right) \big\vert.
\end{align*}
To complete the proof we observe that by a simple change of variables,
\begin{align*}
\int_0^{t} \int_0^{t} \vert \rho^{\epsilon}(u,r) \vert^m\, dr \,du &= \epsilon^2 \int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} \vert \rho(u,r) \vert^m\, dr \,du.
\end{align*}
\end{proof}
Next, we recall the Garsia-Rodemich-Rumsey-Kolmogorov inequality.
\begin{lemma}\label{lemma-GRR}
[Garsia-Rodemich-Rumsey-Kolmogorov inequality]
Let $T>0$.
Let $\theta: [0,T]\to \R^d$. For any positive numbers $\gamma, p$, there exists a constant $C(\gamma, p)$ such that
$$\sup_{s\not =t , s,t \in [0,T]} \frac{|\theta(t)-\theta (s)|}{|t-s|^\gamma} \le C(\gamma, p)
\left( \int_0^T \int_0^T \frac { |\theta_s-\theta_r|^p}{ {|s-r|}^{\gamma p+2}}ds dr\right)^{\frac 1p}.$$
\end{lemma}
See \cite[section A.2]{Friz-Victoir} in the Appendix, as well as \cite{Stroock-Varadhan-mult-dim-diffusion-processes}, for a proof.
As a consequence of this inequality one obtains the following theorem.
\begin{theorem}[Kolmogorov's Theorem]\label{Kolmogorov-theorem}
Let $\theta$ be a stochastic process.
Suppose that for $s,t\in [0, T]$, $p>1$ and $\delta>0$,
$${\symb E}|\theta(t)-\theta (s)|^p \le c_p |t-s|^{1+\delta},$$ where $c_p$ is a constant. Then, for $\gamma<\frac \delta p$, $\theta \in \mathcal{C}^{\gamma}([0,T],\R)$, and in particular
$$\||\theta|_{C^\gamma([0,T])}\|_p \le C(\gamma, p) ( c_p)^{\frac 1p} \left(\int_0^T \int_0^T |u-v|^{\delta -\gamma p-1} dudv\right)^{\frac 1p},
$$ where the right-hand side is finite when $\gamma \in (0, \frac \delta p)$.
\end{theorem}
\begin{lemma}\label{cty-lemma}
For any $\gamma \in (0, H)$, $p>1$,
the following estimates hold:
$$\sup_{s, t\ge 0}\frac{\|y_s-y_t\|_{L^p}}{ 1 \wedge |s-t|^{H}} \lesssim 1, \qquad {\symb E} \sup_{s\not =t, s,t\in [0,T]} \left( \frac{ |y_s-y_t |} { |t-s|^{\gamma}} \right)^p \lesssim T\; C(\gamma, p).$$
\end{lemma}
\begin{proof}
We use the fractional Ornstein-Uhlenbeck equation
$$y_s-y_r=- \int_r^s y_udu+B^H_s-B^H_r,$$
to obtain ${\symb E}|y_s-y_r|^2 \lesssim (s-r)^2 {\symb E} \vert y_1 \vert ^2+q|s-r|^{2H}$.
Using the stationarity of $y_t$, one also has $ {\symb E}|y_s-y_r|^{2} \leq 2{\symb E} |y_1|^2=2$.
Since for Gaussian random variables the $L^2$ norm controls the $L^p$ norm we have
$$\|y_s-y_r\|_{L^p} \lesssim \left\{\begin{aligned}
& 1, &\hbox{ if } |s-r|\ge 1;\\
& |s-r|^{H}, \quad &\hbox{ if } |s-r|\le 1. \end{aligned}\right.$$
Thus, by symmetry and a change of variables,
$\int_0^T \int_0^T \frac { {\symb E}|y_s-y_r|^p}{ {|s-r|}^{ \gamma p+2}}dsdr\lesssim T$ and application of Kolmogorov's theorem \ref{Kolmogorov-theorem} concludes the proof.
\end{proof}
\section{Applications}
\subsection{The second order problem}
\label{example-fou}
If $x$ is a stochastic process, we write $x_{s,t}=x_t-x_s$.
\begin{proposition}\label{th-example}
Let $H\in (0,1)$, $\gamma \in (0,H)$, $p>1$ and fix a finite time~$T$.
Let $X^{\epsilon}_t = \epsilon^{H-1} \int_0^{t } y^\epsilon _{s} ds$, then,
$$\sup_{s, t \in [0,T]} \left \| X_{s,t}^\epsilon- \sigma B^H_{s,t}\right\|_{L^p} \lesssim \epsilon^H, \qquad
\left\| \left|X^\epsilon - \sigma B^H\right|_{\mathcal{C}^{\gamma'}([0,t],\R)}\right\|_{L^p} \lesssim t^{\gamma} \epsilon^{H-\gamma},$$
for any $\gamma'<\gamma<H$ and for $t \in [0,T]$.
\end{proposition}
\begin{proof}
Set
$v_t^\epsilon=\epsilon ^{H-1} y^\epsilon_{t}$, then, $v_t^\epsilon$ solves the following equation
$$dv_t^\epsilon=-\frac 1 \epsilon v_t^\epsilon dt + \frac {\sigma} {\epsilon} d B^H_t.$$
Using the equation for $v_t^\epsilon$ we have
$$X_{s,t}^\epsilon
= \epsilon^{H-1} \int_s^{t } y^\epsilon _{r} dr =\int_s^t v_r^\epsilon dr =\epsilon (v^{\epsilon}_t-v_s^\epsilon) + \sigma B^H_{s,t}.$$
Therefore, for any $p>1$,
\begin{align*}
\sup_{s,t \in [0,T]} \left \| X_{s,t}^\epsilon- \sigma B^H_{s,t}\right\|_{L^p} &=\sup_{s,t \in [0,T]} \left \| \epsilon (v_t^\epsilon-v_s^{\epsilon})\right\|_{L^p}
=\epsilon ^{H} \sup_{s,t \in [0,T]} \left\| y^{\epsilon}_{t}- y^{\epsilon}_s \right\|_{L^p} \lesssim \epsilon^H.
\end{align*}
In the last step we used the stationarity of $ y_t^\epsilon$.
Thus, applying Kolmogorov's theorem \ref{Kolmogorov-theorem} to $X^\epsilon- \sigma B^H$, we see that the following holds for any $t \in [0,T]$, $p>1$ and any $\gamma'<\gamma$
$$\||X^\epsilon-\sigma B^H|_{\mathcal{C}^{\gamma'}([0,t],\R)}\|_{L^p} \lesssim \epsilon^H \left({\frac t \epsilon}\right)^{\gamma},
$$
hence, the claim follows.
\end{proof}
As an application we consider the following slow/fast system,
\begin{equation}\label{example}
\left\{ \begin{aligned}
&\dot x_t^\epsilon= \epsilon^{H-1} f(x_t^\epsilon) \, y^\epsilon_t,
\\& x_0^\epsilon=x_0.
\end{aligned}
\right.
\end{equation}
To describe its limit, we review the concept of Young integrals. If $f, g: [0, T]\to \R$ with $f\in \mathcal{C}^\alpha$ and $g\in \mathcal{C}^\beta$ such that $\alpha +\beta>1$,
then the Riemann-Stieltjes integral makes sense, and
$$\int_0^t f_s dg_s= \lim_{|\mathcal{P}|\to 0} \sum_{[u,v] \subset \mathcal{P}} f_u(g_v-g_u)\in \mathcal{C}^\beta.$$
For details see~\cite{Young36}; this integral is called a Young integral.
Since Young integrals have the regularity of its integrand, for $H>\frac 12$ the equation
$\dot x_t=f(x_t) \;dB^H_t$ makes sense.
In \cite{Lyons94}, it was shown that if $f \in \mathcal{C}_b^3$, then the equation has a unique global solution from each initial value.
Equations of this type are Young equations, the simplest type of rough differential equations. The notation $\mathcal{C}_b^3$ denotes the space of bounded functions whose first three derivatives are bounded.
\begin{proposition}\label{prop-linear-driver}
Let $H \in ( \frac 1 3, 1)$ and $f\in \mathcal{C}_b^3(\R^d,\R^d)$. Then for any $\gamma \in (0,H)$, $\gamma' < \gamma$, $x_t^\epsilon$ converges in $L^p$
in $\mathcal{C}^{\gamma'}([0,T], \R^d)$
to the solution of the rough differential equation:
\begin{equation}\dot x_t= \sigma f(x_t) \;dB^H_t, \, \, \, \, \, \, x_0 =x_0.\end{equation}
Furthermore, for $t \in [0,T]$,
$$\left\| |x^\epsilon -x|_{\mathcal{C}^{\gamma'}([0,t],\R)} \; \right\|_{L^p} \lesssim t^{\gamma} \epsilon^{H-\gamma}.$$
\end{proposition}
\begin{proof}
The idea is to consider equation (\ref{example}) as a Young/rough differential equation.
In case $H \in (\frac 1 2,1)$, we can rewrite our equation as
$$\dot x_t^\epsilon =f(x_t^\epsilon) dX_t^\epsilon,$$
where $X^{\epsilon}=\epsilon^{H-1} \int_0^{t } y^\epsilon _{s} ds$ is as in the previous lemma. Young's continuity theorem, see Theorem \ref{cty-rough}, states that $x^\epsilon$ converges weakly
provided $X^\epsilon$ converges weakly in a H\"older space of regularity greater than $\frac 1 2$.
For $H \in (\frac 1 3, \frac 1 2)$ we need to rewrite our equation into a rough differential equation,
$$\dot x_t^\epsilon =f(x_t^\epsilon) d{\mathbf X}_t^\epsilon,$$
where ${\mathbf X}^{\epsilon}$ is given by $X^{\epsilon}$ enhanced with its canonical lift
$$\mathbb{X}^{\epsilon}_{s,t} = \int_s^t (X^\epsilon_r - X^\epsilon_s) dX^\epsilon_r.$$
As we restrict ourselves to one dimension we obtain, by symmetry, $\mathbb{X}^{\epsilon}_{s,t}= \frac 1 2 {(X^{\epsilon}_{s,t})^2}$, hence, ${\mathbf X}^{\epsilon}$ converges to a fBm $\sigma B^H$ enhanced with $\mathbb{B}^H_{s,t}=\frac 1 2 { \left( \sigma B^H_{s,t}\right)^2} $.
As the solution map, $\Phi$, to a RDE satisfies, see \cite{Friz-Hairer} or Theorem \ref{cty-rough} below,
$$|\Phi({\mathbf X}^\epsilon)-\Phi(\mathbf{B}^H)|_{\mathcal{C}^{\gamma'}} \lesssim \rho_{\gamma}({\mathbf X}^\epsilon, \mathbf{B}^H),$$
where $\rho_{\gamma}$ denotes the inhomogeneous rough path norm of regularity $\gamma$ and $\mathbf{B}^H = (\sigma B^H, \frac 1 2 { \left( \sigma B^H_{s,t}\right)^2} )$. Thus, the
$L^p$ convergence of the solutions follows from the $L^p$ convergence of the drivers, hence, we can conclude the proof by Proposition \ref{th-example}.
\end{proof}
\begin{remark}
Kramers-Smoluchowski limits/kinetic fBMs are studied in
\cite{Boufoussi-Ciprian, Zhang-08,Al-Talibi-Hilbert}. See also \cite{Fannjiang-Komorowski-2000, Friz-Gassiat-Lyons, Friz-Hairer}.
\end{remark}
\begin{remark}
\label{remark-variance}
\
\begin{enumerate}
\item
For $H<\frac 12$ and $m=1$, Theorem \ref{theorem-CLT} appears to contradict Proposition \ref{th-example}; in the first we claim the limit is a Brownian motion, whereas in the second we claim that it is a fractional Brownian motion. Both results are correct and can be easily explained: the explanation lies in the fact that $\int_\R \rho(s)\, ds$ vanishes if $H<\frac 12$, and so the
Brownian motion limit is degenerate.
Since according to \cite{Cheridito-Kawaguchi-Maejima}, \begin{equation}\label{cor5}
\rho(s) = \sigma^2 \frac{\Gamma(2H+1) \sin(\pi H)}{2 \pi} \int_{\R} e^{isx} \frac{\vert x \vert^{1-2H}}{1 + x^2} dx,
\end{equation}
and by the decay estimate from (\ref{cor1}), $\rho$ is integrable,
so $\int_\R \rho(s)\, ds$ equals the value at zero of the Fourier transform of $\rho$, which is,
up to a multiplicative constant, $\frac{\vert \lambda \vert^{1-2H}}{1 + \lambda^2}$.
This is also the spectral density of $y_t$ and has value $0$ at $0$.
This means we have scaled too much and the correct scaling is to multiply the integral $\int_0^{t} y^{\epsilon}_s ds$ by
$\epsilon^{H-1}$ in which case we obtain a fBm as limit.
\item For $m>1$ and $H<\frac 12$ the Wiener limit is not trivial. Indeed,
$$\begin{aligned}
\int_\R \rho(s)^m ds
&=C \int_\R \stackrel{m} {\overbrace{ \int _\R \dots \int_{\R} }} \prod_{k=1}^m e^{isx_k} \frac{\vert x_k \vert^{1-2H}}{1 + |x_k|^2} dx_1\dots dx_m\, ds\\
&= C \stackrel{m-1} {\overbrace{ \int _\R \dots \int_{\R}}} \frac{\vert x_2+\dots + x_m \vert^{1-2H}}{1 + |x_2+\dots + x_m|^2} \prod_{k=2}^m \, \frac{\vert x_k \vert^{1-2H}}{1 + |x_k|^2}\, dx_2 \dots dx_m \not =0.
\end{aligned}$$
\end{enumerate}
\end{remark}
\subsection{The 1d fluctuation problem}
\label{homo}
In this section we give an application of Theorem A.
Given a function $g \in L^1(\mu)$ we denote $ \bar{g}=\int_{\R} g(y) \mu(dy)$.
\begin{lemma}\label{lemma-pseudo-ergodic}
The stationary Ornstein-Uhlenbeck process is ergodic. Thus, $\int_0^t g(y^{\epsilon}_s) ds\to t \bar g$ in probability for every $g \in L^1(\mu)$.
\end{lemma}
\begin{proof}
A stationary Gaussian process is ergodic if its spectral measure has no atom,
see \cite{Cornfeld-Fomin-Sinai, Samorodnitsky}. The spectral measure $F$ of a stationary Gaussian process is obtained from
Fourier transforming its correlation function and
$\rho(\lambda)=\int_\R e^{i \lambda x} dF(x)$.
According to \cite{Cheridito-Kawaguchi-Maejima}:
\begin{equation}\label{cor5-2}
\rho(s) = \frac{ \Gamma(2H+1) \sin(\pi H)}{2 \pi} \int_{\R} e^{isx} \frac{\vert x \vert^{1-2H}}{1 + x^2} dx,
\end{equation}
so the spectral measure is absolutely continuous with respect to the Lebesgue measure with spectral density, up to a non-zero constant, given by $s(x) = \frac { \vert x \vert^{1-2H}}{1+ x^2}$.
Since $\int_0^t g(y^{\epsilon}_t) dt$ equals $ \epsilon \int_0^{\frac t \epsilon } g(y_s) ds$ in law, the former converges in law to $ t \bar{g}$ by Birkhoff's ergodic theorem. The claim now follows as weak convergence to a constant implies convergence in probability.
\end{proof}
In the following proof we will need the following theorem from Young/rough path theory; for details we refer to \cite{Friz-Hairer,Friz-Victoir,Lyons94,Lyons-Caruana-Levy}. We denote the space of rough paths of regularity $\beta$ by ${\mathcal F}C^{\beta}$.
\begin{theorem}\label{cty-rough}
Let $Y_0 \in \R^d, \beta \in (\frac1 3, 1), \, \gamma \in (\frac 1 2, 1), \, f \in \mathcal{C}^3_b(\R^d,\R^d) $, $h \in \mathcal{C}_b^{2}(\R^d,\R^d)$, ${\mathbf X} \in {\mathcal F}C^{\beta}([0,T],\R)$ and $\mathbf{Z} \in {\mathcal F}C^{\gamma}([0,T],\R)$ such that $\beta + \gamma >1$. Then, the differential equation
\begin{equation}\label{example-sde}
Y_t = Y_0 + \int_0^t f(Y_s) d\mathbf{X}_s + \int_0^t h(Y_s) d\mathbf{Z}_s
\end{equation}
has a unique solution which belongs to $\mathcal{C}^{\beta \wedge \gamma}$. Furthermore, the solution map $\Phi_{f,h}: ~\R\times {\mathcal F}C^{\beta}([0,T], \R) \times {\mathcal F}C^{\gamma}([0,T], \R)
\to \mathcal{C}^{\beta \wedge \gamma}([0,T],\R)$, where the first component is the initial condition and the second and third components the drivers, is continuous.
\end{theorem}
Given a centred function $G \in L^2(\mu) $, with chaos expansion $G=\sum_{k=m}^{\infty} c_k H_k$ and let $c \geq 0$ be given by
\begin{equation}\label{c-square}
c^2=\left\{ \begin{array}{ll} (\frac {c_m m!}{K(H,m)})^2, \quad & H^*(m)>\frac 12\\
2\sum_{k=m}^\infty (c_k)^2 k! \int_0^\infty \rho^k(s) ds, \quad &H^*(m)<\frac 12\\
2m!(c_m)^2, \quad &H^*(m)=\frac 12.
\end{array} \right.
\end{equation}
\begin{theorem}
Let $H \in (\frac 1 3,1) \setminus \{ \frac 1 2 \}$, $f\in \mathcal{C}_b^3(\R^d, \R^d)$, $h \in \mathcal{C}^{2}_b(\R^d;\R^d)$, $G \in \mathcal{C}(\R,\R)$ satisfying Assumption \ref{assumption-single-scale-not-continuous} and $g \in \mathcal{C}_b(\R;\R)$.
Let $\alpha(\epsilon) = \alpha(\epsilon,H^*(m))$. Fix a finite time $T$ and consider
\begin{equation}\label{limit-eq}
\dot x_t^\epsilon =\alpha(\epsilon) f(x_t^\epsilon) G(y^{\epsilon}_t)+ h(x^{\epsilon}_t)g(y^{\epsilon}_t),
\qquad x_0^\epsilon=x_0.
\end{equation}
\begin{enumerate}
\item If
$H^*(m) > \frac 1 2$, $x_t^\epsilon$ converges weakly in $\mathcal{C}^{\gamma}([0,T],\R^d)$ to the solution to the Young differential equation
$d\bar x_t = c f(\bar x_t) \,dZ_t^{H^*(m),m} +\bar{g} h(\bar{x}_t) dt$ with initial value $x_0$ for $\gamma \in (0, H^*(m)- \frac 1 p)$.
\item If
$H^*(m) \leq \frac 1 2$,
$x_t^\epsilon$ converges weakly in $\mathcal{C}^\gamma([0,T],\R)$ to the solution of the Stratonovich stochastic differential equation
$d\bar x_t = c f(\bar x_t) \circ \,dW_t + \bar{g} h(\bar{x}_t) dt$ with $ \bar x_0=x_0$, where $\gamma \in(0, \frac 1 2- \frac 1 p)$.
\end{enumerate}
\end{theorem}
\begin{proof} \label{proof-theorem-C}
As in Proposition \ref{prop-linear-driver} we can rewrite our equations as Young/rough differential equations and therefore reduce our analysis to the drivers $\left(\alpha(\epsilon) \int_0^t G(y^{\epsilon}_s) ds, \int_0^t g(y^{\epsilon}_s) ds\right)$.
By Theorem \ref{theorem-CLT}, $\alpha(\epsilon) \int_0^t G(y^{\epsilon}_s) ds$ converges in finite dimensional distributions either to a Wiener or a Hermite process. By Lemma \ref{lemma-pseudo-ergodic}, $\int_0^t g(y^{\epsilon}_s) ds$ converges in probability to the deterministic path $t \bar{g}$. Hence,
$\left(\alpha(\epsilon) \int_0^t G(y^{\epsilon}_s) ds, \int_0^t g(y^{\epsilon}_s) ds\right)$ converges jointly in finite dimensional distributions.
Furthermore, $\Vert\int_0^t g(y^{\epsilon}_s) ds \Vert_{\infty} \leq t \Vert g \Vert_{\infty}$, this combined with the moment bounds obtained in Theorem \ref{theorem-CLT} enables us to apply Theorem \ref{cty-rough} to conclude the proof.
\end{proof}
\begin{remark}
The constant $c$ could be $0$, for further details see Remark \ref{remark-variance}.
\end{remark}
\section{Proof of Theorem A}
We first establish the $L^2(\Omega)$ convergence of $X_t^\epsilon= \alpha(\epsilon)\int_0^{t} G(y^{\epsilon}_s)ds$,
where $G=\sum_{k=m}^\infty c_k H_k$ has low Hermite rank, followed by a reduction theorem. We then prove moment bounds and conclude the proof of Theorem~\ref{theorem-CLT}.
\subsection{Preliminary lemmas}
The basic scalar valued functional limit theorem, for low rank Hermite functions, was proved in \cite{Taqqu} for
$\epsilon^{H^*(m)} \int_0^{\frac t \epsilon} G(X_s) ds$ with $X_t = \int_{\R} p(t-\xi) dW_{\xi}$ a moving average,
where in order to prove convergence one uses the self-similarity of the Wiener process, which leads only to weak convergence since this equivalence holds only in law.
Nevertheless, in our case we can choose a properly scaled fast variable and write $y_t^{\epsilon} = \int_{\R} \hat{g} \left(\frac {t-\xi} {\epsilon}\right) dW_{\xi}$ for a function $\hat g$, and thus avoid using self-similarity.
The key idea is to write a Wiener integral representation beginning with
\begin{equation}\label{y-integral}
\begin{aligned}
y_t^\epsilon&=\epsilon^{-H} \sigma \int_{-\infty}^t e^{-\frac {t-r} \epsilon} dB^H_r
=\int_{\R} h_\epsilon(t,s) dW_s, \quad \hbox{ where,}\\
h_\epsilon(t,s)&= \epsilon^{-\frac 12}
\frac {\sigma} {c_1(H)} e^{-\frac {t-s} \epsilon} \int_0^{\frac {t-s} \epsilon} e^v \;v_{+}^{H-\frac 32}\; dv,
\end{aligned}\end{equation}
and $c_1(H) = \sqrt{ \int_{-\infty}^0 \left( (1-s)^{H-\frac 1 2} - (-s)^{H- \frac 1 2} \right)^2 ds + \frac 1 {2H} }$.
This can be obtained by applying the integral representation for fBM's:
\begin{equation}\label{fbm-i}\begin{aligned}
B_t^H=\int_{-\infty}^{\infty} g(t,s)dW_s, \qquad \hbox{ where } \quad
g(t,s)= \frac {1} {c_1(H)} \int_{0}^t (r-s)_+^{H-\frac 32} dr,\end{aligned}\end{equation} and
by repeated applications of integration by parts (to the Young integrals):
\begin{align*}
&\sigma \int_{-\infty}^t e^{- \frac {t-s} {\epsilon}} dB^H_s = \sigma B^H_t - \frac \sigma \epsilon \int_{-\infty}^t e^{- \frac {t-s} {\epsilon}} B^H_s ds\\
&= \sigma B_t^H - \frac \sigma \epsilon \int_{-\infty}^t e^{- \frac {t-s} {\epsilon}} \left( \int_{\R} g(s,r) dW_r \right)ds
=\sigma \int_{\R} \int_{-\infty}^t e^{- \frac {t-s} {\epsilon}} \partial_s g(s,r) ds dW_r\\
&= \frac{\sigma} {c_1(H)} \int_{\R} \int_{-\infty}^t e^{- \frac {t-s} {\epsilon}} (s-r)_{+}^{H- \frac 3 2} ds dW_r.
\end{align*}
One may also use the following, see \cite{Pipiras-Taqqu}, taking $f\in L^1\cap L^2$:
$$
\int_{\R} f(u) dB^H_u = \frac {1} {c_1(H)}\int_{\R} \int_{\R} f(u) (u-s)_{+}^{H- \frac 3 2} \,du \,dW_s.$$
\begin{lemma}\label{kernel lemma} Let $\lambda$ denote the Lebesgue measure, then as $\epsilon\to 0$,
$\epsilon^{H^*(m) -1} \int_0^t H_m(y^{\epsilon}_s) ds$ converges to $ \frac { m!} { K(H,m)} Z_t^{H^*(m),m}$ in $L^2(\Omega)$.
Equivalently,
\begin{equation}
\left \Vert \int_0^t
\prod_{i=1}^m h_\epsilon(s, u_i) ds - \int_0^t \prod_{i=1}^m (s- u_i)_+^{H-\frac 32}ds\right \Vert_{L^2(\R^m, \lambda)} \to 0.
\end{equation}
\end{lemma}
\begin{proof}
This can be shown by applying \cite[Theorem 4.7]{Taqqu}, where weak convergence is obtained. With a small modification and using \cite[Lemma 4.5, Lemma 4.6]{Taqqu} directly we obtain the $L^2(\Omega)$ convergence:
$$ {\symb E} \left( \int_{\R^m} \int_0^t \prod_{i=1}^m p\left( \frac {s- \xi_i} {\epsilon}\right) \epsilon^{H- \frac 3 2} ds dW_{\xi} - \frac{Z^{H^*(m),m}} {K(H,m)} \right)^{ 2 }\to 0,$$
using the Wiener integral representation of the Hermite processes this is equivalent, by a multiple Wiener-It\^o isometry, to
\begin{equation}\label{explicit-kernel-2}
\int_{\R^m} \left( \int_0^t \prod_{i=1}^m p\left( \frac {s- \xi_i} {\epsilon}\right) \epsilon^{H - \frac 3 2} ds - \int_0^t \prod_{i=1}^m \left( s- \xi_i \right)_{+}^{H- \frac 3 2} ds \right)^2 d\xi_1 \dots d\xi_m \to 0.
\end{equation}
Examining Taqqu's proof, we note that in fact the $L^2$ convergence of (\ref{explicit-kernel-2})
is obtained under the following conditions.
\begin{itemize}
\item[{1}] $\int_{\R} p(s)^2 ds < \infty$.
\item[{2}] $ \vert p(s) \vert \leq C s^{H- \frac 3 2} L(s)$ for almost all $s>0$.
\item[{3}] $p(s) \sim s^{H - \frac 3 2 } L(s)$ as $s \to \infty$.
\item[{4}] There exists a constant $\gamma$ such that $0<\gamma< (1-H)\wedge (H- (1-\frac 1 {2m}))$ such that $\int_{-\infty}^0 \vert p(s) p(xy+s)\vert ds = o(x^{2H-2} L^2(x)) y^{2H-2-2\gamma}$ as $x \to \infty$ uniformly in $y \in (0,t]$.
\end{itemize}
where $L$ denotes a slowly varying function (for every $\lambda >0$, $\lim_{x \to \infty} \frac{L(\lambda x)}{L(x)} =1$).
Set \begin{equation}\label{g} \hat{g}(s)= \frac {\sigma} {c_1(H)} e^{-s} \int_0^s e^u u_{+}^{H- \frac 3 2} du,
\end{equation}
then,
\begin{align*}
y^{\epsilon}_t
= \epsilon^{-\frac 1 2}\int_{- \infty}^{t} \hat{g}\left( \frac{t-s} {\epsilon} \right) dW_s.
\end{align*}
We are now in Taqqu's framework and it only remains to check that $\hat{g}$ defined by (\ref{g}) satisfies these conditions. To increase readability we suppress the constant $\frac {\sigma} {c_1(H)}$ in the following computations.
For $s<1$,
\begin{align*}
e^{-s} \int_0^s e^u u^{H- \frac 3 2 } du &\leq \int_0^s u^{H- \frac 3 2 } du
\lesssim s^{H - \frac 1 2}.
\end{align*}
We calculate for $s>1$ via integration by parts
\begin{align*}
e^{-s} \int_0^s e^u u^{H- \frac 3 2 } du &\leq
e^{-s} \int_0^1 e^u u^{H- \frac 3 2 } du + e^{-s} \int_1^s e^u u^{H- \frac 3 2 } du \\
&\lesssim e^{-s} +s^{H- \frac 3 2} + e^{-s} \int_1^s e^u u^{H- \frac 5 2 } du
\lesssim s^{H- \frac 3 2}.
\end{align*}
This of course implies that $\hat{g}$ is $L^2(\lambda)$ integrable.
Finally observe that $\int_{-\infty}^0 \vert \hat{g}(s) \hat{g}(xy+s)\vert ds = 0$ as $\hat{g}(s)=0$ for $s<0$.
With these we apply \cite[Theorem 4.7]{Taqqu} to conclude the $L^2$ convergence of the kernels.
\end{proof}
\begin{lemma}\label{L^2-kernel}
Let $G\in L^2(\mu)$ be a centred function with Hermite rank $m$ satisfying $H^*(m)> \frac 1 2$. Let $H\in ( \frac 1 2 ,1)$.
Then the following statements hold for the stationary scaled fOU process $y^{\epsilon}_s$. Fix $t>0$, then,
$$\left\| \epsilon^{H^*(m)-1} \int_{0}^{t} G(y^{\epsilon}_s) ds- \frac {c_m m!} { K(H^*(m),m)} Z^{H^*(m),m}_t\right\|_{L^2(\Omega)} \to 0.$$
\end{lemma}
\begin{proof}
For $G= H_m$ the claim has already been shown in Lemma \ref{kernel lemma}.
To conclude the claim in the case of a general $G$, we compute,
\begin{equation}\label{reduction_Hermite}
\begin{split}
&\left\| \epsilon^{H^*(m)-1} \int_{0}^{t} (G-c_m H_m)(y^{\epsilon}_s) ds\right\|^2_{L^2(\Omega)}
= \epsilon^{2H^*(m)-2} \sum_{k=m+1}^{\infty} c_k^2 k! o(\epsilon^{2H^*(m)-2})
\to 0
\end{split}
\end{equation}
as $\sum_{k=m+1}^{\infty} c_k^2 k! < \infty$ since $G \in L^2(\mu)$.
This finishes the proof.
\end{proof}
The fact that only the first term in the chaos expansion gives a contribution is often called a reduction lemma in the literature. In the high Hermite rank case, however, it is not possible to restrict one's analysis to a pure Hermite polynomial, but as the next lemma shows, finite linear combinations are indeed sufficient. To make the later application easier we prove it directly in the multi-dimensional case.
\begin{lemma}[Reduction Lemma]\label{reduction}
Fix $H \in (0,1)\setminus\{\frac 12\}$.
For $M\in \N$, define the truncated functions:
$$G_{k,M} = \sum_{j=m_k}^{M} c_{k,j} H_j, \qquad X^{k,\epsilon}_{M}(t)= \alpha_k(\epsilon) \int_0^{t} G_{k,M}(y^{\epsilon}_s)ds.$$
If for every $M \in \N$,
$(X^{1,\epsilon}_{M}, \dots, X^{N,\epsilon}_{M})\stackrel{(\epsilon\to 0)} \longrightarrow (X^{1}_{M}, \dots, X^{N}_{M})$
in finite dimensional distributions, then,
$$\left(X^{1,\epsilon}, \dots, X^{N,\epsilon}\right) \stackrel{(\epsilon\to 0)} \longrightarrow (X^{1}, \dots, X^{N})$$
in finite dimensional distributions.
\end{lemma}
\begin{proof}
Firstly,
\begin{align*}
X^{k,\epsilon}(t) - X^{k,\epsilon}_{M}(t) &= \alpha_k(\epsilon) \int_0^{t} \Big( G_k(y^{\epsilon}_s)- G_{k,M} (y^{\epsilon}_s)\Big)ds
= \alpha_k(\epsilon) \int_0^{t} \sum_{j=M+1}^\infty c_{k,j} H_j(y^{\epsilon}_s)ds.
\end{align*}
Using properties of the Hermite polynomials we obtain
\begin{align*}
&{\symb E} \left( \alpha_k(\epsilon) \int_0^{t} \sum_{j=M+1}^\infty c_{k,j} H_j(y^{\epsilon}_s)ds \right)^2
= \alpha_k(\epsilon)^2 \int_0^{t} \int_0^{t} \sum_{j=M+1}^\infty (c_{k,j})^2 {\symb E} \left( H_j(y^{\epsilon}_s) H_j(y^{\epsilon}_r) \right) dr ds\\
&= \alpha_k(\epsilon)^2 \sum_{j=M+1}^\infty (c_{k,j})^2 j! \int_0^{t} \int_0^{t} \rho^{\epsilon}(\vert s-r \vert)^j dr ds
\lesssim \,\sum_{j=M+1}^\infty (c_{k,j})^2 j!
\end{align*}
As $\sum_{j=m}^\infty (c_{k,j})^2 j! < \infty$ we obtain $ \sum_{j=M+1}^\infty (c_{k,j})^2 j!\to 0$ as $M \to \infty$. Thus,
\begin{equation}\label{Bill}
\lim_{M \to \infty}\lim_{\epsilon \to 0}{\symb E} \left( \alpha_k(\epsilon) \int_0^{t} G_k(y^{\epsilon}_s)ds - \alpha_k(\epsilon) \int_0^{t} G_{k,M}(y^{\epsilon}_s)ds \right)^2 \to 0.
\end{equation}
Let $ \{t_{\gamma_{k,l}}, k\le N, l\le A \} $ be a sequence of positive numbers.
Now, by the triangle inequality,
$$\lim_{M \to \infty}\lim_{\epsilon \to 0}
\left\Vert \sum_{k,l} \gamma_{k,l} \left( X^{k,\epsilon} (t_l) - X^{k,\epsilon}_M (t_l) \right) \right\Vert_{L^2(\Omega)} \to 0.$$
With Theorem 3.2 in \cite{Billingsley} this proves the claim.
\end{proof}
\subsection{Moment bounds}
\label{pre}
We will use some results from Malliavin Calculus. Let $x_s$ be a stationary Gaussian process with $\beta(s) = {\symb E} \left( x_s x_0 \right)$, such that $\beta(0)=1$. As a real separable Hilbert space we use $\mathscr{H} = L^2(\R_+,\nu)$ where for a Borel-set $A$ we have $\nu(A) = \int_{\R_+} \mathbf{1}_{A} (s) d\beta_s$. Let ${\mathscr H}^{\otimes q}$ denote the $q$-th tensor product of ${\mathscr H}$. For $h\in {\mathscr H}$, we may define the Wiener integrals $W(h)=\int_0^\infty h_s dx_s$ by $W([a,b])=x(b)-x(a)$ (where $a, b\ge 0$), linearity and the Wiener isometry
($\langle\mathbf{1}_{[0,t]}, \mathbf{1}_{[0,s]}\rangle=\beta(t-s)$).
Iterated Wiener integrals are defined similarly by their values on indicator functions:
$I_m(\mathbf{1}_{A_1\times \dots \times A_m})=\prod_{i=1}^m W (A_i)$ where $A_i$ are pairwise disjoint Borel subsets of $\R_+$.
If ${\mathcal F}$ denotes the $\sigma$-field generated by $x$,
then any ${\mathcal F}$-measurable $L^2(\Omega)$ function $F$ has the chaos expansion:
$F={\symb E} F+ \sum_{m=1}^\infty I_m(f_m)$ where $f_m\in L^2( \R_+^m)$. This is due to the fact that $L^2(\Omega)=\bigoplus_{m=0}^{\infty} {\mathscr H}_m$ where ${\mathscr H}_m$ is the closed linear space generated by $\{ H_m(W(h)): \Vert h \Vert_{L^2}=1\}$, $H_m$ are the $m$-th Hermite polynomials, and ${\mathscr H}_m=I_m(L_{\hbox{sym}}^2(\R_{+}^m))$.
The last fact is due to $H_m(W(h))= I_m( h^{\otimes ^{m} })$. In the following
${\mathbb{D}^{k,p}(\mathscr{H}^{\otimes m})}$ denotes the closure of Malliavin smooth random variables under the following norm
$\Vert u \Vert_{\mathbb{D}^{k,p}(\mathscr{H}^{\otimes m})} = \left( \sum_{j=0}^{k} {\symb E} \left( \Vert D^j u \Vert_{\mathscr{H}^{\otimes m}}^p \right) \right)^{\frac 1 p}$.
\begin{lemma}[Meyer's inequality] \cite{Nourdin-Peccati}
\label{Meyer}
Let $\delta$ denote the divergence operator.
Then for $u \in \mathbb{D}^{k,p}(\mathscr{H}^{\otimes m})$,
\begin{equation}
\Vert \delta^m(u) \Vert_{L^p(\Omega)} \lesssim \sum_{k=0}^m \Vert u \Vert_{\mathbb{D}^{k,p}(\mathscr{H}^{\otimes m})}.
\end{equation}
\end{lemma}
\begin{lemma}\label{representation}
\cite{Campese-Nourdin-Nualart}
If $G:\R\to \R$ is a function of Hermite rank $m$, then $G$ has the following multiple Wiener-It\^o-integral representation:
\begin{equation}\label{Lp-eq}
G(x_s) = \delta^m\left( G_m(x_s) \mathbf{1}_{[0,s]}^{\otimes m}\right),
\end{equation}
where $G_m$ has the following properties: \begin{itemize}
\item [(1)] $\Vert G_m \Vert_{L^p(\mu)} \lesssim \Vert G \Vert_{L^p(\mu)}$,
\item [(2)]
$G_m(x_1)$ is $m$ times Malliavin differentiable and its $k^{th}$ derivative, denoted by $G_m^{(k)}(x_1)\mathbf{1}_{[0,1]}^{\otimes k}$,
satisfies $ \Vert G_m^{(k)} \Vert_{L^p(\mu)} \lesssim \Vert G \Vert_{L^p(\mu)}$.
\end{itemize}
\end{lemma}
In the lemma below we estimate the moments of $\int_0^{t} G(x_{\frac r \epsilon}) dr$, where we need the multiple Wiener-It\^o-integral representation above to transfer the
correlation function to $L^2$ norms of indicator functions. We use an idea from \cite{Campese-Nourdin-Nualart} for the
estimates below.
\begin{lemma}\label{Lp-bounds}
Let $x_t=W([0,t])$ be a stationary Gaussian process with correlation $\beta(t)=\big| {\symb E}(x_t x_0)\big|$, stationary distribution $\mu$ and ${\mathscr H}$ the $L^2$ space over $\R_{+}$ with measure $\beta(r) dr$. If $G$ is a function of Hermite rank $m$ and $ G \in L^p(\mu)$, for $p>2$, then,
\begin{equation}\label{le5.4-1}
{\begin{split}
\left\Vert \frac {1} {\epsilon} \int_0^{t } G(x_{\frac r \epsilon}) dr \right\Vert_{L^p(\Omega)}
&\lesssim \Vert G \Vert_{L^p(\mu)} \left ( \int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} \beta(\vert u -r \vert)^m dr du \right)^{\frac 1 2}.\end{split}}
\end{equation}
For the stationary scaled fractional OU process $y^{\epsilon}_t$, we have
\begin{equation}\label{le5.4-2}
{\begin{split}
\left\Vert \frac {1} {\epsilon} \int_0^{t } G( {y_r^\epsilon}) dr \right\Vert_{{L^p(\Omega)}}
&\lesssim \left\{ \begin{array}{lc}
\Vert G \Vert_{L^p(\mu)}\; \sqrt {\frac t \epsilon \int_0^\infty \big|\rho^m(s) \big|ds } , \quad &\hbox {if} \quad H^*(m)<\frac 12,\\
\Vert G \Vert_{L^p(\mu)} \; \sqrt {\frac t \epsilon \ln| \frac {1} \epsilon|}, \quad &\hbox {if} \quad H^*(m)=\frac 12,\\
\Vert G \Vert_{L^p(\mu)} \; \left( \frac t \epsilon\right) ^{H^*(m)}, \quad &\hbox { otherwise.}
\end{array} \right.
\end{split}},
\end{equation}
in particular,
\begin{equation}
\left\Vert \int_0^{t } G(y^{\epsilon}_r) dr \right\Vert_{L^p(\Omega)} \lesssim \frac {\Vert G \Vert_{L^p(\mu)} t^{H^*(m) \vee \frac 1 2}} {\alpha(\epsilon,H^*(m))}.
\end{equation}
\end{lemma}
\begin{proof}
We first use Lemma \ref{representation} and then apply Meyer's inequality from Lemma \ref{Meyer} to obtain
\begin{align*}
&\left\Vert \frac {1} {\epsilon} \int_0^{t } G(x_{\frac r \epsilon}) dr \right\Vert_{L^p(\Omega)} =\left\Vert \int_0^{\frac t \epsilon} G(x_r) dr \right\Vert_{L^p(\Omega)}
=\left \Vert \int_0^{\frac t \epsilon}\delta^m\left( G_m(x_r) \mathbf{1}_{[0,r]}^{\otimes m}\right)\, dr\right \Vert_{L^p(\Omega)} \\
&\lesssim \sum_{k=0}^m \left \Vert \int_0^{\frac t \epsilon} D^k \left(G_m(x_r) \mathbf{1}_{[0,r]}^{\otimes m}\right) dr
\right\Vert_{L^{p}(\Omega,\mathscr{H}^{\otimes m +k})}
= \sum_{k=0}^m\left \Vert \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) \mathbf{1}_{[0,r]}^{\otimes {m +k}} dr
\right\Vert_{L^{p}(\Omega,\mathscr{H}^{\otimes m +k})}.
\end{align*}
Here $G_m$ is as given in Lemma \ref{representation} and $G_m^{(k)}$ denotes its $k$-th order Malliavin derivative.
We estimate the individual terms using the linearity of the inner product and the isometry $\langle \mathbf{1}_{[0,r]}, \mathbf{1}_{[0,s]}\rangle_{\mathscr H}={\symb E}(x_rx_s)=\beta(r-s)$,
\begin{align*} &\left( \left \Vert \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) \mathbf{1}_{[0,r]}^{\otimes {m +k}} dr
\right\Vert_{\mathscr{H}^{\otimes m +k}}\right)^2
= \left\langle \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) \mathbf{1}_{[0,r]}^{\otimes {m +k}} dr , \int_0^{\frac t \epsilon} G_m^{(k)} (x_u) \mathbf{1}_{[0,u]}^{\otimes {m +k}} du
\right\rangle_{{\mathscr H}^{\otimes m +k}}\\
& = \int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) G_m^{(k)} (x_u) \langle\mathbf{1}_{[0,r]}^{\otimes {m +k}} , \mathbf{1}_{[0,u]}^{\otimes {m +k}} \rangle_{{\mathscr H}^{\otimes m +k}}\, dr du\\
& = \int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) G_m^{(k)} (x_u) \Big( \beta(r-u)\Big) ^{m+k} \, dr\,du.
\end{align*}
Using Minkowski's inequality we obtain
\begin{align*}
&\sum_{k=0}^m\left \Vert \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) \mathbf{1}_{[0,r]}^{\otimes {m +k}} dr
\right\Vert_{L^{p}(\Omega,\mathscr{H}^{\otimes m +k})}\\
&\leq \sum_{k=0}^m \left( \left\Vert
\int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} G_m^{(k)} (x_r) G_m^{(k)} (x_u) \beta(r-u)^{m+k} drdu
\right\Vert_{L^{\frac p 2}(\Omega)}\right)^{\frac 1 2}\\
&\leq \sum_{k=0}^m \left(
\int_0^{\frac t \epsilon} \int_0^{\frac t \epsilon} \left\Vert G_m^{(k)} (x_r) G_m^{(k)} (x_u) \right\Vert_{L^{\frac p 2}(\Omega) } \beta(r-u)^{m+k} drdu
\right)^{\frac 1 2}.
\end{align*}
We then estimate ${\symb E} |G_m^{(k)} (x_r) G_m^{(k)} (x_u)|^{\frac p 2}$ by H\"older's inequality and use the fact that $x_t$ is stationary. The right-hand side is then controlled by
\begin{align*}
RHS & \leq \sum_{k=0}^m \Vert G_m^{(k)} \Vert_{L^p(\mu)}
\left( \int_0^{\frac t \epsilon}\int_0^{\frac t \epsilon}\vert \beta(\vert u-r \vert) \vert^{m+k} dr du\right)^{\frac 1 2}
\lesssim \Vert G \Vert_{L^p(\mu)} \left( \int_0^{\frac t \epsilon}\int_0^{\frac t \epsilon} \vert \beta(\vert u-r \vert) \vert^{m} dr du \right)^{\frac 1 2},
\end{align*}
concluding (\ref{le5.4-1}).
We finally apply Lemma \ref{Integrals} to conclude (\ref{le5.4-2}).
\end{proof}
Now we are ready to prove our main theorem.
\subsection{Concluding the proof}
\begin{proof}
\
\textbf{Step 1, CLT in the pure Wiener case}\\
We first deal with the high Hermite rank components.
For $k \leq n$ we define the truncated functions $G_{k,M}=\sum_{j=m_k}^{M} c_{k,j} H_j$ and set
$$
X^{k, \epsilon}_M= \alpha_k(\epsilon) \int_0^{t} G_{k,M}(y^{\epsilon}_s) ds.
$$
Then, by the reduction Lemma \ref{reduction} above, it is sufficient to show the convergence of $(X^{1,\epsilon}_M, \dots , X^{n,\epsilon}_M)$ for every $M$. By \cite{BenHariz} and \cite{Buchmann-Ngai-bordercase} each component alone converges to a Wiener process. Hence, as each $X^{k,\epsilon}_M$ belongs to a finite chaos we can make use of the normal approximation theorem from \cite[Theorem 6.2.3]{Nourdin-Peccati}: if each component of a family of mean zero vector valued stochastic processes, with components of the form $I_{q_i}(f_{i,n}) $, where $f_{i,n}$ are symmetric $L^2$ functions in $q_i$ variables, converges in law to a Gaussian process, then they converge jointly in law to a vector valued Gaussian process, provided that their covariance functions converge. Furthermore, the covariance functions of the limit distribution are $\lim_{\epsilon \to 0} {\symb E} [ X^{i,\epsilon}(t) X^{j,\epsilon}(s)] $.
Setting $m= \min(m_i,m_j)$, we use $${\symb E} (H_k(y^{\epsilon}_t)H_l(y^{\epsilon}_s))=\delta_{k,l} \left({\symb E}(y^{\epsilon}_sy^{\epsilon}_t)\right)^k$$ to obtain, for $s \leq t$,
$$\begin{aligned} &{\symb E}\left[ \alpha_i(\epsilon) \alpha_j(\epsilon)\int_0^{t}G_{i,M}(y^{\epsilon}_u)du \int_0^{s} G_{j,M}(y^{\epsilon}_r) dr \right] \\
&= \sum_{k=m}^M \alpha_i(\epsilon) \alpha_j(\epsilon) c_{i,k} c_{j,k} (k!)^2 \int_0^{t}\int_0^{s} ({\symb E}(y^{\epsilon}_r y^{\epsilon}_u))^k dr du\\
&= \sum_{k=m}^M \alpha_i(\epsilon) \alpha_j(\epsilon) c_{i,k} c_{j,k} (k!)^2 \left( \int_0^{s} \int_0^{s} \rho^{\epsilon}(u-r)^k dr du + \int_{s }^{t} \int_0^{s} \rho^{\epsilon}(u-r)^k dr du \right).
\end{aligned}$$
By Lemma \ref{Integrals} we obtain, for $\epsilon \to 0$,
\begin{align*}
\alpha_i(\epsilon) \alpha_j(\epsilon) \int_{s }^{t} \int_0^{s} \rho^{\epsilon}(u-r)^k dr du \to 0.
\end{align*}
Hence,
$$\begin{aligned}
\lim_{\epsilon \to 0} RHS &= 2 \sum_{k=m}^M c_{i,k} c_{j,k} (k!)^2 \lim_{\epsilon \to 0} \left( \epsilon \alpha_i(\epsilon) \alpha_j(\epsilon) s \int_0^{\frac s {\epsilon}} (\rho(v))^k dv\right) \\
&= 2 \left( s \wedge t \right) \sum_{k=m}^M c_{i,k} c_{j,k} (k!)^2 \int_0^\infty \rho(u)^k du\\
&= 2 \left( s \wedge t \right) \,\int_0^\infty {\symb E} (G_{i,M}(y_s) G_{j,M}(y_0) )ds
,\end{aligned}$$
proving the finite chaos case.
We now prove that the correlations of the limit converge as $ M \to \infty$. Indeed,
\begin{align*}
\lim_{M \to \infty} 2 \left( s \wedge t \right) \sum_{k=m}^M c_{i,k} c_{j,k} (k!)^2 \int_0^\infty \rho(u)^k du
&= 2 \left( s \wedge t \right) \sum_{k=m}^{\infty} c_{i,k} c_{j,k} (k!)^2 \int_0^\infty \rho(u)^k du\\
&= 2 \left( s \wedge t \right) \,\int_0^\infty {\symb E} (G_i(y_s) G_j(y_0) )ds.
\end{align*}
As $G_{i,M} \to G_i$ in $L^2(\mu)$, and similarly for $j$, this proves the joint convergence of the high Hermite rank components.
\textbf{Step 2, CLT in the pure Hermite case}\\
In this step we focus on the vector component whose entries satisfy $H^*(m_k)> \frac 1 2$. Recall,
this implies ${H> \frac 1 2}$.
By Lemma \ref{L^2-kernel}, evaluations of each component of $(X^{n+1}, \dots, X^N)$ converge in $L^2(\Omega)$. Hence, they converge as well jointly in $L^2(\Omega)$. Now, choose finitely many points $t_{k,j} \in [0,T]$ and constants $ a_{k,j} \in \R$, then, $\sum_{k,j} a_{k,j} X^{k,\epsilon}_{t_{k,j}}$ converges in $L^2(\Omega)$ to $\sum_{k,j} a_{k,j} X^{k}_{t_{k,j}}$ and thus we may conclude joint convergence in finite dimensional distributions by an application of the Cram\'er--Wold theorem.
\textbf{Step 3, Joint convergence}\\
We have already shown that $X^{W,\epsilon} \to X^W$ and $X^{Z,\epsilon} \to X^Z$ in finite dimensional distributions, it is only left to prove their joint convergence.
By Lemma \ref{reduction} and Equation (\ref{reduction_Hermite}) we may again reduce the problem to
$$G_i= \sum_{k=m_i}^M c_{i,k} H_k, \quad G_j= c_{j,m_j} H_{m_j}, \qquad 1 \leq i \leq n,\; j>n.$$
Now, we can rewrite $H_m(y^{\epsilon}_s)= I_m(f^{m,\epsilon}_s)$, where $I_m$ denotes an $m$-fold Wiener-It\^o integral and $f^{m,\epsilon}_s \in L^2(\R^m,\mu)$ is a suitable kernel function.
Now, for $1 \leq i \leq n$ we obtain,
\begin{align*}
\alpha_i(\epsilon) \int_0^t G_i(y^{\epsilon}_s)ds &= \alpha_i(\epsilon) \int_0^t \sum_{k=m_i}^{M} c_{i,k} H_k(y^{\epsilon}_s) ds
\\
&= \alpha_i(\epsilon)\int_0^t \sum_{k=m_i}^{M} c_{i,k} I_k(f^{k,\epsilon}_s) ds= \sum_{k=m_i}^{M} c_{i,k} I_k(\hat {f}^{k,\epsilon}_t),
\end{align*}
where $$\hat f^{k,\epsilon}_t = \int_0^t f^{k,\epsilon}_s ds.$$
Similarly for $j>n$,
\begin{align*}
\int_0^t G_j(y^{\epsilon}_s)ds &= \int_0^t c_{j,m_j} H_{m_j}(y^{\epsilon}_s) ds
= c_{j,m_j} I_{m_j}(\hat {f}^{m_j,\epsilon}_t).
\end{align*}
Hence, we only need to show that the collection of stochastic processes of the form $I_{m_k}(\hat {f}_t^{m_k,\epsilon})$ converges jointly in finite dimensional distribution.
It is thus sufficient to show that for every finite collection of times, $t_{1}, \dots, t_Q \in [0,T]$,
the vector,
$\left\{I_{k}(\hat {f}^{k,\epsilon}_{t_{l}}), k=m, \dots, M, l =1,\dots, Q \right\}$ converges jointly, where $m = \min_{k=1, \dots, N} m_k$.
Let $n_0$ denote the smallest natural number such that $H^*(n_0) < \frac 1 2$. For $k > n_0$, the collection $I_k(\hat {f}^{k,\epsilon}_{t_{l}})$ converges to a normal distribution, hence, by the moment bounds in Lemma \ref{Lp-bounds}
$ \sqrt{k!}\, \Vert \hat {f}^{k,\epsilon}_{t_{l}} \Vert_{H^{\otimes k}}= \sqrt{ {\symb E} \left( I_{k}(\hat {f}^{k,\epsilon}_{t_l})^2\right) } $ converges to a constant.
The convergence of such a sequence is equivalent to the convergence of the contractions, as defined in Equation (\ref{def-contraction}), of their kernels. Indeed, by a generalised fourth moment theorem \cite[Theorem 1]{Nualart-Peccati},
we have
$$
\Vert \hat {f}^{k,\epsilon}_{t_{l}} \otimes_r \hat {f}^{k,\epsilon}_{t_{l}} \Vert_{{\mathscr H}^{2k-2r}} \to 0, \qquad r =1 ,\dots , k-1.
$$
By Cauchy-Schwarz we obtain
for $r =1, \dots, k_1$,
$$\left \Vert \hat {f}^{k_1,\epsilon}_{t_{l_1}} \otimes_r \hat {f}^{k_2,\epsilon}_{t_{l_2}}\right \Vert^2_{{\mathscr H}^{k_1+k_2-2r}}
\leq \left\Vert \hat {f}^{k_1,\epsilon}_{t_{l_1}} \otimes_{k_1-r} \hat {f}^{k_1,\epsilon}_{t_{l_1}} \;
\right \Vert_{{\mathscr H}^{2r}} \; \left \Vert \hat {f}^{k_2,\epsilon}_{t_{l_2}} \otimes_{k_2-r} \hat {f}^{k_2,\epsilon}_{t_{l_2}} \right\Vert_{{\mathscr H}^{2r}}
\to 0,$$
for all $ t_{l_1},t_{l_2} \in [0,T], \, 1 \leq k_1 \leq n_0 < k_2 \leq M$.
We can now apply an asymptotic independence result, Proposition \ref{proposition-spit-independence} in the Appendix, to conclude the joint convergence
in finite dimensional distributions of $X^\epsilon$ to $(X^W,X^Z)$. Furthermore, $X^W$ is independent of $X^Z$.
The correlations between $X^i_t$ and $X^j_{t'}$, where $i,j>n$, are $0$ if $m_i \not = m_j$, otherwise given by the $L^2$ norm of their integrands, which follows from the Wiener-It\^o isometry and are given by
$$
c_{i,m_i} c_{j,m_j} \int_{0}^t \! \! \!\int_{0}^{t'} \! \! \int_{\R^{m_i}} \prod_{l=1}^{m_i} \left( s - \xi_l\right)_{+}^{\hat H(m_i) - \frac 3 2} \prod_{l=1}^{m_i} \left( r - \xi_l\right)_{+}^{\hat H(m_i) - \frac 3 2} d\xi_1 \dots d\xi_{m_i} \, dr \, ds.
$$
\textbf{Step 4, Convergence in the H\"older norms.}\\
We first choose $\gamma'\in \Big( \gamma, (H^*(m_k) \wedge \frac 1 2) - \frac 1 {p_k}\Big)$. Then,
using the Markov-Chebyshev inequality, we obtain that, as $M$ tends to $ \infty$,
\begin{align*}
\P\left( \vert X^{k,\epsilon} \vert_{\mathcal{C}^{\gamma'}([0,T],\R)} > M \right) &\leq \frac {\big\Vert \, \vert \theta\vert_{\mathcal{C}^{\gamma'}([0,T],\R)} \big\Vert_{p_k}^{p_k} } {M^{p_k}}\\
&\to 0,
\end{align*}
since $ \||\theta|_{ \mathcal{C}^{\gamma'}}\|_{p_k} < \infty$ by Lemma \ref{Lp-bounds} and an application of Kolmogorov's theorem \ref{Kolmogorov-theorem}.
Furthermore, since $\mathcal{C}^{\gamma'}([0,T];\R) $ is compactly embedded in $\mathcal{C}^\gamma([0,T];\R)$ for $1>\gamma' > \gamma$, by an Arzel\`a--Ascoli type argument, the sets $ \{ X^{k,\epsilon}_t : \vert X^{k,\epsilon} \vert_{\mathcal{C}^{\gamma'}([0,T],\R)} \leq M \} $ are sequentially compact in $\mathcal{C}^{\gamma}([0,T],\R)$. Hence, $\{X^{k,\epsilon}_t\}$ is tight in $\mathcal{C}^{\gamma}([0,T],\R)$. See e.g. \cite{Friz-Victoir}. As tightness in each component implies joint tightness, we obtain that $X^{\epsilon}$ is tight in $\mathcal{C}^{\gamma}([0,T],\R^N)$, where $\gamma \in (0, \frac 1 2 - \frac {1} {\min_{k \leq n} p_k}) $ in case $0<n$ and $\gamma \in ( 0, \min_{k>n} H^*(m_k) - \frac {1} {p_k} ) $ otherwise.
By the above discussion, any limit of a converging subsequence has the same finite dimensional distributions, hence, every subsequence converges to the same limit. This concludes the proof for the convergence in the H\"older norm.
\end{proof}
\section{Appendix: Joint convergence by asymptotic independence}
For the proof in the previous section we need the following which modifies results from \cite{Nourdin-Rosinski} and \cite{Nourdin-Nualart-Peccati}. Let $I_p(f)$ denote the $p^{th}$ iterated It\^o-Wiener integral of a symmetric function $f$ of $p$ variables,
$$I_p(f)= p!\int_{-\infty}^{\infty} \int_{-\infty}^{s_{p-1}}\dots \int_{-\infty}^{s_2} f(s_1, \dots, s_p)dW_{s_1} dW_{s_2} \dots dW_{s_p}.$$
If $f\in L^2(\R^p)$ and $g\in L^2(\R^q)$ are symmetric functions and $p, q\ge 1$,
their $r^{th}$-contraction is given by
\begin{equation}\label{def-contraction}
f\otimes_r g=\int_{\R^r} f(x_1, \dots, x_{p-r}, s_1, \dots, s_r) g(y_1, \dots, y_{q-r}, s_1, \dots, s_r) ds_1 \dots ds_r,
\end{equation}
where $r \leq p \wedge q$.
If $f\otimes_1g=\int_{\R} f(x_1, \dots, x_{p-1}, s) g(y_1, \dots, y_{q-1}, s) ds$ vanishes, so do all higher order contractions.
\begin{proposition}\label{proposition-spit-independence}
Let $q_1 \leq q_2 \leq \dots \leq q_n \leq p_1 \leq p_2 \leq \dots \le p_m$. Let $f_i^\epsilon \in L^2(\R^{p_i})$, $
g_i^\epsilon \in L^2(\R^{q_i})$, $F^{\epsilon}=\left(I_{p_1}(f^{\epsilon}_1), \dots , I_{p_m}(f^{\epsilon}_m)\right)$ and $G^{\epsilon}=\left(I_{q_1}(g^{\epsilon}_1), \dots, I_{q_n}(g^{\epsilon}_n)\right)$. Suppose that
for every $i,j$, and any $1 \leq r \leq q_i$:
$$ \Vert f^{\epsilon}_j \otimes_r g^{\epsilon}_i \Vert \to 0.$$
Then $F^\epsilon \to U$ and $G^{\epsilon} \to V$ weakly imply that $(F^{\epsilon},G^{\epsilon}) \to (U,V)$ jointly, where $U$ and $V$ are taken to be independent random variables.
\end{proposition}
These results benefit from the insights of \"{U}st\"{u}nel-Zakai \cite{Ustunel-Zakai} on the independence of two iterated integrals $I_p(f)$ and $I_q(g)$. They are independent if and only if the 1-contraction between $f$ and $g$ vanishes almost surely with respect to the Lebesgue measure.
An asymptotic independence result follows as below,
\begin{lemma}\cite[Thm. 3.1]{Nourdin-Nualart-Peccati} \label{contraction-covariance}
Let $F^{\epsilon}=I_p(f^{\epsilon})$ and $G^{\epsilon}=I_q(g^{\epsilon})$, where $f^\epsilon\in L^2(\R^p)$ and $g^\epsilon\in L^2(\R^q)$ . Then,
$${\mathrm{Cov}} \left( \left({F^{\epsilon}}\right)^2,\left({G^{\epsilon}}\right)^2 \right) \to 0$$
is equivalent to
$ \Vert f^{\epsilon} \otimes_r g^{\epsilon}\Vert \to 0$,
for $1\leq r \leq p \wedge q$.
\end{lemma}
It is also known that if two integrals $I_p(f)$ and $I_q(g)$ are independent, then
their Malliavin derivatives are orthogonal, see \cite{Ustunel-Zakai}. This explains why Malliavin calculus comes into prominent play,
which has been developed to its perfection in \cite[Lemma 3.2]{Nourdin-Nualart-Peccati}.
Given a smooth test function $\phi$ we define,
$$ \Vert \phi \Vert_{q} = \Vert \phi \Vert_{\infty} + \sum_{\vert k \vert =1}^{q} \left\Vert \frac { \partial^{\vert k \vert} \phi} {\partial x^k} \right\Vert_{\infty},$$
where the sum runs over multi-indices $k=(k_1, \dots, k_m)$.
Let $L=-\delta D$ and
throughout this section $f_i:\R^{p_i}\to \R$ and $g:\R^q\to \R$ denote symmetric functions.
\begin{lemma}\cite{Nourdin-Nualart-Peccati}
\label{a.i.-key-ineq}
Let $q\leq p_i$, $g\in L^2(\R^{q})$, $G=I_q(g)$, $f_i\in L^2(\R^{p_i})$, and $F_i= I_{p_i}(f_i)$ with ${\symb E} (F_i^2) = 1$. Set $F=(F_1, \dots, F_m)$ and let $\theta $ be a smooth test function. Then,
$$ {\symb E} \,\left \vert \left\langle (I-L)^{-1} \theta(F)DF_j,DG\right \rangle_{{\mathscr H}} \right\vert \leq \, c\,\Vert \theta \Vert_{q}\, {\mathrm{Cov}}(F_j^2,G^2), $$
where $c$ is a constant depending on
$\Vert F \Vert_{L^2}$, $\Vert G \Vert_{L^2} $, and $q$, $m$, $p_1, \dots, p_m$.
\end{lemma}
The final piece of the puzzle is the observation that
the defect in being independent is quantitatively controlled by the covariance of the squares of the relative components. The following is from
\cite{Nourdin-Nualart-Peccati}, our only modification is to take $G$ to be vector valued.
Let $g_i:\R^{q_i}\to \R$ be symmetric functions.
\begin{lemma}\label{nnp-extension}
Given $F=\left(I_{p_1}(f_1), \dots, I_{p_m}(f_m)\right)$ and $G=\left(I_{q_1}(g_1), \dots, I_{q_n}(g_n)\right)$ such that $p_k \geq q_l$ for every pair of $k,l$.
Then, for all test functions $\phi$ and $ \psi$, the following holds for some constant $c$, depending on $\Vert F \Vert_{L^2}$, $\Vert G \Vert_{L^2} $, and $m$, $n$, $p_1, \dots, p_m$, $q_1, \dots, q_n$,
$$ {\symb E} \left( \phi(F) \psi(G) \right) -{\symb E} \left( \phi(F) \right) {\symb E} \left( \psi(G) \right) \leq c \Vert D\psi \Vert_{\infty} \Vert \phi \Vert_{q_n} \sum_{i=1}^{m} \sum_{j=1}^n {\mathrm{Cov}}(F_i^2,G_j^2)$$
\end{lemma}
\begin{proof} Define $L^{-1} (\sum_{k=0}^\infty I_k(h_k))=\sum_{k=1}^\infty \frac 1 k I_k(h_k) \in {\mathbb D}^{2,2}$.
The key equality is $-DL^{-1}=(I-L)^{-1}D$. As in \cite{Nourdin-Nualart-Peccati},
\begin{align*}
\phi(F) - {\symb E}(\phi(F)) &= LL^{-1} \phi(F)= \sum_{j=1}^m \delta((I-L)^{-1} \partial_j \phi(F) DF_j).
\end{align*}
Multiplying both sides by $\psi(G)$, taking expectations and using integration by parts we obtain
\begin{align*}
&{\symb E} \left( \phi(F) \psi(G) \right) -{\symb E} \left( \phi(F) \right) {\symb E} \left( \psi(G) \right)
= \sum_{j=1}^m \sum_{i=1}^n {\symb E} \left( \langle (I-L)^{-1} \partial_j \phi(F) DF_j, D G_i \rangle_{{\mathscr H}} \partial_i\psi(G) \right)\\
&\leq \Vert D\psi \Vert_{\infty} \sum_{j=1}^m \sum_{i=1}^n \left| {\symb E} \left( \langle (I-L)^{-1} \partial_j \phi(F) DF_j, D G_i \rangle_{{\mathscr H}} \right)\right|.
\end{align*}
To conclude, apply to each summand Lemma \ref{a.i.-key-ineq} with $\theta = \partial_j \phi$ and $G=G_i$.
\end{proof}
\begin{lemma}\label{expectation-split}
Let $F^{\epsilon}=\left(I_{p_1}(f^{\epsilon}_1), \dots, I_{p_m}(f^{\epsilon}_m)\right)$ and $G^{\epsilon}=\left(I_{q_1}(g^{\epsilon}_1), \dots, I_{q_n}(g^{\epsilon}_n)\right)$ with $q_1 \leq q_2 \leq \dots \leq q_n \leq p_1 \leq p_2\leq \dots\leq p_m$. Then for every $i \leq m ,j \leq n$,
$$ \Vert f^{\epsilon}_i \otimes_r g^{\epsilon}_j \Vert \to 0, \quad 1\le r \le p_i\wedge q_j$$
implies that for any smooth test functions $\phi$ and $\psi$,
$$ {\symb E} \left( \phi(F^{\epsilon}) \psi(G^{\epsilon}) \right) - {\symb E} \left( \phi(F^{\epsilon}) \right) {\symb E} \left( \psi(G^{\epsilon})\right) \to 0.$$
\end{lemma}
\begin{proof}
Just combine Lemma \ref{nnp-extension} and Lemma \ref{contraction-covariance}.
\end{proof}
Finally, we finish the proof of Proposition \ref{proposition-spit-independence}.
\begin{proof}
Since $(F^{\epsilon},G^{\epsilon})$ is bounded in $L^2(\Omega)$ it is tight. Now choose a weakly converging subsequence $(F^n,G^n)$ with limit denoted by $(X,Y)$.
Let $\phi$ and $\psi$ be smooth test functions, then
by Lemma \ref{expectation-split} and the bounds on $\phi, \psi$, we pass to the limit under the expectation sign and obtain
$${\symb E}\left( \phi(X) \psi(Y) \right)={\symb E}\left( \phi(X) \right) {\symb E}\left( \psi(Y) \right).$$
Thus every limit measure is the product measure determined by $U$ and $V$, hence, $(F^{\epsilon},G^{\epsilon})$ converges as claimed.
\end{proof}
{\it Acknowledgement.} This research is partially funded by an EPSRC Roth studentship.
\end{document} |
\begin{document}
\title{Generalized Stern-Gerlach Effect for Chiral Molecules}
\author{Yong Li}
\affiliation{Department of Physics, University of Basel, Klingelbergstrasse
82, 4056 Basel, Switzerland}
\author{C. Bruder}
\affiliation{Department of Physics, University of Basel, Klingelbergstrasse
82, 4056 Basel, Switzerland}
\author{C. P. Sun}
\affiliation{Institute of Theoretical Physics, Chinese Academy of Sciences, Beijing,
100080, China}
\date{\today }
\begin{abstract}
The Stern-Gerlach effect is well-known as spin-dependent splitting of a beam
of atoms with magnetic moments by a magnetic-field gradient. Here, we show
that an induced gauge potential may lead to a similar effect for chiral
molecules. In the presence of three inhomogeneous light fields, the
center-of-mass of a three-level chiral molecule is subject to an optically
induced gauge potential, and the internal dynamics of the molecules can be
described as an adiabatic evolution in the reduced pseudo-spin subspace of the
two lowest energy levels. We demonstrate numerically that such an induced
gauge potential can lead to observable pseudo-spin dependent and
chirality-dependent generalized Stern-Gerlach effects for mixed left- and right-handed chiral molecules under realistic conditions.
\end{abstract}
\pacs{03.65.-w,03.65.Vf,11.15.-q,42.50.-p}
\maketitle
\emph{Introduction.} The Stern-Gerlach experiment \cite{SG} is one of the
milestones in the development of quantum theory. The observation of the
splitting of a beam of silver atoms in their ground states in a non-uniform
magnetic field led to the concept of the electronic spin. In this paper we
will show that even in the absence of a magnetic-field gradient, the
center-of-mass of certain atoms or molecules in optical fields will follow
different trajectories corresponding to different inner states. This
phenomenon is straightforwardly explained as a generalized Stern-Gerlach
effect by the optically-induced gauge potential \cite{wilz,moody,sun-ge}.
This induced gauge potential consists of the effective vector and scalar
potentials, which result from the adiabatic variable separation of the slow
spatial and fast inner dynamics of the atom according to the generalized
Born-Oppenheimer approximation \cite{sun-ge}. Recently, there has been
considerable interest to implement various pseudo-spin dependent induced gauge
potentials for cold atoms. Examples include the induced monopole
\cite{zhang-li-sun}, and the spin Hall effect for cold atoms \cite{zhu,liu},
in direct analogy to the spin Hall effect due to the spin-orbit coupling in
condensed matter physics \cite{SHE}. Here, we would like to consider
consequences of the induced gauge potential in systems of cold chiral
molecules \cite{Kral01,Kral03,shapiro} that manifest themselves as a
generalized Stern-Gerlach effect.
We consider a chiral molecule (see Fig.~\ref{Fig01}), which is
described by a cyclic three-level system
\cite{Kral01,Kral03,Liu05,cyclic,sun05} where any two of the three
levels are coupled by a classical optical field. Specific examples
are cyclic three-level ($\Delta$-type) chiral molecules, e.g., the
$D_2S_2$ enantiomers in Ref. \cite{Kral03} when only the lowest three
states in each well are considered. Such symmetry-breaking systems can
also be implemented using an asymmetric well and its mirror
\cite{Kral01} (i.e., one asymmetric well and its mirror form a
symmetric double well which supports chirality), or a superconducting
circuit acting as an effective atom \cite{Liu05}. It will be shown
that the optically-induced gauge potentials for the chiral molecules
will be both chirality-dependent and pseudo-spin-dependent when the
internal dynamics of chiral molecules are described as an adiabatic
evolution in the reduced pseudo-spin subspace of the two lowest energy
levels. Thus, the generalized Stern-Gerlach effect can be used to
distinguish molecules with different chiralities, suggesting a
discrimination method to separate chiral mixtures.
\begin{figure}
\caption{ (Color online) Model of
three-level $\Delta$-type left-(a) and right-(b) handed chiral molecules,
coupled to laser beams with Rabi frequencies $\pm\Omega_{12}$, $\Omega_{13}$, and $\Omega_{23}$.}
\label{Fig01}
\end{figure}
\emph{Model.} We first consider a general case of a symmetry-breaking molecule
having a $\Delta$-type or cyclic three-level configuration (e.g., see the
left-handed chiral molecule in Fig.~\ref{Fig01}(a)). The ground state
$|1\rangle$ and the metastable state $|2\rangle$ are coupled to the excited
state $|3\rangle$ through spatially varying classical laser fields, with the
Rabi frequencies $\Omega_{13}$ and $\Omega_{23}$, respectively.
In contrast to the $\Lambda$-type system, an additional coupling between
$|1\rangle$ and $|2\rangle$ is applied by the third classical laser field
with the Rabi frequency $\Omega_{12}$.
The total wave function $|\Psi(\mathbf{r})\rangle=\sum_{j=1}^{3}\psi_{j}(\mathbf{r}
)|j\rangle$ of the cyclic molecule, where $\mathbf{r}$ denotes the molecular
center-of-mass, is governed by the total Hamiltonian $H=\mathbf{p}
^{2}/(2m)+U(\mathbf{r})+H_{\mathrm{inn}}$, where $m$ is the molecular mass.
The trapping potential $U(\mathbf{r})=\sum_{j}U_{j}(\mathbf{r})|j\rangle
\langle j|$ is diagonal in the basis of inner states $|j\rangle$, and the
inner Hamiltonian $H_{\mathrm{inn}}$ contains the free terms\ $\omega
_{j}\left\vert j\right\rangle \langle j|$\ and the Rabi coupling terms
$\Omega_{jl}\exp(-i\nu_{jl}t)\left\vert j\right\rangle \langle l|+$H.c.
\ ($j=1,2,3;$ $l>j$) where $\omega_{j}$ corresponds to the inner level
energies. From now on we assume $\hbar=1$. Here, the frequencies of the three
classical optical fields are $\nu_{jl}$ matching the transition $\left\vert
j\right\rangle \rightarrow\left\vert l\right\rangle $ with the Rabi
frequencies $\Omega_{jl}=\mu_{jl}E_{jl}=\left\vert \Omega_{jl}(t)\right\vert
\exp(i\phi_{jl})$, respectively; $\mu_{jl}$ are the electrical dipole matrix
elements, and $E_{jl}$ the envelopes of electric fields corresponding to
the optical fields that couple levels $j$ and $l$; $\phi_{jl}$ are the corresponding phases.
We now consider the case that the optical field of Rabi frequency $\Omega
_{12}$ is resonant to the transition $\left\vert 1\right\rangle \rightarrow
\left\vert 2\right\rangle $, while the other two optical fields are in
two-photon resonance with the same single-photon detuning $\Delta=\omega
_{3}-\omega_{2}-\nu_{23}=\omega_{3}-\omega_{1}-\nu_{13}$ (see Fig.
\ref{Fig01}(a)). For position-independent or adiabatically varying
$\Omega_{jl}$, the inner Hamiltonian $H_{\mathrm{inn}}$ can be re-written in a
time-independent form
\begin{equation}
H_{\mathrm{inn}}^{\prime}=\Delta\left\vert 3\right\rangle \left\langle
3\right\vert +\sum_{l>j=1}^{3} \Omega_{jl}\left\vert j\right\rangle
\left\langle l\right\vert +\text{H.c.} \label{hamil}
\end{equation}
in the interaction picture.
From now on, we assume large detuning and weak coupling: $|\Delta|\gg
|\Omega_{13}|$ $\sim|\Omega_{23}|$ $\gg|\Omega_{12}|$, so that we can use a
canonical transformation \cite{Frohlich-Nakajima,sun05} to eliminate the
excited level $\left\vert 3\right\rangle $ from the Hamiltonian (\ref{hamil}).
To this end we decompose the Hamiltonian as $H_{\mathrm{inn}}^{\prime}
=H_{0}+H_{1}+H_{2}$ with the zeroth-order Hamiltonian $H_{0}=\Delta\left\vert
3\right\rangle \left\langle 3\right\vert $, the first-order term $H_{1}
=\Omega_{13}\left\vert 1\right\rangle \left\langle 3\right\vert +\Omega
_{23}\left\vert 2\right\rangle \left\langle 3\right\vert +$H.c., and
second-order term $H_{2}=\Omega_{12}\left\vert 1\right\rangle \left\langle
2\right\vert +$H.c. Then the unitary transformation
\cite{Frohlich-Nakajima,sun05} $H_{\mathrm{eff}}^{\mathrm{inn}}=\exp
(-S)H_{\mathrm{inn}}^{\prime}\exp(S)\simeq H_{0}+[H_{1},S]/2+H_{2}$ defined by
the anti-Hermitian operator $S=(\Omega_{13}\left\vert 1\right\rangle
\left\langle 3\right\vert +\Omega_{23}\left\vert 2\right\rangle \left\langle
3\right\vert -$ H.c.$)/\Delta$ results in the following second-order Hamiltonian
\begin{align}
H_{\mathrm{eff}}^{\mathrm{inn}} & =\Delta\left\vert 3\right\rangle
\left\langle 3\right\vert +\Lambda_{1}\left\vert 1\right\rangle \left\langle
1\right\vert +\Lambda_{2}\left\vert 2\right\rangle \left\langle 2\right\vert
\nonumber\\
& +(ge^{i\Phi}\left\vert 1\right\rangle \left\langle 2\right\vert
+\text{H.c.}), \label{hamilL-eff}
\end{align}
where the energy shifts $\Lambda_{i}$ are given by $\Lambda_{1}=-|\Omega
_{13}|^{2}/\Delta$, $\Lambda_{2}=-|\Omega_{23}|^{2}/\Delta$, and the effective
coupling is $g\exp(i\Phi)=\Omega_{12}-\Omega_{13}\Omega_{23}^{\ast}/\Delta$.
The instantaneous eigen-states of $H_{\mathrm{eff}}^{\mathrm{inn}}$ are
obtained as $|\chi_{3}\rangle=\left\vert 3\right\rangle $, and the dressed
states
\begin{align}
|\chi_{1}\rangle & =\cos\theta\left\vert 1\right\rangle +e^{-i\Phi}\sin
\theta\left\vert 2\right\rangle ,\nonumber\\
|\chi_{2}\rangle & =-\sin\theta\left\vert 1\right\rangle +e^{-i\Phi}
\cos\theta\left\vert 2\right\rangle
\end{align}
with the corresponding eigenvalues $\lambda_{j}=\Lambda_{j}-(-1)^{j}
g\tan\theta$ for $j=1,2$ and $\lambda_{3}=\Delta$ where $\theta$ is given by
$\tan2\theta=2g/(\Lambda_{1}-\Lambda_{2})$.
\emph{Induced gauge potentials.} In the new inner dressed-state basis
$\{|\chi_{1}\rangle,|\chi_{2}\rangle,|\chi_{3}\rangle\}$, the full quantum
state $\left\vert \Psi(\mathbf{r})\right\rangle =\sum_{j=1}^{3}\psi
_{j}(\mathbf{r})|j\rangle$ should be represented as $|\Psi(\mathbf{r}
)\rangle=\sum_{j=1}^{3}\tilde{\psi}_{j}(\mathbf{r})|\chi_{j}\rangle$, where
the wave functions $\tilde{\psi}=(\tilde{\psi}_{1},\tilde{\psi}_{2}
,\tilde{\psi}_{3})^{T}$ obey the Schr\"{o}dinger equation $i\partial_{t}
\tilde{\psi}=\tilde{H}\tilde{\psi}$ with the effective Hamiltonian $\tilde
{H}=(i\mathbf{\nabla}+\mathbf{\underline{A}(r)})^{2}/(2m)+\underline
{V}(\mathbf{r})$. Here, the induced gauge potentials, i.e., the vector
potential $\mathbf{\underline{A}(r)}$ and the scalar potential $\underline
{V}(\mathbf{r})$, are two $3\times3$ matrices defined by $\mathbf{A}
_{j,l}=i\langle\chi_{j}|\mathbf{\nabla}\chi_{l}\rangle$ and $V_{j,l}
=\lambda_{j}\delta_{j,l}+\langle\chi_{j}|U(\mathbf{r})\mathbf{|}\chi
_{l}\rangle$. The off-diagonal elements of $\mathbf{\underline{A}}$ and
$\underline{V}$ can be neglected: the Born-Oppenheimer approximation can be
applied to show that they vanish if the adiabatic condition applies
\cite{sun-ge}. Furthermore, the inner excited state $|\chi_{3}\rangle
=\left\vert 3\right\rangle $, whose eigen-energy $\lambda_{3}=\Delta$ is much
larger than the other inner eigen-energies $\lambda_{1}$ and $\lambda_{2}$, is
decoupled from the other inner dressed states. Thus, the three-level cyclic
system is reduced to the subsystem spanned by the two lower eigenstates
$\left\{ |\chi_{1}\rangle,|\chi_{2}\rangle\right\} $, which are robust to
atomic spontaneous emission. This results in an effective spin-1/2 system with
pseudo-spin up and down states $\left\vert \uparrow\right\rangle \equiv
|\chi_{1}\rangle$ and $\left\vert \downarrow\right\rangle \equiv|\chi
_{2}\rangle$.
The Schr\"{o}dinger equation of the effective two-level system in the
pseudo-spin-1/2 basis $\left\{ \left\vert \uparrow\right\rangle ,\left\vert
\downarrow\right\rangle \right\} $ is governed by the diagonal effective
Hamiltonian $\tilde{H}_{\mathrm{eff}}=H_{\uparrow}\left\vert \uparrow
\right\rangle \left\langle \uparrow\right\vert +H_{\downarrow}\left\vert
\downarrow\right\rangle \left\langle \downarrow\right\vert $, where
\begin{equation}
H_{\sigma}=\frac{1}{2m}(i\mathbf{\nabla}+\mathbf{A}_{\sigma})^{2}+V_{\sigma
}(\mathbf{r}),\text{ }\left( \sigma=\uparrow,\downarrow\right) .
\end{equation}
Here, $\mathbf{A}_{\sigma}=i\langle\chi_{\sigma}|\mathbf{\nabla}\chi_{\sigma
}\rangle$ is the spin-dependent induced vector potential and
\begin{align}
V_{\sigma}(\mathbf{r}) & =\lambda_{\sigma}+\langle\chi_{\sigma}
|U|\chi_{\sigma}\rangle\nonumber\\
& +\frac{1}{2m}[\langle\mathbf{\nabla}\chi_{\sigma}|\mathbf{\nabla}
\chi_{\sigma}\rangle+|\langle\chi_{\sigma}|\mathbf{\nabla}\chi_{\sigma}
\rangle|^{2}]
\end{align}
is the reduced optically-induced scalar potential \cite{Ruseckas05} for the
spin-$\sigma$ component where $\lambda_{\uparrow,\downarrow}:=\lambda_{1,2}$.
We now consider a specific configuration of three Gaussian laser beams
co-propagating in the $-\hat{z}$ direction. The spatial profiles of the
corresponding Rabi frequencies $\Omega_{jl}$ are assumed to be of Gaussian
form
\begin{equation}
\Omega_{jl}=\Omega_{jl}^{0}e^{-(x-x_{jl})^{2}/\sigma_{jl}^{2}}e^{-ik_{jl}
z}\text{ },
\end{equation}
where $j<l=1,2,3,$ $\Omega_{jl}^{0}$ are real constants, the wave vectors
satisfy $k_{12}+k_{23}-k_{13}=0$, and the center positions are assumed to be
$x_{13}=-x_{23}=\Delta x$, $x_{12}=0$. The explicit form of the vector
potentials are
\begin{equation}
\mathbf{A}_{\uparrow}=-k_{12}\sin^{2}\theta\mathbf{\hat{e}}_{z},\text{
}\mathbf{A}_{\downarrow}=-k_{12}\cos^{2}\theta\mathbf{\hat{e}}_{z}.
\label{gauge-left}
\end{equation}
Thus, the different spin states of the molecule will have opposite
spin-dependent effective magnetic fields $\mathbf{B}_{\uparrow}
=-\mathbf{B_{\downarrow}}$ according to $\mathbf{B}_{\sigma}=\mathbf{\nabla
}\times\mathbf{A}_{\sigma}$ \cite{Juzeliunas2006}.
The internal state of the molecule is prepared in the spin-up or spin-down state by
using the laser beams. The external atomic trap $U(\mathbf{r})$ is turned off
at time $t=0$, and the molecules fall due to gravity with an acceleration $G$
along the direction $\hat{z}$ \cite{note}. The scalar potentials $V_{\sigma}$
for spin-up and down molecules are given explicitly as
\begin{align}
V_{\uparrow}(\mathbf{r}) & =\lambda_{\uparrow}+\frac{1}{2m}[k_{12}^{2}
\sin^{2}\theta(1+\sin^{2}\theta)+\left( \partial_{x}\theta\right)
^{2}],\nonumber\\
V_{\downarrow}(\mathbf{r}) & =\lambda_{\downarrow}+\frac{1}{2m}[k_{12}
^{2}\cos^{2}\theta(1+\cos^{2}\theta)+\left( \partial_{x}\theta\right) ^{2}].
\label{scalar-left}
\end{align}
The spin-dependent induced vector potential $\mathbf{A}_{\sigma}(\mathbf{r})$
and scalar potential $V_{\sigma}(\mathbf{r})$ lead to the following equations
of orbital motion
\begin{align}
\dot{x}_{\sigma} & =\frac{p_{\sigma,x}}{m},\text{ }\dot{z}_{\sigma}
=\frac{p_{\sigma,z}-A_{\sigma,z}}{m},\text{ }\dot{p}_{\sigma,z}=mG,\nonumber\\
\dot{p}_{\sigma,x} & =\frac{1}{m}\left[ (\partial_{x}A_{\sigma,z}
)p_{\sigma,z}-A_{\sigma,z}\partial_{x}A_{\sigma,z}\right] -\partial
_{x}V_{\sigma}.\ \label{motion equation}
\end{align}
Hence, there will be a Stern-Gerlach-like effect, i.e., different spatial
motion of the cyclic molecules corresponding to different initial states of
spin up and down. In contrast to the standard Stern-Gerlach effect, the
effective magnetic field is not required to have a gradient. Here and in the
following, we treat the orbital motion as classical because of the large
molecular mass and weak effective gauge potentials.
\emph{Generalized Stern-Gerlach effect.} In the large detuning and
weak-coupling limit, the above approach works well for any type of cyclic
three-level system. It can also be applied in an experimentally feasible
scheme to detect the chirality of molecules, since the left- and
right-handed molecules have different Stern-Gerlach-like effects. Physically,
left- and right-handed molecules have the same intrinsic properties except the
antisymmetry of the total phase for the three coupled Rabi frequencies
\cite{Kral01,Kral03}. Hence, we can define $\Omega_{ij}^{L}\equiv\Omega_{ij}$
as the Rabi frequencies for the left-handed molecules, and define the Rabi
frequencies for the right-handed ones: $\Omega_{12}^{R}\equiv-\Omega_{12}$ and
$\Omega_{13}^{R}\equiv\Omega_{13}$, $\Omega_{23}^{R}\equiv\Omega_{23}$ for the
same coupling optical fields (see Fig. \ref{Fig01}(a,b)). Therefore the
difference in chirality leads to two different effective couplings,
\begin{equation}
g_{L/R}e^{i\Phi_{L/R}}=\pm\Omega_{12}-\frac{1}{\Delta}\Omega_{13}\Omega
_{23}^{\ast},
\end{equation}
(the indices $L$ and $R$ on the left-hand side correspond to the upper and lower signs on the right-hand side, respectively)
which results in two different effective inner Hamiltonians
\begin{align}
H_{\mathrm{eff}}^{\mathrm{inn}(Q)} & =\Delta\left\vert 3\right\rangle
_{QQ}\left\langle 3\right\vert +\Lambda_{1}\left\vert 1\right\rangle
_{QQ}\left\langle 1\right\vert +\Lambda_{2}\left\vert 2\right\rangle
_{QQ}\left\langle 2\right\vert \nonumber\\
& +(g_{Q}e^{i\Phi_{Q}}\left\vert 1\right\rangle _{QQ}\left\langle
2\right\vert +\text{H.c.}), \ \ (Q=L,R).
\end{align}
\begin{figure}
\caption{ (Color online)
Schematic illustration of the generalized Stern-Gerlach experiment of oriented chiral molecules. (a) Mixed chiral molecules trapped by the external potential
$U(\mathbf{r}
\label{Fig02}
\end{figure}
Initially, the mixed left- and right-handed oriented molecules, which are
spatially confined due to the external trap potential,
are subject to the three coupling optical fields as seen in Fig. \ref{Fig02}(a) and reduced to the spin-state space $\{\left\vert \uparrow\right\rangle
_{L/R},\left\vert \downarrow \right\rangle _{L/R}\}$. At time $t=0$,
the external trap potential is turned off and the molecules will fall
due to gravity. As in the above consideration for the general case of
cyclic-type molecules in
Eqs.~(\ref{hamilL-eff})-(\ref{motion equation}),
we can obtain the optically-induced potentials and
molecular classical trajectories for left- and right-handed molecules,
respectively. This is schematically illustrated in
Fig. \ref{Fig02}(b), which shows that the generalized Stern-Gerlach
effect splits the initial cloud into four subsets, since the effective
gauge potentials depend both on spin and chirality.
\begin{figure}
\caption{(Color online) The positions of an oriented molecular
ensemble with an initial Gaussian position distribution
($\rho(x,z)$=$(2\pi\sigma_{r}
\label{Fig03}
\end{figure}
To make this picture of a generalized Stern-Gerlach effect more quantitative,
we show in Fig.~\ref{Fig03} the typical position of an oriented
ensemble of mixed left-
and right-handed molecules and spin states subject to gravity (in the $\hat
{z}$-direction). For temperatures below $1$ $\mu$K, the initial velocity of
the molecules can be neglected. Figures~\ref{Fig03}(a-c) show the $\hat{x}
$-$\hat{z}$-plane positions of such a molecular ensemble with an initial
Gaussian position distribution at the origin at different times. The spatial
separation of molecules with different spin projections is clearly visible. By
choosing a different value of $\Omega_{12}^{0}$ and $\Omega_{13}^{0}
\Omega_{23}^{0}/\Delta$, we also obtain a spatial separation of molecules with
different chirality, see Fig. \ref{Fig03}(d-f). The separation is partial in
the following sense: for our choice of parameters, right-handed molecules in
the spin-up state are deflected to finite values of $x$, whereas the other
three components are not deflected and their trajectories remain close to
$x=0$. By changing $\Omega_{12}^{0}\rightarrow-\Omega_{12}^{0}$, the role of
left and right in Fig. \ref{Fig03}(a-f) is interchanged. In Fig. \ref{Fig04}
we show the effective magnetic fields (i.e., the curl of the vector
potentials) and scalar potentials leading to this behavior. Figure
\ref{Fig04}(a) shows the effective magnetic fields corresponding to all the
subplots in Fig. \ref{Fig03} [the effective magnetic field is the same in
Figs. 3(a-c) and 3(d-f)]. For the situation in Fig. \ref{Fig03}(a-c), the
effects of the scalar potential (which is not shown) can be neglected: the
magnetic fields are dominant and make the molecules in the spin-up state move
along the $-x$-direction (spin-down states along the $x$-direction). In
Fig.~\ref{Fig03}(d-f) the scalar potentials are dominant (Fig.~\ref{Fig04}(b))
and will trap the molecules in the area around $x=0$, except for the
$\left\vert \uparrow\right\rangle _{R}$ molecules that are deflected.
\begin{figure}
\caption{(Color online)
(a) Effective magnetic field corresponding to Fig. \ref{Fig03}
\label{Fig04}
\end{figure}
Stern-Gerlach experiments can also be used to obtain and measure
superpositions of spin states.
However, our effect described above does not work for
superpositions of left- and right-handed chiral states (even if many studies about teleporting, preparing and measuring superpositions of chiral states \cite{harris} appeared recently), since this
would require considering higher excited symmetric/antisymmetric
states. We will leave this interesting question for future works.
Although the protocol presented here is idealized since inter-molecular
interactions are neglected, it provides a promising way to spatially separate
molecules of different chiralities. A similar generalized Stern-Gerlach
effect has been proposed for $\Lambda$-type systems where the Rabi
frequencies $\Omega_{12}$ between the two lower inner states vanish
\cite{zhu}. However, this effect is chirality-independent. Thus, in contrast
to our configuration, the effect discussed in \cite{zhu} cannot be used to
distinguish and separate left- and right-handed molecules.
\emph{Conclusion.} In conclusion, we have studied the orbital effects
of internal adiabatic transitions on the center-of-mass motion of oriented
chiral molecules. We have shown that under the conditions described above,
the center-of-mass motion of the molecules depends on both chirality
and spin due to the optically induced gauge potentials and can be
interpreted as a generalized Stern-Gerlach effect. This leads to the
possibility of spatially separating molecules of different
chiralities.
This work was supported by the European Union under contract
IST-3-015708-IP EuroSQIP, by the Swiss NSF, and the NCCR Nanoscience,
and also by the NSFC and NFRPC of China.
\end{document}
\begin{document}
\setlength{\unitlength}{0.01in}
\linethickness{0.01in}
\begin{center}
\begin{picture}(474,66)(0,0)
\multiput(0,66)(1,0){40}{\line(0,-1){24}}
\multiput(43,65)(1,-1){24}{\line(0,-1){40}}
\multiput(1,39)(1,-1){40}{\line(1,0){24}}
\multiput(70,2)(1,1){24}{\line(0,1){40}}
\multiput(72,0)(1,1){24}{\line(1,0){40}}
\multiput(97,66)(1,0){40}{\line(0,-1){40}}
\put(143,66){\makebox(0,0)[tl]{\footnotesize Proceedings of the Ninth Prague Topological Symposium}}
\put(143,50){\makebox(0,0)[tl]{\footnotesize Contributed papers from the symposium held in}}
\put(143,34){\makebox(0,0)[tl]{\footnotesize Prague, Czech Republic, August 19--25, 2001}}
\end{picture}
\end{center}
\setcounter{page}{271}
\title[Fuzzy functions and $L$-Top]{Fuzzy functions and an extension of
the category $L$-Top of Chang-Goguen $L$-topological spaces}
\author{Alexander P. \v{S}ostak}
\address{Department of Mathematics\\
University of Latvia\\
Riga\\
Latvia}
\email{[email protected]}
\thanks{Partly supported by grant 01.0530 of Latvijas Zin\=atnes Padome}
\subjclass[2000]{03E72, 18A05, 54A40}
\keywords{Fuzzy category}
\thanks{This article will be revised and submitted for publication
elsewhere.}
\thanks{Alexander P. \v{S}ostak,
{\em Fuzzy functions and an extension of the category $L$-Top of
Chang-Goguen $L$-topological spaces},
Proceedings of the Ninth Prague Topological Symposium, (Prague, 2001),
pp.~271--294, Topology Atlas, Toronto, 2002}
\begin{abstract}
We study $\mathcal{F}TOP(L)$, a fuzzy category
with fuzzy functions in the role of morphisms. This category has the
same objects as the category L-TOP of Chang-Goguen L-topological spaces,
but an essentially wider class of morphisms---so called fuzzy functions
introduced earlier in our joint work with U. H\"ohle and H. Porst.
\end{abstract}
\maketitle
\section*{Introduction}
In research works where fuzzy sets are involved, in particular, in
Fuzzy Topology, mostly certain usual functions are taken as
morphisms: they can be certain mappings between corresponding sets, or
between the fuzzy powersets of these sets, etc.
On the other hand, in our joint works with U.~H\"ohle and H.E.~Porst
\cite{HPS1}, \cite{HPS2} a certain class of $L$-relations (i.e.\ mappings
$F: X\times Y \to L$) was distinguished which we view as ($L$-){\it fuzzy
functions} from a set $X$ to a set $Y$; these fuzzy functions play the
role of morphisms in an {\it $L$-fuzzy category} of sets {$\mathcal{F}SET(L)$},
introduced in \cite{HPS2}.
Later on we constructed a fuzzy category {$\mathcal{F}TOP(L)$}\ related to topology
with fuzzy functions in the role of morphisms, see \cite{So2000}.
Further, in \cite{So2001} a certain uniform counterpart of {$\mathcal{F}TOP(L)$}\ was
introduced.
Our aim here is to continue the study of {$\mathcal{F}TOP(L)$}.
In particular, we show that the top frame {$\mathcal{F}TOP(L)$}$^\top$ of the fuzzy
category {$\mathcal{F}TOP(L)$}\ is a topological category in H. Herrlich's sense
\cite{AHS}) over the top frame {$\mathcal{F}SET(L)$}$^\top$ of the fuzzy category {$\mathcal{F}SET(L)$}.
In order to make exposition self-contained, we start with Section
1 Prerequisities, where we briefly recall the three basic concepts which
are essentially used in this work: they are the concepts of a $GL$-monoid
(see e.g.\ \cite{Ho91}, \cite{Ho94}, etc.);
of an $L$-valued set (see e.g.\ \cite{Ho92}, etc.), and of an
$L$-fuzzy
category (see e.g.\ \cite{So91}, \cite{So92}, \cite{So97}, etc.).
In Section 2 we consider basic facts about fuzzy functions and introduce
the $L$-fuzzy category {$\mathcal{F}SET(L)$}\ \cite {HPS1}, \cite{HPS2}.
The properties of this fuzzy category and some related categories are the
subject of Section 3.
{$\mathcal{F}SET(L)$}\ is used as the ground category for the $L$-fuzzy category {$\mathcal{F}TOP(L)$}\
whose objects are Chang-Goguen $L$-topological spaces \cite{Ch},
\cite{Go73}, and whose morphisms are certain fuzzy functions,
i.e.\ morphisms from {$\mathcal{F}SET(L)$}. Fuzzy category {$\mathcal{F}TOP(L)$}\ is considered in
Section 4.
Its crisp top frame {$\mathcal{F}TOP(L)^\top$}\ is studied in Section 5.
In particular, it is shown that {$\mathcal{F}TOP(L)^\top$}\ is a topological category over
{$\mathcal{F}SET(L)$}$^\top$.
Finally, in Section 6 we consider the behaviour of the topological
property of compactness with respect to fuzzy functions --- in other
words in the context of the fuzzy category {$\mathcal{F}TOP(L)$}\ and, specifically, in
the context of the category {$\mathcal{F}TOP(L)^\top$}.
\section{Prerequisities}
\subsection{$GL$-monoids}
Let $(L, \leq)$ be a complete infinitely distributive lattice, i.e.\
$(L, \leq)$ is a partially ordered set such that for every subset $A
\subset L$ the join $\bigvee A$ and the meet $\bigwedge A$ are defined
and
$(\bigvee A) \wedge \alpha = \bigvee \{ a\wedge \alpha \mid a \in A \}$
and
$(\bigwedge A) \vee \alpha = \bigwedge \{a\vee \alpha \mid a \in A \}$
for every $\alpha \in L$.
In particular,
$\bigvee L =: \top$ and $\bigwedge L =: \bot$
are respectively the universal upper and the universal lower bounds
in $L$.
We assume that $\bot \ne \top$, i.e.\ $L$ has at least two elements.
A $GL-$monoid (see \cite{Ho91}, \cite{Ho92}, \cite{Ho94}) is a complete
lattice enriched with a further binary operation
$*$, i.e.\ a triple $(L, \leq, *)$ such that:
\begin{enumerate}
\item[(1)]
$*$ is monotone, i.e.\ $\alpha \leq \beta$ implies
$\alpha * \gamma \leq \beta * \gamma$,
$\forall \alpha, \beta, \gamma \in {\it L}$;
\item[(2)]
$*$ is commutative, i.e.\ $\alpha * \beta = \beta * \alpha$,
$\forall \alpha, \beta \in {\it L}$;
\item[(3)]
$*$ is associative, i.e.\
$\alpha * (\beta * \gamma) = (\alpha * \beta) * \gamma$,
$\forall \alpha, \beta, \gamma \in L$;
\item[(4)]
$(L,\leq,*)$ is integral, i.e.\ $\top$ acts as the unity:
$\alpha * \top = \alpha$, $\forall \alpha \in {\it L}$;
\item[(5)]
$\bot$ acts as the zero element in $(L, \leq, *)$,
i.e.\ $\alpha * \bot = \bot$, $\forall \alpha \in {\it L}$;
\item[(6)]
$*$ is distributive over arbitrary joins, i.e.\
$\alpha * (\bigvee_j \beta_j) = \bigvee_j (\alpha * \beta_j)$,
$\forall \alpha \in {\it L}, \forall \{ \beta_j : j \in J \} \subset {\it L}$;
\item[(7)]
$(L, \leq, *)$ is divisible, i.e.\ $\alpha \leq \beta$ implies
existence of $\gamma \in L$ such that $\alpha = \beta * \gamma$.
\end{enumerate}
It is known that every $GL-$monoid is residuated, i.e.\ there exists a
further binary operation ``$\longmapsto$'' (implication) on $L$ satisfying the
following condition:
$$\alpha * \beta \leq \gamma \Longleftrightarrow
\alpha \leq (\beta \longmapsto \gamma)
\qquad \forall \alpha, \beta, \gamma \in L.$$
Explicitly implication is given by
$$\alpha \longmapsto \beta =
\bigvee \{ \lambda \in L \mid \alpha * \lambda \leq \beta \}.$$
Below we list some useful properties of $GL-$monoids
(see e.g.\ \cite{Ho91}, \cite{Ho92}, \cite{Ho94}):
\begin{enumerate}
\item[(i)]
$\alpha \longmapsto \beta = \top \Longleftrightarrow \alpha \leq \beta$;
\item[(ii)]
$\alpha \longmapsto (\bigwedge_i \beta_i) = \bigwedge_i (\alpha \longmapsto \beta_i)$;
\item[(iii)]
$(\bigvee_i \alpha_i) \longmapsto \beta = \bigwedge_i (\alpha_i \longmapsto \beta)$;
\item[(v)]
$\alpha * (\bigwedge_i \beta_i) = \bigwedge_i (\alpha * \beta_i)$;
\item[(vi)]
$(\alpha \longmapsto \gamma ) * (\gamma \longmapsto \beta) \leq \alpha \longmapsto \beta$;
\item[(vii)]
$\alpha * \beta \leq (\alpha * \alpha) \vee (\beta * \beta)$.
\end{enumerate}
Important examples of $GL$-monoids are Heyting algebras and
$MV$-alg\-ebras.
Namely, a {\it Heyting algebra} is $GL$-monoid of the type
$(L,\leq,\wedge,\vee,\wedge)$ (i.e.\ in case of a Heyting algebra
$\wedge = *$), cf.\ e.g.\ \cite{Jhst}.
A $GL$-monoid is called an {\it $MV$-algebra} if
$(\alpha \longmapsto \bot) \longmapsto \bot = \alpha \quad \forall \alpha \in L$,
\cite{Ch58}, \cite{Ch59}, see also \cite[Lemma 2.14]{Ho94}.
Thus in an $MV$-algebra an order reversing involution $^c: L \to L$ can
be naturally defined by setting
$\alpha^c := \alpha \longmapsto \bot \quad \forall \alpha \in L$.
If $X$ is a set and $L$ is a $GL$-monoid, then the fuzzy powerset
$L^X$ in an obvious way can be pointwise endowed with a structure
of a $GL$-monoid.
In particular the $L$-sets $1_X$ and $0_X$ defined by $1_X (x):= \top$
and $0_X (x) := \bot$ $\forall x \in X$ are respectively the universal
upper and lower bounds in $L^X$.
In the sequel $L$ denotes an arbitrary $GL$-monoid.
\subsection{$L$-valued sets}
Following U.~H\"ohle (cf.\ e.g.\ \cite{Ho92}) by a (global) {\it
$L$-valued set} we call a pair $(X,E)$ where $X$ is a set and $E$
is an {\it $L$-valued equality}, i.e.\ a mapping $E: X \times X \to
L$ such that
\begin{enumerate}
\item[(1eq)]
$E(x,x) = \top$
\item[(2eq)]
$E(x,y) = E(y,x) \quad \forall x, y \in X$;
\item[(3eq)]
$E(x,y)*E(y,z) \leq E(x,z) \quad \forall x, y, z \in X$.
\end{enumerate}
A mapping $f: (X,E_X) \to (Y,E_Y)$ is called {\it extensional} if
$$E_X(x,x') \leq E_Y(f(x),f(x'))\ \forall x,x' \in X.$$
Let $SET(L)$ denote the category whose objects are $L$-valued sets and
whose morphisms are extensional mappings between the corresponding
$L$-valued sets.
Further, recall that an $L$-set, or more precisely, an $L$-subset of a
set $X$ is just a mapping $A: X \to L$.
In case $(X,E)$ is an $L$-valued set, its $L$-subset $A$ is called {\it
extensional} if
$$\bigvee_{x\in X} A(x) * E(x,x') \leq A(x') \quad \forall x' \in X.$$
\subsection{{\it L}-fuzzy categories}
\begin{definition}[\cite{So91}, \cite{So92}, \cite{So97}, \cite{So99}]
An {\it L}-fuzzy category is a quintuple $\mathcal{C} = (\mathcal{O}b(\mathcal{C}), \omega, \mathcal{M}(\mathcal{C}), \mu, \circ)$ where
$\mathcal{C}_\bot = (\mathcal{O}b(\mathcal{C}), \mathcal{M}(\mathcal{C}), \circ)$ is a usual (classical) category
called {\em the bottom frame} of the fuzzy category $\mathcal{C}$;
$\omega: \mathcal{O}b(\mathcal{C}) \longrightarrow {\it L} $ is an $L$-subclass of the class of
objects $\mathcal{O}b(\mathcal{C})$ of $\mathcal{C}_\bot$ and $\mu : \mathcal{M}(\mathcal{C}) \longrightarrow {\it L} $ is an
$L$-subclass of the class of morphisms $\mathcal{M}(\mathcal{C})$ of $\mathcal{C}_\bot$.
Besides $\omega$ and $\mu$ must satisfy the following conditions:
\begin{enumerate}
\item[(1)]
if $f: X \to Y$, then $ \mu (f) \leq \omega (X) \wedge \omega (Y)$;
\item[(2)]
$\mu (g \circ f) \geq \mu (g) * \mu (f)$ whenever composition $g \circ f$
is defined;
\item[(3)]
if $e_X: X \to X$ is the identity morphism, then $\mu(e_X) = \omega(X)$.
\end{enumerate}
\end{definition}
Given an {\it L}-fuzzy category $\mathcal{C} = (\mathcal{O}b(\mathcal{C}), \omega, \mathcal{M}(\mathcal{C}), \mu, \circ)$ and $X \in \mathcal{O}b(\mathcal{C})$, the intuitive
meaning of the value $\omega (X)$ is the {\it degree} to which a
potential object $X$ of the {\it L}-fuzzy category $\mathcal{C}$ is indeed its
object; similarly, for $f \in \mathcal{M}(\mathcal{C})$ the intuitive meaning of $\mu (f)$ is
the degree to which a potential morphism $f$ of $\mathcal{C}$ is indeed its
morphism.
\begin{definition}
Let $\mathcal{C} = (\mathcal{O}b(\mathcal{C}), \omega, \mathcal{M}(\mathcal{C}), \mu, \circ)$ be an {\it L}-fuzzy category.
By an ({\it L}-fuzzy) subcategory of $\mathcal{C}$ we call an {\it L}-fuzzy category
$$\mathcal{C}' = (\mathcal{O}b(\mathcal{C}), \omega', \mathcal{M}(\mathcal{C}), \mu', \circ)$$
where $\omega' \leq \omega$ and $\mu' \leq \mu$.
A subcategory $\mathcal{C}'$ of the category $\mathcal{C}$ is called full if
$\mu'(f) = \mu(f) \wedge \omega'(X) \wedge \omega'(Y)$ for every
$f \in \mathcal{M}_\mathcal{C} (X,Y)$, and all $X$,$Y \in \mathcal{O}b(\mathcal{C})$.
\end{definition}
Thus an {\it L}-fuzzy category and its subcategory have the same classes of
potential objects and morphisms.
The only difference of a subcategory from the whole category is in
{\it L}-fuzzy classes of objects and morphisms, i.e.\ in the belongness
degrees of potential objects and morphisms.
Let $\mathcal{C} = (\mathcal{O}b(\mathcal{C}), \mathcal{M}(\mathcal{C}), \circ)$ be a crisp category and
$\mathcal{D} = (\mathcal{O}b(\mathcal{D}), \mathcal{M}(\mathcal{D}), \circ)$ be its subcategory.
Then for every $GL$-monoid {\it L}\ the category $\mathcal{D}$ can be identified with
the {\it L}-fuzzy subcategory
$$\tilde{\mathcal{D}} = (\mathcal{O}b(\mathcal{C}), \omega', \mathcal{M}(\mathcal{C}), \mu', \circ)$$
of $\mathcal{C}$ such that
$\omega'(X) = \top$ if $X \in \mathcal{O}b(\mathcal{D})$ and
$\omega'(X) = \bot$ otherwise;
$\mu' (f) = \top$ if $f \in \mathcal{M}(\mathcal{D})$ and $\mu' (f) = \bot$ otherwise.
In particular, ${\tilde{\mathcal{D}}}_\top = \mathcal{D}$.
On the other hand sometimes it is convenient to identify a fuzzy
subcategory
$$\mathcal{C}' = (\mathcal{O}b(\mathcal{C}), \omega', \mathcal{M}(\mathcal{C}), \mu', \circ)$$
of the fuzzy category
$$\mathcal{C} = (\mathcal{O}b(\mathcal{C}), \omega, \mathcal{M}(\mathcal{C}), \mu, \circ)$$
with the fuzzy category
$$\mathcal{D} = (\mathcal{O}b(\mathcal{D}), \omega_\mathcal{D}, \mathcal{M}(\mathcal{D}), \mu_\mathcal{D}, \circ)$$
where
$$\mathcal{O}b(\mathcal{D}) := \{X \in \mathcal{O}b(\mathcal{C}) \mid \omega'(X) \ne \bot \}$$
and
$$\mathcal{M}(\mathcal{D}) := \{f \in \mathcal{M}(\mathcal{C}) \mid \mu'(f) \ne \bot \}$$
and $\omega_\mathcal{D}$ and $\mu_\mathcal{D}$ are restrictions of
$\omega'$
and $\mu'$ to $\mathcal{O}b(\mathcal{D})$ and $\mathcal{M}(\mathcal{D})$ respectively.
\section{Fuzzy functions and fuzzy category {$\mathcal{F}SET(L)$}}
As it was already mentioned above, the concept of a fuzzy function and
the corresponding fuzzy category {$\mathcal{F}SET(L)$}\ were introduced in \cite{HPS1},
\cite{HPS2}.
There were studied also basic properties of fuzzy functions.
In this section we recall those definitions and results from \cite{HPS2}
which will be needed in the sequel\footnote{Actually, the subject of
\cite{HPS1}, \cite{HPS2} was a more general fuzzy category $L$-{$\mathcal{F}SET(L)$}\
containing {$\mathcal{F}SET(L)$}\ as a full subcategory.
However, since for the merits of this work the category {$\mathcal{F}SET(L)$}\ is of
importance, when discussing results from \cite{HPS1}, \cite{HPS2} we
reformulate (simplify) them for the case of {$\mathcal{F}SET(L)$}\ without mentioning
this every time explicitly}.
Besides some new needed facts about fuzzy functions will be established
here, too.
\subsection{Fuzzy functions and category {$\mathcal{F}SET(L)$}}
\begin{definition}[cf.\ {\cite[2.1]{HPS1}}]
A fuzzy function\footnote{Probably, the name {\it an $L$-fuzzy
function} would be more adequate here.
However, since the $GL$-monoid $L$ is considered to be fixed, and since
the prefix ``$L$'' appears in the text very often, we prefer to say just
{\it a fuzzy function}}
$F$ from an $L$-valued set $(X,E_X)$ to $(Y,E_Y)$ (in symbols
$F: (X,E_X) \rightarrowtail (Y,E_Y))$) is a mapping
$F: X \times Y \to L$ such that
\begin{enumerate}
\item[(1ff)]
$F(x,y) * E_Y(y,y') \leq F(x,y')
\quad \forall x \in X, \forall y,y' \in Y$;
\item[(2ff)]
$E_X(x,x') * F(x,y) \leq F(x',y)
\quad \forall x,x' \in X, \forall y \in Y$;
\item[(3ff)]
$F(x,y) * F(x,y') \leq E_Y(y,y')
\quad \forall x \in X, \forall y,y' \in Y$.
\end{enumerate}
\end{definition}
{\small Notice that conditions (1ff)--(2ff) say that $F$ is a
certain $L-$relation, while axiom (3ff) together with evaluation
$\mu(F)$ (see Subsection \ref{fsetl}) specify that the $L$-relation
$F$ is a fuzzy {\it function}}.
\begin{remark}
Let $F: (X,E_X) \rightarrowtail (Y,E_Y)$ be a fuzzy function, $X' \subset X$,
$Y' \subset Y$, and let the $L$-valued equalities $E_{X'}$ and $E_{Y'}$
on $X'$ and $Y'$ be defined as the restrictions of the equalities $E_X$
and $E_Y$ respectively.
Then defining a mapping $F': X'\times Y' \to L$ by the equality
$F'(x,y) = F(x,y)\ \forall x \in X', \forall y \in Y'$
a fuzzy function $F': (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ is obtained.
We refer to it as the {\it restriction of $F$} to the subspaces
$(X',E_{X'})$ and $(Y',E_{Y'})$.
\end{remark}
Given two fuzzy functions $F: (X,E_X) \rightarrowtail (Y,E_Y)$ and
$G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ we define their {\it composition}
$G \circ F: (X,E_X) \rightarrowtail (Z,E_Z)$ by the formula
$$(G\circ F)(x,z) = \bigvee_{y\in Y} \bigl( F(x,y) * G(y,z)\bigr).$$
In \cite{HPS2} it was shown that the composition $G \circ F$ is indeed a
fuzzy function and that the operation of composition is associative.
Further, if we define the identity morphism by the corresponding
$L$-valued equality:
$E_X: (X,E_X) \rightarrowtail (X,E_X),$ we come to a category {$\mathcal{F}SET(L)$}\
whose objects are $L$-valued sets and whose morphisms are fuzzy functions
$F: (X,E_X) \rightarrowtail (Y,E_Y)$.
\subsection{Fuzzy category {$\mathcal{F}SET(L)$}}\label{fsetl}
Given a fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ let
$$\mu(F) = \inf_x \sup_y F(x,y).$$
Thus we define an $L$-subclass $\mu$ of the class of all morphisms of
{$\mathcal{F}SET(L)$}.
In case $\mu(F) \geq \alpha$ we refer to $F$ as a {\it fuzzy
$\alpha$-function}.
If $F: (X,E_X) \rightarrowtail (Y,E_Y) \mbox{ and } G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ are
fuzzy functions, then $\mu (G \circ F) \geq \mu(G) * \mu(F)$ \cite{HPS2}.
Further, given an $L$-valued set $(X,E)$ let
$\omega(X,E) := \mu(E) = \inf_x E(x,x) = \top$.
Thus a {\it fuzzy category} {$\mathcal{F}SET(L)$} = $(FSET(L), \omega, \mu)$ is
obtained.
\begin{example}\label{ex-crisp}
Let $* = \wedge$ and $E_Y$ be a crisp equality on $Y$, i.e.\
$E_Y(y,y') = \top \mbox{ iff } y = y'$, and
$E_Y(y,y') = \bot$ otherwise.
Then every fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ such that
$\mu(F) = \top$ is uniquely determined by a usual function
$f: X \to Y$.
Indeed, let $f(x) = y \mbox{ iff } F(x,y) = \top$.
Then condition (3ff) implies that there cannot be $f(x)=y$, $f(x) = y'$
for two different $y$, $y' \in Y$ and condition $\mu(F) = \top$
guarantees that for every $x \in X$ one can find $y \in Y$ such
that $f(x)=y$.
If besides $E_X$ is crisp, then, vice versa, every mapping $f: X \to Y$
can be viewed as a fuzzy mapping $F: (X,E_X) \rightarrowtail (Y,E_Y)$ (since the
conditions of extensionality (2ff) and (3ff) are automatically fulfilled
in this case)
\end{example}
\begin{remark}
If $F'\colon (X',E_{X'}) \rightarrowtail (Y,E_{Y})$ is the restriction of
$F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ (see Remark above) and
$\mu(F) \geq \alpha$, then $\mu(F') \geq \alpha$.
However, generally the restriction
$F'\colon (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ of $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$
may fail to satisfy condition $\mu(F') \geq \alpha$.
\end{remark}
\subsection{Images and preimages of $L$-sets under fuzzy
functions}\label{impref}
Given a fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$ and $L$-subsets
$A: X \to L$ and $B: Y \to L$ of $X$ and $Y$ respectively, we define the
fuzzy set $F^{\rightarrow}(A): Y \to L$ (the image of $A$ under $F$) by
the equality $F^{\rightarrow}(A)(y) = \bigvee_x F(x,y) * A(x)$ and the
fuzzy set $F^{\leftarrow}(B): X \to L$ (the preimage of $B$ under $F$)
by the equality $F^{\leftarrow}(B)(x) = \bigvee_y F(x,y) * B(y)$.
Note that if $A \in L^X$ is extensional, then
$F^{\rightarrow}(A) \in L^Y$ is extensional (by (2ff)) and if
$B \in L^Y$ is extensional, then $F^{\leftarrow}(B) \in L^X$ is
extensional (by (3ff)).
\begin{proposition}[Basic properties of images and preimages of $L$-sets
under fuzzy functions]
\label{im-pr}
\mbox{}
\begin{enumerate}
\item
$F^{\rightarrow}(\bigvee_{i \in \mathcal{I}} A_i) =
\bigvee_{i \in \mathcal{I}} F^{\rightarrow}(A_i)
\qquad \forall \{A_i: i \in {\mathcal{I}} \} \subset L^X$;
\item
$F^{\rightarrow}(A_1\bigwedge A_2) \leq
F^{\rightarrow}(A_1) \bigwedge F^{\rightarrow}(A_2)
\qquad
\forall A_1, A_2 \in L^X$;
\item
If $L$-sets $B_i$ are {\em extensional}, then
$$
\bigwedge_{i \in {\mathcal{I}}}
F^{\leftarrow}(B_i)*\mu(F)^2
\leq
F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i)
\leq
\bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i)
\qquad \forall \{B_i: i \in \mathcal{I} \} \subset L^Y.
$$
In particular, if $\mu(F) = \top$, then
$F^{\leftarrow}(\bigwedge_{i\in \mathcal{I}} B_i) =
\bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i)$
for every family of extensional $L$-sets
$\{B_i: i \in \mathcal{I} \} \subset L^Y$.
\item
$F^{\leftarrow}(\bigvee_{i \in \mathcal{I}} B_i) =
\bigvee_{i \in \mathcal{I}} F^{\leftarrow}(B_i)
\qquad \forall \{B_i: i \in {\mathcal{I}} \} \subset L^Y$.
\item
$A*\mu(F)^2 \leq F^{\leftarrow}(F^{\rightarrow}(A))$
for every $A \in L^X$.
\item
$F^{\to}\bigl(F^{\gets}(B)\bigr) \leq B$
for every {\em extensional} $L$-set $B\in L^Y$.
\item
$F^{\leftarrow}(c_Y) \geq \mu(F) * c$ where $c_Y: Y \to L$ is the constant
function taking value $c \in L$.
In particular, $F^{\leftarrow}(c_Y) = c$ if $\mu(F) = \top$.
\end{enumerate}
\end{proposition}
\begin{proof}
(1).
$$
\begin{array}{lll}
\bigl(\bigvee_i F^{\rightarrow}(A_i)\bigr)(y)&
=&
\bigvee_i \bigvee_x \bigl(F(x,y) * A_i(x) \bigr)\\
&
=&
\bigvee_x \bigvee_i \bigl(F(x,y) * A_i (x)\bigr)\\
&
=&
\bigvee_x (F(x,y) * (\bigvee_i A_i)(x))\\
&
=&
F^{\rightarrow}(\bigvee_i A_i)(y).
\end{array}
$$
(2).
The validity of (2) follows from the monotonicity of $F^{\to}$.
(3).
To prove property 3 we first establish the following inequality
\begin{equation}\label{basic1}
\bigvee_{y\in Y}\bigl(F(x,y)\bigr)^2 \geq
\bigl(\bigvee_{y\in Y} F(x,y)\bigr)^2.
\end{equation}
Indeed, by a property (vii) of a GL-monoid
$$
\begin{array}{lll}
\Bigl(\bigvee_{y\in Y} F(x,y)\Bigr)^2&
=&
\Bigl(\bigvee_{y\in Y} F(x,y)\Bigr) * \Bigl(\bigvee_{y'\in Y}
F(x,y')\Bigr)\\
&
=&
\bigvee_{y,y'\in Y} \Bigl(F(x,y) * F(x,y') \Bigr)\\
&
\leq&
\bigvee_{y,y'\in Y} \Bigl(F(x,y)^2 \vee F(x,y')^2\Bigr)\\
&
=&
\bigvee_{y\in Y} \bigl(F(x,y)\bigr)^2.
\end{array}$$
In particular, it follows from (\ref{basic1}) that
\begin{equation}\label{basic2}
\forall x \in X\
\bigvee_{y\in Y} \bigl(F(x,y)\bigr)^2 \geq \mu(F)^2.
\end{equation}
Now, applying (\ref{basic2}) and taking into account extensionality
of
$L$-sets $B_i$, we proceed as follows:
$$
\begin{array}{lll}
\multicolumn{3}{l}{
\Bigl(\bigwedge_i F^{\gets}(B_i)\Bigr)(x) * \bigl(\mu(F)\bigr)^2
}
\\
&
\leq&
\Bigl(\bigwedge_i \bigl(\bigvee_{y_i \in Y} F(x,y_i)*B_i(y_i)\bigr)\Bigr) *
\bigvee_{y\in Y}\bigl(F(x,y)\bigr)^2\\
&
=&
\bigvee_{y\in Y} \Bigl(\bigl(F(x,y)\bigr)^2 *
\bigwedge_i\bigl(\bigvee_{y_i \in Y} F(x,y_i) * B_i(y_i)\bigr)\Bigr)\\
&
=&
\bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i \bigl(F(x,y) *
\bigl(\bigvee_{y_i \in Y} F(x,y_i) * B_i(y_i)\bigr)\bigr)\Bigr)\\
&
=&
\bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{y_i\in Y}
F(x,y) * F(x,y_i) * B_i(y_i)\bigr)\Bigr)\\
&
\leq&
\bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{y_i \in Y}
E_Y(y,y_i) * B_i(y_i)\bigr)\Bigr)\\
&
\leq&
\bigvee_{y\in Y}\Bigl(F(x,y) * \bigwedge_i B_i(y)\Bigr)\\
&
=&
F^{\gets}\bigl(\bigwedge_i B_i\bigr)(x),
\end{array}
$$
and hence
$$\Bigl(\bigwedge_i F^{\gets}(B_i)\Bigr)* \bigl(\mu(F)\bigr)^2
\leq F^{\gets}\bigl(\bigwedge_i B_i\bigr).$$
To complete the proof notice that the inequality
$$F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \leq
\bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i)$$
is obvious.
(4).
The proof of (4) is similar to the proof of (1) and is therefore
omitted.
(5).
Let $A \in L^X$, then
$$
\begin{array}{lll}
F^{\leftarrow}(F^{\rightarrow}(A))(x)&
=&
\bigvee_y (F(x,y)*F^{\rightarrow}(A)(y))\\
&
=&
\bigvee_y \bigl(F(x,y)*\bigl(\bigvee_{x'} F(x',y) * A(x') \bigr)\bigr)\\
&
\geq&
\bigvee_y (F(x,y)^2 * A(x))\\
&
\geq&
(\mu(F))^2 * A(x)
\end{array}
$$
for every $x\in X$, and hence 5 holds.
(6).
To show property 6 assume that $B\in L^Y$ is extensional.
Then
$$
\begin{array}{lll}
F^{\rightarrow}(F^{\leftarrow}(B))(y)&
=&
\bigvee_x\bigl(F(x,y) * F^{\gets}(B)(x)\bigr)\\
&
=&
\bigvee_x \Bigl(F(x,y) * \bigl(\bigvee_{y'} F(x,y') * B(y') \bigr)\Bigr)\\
&
=&
\bigvee_{x \in X,\, y'\in Y} \bigl(F(x,y) * F(x,y') * B(y')\bigr)\\
&
\leq&
\bigvee_{y'\in Y} \bigl(E_Y(y,y')*B(y')\bigr)\\
&
\leq&
B(y),
\end{array}
$$
and hence
$F^{\to}\bigl(F^{\gets}(B)\bigr) \leq B$.
(7).
The proof of property 7 is straightforward and therefore omitted.
\end{proof}
\begin{comments}
\mbox{}
\begin{enumerate}
\item
Properties 1,2 and 4 were proved in \cite[Proposition 3.2]{HPS2}.
Here we reproduce these proofs in order to make the article
self-contained.
\item
The inequality in item 2 of the previous proposition obviously
cannot be improved even in the crisp case.
\item
One can show that the condition of extensionality cannot be omitted
in items 3 and 6.
\item
The idea of the proof of Property 3 was communicated to the author by
U.~H\"ohle in Prague at TOPOSYM in August 2001.
\item
In \cite{HPS2} there was established the following version of
Property 3 without the assumption of extensionality of $L$-sets
$B_i$ in case $L$ is completely distributive:
$$ {(\bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i))}^5 \leq
F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \leq
\bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i) \qquad
\forall \{B_i: i \in \mathcal{I} \} \subset L^Y$$
and
$$ \bigwedge_{i \in \mathcal{I}} F^{\leftarrow}(B_i) =
F^{\leftarrow}(\bigwedge_{i \in \mathcal{I}} B_i) \qquad
\forall \{B_i: i \in \mathcal{I} \} \subset L^Y, \mbox{ in case } * =
\wedge$$
\end{enumerate}
\end{comments}
\subsection{Injectivity, surjectivity and bijectivity of fuzzy functions}
\begin{definition}\label{inj}
A fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$ is called
{\it injective}, if
\begin{itemize}
\item[(inj)]
$F(x,y) * F(x',y) \leq E_X(x,x') \quad
\forall x,x' \in X, \forall y \in Y$.
\end{itemize}
\end{definition}
\begin{definition}\label{sur}
Given a fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$, we define its
degree of surjectivity by the equality:
$$\sigma(F) := \inf_y \sup_x F(x,y)$$
In particular, a fuzzy function $F$ is called $\alpha$-surjective, if
$\sigma(F) \geq \alpha$.
In case $F$ is injective and $\alpha$-surjective, it is called
{\it $\alpha$-bijective}.
\end{definition}
\begin{remark}
Let $(X,E_X)$, $(Y,E_Y)$ be $L$-valued sets and $(X',E_{X'})$,
$(Y',E_{Y'})$ be their subspaces.
Obviously, the restriction $F'\colon (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$
of an injection $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ is an injection.
The restriction $F'\colon (X,E_{X}) \rightarrowtail (Y',E_{Y'})$ of an
$\alpha$-surjection $F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ is an
$\alpha$-surjection.
On the other hand, generally the restriction
$F'\colon (X',E_{X'}) \rightarrowtail (Y',E_{Y'})$ of an $\alpha$-surjection
$F\colon (X,E_X) \rightarrowtail (Y,E_Y)$ may fail to be an $\alpha$-surjection.
\end{remark}
A fuzzy function $F: (X, E_X) \rightarrowtail (Y, E_Y)$ determines a fuzzy
{\it relation} $F^{-1}: X\times Y \to L$ by setting
$F^{-1}(y,x) = F(x,y)$\ $\forall x \in X$, $\forall y \in Y$.
\begin{proposition}[Basic properties of injections, $\alpha$-surjections
and $\alpha$-bi\-jections]
\label{in-sur}
\mbox{}
\begin{enumerate}
\item
$F^{-1} :(Y,E_Y) \rightarrowtail (X,E_X)$ is a fuzzy function iff $F$ is injective
(actually $F^{-1}$ satisfies (3ff) iff $F$ satisfies (inj));
\item
$F$ is $\alpha$-bijective iff $F^{-1}$ is $\alpha$-bijective.
\item
If $F$ is injective, and L-sets $A_i$ are extensional, then
$$
(\bigwedge_i F^{\rightarrow}(A_i))*(\sigma(F))^2
\leq
F^{\rightarrow}(\bigwedge_i A_i) \leq \bigwedge_i F^{\rightarrow}(A_i)
\qquad \forall \{A_i : i \in {\mathcal{I}}\} \subset L^X.
$$
In particular, if $F$ is $\top$-bijective, then
$$
\bigwedge_i F^{\rightarrow}(A_i) = F^{\rightarrow}(\bigwedge_i A_i)
\qquad \forall \{A_i : i \in {\mathcal{I}}\} \subset L^X.$$
\item
$F^{\rightarrow}(F^{\leftarrow}(B)) \geq \sigma(F)^2 * B$.
In particular, if $F$ is $\top$-surjective and $B$ is extensional, then
$F^{\rightarrow}(F^{\leftarrow}(B)) = B$.
\item
$F^{\rightarrow}(c_X) \geq \sigma(F) * c$ where $c_X: X \to L$ is the
constant function with value $c$.
In particular,
$F^{\rightarrow}(c_X) = c$ if $\sigma(F) = \top$.
\end{enumerate}
\end{proposition}
\begin{proof}
Properties 1 and 2 follow directly from the definitions.
(3).
The proof of Property 3 is analogous to the proof of item 3 of
Proposition \ref{im-pr}:
First, reasoning as in the proof of $(\ref{basic1})$ we establish
the following inequality
\begin{equation}\label{basic3}
\bigvee_{x\in X}\bigl(F(x,y)\bigr)^2 \geq
\bigl(\bigvee_{x\in X} F(x,y)\bigr)^2.
\end{equation}
In particular, from here it follows that
\begin{equation}\label{basic4}
\forall y \in Y\ \bigvee_{x\in X} \bigl(F(x,y)^2\bigr) \geq \sigma(F)^2.
\end{equation}
Now, applying $(\ref{basic4})$ and taking into account extensionality of
$L$-sets $A_i$, we proceed as follows:
$$
\begin{array}{lll}
\multicolumn{3}{l}{
\Bigl(\bigwedge_i F^{\to}(A_i)\Bigr)(y) * \bigl(\sigma(F)\bigr)^2
}
\\
&
\leq&
\Bigl(\bigwedge_i \bigl(\bigvee_{x_i \in X} F(x_i,y)*A_i(x_i)\bigr)\Bigr)
* \bigvee_{x\in X}\bigl(F(x,y)\bigr)^2
\\
&
=&
\bigvee_{x\in X} \Bigl(\bigl(F(x,y)\bigr)^2 *
\bigwedge_i\bigl(\bigvee_{x_i \in X} F(x_i,y) * A_i(x_i)\bigr)\Bigr)
\\
&
=&
\bigvee_{x\in X}\Bigl(F(x,y) * \bigwedge_i \Bigl(F(x,y) *
\bigl(\bigvee_{x_i \in X} F(x_i,y) * A_i(x_i)\bigr)\Bigr)\Bigr)
\\
&
=&
\bigvee_{x\in X}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{x_i\in
X} F(x,y) * F(x_i,y) * A_i(x_i)\bigr)\Bigr)
\\
&
\leq&
\bigvee_{x\in X}\Bigl(F(x,y) * \bigwedge_i \bigl(\bigvee_{x_i \in X}
E_X(x,x_i) * A_i(x_i)\bigr)\Bigr)
\\
&
\leq&
\bigvee_{x\in X}\bigl(F(x,y) * \bigwedge_i A_i(x)\bigr)
\\
&
=&
F^{\to}\bigl(\bigwedge_i A_i\bigr)(y),
\end{array}
$$
and hence
$$\Bigl(\bigwedge_i F^{\to}(A_i)\Bigr)* \bigl(\sigma(F)\bigr)^2
\leq F^{\to}\bigl(\bigwedge_i A_i\bigr).$$
To complete the proof notice that the inequality
$$F^{\rightarrow}(\bigwedge_{i \in \mathcal{I}} A_i)
\leq
\bigwedge_{i \in \mathcal{I}} F^{\rightarrow}(A_i)$$
is obvious.
(4).
Let $B \in L^Y$, then
$$
\begin{array}{lll}
F^{\rightarrow}(F^{\leftarrow}(B))(y)&
=&
\bigvee_x \bigl(F(x,y)*F^{\gets}(B)(x)\bigr)\\
&
=&
\bigvee_x
F(x,y) * \bigl(\bigvee_{y'} F(x,y') * B(y') \bigr)\\
&
\geq&
\bigvee_x F(x,y) * F(x,y) * B(y)\\
&
\geq&
\sigma (F)^2 * B(y),
\end{array}
$$
and hence the first inequality in item 4 is proved. From here and
Proposition \ref{im-pr} (6) the second statement of item 4 follows.
(5).
The proof of the last property is straightforward and therefore omitted.
\end{proof}
\begin{question}
We do not know whether inequalities in items 3, 4 and 5 can be improved.
\end{question}
\begin{comments}
\mbox{}
\begin{enumerate}
\item
Properties 1 and 2 were first established in \cite{HPS2}.
\item
In \cite{HPS2} the following version of Property 3 was proved:\\
If $L$ is completely distributive and $F$ is injective,
then
$$
(\bigwedge_i F^{\rightarrow}(A_i))^5
\leq
F^{\rightarrow}(\bigwedge_i A_i)
\leq
\bigwedge_i F^{\rightarrow}(A_i)
\qquad \forall \{A_i : i \in {\mathcal{I}}\} \subset L^X
$$
and
$$F^{\rightarrow}(\bigwedge_i A_i) =
\bigwedge_i F^{\rightarrow}(A_i)
\quad \mbox{ in case } \wedge = *$$
No extensionality is assumed in these cases.
\item
In case of an ordinary function $f:X \to Y$ the equality
$$f^{\to}\bigl(\bigwedge_{i\in \mathcal{I}} (A_i)\bigr) =
\bigwedge_{i\in\mathcal{I}} f^{\to}(A_i)$$
holds just under assumption that $f$ is injective.
On the other hand, in case of a fuzzy function $F$ to get a reasonable
counterpart of this property we need to assume that $F$ is bijective.
The reason for this, as we see it, is that in case of an ordinary
function $f$, when proving the equality, we actually deal only with
points belonging to the image $f(X)$, while the rest of $Y \setminus
f(X)$ does not play any role.
On the other hand, in case of a fuzzy function $F: X \rightarrowtail Y$ the whole
$Y$ is ``an image of $X$ to a certain extent'', and therefore, when
operating with images of $L$-sets, we need to take into account, to what
extent a point $y$ is in the ``image'' of $X$.
\end{enumerate}
\end{comments}
\section{Further properties of fuzzy category {$\mathcal{F}SET(L)$}}
In this section we continue to study properties of the fuzzy category
{$\mathcal{F}SET(L)$}.
As different from the previous section, where our principal interest
was in the ``set-theoretic'' aspect of fuzzy functions, here we shall
be mainly interested in their properties of ``categorical nature''.
First we shall specify the two (crisp) categories related to {$\mathcal{F}SET(L)$}:
namely, its bottom frame {$\mathcal{F}SET(L)$}$^\bot$ (={$\mathcal{F}SET(L)$}; this category was
introduced already in Section~2) and its top frame {$\mathcal{F}SET(L)$}$^\top$.
The last one will be of special importance for us.
By definition its morphisms $F$ satisfy condition $\mu(F) = \top$, and as
we have seen in the previous section, fuzzy functions satisfying this
condition ``behave themselves much more like ordinary functions'' than
general fuzzy functions.
Respectively, the results which we are able to establish about
{$\mathcal{F}SET(L)$}$^\top$ and about topological category {$\mathcal{F}TOP(L)^\top$} based on it, are more
complete and nice, then their more general counterparts.
Second, note that the ``classical'' category $SET(L)$ of $L$-valued sets
can be naturally viewed as a subcategory of {$\mathcal{F}SET(L)$}$^\top$.
In case $L=\{0,1\}$ obviously the two categories collapse into the
category SET of sets.
On the other hand, starting with the category SET
(=SET$(\{0,1\})$) (i.e.\ $L=\{0,1\}$) of sets and enriching it with
respective fuzzy functions, we obtain again the category SET as
{$\mathcal{F}SET(L)$}$^\top$ and obtain the category of sets and partial functions as
{$\mathcal{F}SET(L)$}$^\bot$.
\subsection{Preimages of $L$-valued equalities under fuzzy functions}
Let an $L$-valued set $(Y,E_Y)$, a set $X$ and a mapping
$F: X \times Y \to L$ be given.
We are interested to find the largest $L$-valued equality $E_X$ on $X$
for which $F: (X,E_X) \to (Y,E_Y)$ is a fuzzy function.
This $L$-valued equality will be called {\it the preimage of $E_Y$ under
$F$} and will be denoted $F^{\gets}(E_Y)$.
Note first that the axioms
\begin{enumerate}
\item[(1ff)]
$F(x,y)*E_Y(y,y') \leq F(x,y')$, and
\item[(3ff)]
$F(x,y)*F(x,y') \leq E(y,y')$
\end{enumerate}
do not depend on the $L$-valued equality on $X$ and hence we have to
demand that the mapping $F$ originally satisfies them.
To satisfy the last axiom
\begin{enumerate}
\item[(2ff)]
$E_X(x,x') * F(x,y) \leq F(x',y)$
\end{enumerate}
in an ``optimal way'' we define
$$E_X(x,x') :=
\bigwedge_y
\Bigl(\bigl( F(x,y) \longmapsto F(x',y)\bigr) \wedge
\bigl(F(x',y) \longmapsto F(x,y) \bigr)\Bigr).$$
Then $E_X: X\times X \to L$ is an $L$-valued equality on $X$.
Indeed, the validity of properties $E_X(x,x) = \top$ and
$E_X(x,x') = E_X(x',x)$ is obvious.
To establish the last property, i.e.\
$E_X(x,x') * E_X(x',x'') \leq E_X(x,x'')$,
we proceed as follows:
$$
\begin{array}{lll}
\multicolumn{3}{l}{
E_X(x,x') * E_X(x',x'')
}
\\
&
=&
\bigwedge_y
\Bigl(
\bigl(F(x,y) \longmapsto F(x',y)\bigr) \wedge
\bigl(F(x',y) \longmapsto F(x,y)\bigr)
\Bigr)
\\
&
&
*
\bigwedge_y
\Bigl(
\bigl(F(x',y) \longmapsto F(x'',y)\bigr) \wedge
\bigl(F(x'',y) \longmapsto F(x',y)\bigr)
\Bigr)
\\
&
\leq&
\bigwedge_y
\Bigl(
\Bigl(\bigl(F(x,y) \longmapsto F(x',y)\bigr) \wedge
\bigl(F(x',y) \longmapsto F(x,y) \bigr)\Bigr)
\\
&
&
\ *
\Bigl(\bigl(F(x',y) \longmapsto F(x'',y)\bigr) \wedge
\bigl(F(x'',y) \longmapsto F(x',y)\bigr)\Bigr)
\Bigr)
\\
&
\leq&
\bigwedge_y
\Bigl(
\Bigl(\bigl(F(x,y) \longmapsto F(x',y)\bigr) *
\bigl(F(x',y) \longmapsto F(x'',y)\bigr)\Bigr)
\\
&
&
\ \wedge
\Bigl(\bigl(F(x'',y) \longmapsto F(x',y)\bigr) *
\bigl(F(x',y) \longmapsto F(x,y)\bigr)\Bigr)
\Bigr)
\\
&
\leq&
\bigwedge_y
\Bigl(
\bigl(F(x,y) \longmapsto F(x'',y)\bigr) \wedge
\bigl(F(x'',y) \longmapsto F(x,y)\bigr)
\Bigr)
\\
&
=&
E_X(x,x'').
\end{array}
$$
Further, just from the definition of $E_X$ it is clear that $F$
satisfies the axiom {\it (2ff)} and hence it is indeed a fuzzy
function $F: (X,E_X) \rightarrowtail (Y,E_Y).$ Moreover, from the definition
of $E_X$ it is easy to note that it is really the largest $L$-valued
equality on $X$ for which $F$ satisfies axiom {\it (2ff)}.
Finally, note that the value $\mu(F)$ is an inner property of the
mapping $F: X\times Y \to L$ and does not depend on $L$-valued
equalities on these sets.
\begin{question}
We do not know whether the preimage $F^{\gets}(E_Y)$ is the initial
structure for the source $F: X \rightarrowtail (Y,E_Y)$ in {$\mathcal{F}SET(L)$}.
Namely, given an $L$-valued set $(Z,E_Z)$ and a ``fuzzy quasi-function''
$G: (Z,E_Z) \rightarrowtail X$ is it true that composition
$F \circ G: (Z,E_Z) \rightarrowtail (Y,E_Y)$ is a fuzzy function if and only if
$G: (Z,E_Z) \rightarrowtail (X,E_X)$ is a fuzzy function?
By a fuzzy quasi-function we mean that $G$ satisfies properties {\it
(1ff)} and {\it (3ff)} which do not depend on the equality on $X$.
\end{question}
\subsection{Images of $L$-valued equalities under fuzzy functions}
Let an $L$-valued set $(X,E_X)$, a set $Y$ and a mapping
$F: X \times Y \to L$ be given.
We are interested to find the smallest $L$-valued equality $E_Y$ on $Y$
for which $F: (X,E_X) \to (Y,E_Y)$ is a fuzzy function.
This $L$-valued equality will be called {\it the image of $E_X$
under $F$} and will be denoted $F^{\to}(E_X)$.
Note first that the axiom
\begin{enumerate}
\item[(2ff)]
$E_X(x,x') * F(x,y) \leq F(x',y)$
\end{enumerate}
does not depend on the $L$-valued equality on $Y$ and hence we have to
demand that the mapping $F$ originally satisfies it.
Therefore we have to bother that $F$ satisfies the remaining two axioms:
\begin{enumerate}
\item[(1ff)] $F(x,y)*E_Y(y,y') \leq F(x,y')$, and
\item[(3ff)] $F(x,y)*F(x,y') \leq E(y,y')$
\end{enumerate}
These conditions can be rewritten in the form of the double inequality:
$$
\begin{array}{lll}
F(x,y)*F(x,y')&
\leq&
E_Y(y,y')\\
&
\leq&
\bigl(F(x,y')\longmapsto F(x,y)\bigr) \wedge \bigl(F(x,y) \longmapsto F(x,y')\bigr).
\end{array}
$$
Defining $E_Y$ by the equality
$$E_Y(y,y') = \bigvee_x \bigl(F(x,y) * F(x,y')\bigr),$$
we shall obviously satisfy both of them.
Moreover, it is clear that $E_Y$ satisfies property (3ff) and besides
$E_Y$ cannot be diminished without losing this property.
Hence we have to show only that $E_Y$ is indeed an $L$-valued
equality.
However, to prove this we need the assumption that $\sigma(F) = \top$,
that is $F$ is $\top$-surjective.
Note that
$$E_Y(y,y) = \bigvee_x \bigl(F(x,y)*F(x,y)\bigr) \geq (\sigma(F))^2,$$
and hence the first axiom is justified in case $\sigma(F) = \top$.
The equality $E_Y(y,y') = E_Y(y',y)$ is obvious.
Finally, to establish the last property, we proceed as follows.
Let $y,y',y'' \in Y$.
Then
$$
\begin{array}{lll}
\multicolumn{3}{l}{
E_Y(y,y')*E_Y(y',y'')
}
\\
&
=&
\bigvee_x\bigl(F(x,y)*F(x,y')\bigr)
\,
*
\,
\bigvee_x\bigl(F(x,y')*F(x,y'')\bigr)
\\
&
=&
\bigvee_{x,x'} \bigl(F(x,y)*F(x,y')*
F(x',y')*F(x',y'')\bigr)\\
&
\leq&
\bigvee_{x,x'} \bigl(F(x,y)* E_X(x,x')*F(x',y'')\bigr)\\
&
\leq&
\bigvee_x \bigl(F(x,y) * F(x,y'')\bigr)
\\
&
=&
E_Y(y,y'').
\end{array}
$$
\begin{question}
We do not know whether the image $F^{\to}(E_X)$ is the final
structure for the sink $F: (X,E_X) \rightarrowtail Y$ in {$\mathcal{F}SET(L)$}\ in case
$\sigma(F) = \top$.
Namely, given an $L$-valued set $(Z,E_Z)$ and a ``fuzzy
almost-function'' $G: Y \rightarrowtail (Z,E_Z) $ is it true that composition
$F \circ G: (Z,E_Z) \rightarrowtail (Y,E_Y)$ is a fuzzy function if and only if
$G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ is a fuzzy function?
By a fuzzy almost-function we mean that $G$ satisfies property
(2ff) which does not depend on the equality on $Y$.
\end{question}
\subsection{Products in {$\mathcal{F}SET(L)$}$^\top$}
Let $\mathcal{Y} = \{(Y_i, E_i): i \in \mathcal{I} \}$ be a family of $L$-valued sets and
let $Y = \prod_i Y_i $ be the product of the corresponding sets.
We introduce the $L$-valued equality $E: Y\times Y \to L$ on $Y$ by
setting $E_Y(y,y') = \bigwedge_{i\in\mathcal{I}} E_i(y_i,{y'}_i)$ where
$y=(y_i)_{i\in\mathcal{I}}$, $y'=({y'}_i)_{i\in\mathcal{I}}$.
Further, let $p_i :Y \to Y_i$ be the projection.
Then the pair $(Y,E)$ thus defined with the family of projections
$p_i :Y \to Y_i$, $i \in \mathcal{I}$, is the product of the family $\mathcal{Y}$ in the
category {$\mathcal{F}SET(L)$}$^\top$.
To show this notice first that, since the morphisms in this category are
fuzzy functions, a projection $p_{i_0}: Y \to Y_{i_0}$ must be realized
as the fuzzy function $p_{i_0}: Y \times Y_{i_0} \to L$ such that
$p_{i_0}(y,y^0_{i_0}) = \top$ if and only if the $i_0$-coordinate
of $y$ is $y^0_{i_0}$ and $p_{i_0}(y,y^0_{i_0}) = \bot$ otherwise.
Next, let $F_i: (X,E_X) \rightarrowtail (Y_i,{E_i})$, $i \in \mathcal{I}$ be a family of
fuzzy functions.
We define the fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ by the equality:
$$F(x,y) = \bigwedge_{i\in\mathcal{I}} F_i(x,y_i).$$
It is obvious that $\mu(F) = \top$ and hence $F$ is in {$\mathcal{F}SET(L)$}$^\top$.
Finally, notice that the composition
$$(X,E_X) \
\stackrel{F}{\longrightarrow} \
(Y,E_Y) \
\stackrel{p_{i_0}}{\longrightarrow} \
(Y_{i_0},E_{i_0})$$
is the fuzzy function
$$F_{i_0}: (X,E_X) \rightarrowtail (Y_{i_0},E_{i_0}).$$
Indeed, let $x^0 \in X$ and $y^0_{i_0} \in Y_{i_0}$.
Then, taking into account that $\mu(F_i) = \top$ for all $i \in \mathcal{I}$,
we get
$$
\begin{array}{lll}
(p_{i_0} \circ F)(x^0,y^0_{i_0})&
=&
\bigvee_{y\in Y}
\bigl(p_{i_0}(y,y^0_{i_0}) \wedge F(x^0,y)\bigr)\\
&
=&
\bigvee_{y\in Y}
\bigl(p_{i_0}(y,y^0_{i_0}) \wedge \bigwedge_{i\in\mathcal{I}} F_i(x^0,y_i)\bigr)\\
&
=&
F_{i_0}(x^0,y_{i_0}).
\end{array}$$
\begin{question}
We do not know whether products in {$\mathcal{F}SET(L)$}\ can be defined in a reasonable
way.
\end{question}
\subsection{Coproducts in {$\mathcal{F}SET(L)$}}
Let $\mathcal{X}$ = $\{(X_i,E_i): i \in \mathcal{I} \}$ be a family of $L$-valued sets,
let $X = \bigcup X_i$ be the disjoint sum of sets $X_i$.
Further, let $q_i: X_i \to X$ be the inclusion map.
We introduce the $L$-equality on $X$ by setting $E(x,x') = E_i(x,x')$
if $(x,x') \in X_i \times X_i$ for some $i \in \mathcal{I}$ and $E(x,x') = \bot$
otherwise (cf.\ \cite{Ho92}).
An easy verification shows that $(X,E)$ is the coproduct of $\mathcal{X}$ in
{$\mathcal{F}SET(L)$}\ and hence, in particular, in {$\mathcal{F}SET(L)$}$^\top$.
Indeed, given a family of fuzzy functions $F_i: (X_i,E_i) \to (Y,E_Y)$,
let the fuzzy function
$$\oplus_{i\in\mathcal{I}} F_i: (X,E) \to (Y,E_Y)$$
be defined by
$$\oplus_{i\in\mathcal{I}} F_i(x,y) = F_{i_0}(x,y) \mbox{ whenever } x \in X_{i_0}.$$
Then for $x=x_{i_0} \in X_{i_0}$ we have
$$
\begin{array}{lll}
\bigl(\oplus_{i\in\mathcal{I}} F_i \circ q_{i_0} \bigr)(x,y)&
=&
\bigvee_{x'\in X}
\Bigl(q_{i_0}(x,x') \wedge \bigl(\oplus_{i\in\mathcal{I}} F_i (x',y)\bigr)\Bigr)\\
&
=&
F_{i_0}(x,y).
\end{array}
$$
\subsection{Subobjects in {$\mathcal{F}SET(L)$}}
Let $(X,E)$ be an $L$-valued set, let $Y \subset X$ and let $e: Y \to X$
be the natural embedding.
Further, let $E_Y := e^{\gets}(E)$ be the preimage of the $L$-valued
equality $E$.
Explicitly, in this case this means that $E_Y(y,y') = E(y,y')$ for all
$y,y' \in Y$.
One can easily see that $(Y,E_Y)$ is a subobject of $(X,E)$ in the fuzzy
category {$\mathcal{F}SET(L)$}.
\section{Fuzzy category {$\mathcal{F}TOP(L)$}}
\subsection{Basic concepts}
\begin{definition}[see \cite{So2000}, cf.\ also \cite{Ch}, \cite{Go73},
\cite{HoSo99}]\label{LFTop}
A family $\tau_X \subset L^X$ of {\it extensional}
$L$-sets\footnote{Since $L$-topology is defined on an {\it $L$-valued set}
$X$ the condition of extensionality of elements of $L$-topology seems
natural.
Besides the assumption of extensionality is already implicitly included
in the definition of a fuzzy function.}
is called an $L$-topology on an $L$-valued set $(X,E_X)$ if it is closed
under finite meets, arbitrary joins and contains $0_X$ and $1_X$.
Corresponding triple $(X,E_X,\tau_X)$ will be called an $L$-valued
$L$-topological space or just an $L$-topological space for short.
A fuzzy function $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ is called
{\it continuous} if $F^{\leftarrow}(V) \in \tau_X$ for all
$V \in \tau_Y$.
\end{definition}
$L$-topological spaces and continuous fuzzy mappings between them form
the fuzzy category which will be denoted {$\mathcal{F}TOP(L)$}.
Indeed, let
$$F\colon (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)\
\mbox{and}\
G\colon (Y,E_Y,\tau_Y) \rightarrowtail (Z,E_Z,\tau_Z)$$
be continuous fuzzy functions and let $W \in \tau_Z$.
Then
$$
\begin{array}{lll}
(G \circ F)^{\gets}(W)(x)&
=&
\bigvee_z\Bigl((G \circ F)(x,z) * W(z)\Bigr)\\
&
=&
\bigvee_{z,y} \Bigl(F(x,y)*G(y,z)*W(z)\Bigr).
\end{array}
$$
On the other hand, $G^{\gets}(W)(y) = \bigvee_z G(y,z) * W(z)$ and
$$
\begin{array}{lll}
F^{\gets}\bigl(G^{\gets}(W)\bigr)(x)&
=&
\bigvee_y \Bigl(F(x,y) *\bigl(\bigvee_z (G(y,z) * W(z))\bigr)\Bigr)\\
&
=&
\bigvee_{z,y} \Bigl(F(x,y)*G(y,z)*W(z)\Bigr).
\end{array}
$$
Thus $(G \circ F)^{\gets}(W) = F^{\gets}\bigl(G^{\gets}(W)\bigr)$ for every $W$,
and hence composition of continuous fuzzy functions is continuous.
Besides, we have seen already before that
$\mu(G \circ F) \geq \mu(G) * \mu(F)$.
Finally, $E_X^{\leftarrow}(B) = B$ for every {\it extensional}
$B \in L^X$ and hence the identity mapping
$E_X: (X,E_X,\tau_X) \rightarrowtail (X,E_X,\tau_X)$ is continuous.
\begin{remark}
In case when $L$-valued equality $E_X$ is crisp, i.e.\ when $X$ is an
ordinary set, the above definition of an L-topology on $X$ reduces to the
``classical'' definition of an $L$-topology in the sense of Chang and
Goguen, \cite{Ch}, \cite{Go73}.
\end{remark}
\begin{remark}
Some (ordinary) subcategories of the fuzzy category {$\mathcal{F}TOP(L)$}\ will be of
special interest for us.
Namely, let {$\mathcal{F}TOP(L)$}$^\bot$ =: FTOP(L) denote the bottom frame of {$\mathcal{F}TOP(L)$},
let {$\mathcal{F}TOP(L)^\top$}\ be the top frame of {$\mathcal{F}TOP(L)$}, and finally let L-TOP(L) denote
the subcategory of {$\mathcal{F}TOP(L)$}\ whose morphisms are ordinary functions.
Obviously the ``classical'' category L-TOP of Chang-Goguen $L$-topological
spaces can be obtained as a full subcategory L-TOP(L) whose objects carry
crisp equalities.
Another way to obtain L-TOP is to consider fuzzy subcategory of {$\mathcal{F}TOP(L)$}
whose objects carry crisp equalities and whose morphisms satisfy
condition $\mu(F) > \bot.$
\end{remark}
In case when $L$ is an $MV$-algebra and involution $^c: L \to L$ on $L$
is defined in the standard way, i.e.\ $\alpha^c := \alpha \longmapsto \bot$
we can reasonably introduce the notion of a closed $L$-set in an
$L$-topological space:
\begin{definition}
An $L$-set $A$ in an $L$-topological space $(X,E_X,\tau_X)$ is called
closed if $A^c \in \tau_X$ where $A^c \in L^X$ is defined by the equality
$$A^c(x) := A(x) \longmapsto \bot \quad \forall x \in X.$$
\end{definition}
Let $\mathcal{C}_X$ denote the family of all closed $L$-sets in $(X,E_X,\tau_X)$.
In case when $L$ is an $MV$-algebra the families of sets $\tau_X$ and
$\mathcal{C}_X$ mutually determine each other:
$$A \in \tau_X \Longleftrightarrow A^c \in \mathcal{C}_X.$$
\subsection{Analysis of continuity}
Since the operation of taking preimages $F^{\leftarrow}$ commutes with
joins, and in case when $\mu(F)=\top$ also with meets (see Proposition
\ref{im-pr}), one can easily verify the following
\begin{theorem}\label{cont}
Let $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ be $L$-topological spaces,
$\beta_Y$ be a base of $\tau_Y$, $\xi_Y$ its subbase and let $F: X \rightarrowtail Y$
be a fuzzy function.
Then the following are equivalent:
\begin{enumerate}
\item[(1con)]
$F$ is continuous;
\item[(2con)]
for every $V \in \beta_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$;
\item[(3con)]
$F^{\leftarrow}(Int_Y(B)) \leq Int_X (F^{\leftarrow}(B))$,
for every $B \in L^Y$ where $Int_X$ and $Int_Y$ are the corresponding
$L$-interior operators on $X$ and $Y$ respectively.
\end{enumerate}
In case when $\mu(F) = \top$ these conditions are equivalent also to the
following
\begin{enumerate}
\item[(4con)]
for every $V \in \xi_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$.
\end{enumerate}
\end{theorem}
In case when $L$ is an MV-algebra one can characterize continuity of a
fuzzy function by means of closed $L$-sets and $L$-closure operators:
\begin{theorem}\label{cont-cl}
Let $(L,\leq,\vee,\wedge,*)$ be an MV-algebra, $(X,E_X,\tau_X)$ and
$(Y,E_Y,\tau_Y)$ be $L$-topological spaces and $F: X \rightarrowtail Y$ be a
fuzzy function.
Further, let $\mathcal{C}_X$, $\mathcal{C}_Y$ denote the families of closed $L$-sets and
$cl_X$, $cl_Y$ denote the closure operators in $(X,E_X,\tau_X)$ and
$(Y,E_Y,\tau_Y)$ respectively.
Then the following two conditions are equivalent:
\begin{enumerate}
\item[(1con)]
$F$ is continuous;
\item[(5con)]
For every $B \in \mathcal{C}_Y$ it follows $F^{\leftarrow}(B) \in \mathcal{C}_X$.
\end{enumerate}
In case when $\mu(F) = \top$, the previous conditions are equivalent to
the following:
\begin{enumerate}
\item[(6con)]
For every $A \in L^X$ it holds $F^{\rightarrow}(cl_X(A)) \leq
cl_Y(F^{\rightarrow}(A)).$
\end{enumerate}
\end{theorem}
\begin{proof}
In case when $L$ is equipped with an order reversing involution, as it is
in our situation, families of closed and open $L$-sets mutually determine
each other.
Therefore, to verify the equivalence of (1con) and (5con) it is sufficient
to notice that for every $B \in L^Y$ and every $x \in X$ it holds
$$
\begin{array}{lll}
F^{\leftarrow}(B^c)(x)&
=&
\bigvee_y \bigl(F(x,y) * (B(y) \longmapsto \bot)\bigr)\\
&
=&
\bigvee_y \bigl(F(x,y) * B(y) \longmapsto \bot \bigr)\\
&
=&
\bigl(\bigvee_y (F(x,y) * B(y))\bigr)^c\\
&
=&
(F^{\leftarrow}(B))^c(x),
\end{array}
$$
and hence
$$
F^{\leftarrow}(B^c) = ( F^{\leftarrow}(B))^c \quad \forall B \in L^Y,
$$
i.e.\ operation of taking preimages preserves involution.
To show implication (5con) $\Longrightarrow$ (6con) under assumption
$\mu(F)=\top$ let $A \in L^X$.
Then, according to Proposition \ref{im-pr} (5),
$$A \leq F^{\leftarrow}(F^{\rightarrow}(A))
\leq F^{\leftarrow}(cl_Y(F^{\rightarrow}(A))),$$
and hence, by (5con), also
$$cl_X (A) \leq F^{\leftarrow}(cl_Y (F^{\rightarrow}(A))).$$
Now, by monotonicity of the image operator and by Proposition \ref{im-pr}
(6) (taking into account that $cl_X A$ is extensional as a closed
$L$-set), we get:
$$
F^{\rightarrow}(cl_X (A))
\leq
F^{\rightarrow}\bigl(F^{\leftarrow}(cl_Y (F^{\rightarrow}(A)))\bigr)
\leq
cl_Y (F^{\rightarrow}(A)).$$
Conversely, to show implication (6con) $\Longrightarrow$ (5con)
let $B \in \mathcal{C}_Y$ and let $F^{\gets}(B) := A$. Then, by (6con),
$$F^{\to}(cl_X (A)) \leq cl_Y (F^{\to} (A)) \leq cl_Y (B) = B.$$
In virtue of Proposition \ref{im-pr} (5) and taking into account that
$\mu(F) = \top$, it follows from here that
$cl_X (A) \leq F^{\gets}(B) = A$, and hence $cl_X (A) = A$.
\end{proof}
\subsection{Fuzzy $\alpha$-homeomorphisms and fuzzy $\alpha$-homeomorphic
spaces}
The following definition naturally stems from Definitions \ref{inj},
\ref{sur} and \ref{LFTop} and item 2 of Proposition \ref{im-pr}:
\begin{definition}
Given $L$-topological spaces $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$, a
fuzzy function $F: X \rightarrowtail Y$ is called a fuzzy $\alpha$-homeomorphism if
$\mu(F) \geq \alpha$, $\sigma(F) \geq \alpha$, it is injective,
continuous, and the inverse fuzzy function $F^{-1}: Y \rightarrowtail X$ is also
continuous.
Spaces $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ are called fuzzy
$\alpha$-homeomorphic if there exists a fuzzy $\alpha$-homeomorphism
$F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$.
\end{definition}
One can easily verify that composition of two fuzzy
$\alpha$-homeomorphisms is a fuzzy $\alpha^2$-home\-omorphism;
in particular, composition of fuzzy $\top$-homeo\-morphisms is a fuzzy
$\top$-homeo\-morphism, and hence fuzzy $\top$-homeo\-morph\-isms
determine the equivalence relation $\stackrel{\top}{\approx}$ on the class
of all $L$-topological spaces.
Besides, since every (usual) homeomorphism is
obviously a fuzzy $\top$-homeo\-morphism, homeomorphic spaces are also
fuzzy $\top$-homeo\-morphic:
$$(X,E_X,\tau_X) \approx (Y,E_Y,\tau_Y)
\Longrightarrow (X,E_X,\tau_X) \stackrel{\top}{\approx} (Y,E_Y,\tau_Y).$$
The converse generally does not hold:
\begin{example}
Let $L$ be the unit interval $[0,1]$ viewed as an $MV$-algebra
(i.e.\ $\alpha * \beta = \max\{\alpha+\beta-1, 0\}$), let $(X,\varrho)$ be
an uncountable separable metric space such that $\varrho(x,x') \leq 1$
$\forall x,x' \in X$, and let $Y$ be its countable dense subset.
Further, let the $L$-valued equality on $X$ be defined by
$E_X(x,x') := 1 - \varrho(x,x')$ and let $E_Y$ be its restriction to $Y$.
Let $\tau_X$ be any $L$-topology on an $L$-valued set $(X,E_X)$ (in
particular, one can take $\tau_X := \{c_X \mid c \in [0,1] \}$).
Finally, let a fuzzy function $F: (X,E_X) \rightarrowtail (Y,E_Y)$ be defined by
$F(x,y) := 1 - \varrho(x,y)$.
It is easy to see that $F$ is a $\top$-homeomorphism and hence
$(X,E_X,\tau_X) \stackrel{\top}{\approx} (Y,E_Y,\tau_Y)$.
On the other hand $(X,E_X,\tau_X) \not\approx (Y,E_Y,\tau_Y)$ just for
set-theoretical reasons.
\end{example}
\section{Category {$\mathcal{F}TOP(L)^\top$}}
Let {$\mathcal{F}TOP(L)^\top$}\ be the top-frame of {$\mathcal{F}TOP(L)$}, i.e.\ {$\mathcal{F}TOP(L)^\top$}\ is a category
whose objects are the same as in {$\mathcal{F}TOP(L)$}, that is L-topological spaces,
and morphisms are continuous fuzzy functions
$F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ such that $\mu(F) = \top.$
Note, that as different from the fuzzy category {$\mathcal{F}TOP(L)$}, {$\mathcal{F}TOP(L)^\top$}\ is a
usual category. Applying Theorem \ref{cont}, we come to the following
result:
\begin{theorem}\label{cont-top}
Let $(X,E_X,\tau_X)$ and $(Y,E_Y,\tau_Y)$ be $L$-topological spaces,
$\beta_Y$ be a base of $\tau_Y$, $\xi_Y$ its subbase and let $F: X \rightarrowtail Y$
be a fuzzy function.
Then the following conditions are equivalent:
\begin{enumerate}
\item[(1con)]
$F$ is continuous;
\item[(2con)]
for every $V \in \beta_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$;
\item[(3con)]
$F^{\leftarrow}(Int_Y(B)) \leq Int_X (F^{\leftarrow}(B))$, where
$Int_X$ and $Int_Y$ are the corresponding $L$-interior operators on $X$
and $Y$ respectively;
\item[(4con)]
for every $V \in \xi_Y$ it holds $F^{\leftarrow}(V) \in \tau_X$.
\end{enumerate}
\end{theorem}
In case when $L$ is an MV-algebra, we get from \ref{cont-cl}
\begin{theorem}\label{cont-cltop}
Let $(L,\leq,\vee,\wedge,*)$ be an MV-algebra, $(X,E_X,\tau_X)$ and
$(Y,E_Y,\tau_Y)$ be $L$-topological spaces and $F: X \rightarrowtail Y$ be a
a morphism in {$\mathcal{F}TOP(L)^\top$}.
Further, let $\mathcal{C}_X$, $\mathcal{C}_Y$ denote the families of closed $L$-sets and
$cl_X$, $cl_Y$ denote the closure operators on $(X,E_X,\tau_X)$ and
$(Y,E_Y,\tau_Y)$ respectively.
Then the following two conditions are equivalent:
\begin{enumerate}
\item[(1con)]
$F$ is continuous;
\item[(5con)]
For every $B \in \mathcal{C}_Y$ it follows $F^{\leftarrow}(B) \in \mathcal{C}_X$;
\item[(6con)]
For every $A \in L^X$ it holds
$F^{\rightarrow}(cl_X(A)) \leq cl_Y(F^{\rightarrow}(A))$.
\end{enumerate}
\end{theorem}
\begin{theorem}\label{topcat}
{$\mathcal{F}TOP(L)^\top$}\ is a topological category over the category
{$\mathcal{F}SET(L)$}$^\top$.
\end{theorem}
\begin{proof}
Since intersection of any family of L-topologies is an L-topology,
{$\mathcal{F}TOP(L)^\top$}\ is fiber complete.
Therefore we have to show only that any structured source in {$\mathcal{F}SET(L)$}$^\top$
$F: (X,E_X) \rightarrowtail (Y,E_Y,\tau_Y)$ has a unique initial lift.
Let
$$\tau_X := F^{\gets}(\tau_Y) = \{F^{\gets}(V) \mid V \in \tau_Y \}.$$
Then from Proposition \ref{im-pr} it follows that $\tau_X$ is closed under
taking finite meets and arbitrary joins.
Furthermore, obviously $F^{\gets}(0_Y) = 0_X$ and taking into account
condition $\mu(F)=\top$ one easily establishes that $F^{\gets}(1_Y) = 1_X$.
Therefore, taking into account that preimages of extensional $L$-sets are
extensional, (see Subsection \ref{impref}) we conclude that the family
$\tau_X$ is
an $L$-topology on $X$.
Further, just from the construction of $\tau_X$ it is clear that
$F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ is continuous and, moreover,
$\tau_X$ is the weakest L-topology on $X$ with this property.
Let now $(Z,E_Z,\tau_Z)$ be an $L$-topological space and
$H: (Z,E_Z) \rightarrowtail (X,E_X)$ a fuzzy function such that the composition
$G := H \circ F: (Z,E_Z,\tau_Z) \rightarrowtail (Y,E_Y,\tau_Y)$ is continuous.
To complete the proof we have to show that $H$ is continuous.
Indeed, let $U \in \tau_X$.
Then there exists $V \in \tau_Y$ such that $U = F^{\gets}(V)$.
Therefore
$$H^{\gets}(U) = H^{\gets}\bigl(F^{\gets}(V)\bigr)
= G^{\gets}(V) \in \tau_Z$$
and hence $H$ is continuous.
\end{proof}
\subsection{Products in {$\mathcal{F}TOP(L)^\top$}}
Our next aim is to give an explicite description of the product in {$\mathcal{F}TOP(L)^\top$}.
Given a family $\mathcal{Y} = \{(Y_i, E_i, \tau_i): i \in \mathcal{I} \}$ of
$L$-topological spaces, let $(Y,E)$ be the product of the corresponding
$L$-valued sets $\{(Y_i, E_i): i \in \mathcal{I} \}$ in {$\mathcal{F}SET(L)$}$^\top$ and let
$p_{i}: Y \to Y_{i}$ be the projections.
Further, for each $U_i \in \tau_i$ let $\hat U_i := p_i^{-1}(U_i)$.
Then the family $\xi := \{\hat U_i: U_i \in \tau_i, i \in \mathcal{I} \}$ is a
subbase of an $L$-topology $\tau$ on the product $L$-valued set $(Y,E)$
which is known to be the product $L$-topology for $L$-topological spaces
$\{(Y_i,\tau_i) \mid i \in \mathcal{I} \}$ in the category L-TOP.
In its turn the triple $(Y,E,\tau)$ is the product of $L$-topological
spaces $\{(Y_i, E_i, \tau_i): i \in \mathcal{I} \}$ in the category {$\mathcal{F}TOP(L)^\top$}.
Indeed, let $(Z,E_Z,\tau_Z)$ be an $L$-topological space and
$\{ F_i: Z \rightarrowtail Y_i \mid i \in \mathcal{I} \}$ be a family of continuous fuzzy
mappings.
Then, defining a mapping $F: Z\times Y \to L$ by
$F(z,y) = \wedge_{i\in\mathcal{I}} F_i(z,y_i)$ we obtain a fuzzy function
$F: Z \rightarrowtail Y$ such that $\mu(F) = \bigwedge_{i\in\mathcal{I}}\mu(F_i) = \top$ and
besides it can be easily seen that for every $i_0 \in \mathcal{I}$, every $z \in
Z$, and for every $U_{i_0} \in \tau_{i_0}$ it holds:
$$\begin{array}{lll}
F^{\leftarrow}(\hat U_{i_0})(z)&
=&
\bigvee_{y \in Y}
\Bigl(\bigl(\bigwedge_{i\in\mathcal{I}}F_i(z,y_i)\bigr) * U_{i_0}(y_{i_0})\Bigr)
\\
&
=&
\bigwedge_{i\in \mathcal{I},\, i\neq i_0}
\Bigl(\bigvee_{y_i \in Y_i} F_i(z,y_i)\Bigr)
\wedge
\bigvee_{y_{i_0} \in Y_{i_0}}
\bigl(F_{i_0} (z,y_{i_0}) * U_{i_0}(y_{i_0})\bigr)
\\
&
=&
\top \wedge F^{\gets}_{i_0}(U_{i_0})(z)\\
&
=&
F^{\gets}_{i_0}(U_{i_0})(z).
\end{array}$$
Hence continuity of all $F_i$ guarantees the continuity of
$F: (Z,E_Z,\tau_Z) \rightarrowtail (Y,E,\tau)$.
Thus $(Y,E,\tau)$ is indeed the product of
$\mathcal{Y} = \{(Y_i, E_i, \tau_i): i \in \mathcal{I} \}$ in {$\mathcal{F}TOP(L)^\top$}.
\begin{question}
We do not know whether products in the fuzzy category {$\mathcal{F}TOP(L)$}\ exist.
\end{question}
\subsection{Subspaces in {$\mathcal{F}TOP(L)^\top$}}
Let $(X,E,\tau)$ be an $L$-valued $L$-topological space, $Y \subset X$ and
let $(Y,E_Y)$ be the subobject of the L-valued set $(X,E)$ in the category
{$\mathcal{F}SET(L)$}.
Further, let $\tau_Y$ be the subspace L-topology, that is $(Y,\tau_Y)$ is
a subspace of the $L$-topological space $(X,\tau)$ in the category
L-TOP.
Then it is clear that the triple $(Y,E_Y,\tau_Y)$ is the subobject of
$(X,E, \tau)$ in the category {$\mathcal{F}TOP(L)^\top$} (and in the fuzzy category {$\mathcal{F}TOP(L)$}\
as well).
\subsection{Coproducts in {$\mathcal{F}TOP(L)^\top$}}
Given a family $\mathcal{X} := \{(X_i,E_i,\tau_i) \}$ of $L$-topological spaces
let $(X,E)$ be the direct sum of the corresponding $L$-valued sets
$(X_i,E_i)$ in {$\mathcal{F}SET(L)$}.
Further, let $\tau$ be the $L$-topology on $X$ determined by the subbase
$\bigcup_{i\in\mathcal{I}} \tau_i \subset 2^X$.
In other words $(X,\tau)$ is the coproduct of $L$-topological spaces
$(X_i,\tau_i)$ in the category L-TOP.
Then the triple $(X,E,\tau)$ is the coproduct of the family
$\mathcal{X} := \{(X_i,E_i,\tau_i) \}$ in the category {$\mathcal{F}TOP(L)^\top$} (and in the fuzzy
category {$\mathcal{F}TOP(L)$} as well).
Indeed, let $q_i: (X_i,E_i,\tau_i) \to (X,E,\tau), i\in\mathcal{I}$ denote the
canonical embeddings.
Further, consider an $L$-topological space $(Y,E_Y,\tau_Y)$ and a family
of continuous fuzzy functions $F_i: (X_i,E_i,\tau_i) \rightarrowtail (Y,E_Y,\tau_Y)$.
Then, by setting $F(x,y) := F_i(x_i,y)$ whenever $x = x_i \in X_i$, we
obtain a continuous fuzzy function $F: (X,E,\tau) \rightarrowtail (Y,E_Y,\tau_Y)$
(i.e.\ a mapping $F: X\times Y \to L$) such that $F_i = q_i \circ F$ for
every $i \in \mathcal{I}$.
\subsection{Quotients in {$\mathcal{F}TOP(L)^\top$}}
Let $(X,E_X,\tau_X)$ be an $L$-topological space,
let $q: X \to Y$ be a surjective mapping.
Further, let $q^{\to}(E_X) =: E_Y$ be the image of the $L$-valued
equality $E_X$ and let
$\tau_Y = \{V \in L^Y \mid q^{-1}(V) \in \tau_X \}$, that is $\tau_Y$ is
the quotient $L$-topology determined by the mapping $q: (X,\tau_X) \to Y$
in the category L-TOP.
Then $(Y,E_Y,\tau_Y)$ is the quotient object in the category {$\mathcal{F}TOP(L)^\top$}.
Indeed, consider a fuzzy function $F: (X,E_X,\tau_X) \rightarrowtail (Z,E_Z,\tau_Z)$
and let $G: (Y,E_Y) \rightarrowtail (Z,E_Z)$ be a morphism in {$\mathcal{F}SET(L)$}\ such that
$q \circ G = F$.
Then an easy verification shows that the fuzzy function
$G: (Y,E_Y,\tau_Y) \rightarrowtail (Z,E_Z,\tau_Z)$ is continuous (i.e.\ a morphism
in {$\mathcal{F}TOP(L)^\top$}) if and only if $F: (X,E_X,\tau_X) \to (Z,E_Z,\tau_Z)$ is
continuous (i.e.\ a morphism in {$\mathcal{F}TOP(L)^\top$}).
Our next aim is to consider the behaviour of some topological properties
of L-valued L-topological spaces in respect of fuzzy function. In this
work we restrict our interest to the property of compactness. Some other
topological properties, in particular, connectedness and separation
properties will be studied in a subsequent work.
\section{Compactness}
\subsection{Preservation of compactness by fuzzy functions}
One of the basic facts of general topology --- both classic and
``fuzzy'', is preservation of compactness type properties by continuous
mappings.
Here we present a counterpart of this fact in {$\mathcal{F}TOP(L)$}.
However, since in literature on fuzzy topology different definitions of
compactness can be found, first we must specify which one of compactness
notions will be used.
\begin{definition}\label{comp}
An $L$-topological space $(X,E,\tau)$ will be called
$(\alpha, \beta)$-compact where $\alpha, \beta \in L$, if for every family
$\mathcal{U} \subset \tau$ such that $\bigvee \mathcal{U} \geq \alpha$ there exists a finite
subfamily $\mathcal{U}_0 \subset \mathcal{U}$ such that $\bigvee\mathcal{U}_0 \geq \beta$.
An $(\alpha,\alpha)$-compact space will be called just
$\alpha$-compact.\footnote{Note that Chang's definition of compactness
\cite{Ch} for a $[0,1]$-topological space is equivalent to our
$1$-compactness.
A $[0,1]$-topological space is compact in Lowen's sense \cite{Lo76} if it
is $(\alpha,\beta)$-compact for all $\alpha \in [0,1]$ and all
$\beta < \alpha$.}
\end{definition}
\begin{theorem}\label{pr-comp}
Let $(X,E_X, \tau_X)$, $(Y,
E_Y, \tau_Y)$ be $L$-topological spaces, $F:X \rightarrowtail Y $ be a continuous fuzzy
function such that $\mu(F) \geq \beta$, and $\sigma(F) \geq \gamma$. If $X$
is $\alpha * \beta$-compact, then $Y$ is $(\alpha,
\alpha*\beta*\gamma)$-compact.
\end{theorem}
\begin{proof}
Let $\mathcal{V} \subset \tau_Y$ be such that $\bigvee \mathcal{V} \geq \alpha$.
Then, applying Proposition \ref{im-pr} (4), (7) and taking in view
monotonicity of $F^{\gets}$, we get
$$\bigvee_{V \in \mathcal{V}} F^{\leftarrow}(V) =
F^{\leftarrow}(\bigvee_{V \in \mathcal{V}}V) \geq F^{\leftarrow}(\alpha) \geq \alpha
* \beta.$$ Now, since $(X,E_X,\tau_X)$ is $\alpha * \beta$-compact, it
follows that there exists a finite subfamily $\mathcal{V}_0 \subset \mathcal{V}$
such that
$$\bigvee_{V\in
\mathcal{V}_0} F^{\leftarrow}(V) \geq \alpha*\beta.$$
Applying Propositions
\ref{im-pr} (6),(4) and \ref{in-sur} (5) we obtain:
$$\bigvee_{V\in\mathcal{V}_0} V \geq
F^{\rightarrow}\Bigl(F^{\leftarrow}\bigl(\bigvee_{V\in\mathcal{V}_0}
V\bigr)\Bigr) =
F^{\rightarrow}\Bigl(\bigvee_{V\in\mathcal{V}_0}
\bigl(F^{\leftarrow}(V)\bigr)\Bigr)
\geq
F^{\rightarrow}\bigl(\alpha * \beta\bigr) \geq
\alpha*\beta*\gamma.$$
\end{proof}
\begin{corollary} Let $(X,E_X, \tau_X)$, $(Y,E_Y,\tau_Y)$ be
$L$-topological spaces, $F:X \rightarrowtail Y$ be a fuzzy function such that $\mu(F)
= \top $ and $\sigma(F) = \top$. If $X$ is $\alpha$-compact,
then $Y$ is also $\alpha$-compact.
\end{corollary}
\subsection{Compactness in case of an $MV$-algebra}
In case $L$ is an MV-algebra one can characterize compactness by systems
of closed $L$-sets:
\begin{proposition}
Let $(X,E_X,\tau_X)$ be an $L$-topological space and let $\mathcal{C}_X$ be the
family of its closed $L$-sets.
Then the space $(X,E_X,\tau_X)$ is $(\alpha,\beta)$-compact if and only
if for every $\mathcal{A} \subset \mathcal{C}_X$ the following implication holds:
$$
\mbox{if } \bigwedge_{A\in \mathcal{A}_0} A \not\leq \beta^c
\mbox{ for every finite family } \mathcal{A}_0 \subset \mathcal{A},
\mbox{ then } \bigwedge_{A\in \mathcal{A}} A \not\leq \alpha^c.
$$
\end{proposition}
\begin{proof}
One has just to take involutions ``$\longmapsto \bot$'' in the definition of
$(\alpha,\beta)$-compactness and apply the De Morgan law.
\end{proof}
\subsection{Perfect mappings: case of an MV-algebra $L$}
In order to study preservation of compactness by preimages of fuzzy
functions we introduce the property of $(\alpha,\beta)$-perfectness of a
fuzzy function.
Since we shall operate with closed $L$-sets, from the beginning it will
be assumed that $L$ is an $MV$-algebra.
First we shall extend the notion of compactness for $L$-subsets of
$L$-topolog\-ical spaces.
We shall say that an $L$-set $S: X \to L$ is $(\alpha,\beta)$-compact if
for every family $\mathcal{A}$ of closed $L$-sets of $X$ the following
implication holds:
$$
\mbox{ if } S\wedge\bigl(\bigwedge_{A\in \mathcal{A}_0} A\bigr) \not\leq \beta^c
\mbox{ for every finite } \mathcal{A}_0 \subset \mathcal{A}
\mbox{ then }
S\wedge\bigl(\bigwedge_{A\in \mathcal{A}} A \bigr) \not\leq \alpha^c.
$$
Further, since the preimage $F^{\gets}(y_0): X \to L$ of a point
$y_0 \in Y$ under a fuzzy function $F: X \rightarrowtail Y$ is obviously determined
by the equality
$$F^{\gets}(y_0)(x) = \bigvee_{y\in Y} F(x,y)*y_0(y) = F(x,y_0),$$
the general definition of $(\alpha,\beta)$-compactness of an $L$-set in
this case means the following:
The preimage $F^{\gets}(y_0)$ of a point $y_0$ under a fuzzy function $F$
is $(\alpha,\beta)$-compact if for every family $\mathcal{A}$ of closed sets of
$X$ the following implication holds:
$$
\bigvee_x \Bigl(F(x,y_0)\wedge(\bigwedge_{A\in \mathcal{A}_0} A(x))\Bigr)
\not\leq \beta^c\ \
\forall \mathcal{A}_0 \subset \mathcal{A}, |\mathcal{A}_0| < \aleph_0
$$
implies
$$\bigvee_x \bigl(F(x,y_0) \wedge
(\bigwedge_{A\in \mathcal{A}} A(x))\bigr) \not\leq \alpha^c.$$
Now we can introduce the following
\begin{definition}
A continuous fuzzy mapping $F\colon (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ is
called $(\alpha,\beta)$-perfect if
\begin{itemize}
\item
$F$ is closed, i.e.\ $F^{\to}(A) \in \mathcal{C}_Y$ for every $A \in \mathcal{C}_X;$
\item
the preimage $F^{\gets}(y)$ of every point $y \in Y$ is
$(\alpha,\beta)$-compact.
\end{itemize}
\end{definition}
\begin{theorem}
Let $F: (X,E_X,\tau_X) \rightarrowtail (Y,E_Y,\tau_Y)$ be an
$(\alpha,\gamma)$-perfect fuzzy function such that $\mu(F) = \top$ and
$\sigma(F) = \top$.
If the space $(Y,E_Y,\tau_Y)$ is $(\gamma,\beta)$-compact, then the space
$(X,E_X,\tau_X)$ is $(\alpha,\beta)$-compact.
\end{theorem}
\begin{proof}
Let $\mathcal{A}$ be a family of closed $L$-sets in $X$ such that
$\bigwedge_{A\in\mathcal{A}_0} A \not \leq \beta^c$ for every finite subfamily $\mathcal{A}_0 \subset \mathcal{A}$.
Without loss of generality we may assume that $\mathcal{A}$ is closed under taking
finite meets.
Let $B := B_A := F^{\to}(A)$ and let $\mathcal{B} := \{B_A: A \in \mathcal{A} \}$.
Then, since $\mu(F) = \top$, by \ref{im-pr} (7) it follows that
$B \not \leq \beta^c\ \forall B \in \mathcal{B}$, and moreover, since $\mathcal{A}$ is
assumed to be closed under finite meets,
$$
\begin{array}{lll}
B_{A_1}\wedge\ldots\wedge B_{A_n}&
=&
F^{\to}(A_1)\wedge\ldots\wedge F^{\to}(A_n)\\
&
\geq&
F^{\to}(A_1 \wedge\ldots\wedge A_n)\\
&
=&
F^{\to}(A),
\end{array}
$$
for some $A \in \mathcal{A}$, and hence
$\bigwedge_{B\in\mathcal{B}_0}(B) \not \leq \beta^c$
for every finite subfamily $\mathcal{B}_0 \subset \mathcal{B}$.
Hence, by $(\gamma,\beta)$-compactness of the space $(Y,E_Y,\tau_Y)$ we
conclude that $\bigwedge_{B \in \mathcal{B}}(B) \not \leq \gamma^c$, and therefore
there exists a point $y_0 \in Y$ such that
$F^{\to}(A)(y_0) = B_A(y_0) \not \leq \gamma^c$ for all $A \in \mathcal{A}$.
Now, applying $(\alpha,\gamma)$-compactness of the preimage
$F^{\gets}(y_0)$ and recalling that $\mathcal{A}$ was assumed to be closed under
taking finite meets, we conclude that
$$\bigvee_x \bigl(F(x,y_0) \wedge
(\bigwedge_{A\in \mathcal{A}} A(x))\bigr)
\not\leq \alpha^c,$$
and hence, furthermore,
$$\bigwedge_{A\in\mathcal{A}} A \not\leq \alpha^c.$$
\end{proof}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\end{document} |
\begin{document}
\title{Asymptotic behavior and rigidity results for symmetric solutions of the elliptic system $\Delta u=W_u(u)$ \\}
\author{Nicholas D. Alikakos\footnote{\rm The first author was partially supported through the project PDEGE -- Partial Differential Equations
Motivated by Geometric Evolution, co-financed by the European Union -- European Social
Fund (ESF) and national resources, in the framework of the program Aristeia of the ``Operational
Program Education and Lifelong Learning'' of the National Strategic Reference Framework
(NSRF).}
\footnote{The research of N. Alikakos has been co-financed by the European Union -- European Social
Fund (ESF) and Greek national funds through the ``Operational Program
Education and Lifelong Learning'' of the National Strategic Reference
Framework (NSRF) -- Research Funding Program: THALES.}\ \ \ and Giorgio Fusco}
\date{}
\maketitle
\begin{abstract}
We study symmetric vector minimizers of the Allen-Cahn energy and establish various results concerning their structure and their asymptotic behavior.
\end{abstract}
2010 \textit{Mathematics Subject Classification}: Primary 35J47, 35J50; Secondary 35J20.
\section{Introduction}
The problem of describing the structure of bounded solutions $u:\Omega\rightarrow\mathbb{R}^m$ of the equation
\begin{eqnarray}\label{system-0}
\left\{\begin{array}{l}
\Delta u=f(u),\quad x\in\Omega \\
u=u_0,\quad x\in\partial\Omega,
\end{array}\right.
\end{eqnarray}
where $f:\mathbb{R}^m\rightarrow\mathbb{R}^m$ is a smooth map and $\Omega\subset\mathbb{R}^n$ is a smooth domain that can be bounded or unbounded and may also enjoy symmetry properties, is a difficult and important problem which has attracted the interest of many authors in the last twenty-five years; see \cite{gnn}, \cite{bcn}, \cite{bcn1} and \cite{eh}, just to mention a few. Questions concerning monotonicity, symmetry and asymptotic behavior are the main objectives of these investigations. Most of the existing literature concerns the scalar case $m=1$ where a systematic use of the maximum principle and its consequences are the main tools at hand. For the vector case $m\geq 2$ we mention the works \cite{bgs} and \cite{gs} where the control of the asymptotic behavior of solutions was basic for proving existence. In this paper we are interested in the case where $f(u)=W_u(u)$ is the gradient of a potential $W:\mathbb{R}^m\rightarrow\mathbb{R}$ and $u$ is a minimizer for the action functional $\int\frac{1}{2}\vert\nabla v\vert^2+W(v)$ in the sense of the following
\begin{definition}\label{definition-stable}
A map $u\in C^2(\Omega;\mathbb{R}^m)\cap L^\infty(\Omega;\mathbb{R}^m)$, $\Omega\subset\mathbb{R}^n$ an open set, is said to be a \underline{{\it minimizer}} or \underline{{\it minimal}} if for each bounded open Lipschitz set $\Omega^\prime\subset\Omega$ it results
\begin{eqnarray}
J_{\Omega^\prime}(u)=\min_{v\in W_0^{1,2}(\Omega^\prime;\mathbb{R}^m)} J_{\Omega^\prime}(u+v),\quad\quad
J_{\Omega^\prime}(v)=\int_{\Omega^\prime}\frac{1}{2}\vert\nabla v\vert^2+W(v),
\end{eqnarray}
that is $u|_{\Omega^\prime}$ is an absolute minimizer in the set of $W^{1,2}(\Omega^\prime;\mathbb{R}^m)$ maps which coincide with $u$ on $\partial\Omega^\prime$.
\end{definition}
Clearly if $u:\Omega\rightarrow\mathbb{R}^m$ is minimal then it is a solution of the Euler-Lagrange equation associated to the functional $J_{\Omega^\prime}$ which is the vector Allen-Cahn equation
\begin{equation}\label{system}
\Delta u=W_u(u),\quad x\in\Omega.
\end{equation}
We will work in the context of reflection symmetries.
Our main results are Theorem \ref{main} on the asymptotic behavior of symmetric minimizers and Theorem \ref{main-1} and Theorem \ref{triple} on the {\it rigidity} of symmetric minimizers. Rigidity meaning that, under suitable assumptions, a symmetric minimizer $u:\mathbb{R}^n\rightarrow\mathbb{R}^m$ must in effect depend on a number of variables $k<n$ strictly less than the dimension $n$ of the domain space. These theorems, in the symmetric setting, are vector counterparts of analogous results which are well known in the scalar case $m=1$ \cite{bar} \cite{far}. However in the vector case there is more structure as we explain after the statement of Theorem \ref{main-2}. In \cite{af3} we discuss a rigidity theorem where the assumption of symmetry is removed.
We let $G$ be a reflection group acting both on the domain space $\Omega\subseteq\mathbb{R}^n$ and on the target space $\mathbb{R}^m$. We assume that $W:\mathbb{R}^m\rightarrow\mathbb{R}$ is a $C^3$ potential such that
\begin{description}
\item[${\bf H}_1$]$W$ is symmetric with respect to $G$: $W(g u)=W(u),\;\text{ for }\;g\in G,\; u\in\mathbb{R}^m$.
\end{description}
For Theorem \ref{main} and Theorem \ref{main-1} $G=S$ the group of order $2$ generated by the reflection $\mathbb{R}^d\ni z\mapsto\hat{z}\in\mathbb{R}^d$ in the plane $\{z_1=0\}$:
\[\hat{z}=(-z_1,z_2,\ldots,z_d),\;d=n,\,m.\]
In this case the symmetry of $W$ is expressed by $W(\hat{u})=W(u),\;u\in\mathbb{R}^m$. For Theorem \ref{triple} $G=T$ the group of order $6$ of the symmetries of the equilateral triangle. $T$ is generated by the reflection $\gamma$ in the plane $\{z_2=0\}$ and $\gamma_\pm$ in the plane $\{z_2=\pm\sqrt{3}z_1\}$.
We let $F\subset\mathbb{R}^d$, $d=n$ or $d=m$ a fundamental region for the action of $G$ on $\mathbb{R}^d$. If $G=S$ we take $F=\mathbb{R}_+^d=\{z:z_1>0\}$. If $G=T$ we take $F=\{z:0<z_2<\sqrt{3}z_1,\;z_1>0\}$.
\begin{description}
\item[${\bf H}_2$] There exists $a\in\overline{F}$ such that:
\begin{eqnarray}
0=W(a)\leq W(u),\; u\in\overline{F}.
\end{eqnarray}
Moreover $a$ is nondegenerate in the sense that the quadratic form $D^2W(a)(z,z)$ is positive definite.
\end{description}
In the symmetric setting we assume minimality in the class of symmetric variations:
\begin{definition}\label{definition-stable-s} Assume that $\Omega\subset\mathbb{R}^n$ and
$u\in C^2(\Omega;\mathbb{R}^m)\cap L^\infty(\Omega;\mathbb{R}^m)$, are symmetric
\begin{equation}\label{symmetric-equiv}
\begin{split}
&x\in\Omega\;\Rightarrow\;g x \in\Omega,\;\text{ for }\;g\in G,\\
&u(g x )= g u(x),\;\text{ for }\;g\in G,\;x\in\Omega.
\end{split}
\end{equation} Then $u$ is said to be a symmetric minimizer if for each bounded open symmetric Lipschitz set $\Omega^\prime\subset\Omega$ and for each symmetric $v\in W_0^{1,2}(\Omega^\prime;\mathbb{R}^m)$ it results
\begin{eqnarray}
J_{\Omega^\prime}(u)\leq J_{\Omega^\prime}(u+v).
\end{eqnarray}
\end{definition}
In the following by a minimizer we will always mean a symmetric minimizer in the sense of the definition above.
\begin{theorem}\label{theorem-1}
Assume $G=S$ and assume that $W$ satisfies ${\bf H}_1-{\bf H}_2$. Assume that $\Omega\subseteq\mathbb{R}^n$ is {\it convex-symmetric} in the sense that
\begin{eqnarray}
x=(x_1,\dots,x_n)\in\Omega\;\Rightarrow\;(t x_1,\dots,x_n)\in\Omega, \text{ for } \vert t\vert\leq 1.
\end{eqnarray}
Let $\mathcal{Z}=\{z\in\mathbb{R}^m:z\neq a, W(z)=0\}$ and let
$u:\Omega\rightarrow\mathbb{R}^m$ a minimizer that satisfies
\begin{eqnarray}
\vert u(x)-z\vert>\delta,\;\text{ for }\;z\in\mathcal{Z},\; d(x,\partial\Omega^+)\geq d_0,\;x\in\Omega^+,
\end{eqnarray}
$\Omega^+=\{x\in\Omega:x_1>0\}$, and
\begin{equation}\label{assumed-bound}
\vert u\vert+\vert\nabla u\vert\leq M,\;\text{ for }\;x\in\Omega,
\end{equation}
for some $M>0$.
Then there exist $k_0, K_0>0$ such that
\begin{eqnarray}\label{exponential-0}
\vert u-a\vert\leq K_0e^{-k_0 d(x,\partial\Omega^+)},\;\text{ for }\;x\in\Omega^+.
\end{eqnarray}
\end{theorem}
\begin{proof}
A minimizer $u$ satisfies the assumptions of Theorem 1.2 in \cite{fu}, which implies the result.
\end{proof}
Examples of minimizers that satisfy the hypothesis of Theorem \ref{theorem-1} are provided (see \cite{af2}) by the entire equivariant solutions of (\ref{system}) constructed in \cite{af1}, \cite{a1}, \cite{f}.
The gradient bound in (\ref{assumed-bound}) is a consequence of the smoothness of $\Omega$ or, as in the case of the entire solutions referred to above, follows from the fact that $u$ is the restriction to a non smooth set of a smooth map.
We denote $C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ the set of lipschitz symmetric maps $v:\overline{\Omega}\rightarrow\mathbb{R}^m$ that satisfy the bounds
\begin{equation}\label{bounds}
\begin{split}
&\|v\|_{C^{0,1}(\overline{\Omega},\mathbb{R}^m)}\leq M,\\
&\vert v-a\vert +\vert\nabla v\vert\leq K_0e^{-k_0 d(x,\partial\Omega^+)},\;x\in\Omega^+.
\end{split}
\end{equation}
We remark that from (\ref{exponential-0}) and elliptic regularity, after redefining $k_0$ and $K_0$ if necessary, we have
\begin{equation}\label{gradu-expo}
u\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m),
\end{equation}
for the minimizer in Theorem \ref{theorem-1}.
\begin{theorem}\label{main}
Assume $W$, $\Omega$ and $u:\Omega\rightarrow\mathbb{R}^m$ as in Theorem \ref{theorem-1}. Assume moreover that
\begin{description}
\item[${\bf H}_3$] The problem
\begin{eqnarray}
\left\{\begin{array}{l}
u^{\prime\prime}=W_u(u),\quad s\in\mathbb{R} \\
u(-s)=\hat{u}(s),\;s\in\mathbb{R},\\
\lim_{s\rightarrow+\infty}u(s)=a,
\end{array}\right.
\end{eqnarray}
has a unique solution $\bar{u}:\mathbb{R}\rightarrow\mathbb{R}^m.$
\item[${\bf H}_4$] the operator $T$ defined by
\begin{eqnarray}
D(T)=W_S^{2,2}(\mathbb{R},\mathbb{R}^m),\quad\quad Tv=-v^{\prime\prime}+W_{uu}(\bar{u})v,
\end{eqnarray}
where $W_S^{2,2}(\mathbb{R},\mathbb{R}^m)\subset W^{2,2}(\mathbb{R},\mathbb{R}^m)$ is the subspace of symmetric maps, has a trivial kernel.
\end{description}
Then there exist $k, K>0$ such that
\begin{eqnarray}\label{exp-baru}
\vert u(x)-\bar{u}(x_1)\vert\leq Ke^{-kd(x,\partial\Omega)},\quad x\in\Omega.
\end{eqnarray}
\end{theorem}
\begin{theorem}\label{main-1}
Assume that $\Omega=\mathbb{R}^n$ and that $W$ and $u:\mathbb{R}^n\rightarrow\mathbb{R}^m$ are as in Theorem \ref{main}. Then
$u$ is unidimensional:
\begin{equation}
u(x)=\bar{u}(x_1),\;x\in\mathbb{R}^n.\hskip3cm
\end{equation}
\end{theorem}
\begin{theorem}\label{main-2}
Assume $\Omega=\{x\in\mathbb{R}^n:\;x_n>0\}$, $W$ and $u:\Omega\rightarrow\mathbb{R}^m$ as in Theorem \ref{main}. Then
\[u(x)=\bar{u}(x_1),\;\text{ on }\;\partial\Omega\;\Rightarrow\;u(x)=\bar{u}(x_1),\;\text{ on }\;\Omega.\]
\end{theorem}
From \cite{af1}, \cite{a1} and \cite{f}, we know that given a finite reflection group $G$, provided $W$ is invariant under $G$, there exists a $G$-equivariant solution $u:\mathbb{R}^n\rightarrow\mathbb{R}^m$ of the system (\ref{system}). It is natural to ask about the asymptotic behavior of these solutions. In particular, given a unit vector $\nu=(\nu_1,\dots,\nu_n)\in\mathbb{R}^n$ one may wonder about the existence of the limit
\begin{eqnarray}\label{limit}
\lim_{\lambda\rightarrow +\infty}u(x^\prime+\lambda\nu)=\tilde{u}(x^\prime),
\end{eqnarray}
where $x^\prime$ is the projection of $x=x^\prime +\lambda\nu$ on the hyperplane orthogonal to $\nu$. One can conjecture that this limit does indeed exist and that $\tilde{u}$ is a solution of the same system equivariant with respect to the subgroup $G_\nu\subset G$ that leave $\nu$ fixed, the stabilizer of $\nu$. In \cite{af1}, \cite{a1} and \cite{f} an exponential estimate analogous to (\ref{exponential-0}) in Theorem \ref{theorem-1} was established. This gives a positive answer to this conjecture for the case where $\nu$ is inside the set $D=\text{Int}\cup_{g\in G_a}g\overline{F}$. Here $F$ is a fundamental region for the action of $G$ on $\mathbb{R}^d$, $d=n,\,m$ and $G_a\subset G$ is the subgroup that leave $a$ fixed. Under the assumptions ${\bf H}_3$ and ${\bf H}_4$ Theorem \ref{main} goes one step forward and shows that the conjecture is true when $\nu$ belongs to the interior of one of the walls of the set $D$ above and $G_\nu$ is the subgroup of order two generated by the reflection with respect to that wall. In the proof of Theorem \ref{main} the estimate (\ref{exponential-0}) is basic. Once the exponential estimate in Theorem \ref{main} is established, we conjecture that, under assumptions analogous to ${\bf H}_3$ and ${\bf H}_4$, the approach developed in the proof of Theorem \ref{main} can be used to handle the case where $\nu$ belongs to the intersection of two walls of $D$. We also expect that, under the assumption that at each step $\tilde{u}$ is unique and hyperbolic, the process can be repeated to show the whole hierarchy of limits corresponding to all possible choice of $\nu$ and always $\tilde{u}$ is a solution of the system equivariant with respect to the subgroup $G_\nu$. This program is motivated by the analogy between equivariant connection maps and minimal cones \cite{a2}. Theorem \ref{triple} below is an example of such a splitting result \cite{gmt} in the diffused interface set-up.
Our next result concerns minimizers equivariant with respect to the symmetry group $T$ of the equilateral triangle. We can imagine that $T=G_\nu$ for some $\nu$ that belongs to the intersection of two walls of $D$.
The following assumptions ${\bf H}^\prime_3$ and ${\bf H}^\prime_4$, in the case at hand $G=T$, correspond to the assumption ${\bf H}_3$ and ${\bf H}_4$ in Theorem \ref{main}
\begin{description}
\item[${\bf H}^\prime_3$] The problem
\begin{eqnarray}
\left\{\begin{array}{l}
u^{\prime\prime}=W_u(u),\quad s\in\mathbb{R} \\
u(-s)=\gamma u(s),\;s\in\mathbb{R},\\
\lim_{s\rightarrow+\infty}u(s)=\gamma_\pm a,
\end{array}\right.
\end{eqnarray}
has a unique solution $\bar{u}:\mathbb{R}\rightarrow\mathbb{R}^m.$
\item[${\bf H}^\prime_4$] the operator $T$ defined by
\begin{eqnarray}
D(T)=W_\gamma^{2,2}(\mathbb{R},\mathbb{R}^m),\quad\quad Tv=-v^{\prime\prime}+W_{uu}(\bar{u})v,
\end{eqnarray}
where $W_\gamma^{2,2}(\mathbb{R},\mathbb{R}^m)\subset W^{2,2}(\mathbb{R},\mathbb{R}^m)$ is the subspace of the maps that satisfy $u(-s)=\gamma u(s)$, has a trivial kernel.
\end{description}
Then we have the assumptions concerning uniqueness and hyperbolicity of $\tilde{u}$
\begin{description}
\item[${\bf H}_5$] There is a unique $G$-equivariant solution $\tilde{u}:\mathbb{R}^2\rightarrow\mathbb{R}^m$ of (\ref{system})
\begin{equation}\label{g-equivariance}
\tilde{u}(g s)=g \tilde u(s),\;\text{ for }\;g\in T,\;s\in\mathbb{R}^2
\end{equation}
that satisfies the estimate
\begin{equation}\label{exp-est-two}
\vert\tilde{u}(s)-a\vert\leq K e^{-kd(s,\partial D)},\;\text{ for }\;s\in\mathbb{R}^2,
\end{equation}
where $D=\mathrm{Int}\bigl(\overline{F}\cup \gamma\overline{F}\bigr)$.
\item[${\bf H}_6$] the operator $\mathcal{T}$ defined by
\begin{eqnarray}
D(\mathcal{T})=W_T^{2,2}(\mathbb{R}^2,\mathbb{R}^m),\quad\quad \mathcal{T}v=-\Delta v+W_{uu}(\tilde{u})v,
\end{eqnarray}
where $W_T^{2,2}(\mathbb{R}^2,\mathbb{R}^m)\subset W^{2,2}(\mathbb{R}^2,\mathbb{R}^m)$ is the subspace of $T$-equivariant maps, has a trivial kernel.
\end{description}
We are now in the position of stating
\begin{theorem}\label{triple}
Assume that $W$ satisfies ${\bf H}_1$ and ${\bf H}_2$ with $a=(1,0)$ and moreover that $0=W(a)<W(u)$ for $u\in\overline{F}$. Assume that ${\bf H}^\prime_3$, ${\bf H}^\prime_4$ and ${\bf H}_5$, ${\bf H}_6$ hold.
Let $u:\mathbb{R}^n\rightarrow\mathbb{R}^m$, $n\geq 3$ and $m\geq 2$ be a $T$-equivariant minimizer that satisfies (\ref{assumed-bound}) and, for some $\delta, d_0>0$, the condition
\begin{equation}\label{stay-away}
\vert u(x)-\gamma_\pm a\vert\geq\delta\;\text{ for }\;d(x,\partial D)>d_0,\;x\in D,
\end{equation}
where $D=\{x\in\mathbb{R}^n: \vert x_2\vert<\sqrt{3} x_1,\;x_1>0\}$.
Then $u$ is two-dimensional:
\begin{equation}\label{two}
u(x)=\tilde{u}(x_1,x_2),\;x\in\mathbb{R}^n.
\end{equation}
\end{theorem}
\begin{remark}
If instead of a minimizers defined on $\mathbb{R}^n$ we had considered a minimizer defined on a subset $\Omega\subset\mathbb{R}^n$, instead of (\ref{two}), the conclusion of Theorem \ref{triple} would be exponential convergence of $u$ to $\tilde{u}$ similar to (\ref{exp-baru}).
\end{remark}
Theorem \ref{triple} is an example of a De Giorgi type result for systems where monotonicity is replaced by minimality (see \cite{aac}, \cite{jm} and Section 3 in \cite{s}). It is the PDE analog of the fact that a minimal cone $\mathcal{C}$ in $\mathbb{R}^n$ with the symmetry of the equilateral triangle is necessarily of the form $\mathcal{C}=\tilde{\mathcal{C}}\times\mathbb{R}^{n-2}$, where $\tilde{\mathcal{C}}$ is the triod in the plane.
For De Giorgi type results for systems, for general solutions, but under
monotonicity hypotheses on the potential $W$, we refer to Fazly and
Ghoussoub \cite{fg}.
The rest of the paper is devoted to the proofs. In Section \ref{sec-main} we prove Theorem \ref{main}: in Section \ref{basic} and Section \ref{replacement-lemmas} we prove a number of lemmas that are basic for the proof of Theorem \ref{main}, which we conclude in Sections \ref{proof-main} and \ref{sec-exp}. Theorems \ref{main-1} and \ref{main-2} and Theorem \ref{triple} are proved in Section \ref{main-final} and Section \ref{sec-triple}.
\section{The proof of Theorem \ref{main}}\label{sec-main}
The proof of Theorem \ref{main} that we present here, from an abstract point of view, has a lot in common with the proof of Theorem 1.2 in \cite{fu}. We will remark on this point later and spend a few words to motivate the various lemmas that compose the proof of Theorem \ref{main}. We begin with some notation and two basic lemmas.
\subsection{Basic lemmas}\label{basic}
In the following we use the notation $x=(s,\xi)$ with $x_1=s$ and $(x_2,\dots,x_n)=\xi$.
From (\ref{bounds}) it follows that, if $(l,\xi)\in\Omega^+$ satisfies $ d((l,\xi),\partial\Omega^+)\geq l$, then the map $s\rightarrow u(s,\xi), s\in[-l,l],$ that we still denote with $u$ satisfies
the bound
\begin{eqnarray}\label{v-vs-exp-bound}
\vert u-a\vert+\vert u_s\vert\leq K_0e^{-k_0 s}, \text{ for } s\in[0,l].
\end{eqnarray}
We denote by $E_l^\mathrm{xp}\subset C^1([-l,l]:\mathbb{R}^m)$ the set of symmetric maps $v:[-l,l]\rightarrow\mathbb{R}^m$ that satisfy
\begin{equation}\label{define-exp}
\vert v\vert+\vert v_s\vert\leq K e^{-k s}, \text{ for } s\in[0,l]
\end{equation}
for some $k, K>0$. We refer to $E_l^\mathrm{xp}$ as the exponential class.
\noindent We let $T_l$ the operator defined by
\begin{eqnarray}
D_l(T_l)=\{v\in W_S^{2,2}([-l,l],\mathbb{R}^m):v(\pm l)=0\},\quad\quad T_lv=-v^{\prime\prime}+W_{uu}(\bar{u})v.
\end{eqnarray}
\noindent For $l\in(0,+\infty]$ we let $\langle v,w\rangle_l=\int_{-l}^lvw$ denote the inner product in $L^2((-l,l),\mathbb{R}^m)$. We let $\|v\|_l=\langle v, v\rangle_l^{\frac{1}{2}}$ and
$\|v\|_{1,l}=\|v\|_{W^{1,2}([-l,l],\mathbb{R}^m)}$.
\noindent For the standard inner product in $\mathbb{R}^m$ we use the notation $(\cdot,\cdot)$.
\noindent It follows directly from (\ref{define-exp}) that $\|v\|_{1,l}\leq C=\frac{K}{\sqrt{k}}$. We set
\begin{eqnarray}
\mathcal{B}_l^{1,2}:=\{v\in W_S^{1,2}([-l,l],\mathbb{R}^m):
v(\pm l)=0;\; \|v\|_{1,l}\leq C\},
\end{eqnarray}
where $W_S^{1,2}([-l,l],\mathbb{R}^m)$ is the subspace of symmetric maps. Let $\mathbb{S}$ be defined by
\begin{eqnarray}
\mathbb{S}=\{\nu\in W_S^{1,2}([-l,l],\mathbb{R}^m): \|\nu\|_l=1\}
\end{eqnarray}
and set $q_\nu=\max\{q:q\nu\in\mathcal{B}_l^{1,2}\}$.
\begin{lemma}\label{strict-minimizer}
Assume $H_1$ and $H_2$ as in Theorem \ref{main} and let ${\bf e}_l:\mathcal{B}_l^{1,2}\rightarrow\mathbb{R}$ be defined by
\begin{eqnarray}
{\bf e}_l(v):=\frac{1}{2}(\langle \bar{u}_s+v_s, \bar{u}_s+v_s\rangle_l- \langle \bar{u}_s, \bar{u}_s\rangle_l)+\int_{-l}^l (W(\bar{u}+v)-W(\bar{u})).
\end{eqnarray}
Then there exist $l_0>0,\, q^\circ >0 \text{ and } c>0$ such that, for all $l\geq l_0$, we have
\begin{eqnarray}\label{iota-properties}
\left\{\begin{array}{l}
D_{qq}{\bf e}_l(q\nu)\geq c^2,\quad \text{ for } q\in[0,q^\circ]\cap[0,q_\nu],\;\nu\in\mathbb{S},\\\\
{\bf e}_l(q\nu)\geq{\bf e}_l(q^\circ\nu),\;\, \text{ for } q^\circ\leq q\leq q_\nu,\;\nu\in\mathbb{S},\\\\
{\bf e}_l(q\nu)\geq \tilde{{\bf e}}_l(p,q,\nu):={\bf e}_l(p\nu)+D_q{\bf e}_l(p\nu)(q-p)
,\\\quad\hskip4cm \text{ for } 0\leq p<q\leq q_\nu\leq q^\circ,\;\nu\in\mathbb{S},\\\\
D_p\tilde{{\bf e}}_l(p,q,\nu)\geq 0 ,\quad \text{ for } 0\leq p<q\leq q_\nu\leq q^\circ,\;\nu\in\mathbb{S}.
\end{array}\right.
\end{eqnarray}
\end{lemma}
\begin{remark}
${\bf e}_l$ is a kind of an {\it effective} potential. Indeed, as we shall see, in the proof of Theorem \ref{main} the map $L^2((-l,l),\mathbb{R}^m)\ni q\mapsto{\bf e}_l(q\nu)$ plays a role similar to the one of the usual potential $\mathbb{R}\ni q\mapsto W(a+q\nu)$ in the proof of Theorem 1.2 in \cite{fu}.
\end{remark}
\begin{proof}
Differentiating ${\bf e}_l(q\nu)$ twice with respect to $q$ gives
\begin{eqnarray}
D_{qq}{\bf e}_l(q\nu)&=&\int_{-l}^l(\nu_s,\nu_s)+\int_{-l}^lW_{uu}(\bar{u}+q\nu)(\nu,\nu)\\\nonumber
&=&
D_{qq}{\bf e}_l(q\nu)|_{q=0}+\int_{-l}^l(W_{uu}(\bar{u}+q\nu)-W_{uu}(\bar{u}))(\nu,\nu).
\end{eqnarray}
From the interpolation inequality:
\begin{equation}
\begin{split}
\|v\|_{L^\infty}\leq &\sqrt{2}\|v\|_{1,l}^{\frac{1}{2}}\|v\|_l^{\frac{1}{2}},\\
\leq &\sqrt{2}\|v\|_{1,l},
\end{split}
\end{equation}
for $q\nu\in\mathcal{B}_l^{1,2}$
we get via the second inequality
\begin{equation}
\|q\nu\|_{L^\infty}\leq \sqrt{2}C,
\end{equation}
and via the first
\begin{equation}
\|\nu\|_{L^\infty}\leq \sqrt{2}C^{\frac{1}{2}}q^{-\frac{1}{2}}.
\end{equation}
Therefore we have
\begin{eqnarray}\label{w-uu-estimate}
\vert W_{u_iu_j}(\bar{u}(s)+ q\nu(s))-W_{u_iu_j}(\bar{u}(s))\vert\leq
\sqrt{2}C^{\frac{1}{2}}\overline{W}^{\prime\prime\prime}q^{\frac{1}{2}},
\end{eqnarray}
where $\overline{W}^{\prime\prime\prime}$ is defined by
\begin{eqnarray}
\overline{W}^{\prime\prime\prime}:=\max_{\left.\begin{array}{l}
1\leq i,j,k\leq m\\
s\in\mathbb{R}, \vert\tau\vert\leq 1
\end{array}\right.}W_{u_iu_ju_k}(\bar{u}(s)+ \tau\sqrt{2}C).
\end{eqnarray}
From (\ref{w-uu-estimate}) we get
\begin{eqnarray}\label{int-wuu-wuu-estimate}
\vert\int_{-l}^l(W_{uu}(\bar{u}+q\nu)-W_{uu}(\bar{u}))(\nu,\nu)\vert\leq
C_1q^\frac{1}{2},
\end{eqnarray}
where $C_1>0$ is a constant independent of $l$.
We now observe that
\begin{eqnarray}\label{tl-equal-t}
D_{qq}{\bf e}_l(q\nu)|_{q=0}=\langle T_l\nu,\nu\rangle_l=
\langle T\tilde{\nu},\tilde{\nu}\rangle_\infty,
\end{eqnarray}
where $\tilde{\nu}$ is the trivial extension of $\nu$ to $\mathbb{R}$. $T$ is a self-adjoint operator which is positive by the minimality of $\bar{u}$. Therefore assumption ${\bf H}_5$ implies that the point spectrum of $T$ is bounded below by a positive number. From ${\bf H}_2$ the smallest eigenvalue $\mu$ of the matrix $W_{uu}(a)$ is positive and Persson's Theorem in \cite{ag} implies that also the remaining part of the spectrum of $T$, the essential spectrum, is bounded below by $\mu>0$. It follows that the spectrum of $T$ is bounded below by a positive constant $0<\tilde{\mu}\leq\mu$. From this (\ref{tl-equal-t}) and Theorem 13.31 in \cite{r} it follows
\begin{eqnarray}
D_{qq}{\bf e}_l(q\nu)|_{q=0}\geq\tilde{\mu},
\end{eqnarray}
which together with (\ref{int-wuu-wuu-estimate}) implies
\begin{eqnarray}
D_{qq}{\bf e}_l(q\nu)\geq\tilde{\mu}-C_1q^\frac{1}{2}\geq c^2:=\frac{\tilde{\mu}}{2},\;\;\text{ for }\,q\in[0,\bar{q}]\cap[0,q_\nu],
\end{eqnarray}
where $\bar{q}=\frac{1}{4}\frac{\tilde{\mu}^2}{C_1^2}$.
This concludes the proof of (\ref{iota-properties})$_1$.
We now consider the problem
\begin{eqnarray}\label{constrained-minimization}
\min_{\left.\begin{array}{l}
v\in\mathcal{B}_l^{1,2}\\
\|v\|_l\geq \bar{q}
\end{array}\right.} {\bf e}_l(v)
\end{eqnarray}
Since the constraint in problem (\ref{constrained-minimization}) is closed with respect to weak convergence in $W_0^{1,2}$, if $\bar{v}_l$ is a minimizer of problem (\ref{constrained-minimization}), we have $\bar{v}_l\neq 0$.
This implies
\begin{eqnarray}
{\bf e}_l(\bar{v}_l)=\alpha_l>0.
\end{eqnarray}
Indeed the uniqueness assumption about the minimizer $\bar{u}$ implies that $v\equiv 0$ is the unique minimizer of ${\bf e}_l$. We have
\begin{eqnarray}\label{alpha-infinity}
\liminf_{l\rightarrow+\infty}\alpha_l=\alpha>0.
\end{eqnarray}
To prove this we assume that instead there is a sequence $l_k$ such that $\lim_{k\rightarrow+\infty}\alpha_{l_k}=0$. We can also assume that the sequence
$\tilde{\bar{v}}_{l_k}$ of the trivial extensions of $\bar{v}_{l_k}$ converges weakly in $W^{1,2}$ to a map $\bar{v}$ which by lower semicontinuity satisfies
\begin{eqnarray}
{\bf e}_\infty(\bar{v})=0.
\end{eqnarray}
This is in contradiction with the assumption that $v\equiv 0$ is the unique minimizer of ${\bf e}_\infty$; indeed, the constraint in problem (\ref{constrained-minimization}) persists in the limit and implies $\bar{v}\neq 0$. This establishes (\ref{alpha-infinity}) and concludes the proof of (\ref{iota-properties})$_2$ with $q^\circ=\min\{\bar{q},\alpha\}$.
\noindent The last two inequalities in (\ref{iota-properties}) are straightforward consequences of (\ref{iota-properties})$_1$.
\end{proof}
\begin{lemma}\label{l-infinity-less-l-2}
Let $u$ be as in Theorem \ref{theorem-1} and assume that
\begin{eqnarray}\label{lemma-assumption}
(l,\xi)\in\Omega^+,\; d((l,\xi),\partial\Omega^+)\geq l,
\end{eqnarray}
then
there is a constant $C_2>0$ independent of $l>1$, such that
\begin{eqnarray}\label{l-infinity-less-l-2-1}
\|u(\cdot,\xi)-\bar{u}\|_{L^\infty([-l,l],\mathbb{R}^m)}\leq C_2
\| u(\cdot,\xi)-\bar{u}\|_l^\frac{2}{3}.
\end{eqnarray}
\end{lemma}
\begin{proof}
From (\ref{lemma-assumption}) $u(\cdot,\xi)$ satisfies (\ref{v-vs-exp-bound}), and so does $\bar{u}$. Hence there is $\bar{s}\in[0,l]$ such
that $\vert u(s,\xi)-\bar{u}(s)\vert\leq m:=\vert u(\bar{s},\xi)-\bar{u}(\bar{s})\vert$ for $s\in[-l,l]$. From this and $\vert u(\cdot,\xi)_s-\bar{u}_s\vert\leq 2K_0$ it follows
\begin{eqnarray}
\vert u(s,\xi)-\bar{u}(s)\vert\geq m
-2K_0\vert s-\bar{s}\vert,\;\,\text{ for } s\in[-l,l]\cap[\bar{s}-\frac{m}{2K_0},\bar{s}+\frac{m}{2K_0}]
\end{eqnarray}
and a simple computation gives (\ref{l-infinity-less-l-2-1}).
\end{proof}
Before continuing with the proof, we explain the meaning of the lemmas that follow.
Given $l, r>0$ and $\varsigma\in\mathbb{R}^{n-1}$ we let $\mathcal{C}_l^r(\varsigma)\subset\mathbb{R}^n$ be the cylinder
\begin{eqnarray}
\mathcal{C}_l^r(\varsigma):=\{(s,\xi):-l<s<l;\,\vert \xi-\varsigma\vert<r\}.
\end{eqnarray}
Lemma \ref{lemma-1}, Lemma \ref{lemma-2} and Lemma \ref{lemma-w-q} describe successive deformations through which, fixed $\lambda>0$ and $\varrho>0$ and $\bar{q}\in(0,q^\circ)$, we transform the minimizer $u$ first into a map $v$ then into $w$ and finally into a map $w^{\bar{q}}$ that satisfies the conditions
\begin{equation}\label{conditions-w-q}
\begin{split}
& w^{\bar{q}}=u,\;\text{ on }\;\Omega\setminus\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma),\\
& w^{\bar{q}}(l+\frac{\lambda}{2},\xi)=\bar{u}(l+\frac{\lambda}{2}),\;\text{ for }\;\vert\xi-\varsigma\vert\leq r+\frac{\varrho}{2},\\
&\|w^{\bar{q}}(\cdot,\xi)-\bar{u}(\cdot)\|_{l+\frac{\lambda}{2}}\leq\bar{q},\;\text{ for }\;\vert\xi-\varsigma\vert\leq r+\frac{\varrho}{2}
\end{split}
\end{equation}
The deformations described in these lemmas are complemented by precise quantitative estimates on the amount of energy required for the deformation (see (iii) in Lemma \ref{lemma-1}, (iii) in Lemma \ref{lemma-2} and (\ref{quantitative-w-wq}) in Lemma \ref{lemma-w-q}).
Lemma \ref{lemma-1} describes the deformation of $u$ into a map $v$ that coincides with $\bar{u}$ on the lateral boundary of $\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\varrho}(\varsigma)$:
\begin{equation}\label{coincide-lateral}
\begin{split}
& v=u,\;\text{ outside }\;\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)\setminus\overline{\mathcal{C}}_l^{r+2\varrho}(\varsigma)\\
&\|w (\cdot,\xi)-\bar{u}(\cdot)\|_{l+\frac{\lambda}{2}}\leq\bar{q},\;\text{ for }\;\vert\xi-\varsigma\vert= r+\frac{\varrho}{2}.
\end{split}
\end{equation}
Lemma \ref{lemma-2} describes the deformation of $v$ into a map $w$ that satisfies
\begin{equation}\label{coincide-l2}
\begin{split}
& w=v,\;\text{ outside }\;\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\varrho}(\varsigma)\setminus\overline{\mathcal{C}}_{l+\frac{\lambda}{2}}^r(\varsigma)\\
&\|w (\cdot,\xi)-\bar{u}(\cdot)\|_{l+\frac{\lambda}{2}}\leq\bar{q},\;\text{ for }\;\vert\xi-\varsigma\vert= r+\frac{\varrho}{2}.
\end{split}
\end{equation}
Lemma \ref{quantitative-estimate0} and Corollary \ref{corollary} show that we can replace $w^{\bar{q}}$ with a map $\omega$ that coincides with $w^{\bar{q}}$ outside $\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)$ and has less energy than $w^{\bar{q}}$. Moreover Corollary \ref{corollary} yields a quantitative estimate for the energy difference.
In Sec.\ref{proof-main} we put together all these energy estimates and show (see Proposition \ref{l-2-bound}) that the assumption that
\[\|u (\cdot,\varsigma)-\bar{u}(\cdot)\|_l\geq q^\circ\]
if $r>0$ is sufficiently large, is incompatible with the minimality of $u$. Thus establishing that, if a sufficiently large cylinder $\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)$ is contained in $\Omega$, then we have the estimate
\[\|u (\cdot,\varsigma)-\bar{u}(\cdot)\|_l< q^\circ,\]
which is the main step in the proof of Theorem \ref{main}.
\subsection{Replacement Lemmas}\label{replacement-lemmas}
\begin{lemma}\label{lemma-1}
Let $\lambda \text{ and } \varrho>0$ be fixed. Assume that $\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)\subset\Omega$ satisfies
\begin{eqnarray}\label{distance-m-l}
d(\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma),\partial\Omega)\geq l+\lambda.
\end{eqnarray}
Then there exists a map $v\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ such that
\begin{description}
\item[(i)] $v=u,\;\text{ on }\, \overline{\Omega}\setminus
(\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)\setminus
\overline{\mathcal{C}}_{l}^{r+2\varrho}(\varsigma))$,
\item[(ii)] $v(l+\frac{\lambda}{2},\xi)=\bar{u}(l+\frac{\lambda}{2}),\;\text{ for }\, \vert\xi-\varsigma\vert\leq r+\varrho$.
\item[(iii)] $J_{\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)}(v)-
J_{\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)}(u)\leq C_0 r^{n-1}e^{-2k l}$,
\end{description}
where $C_0>0$ is a constant independent of $l$ and $r$.
\end{lemma}
\begin{proof}
For $(s,\xi)\in \overline{\mathcal{C}}_{l+\lambda}^{r+\varrho}(\varsigma)
\setminus\mathcal{C}_{l}^{r+\varrho}(\varsigma)$ we define $v$ by
\begin{eqnarray}\label{v-definition-1}
v(s,\xi)=
(1-\vert 1-2\frac{s-l}{\lambda}\vert)\bar{u}(s)+ \vert 1-2\frac{s-l}{\lambda}\vert u(s,\xi),\hskip2cm\\\nonumber\hskip4cm s\in[l,l+\lambda],\, \vert \xi-\varsigma\vert\leq r+\varrho.
\end{eqnarray}
It remains to define $v(s,\xi) \text{ for } (s,\xi)\in(l,l+\lambda)\times\{\xi:r+\varrho<\vert \xi-\varsigma\vert<r+2\varrho\}$.
Set
\begin{eqnarray}
B u(s,\xi)=\vert\frac{s-l-\lambda}{\lambda}\vert u(l,\xi)+\frac{s-l}{\lambda}u(l+\lambda,\xi),\\\nonumber
\tilde{u}(s,\xi)=u(s,\xi)-B u(s,\xi).\hskip2.5cm
\end{eqnarray}
Note that by (\ref{v-definition-1}) $\vert\xi-\varsigma\vert=r+\varrho$ implies $v(l,\xi)=u(l,\xi),\; v(l+\lambda,\xi)=u(l+\lambda,\xi)$ and therefore we have
\begin{eqnarray}
\vert\xi-\varsigma\vert=r+\varrho\;\Rightarrow\; B u(s,\xi)=B v(s,\xi),
\end{eqnarray}
where $v$ is defined in (\ref{v-definition-1}). Set
\begin{eqnarray}
\hat{v}(s,\xi)=
v(s,(r+\varrho)\frac{\xi-\varsigma}{\vert\xi-\varsigma\vert}+\varsigma)-
B u(s,(r+\varrho)\frac{\xi-\varsigma}{\vert\xi-\varsigma\vert}+\varsigma),
\end{eqnarray}
where again $v$ is defined in (\ref{v-definition-1}). With these notations we complete the definition of $v$ by setting
\begin{eqnarray}\label{v-definition-2}
v(s,\xi)=B u(s,\xi)
+\frac{\vert \xi-\varsigma\vert-r-\varrho}{\varrho}\tilde{u}(s,\xi)
+\frac{2\varrho+r-\vert \xi-\varsigma\vert}{\varrho}\hat{v}(s,\xi),\\\nonumber
\text{ for } (s,\xi)\in(l,l+\lambda)\times\{\xi:r+\varrho<\vert \xi-\varsigma\vert<r+2\varrho\}.
\end{eqnarray}
Statement (i) and (ii) are obvious consequences of the definition of $v$. Direct inspection of (\ref{v-definition-1}) and (\ref{v-definition-2}) shows that $v$ is continuous. From (\ref{v-definition-1}) $v(s,\xi)$ is a linear combination of $\bar{u}(s)$ and $u(s,\xi)$ computed for $s\in[l,l+\lambda]$. A similar statement applies to $v(s,\xi)$ in (\ref{v-definition-2}) since
$B u(s,\xi),\,\hat{v}(s,\xi)$ and $\tilde{u}(s,\xi)$ are linear combinations of $u(s,\xi)$ and $v(s,\xi)$ in (\ref{v-definition-1}) computed for $s\in[l,l+\lambda]$. From this, assumption (\ref{distance-m-l}) and (\ref{v-vs-exp-bound}) we conclude
\begin{eqnarray}\label{energy-density-bound}
\vert v-a\vert+\vert\nabla v\vert\leq C_3 e^{-k_0 l} \;\,\text{ for }
(s,\xi)\in \mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)\setminus
\overline{\mathcal{C}}_{l}^{r+2\varrho}(\varsigma),
\end{eqnarray}
where $C_3>0$ is a constant independent of $l$ and $r$. From (\ref{energy-density-bound}) and the assumptions on the potential $W$ it follows
\begin{eqnarray}
\frac{1}{2}\vert\nabla v\vert^2+W(v)\leq C_4e^{-2k_0 l},
\end{eqnarray}
which together with $\mathcal{H}^n(\mathcal{C}_{l+\lambda}^{r+2\varrho}(\varsigma)\setminus
\overline{\mathcal{C}}_{l}^{r+2\varrho}(\varsigma))\leq C_5 r^{n-1}$ concludes the proof.
\end{proof}
Given a number $0<\bar{q}< q^\circ$, let $A_{\bar{q}}$ be the set
\begin{eqnarray}
A_{\bar{q}}:=\{\xi: \|v(\cdot,\xi)-\bar{u}(\cdot)\|_{l+\frac{\lambda}{2}}>\bar{q},\,\vert \xi-\varsigma\vert< r+\varrho\},
\end{eqnarray}
where $v$ is the map constructed in Lemma \ref{lemma-1}.
\begin{lemma}\label{lemma-2}
Let $v$ be as before and let $S:=A_{\bar{q}}\cap\{\xi: r<\vert \xi-\varsigma\vert< r+\varrho\}$. Then there is a constant $C_1>0$ independent from $l \text{ and } r$ and a map $w\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ such that
\begin{description}
\item[(i)] $w=v \text{ on } \overline{\Omega}\setminus(\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\varrho}(\varsigma)
\setminus\overline{\mathcal{C}}_{l+\frac{\lambda}{2}}^{r}(\varsigma))$
\item[(ii)] $\| w-\bar{u}\|_{l+\frac{\lambda}{2}}\leq \bar{q}, \text{ for } \vert\xi-\varsigma\vert=r+\frac{\varrho}{2}.$
\item[(iii)] $J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\varrho}(\varsigma)
\setminus\overline{\mathcal{C}}_{l+\frac{\lambda}{2}}^{r}(\varsigma)}(w)-
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\varrho}(\varsigma)
\setminus\overline{\mathcal{C}}_{l+\frac{\lambda}{2}}^{r}(\varsigma)}(v) \leq C_1\mathcal{H}^{n-1}(S)$.
\end{description}
\end{lemma}
\begin{proof}
Set
\begin{equation}\label{qv-and-nuv}
\begin{split}
& q^v(\xi)=\|v(\cdot,\xi)-\bar{u}(\cdot)\|_{l+\frac{\lambda}{2}},\\
& \nu^v(s,\xi)=\frac{v(s,\xi)-\bar{u}(s)}{q^v(\xi)},
\end{split}\text{ for }\;s\in(-l-\frac{\lambda}{2},l+\frac{\lambda}{2}),\;\xi\in S.
\end{equation}
and, for $s\in(-l-\frac{\lambda}{2},l+\frac{\lambda}{2}),\;\xi\in S$, define
\begin{equation}\label{w}
\begin{split}
& w(s,\xi)=\bar{u}(s)+q^w(\xi)\nu^v(s,\xi),\\
& q^w(\xi)=(1-\vert 1-2\frac{\vert\xi-\varsigma\vert-r}{\varrho}\vert)\bar{q}+
\vert 1-2\frac{\vert\xi-\varsigma\vert-r}{\varrho}\vert q^v(\xi).
\end{split}
\end{equation}
From this definition it follows that $w$ coincides with $v=\bar{u}+q^v\nu^v$ if $\vert\xi-\varsigma\vert=r$ or $\vert\xi-\varsigma\vert=r+\varrho$ or $q^v=\bar{q}$. This shows that $w$ coincides with $v$ on the boundary of the set $(-l-\frac{\lambda}{2},l+\frac{\lambda}{2})\times S$ and proves (i). From (\ref{w}) also follows that $q^w=\bar{q}$ for $\vert\xi-\varsigma\vert=r+\frac{\varrho}{2}$ for $\xi\in S$. This and the definition of $S$ imply (ii). To prove (iii)
we note that
\begin{eqnarray}\label{w-bar-u}
\vert w-\bar{u}\vert=\vert q^w\nu^v\vert\leq\vert q^v\nu^v\vert=\vert v-\bar{u}\vert, \text{ for } s\in(-l-\frac{\lambda}{2},l+\frac{\lambda}{2}),\;\xi\in S.
\end{eqnarray}
which implies
\begin{eqnarray}
\vert w-a\vert\leq Ke^{-k s},\;\text{ for }\;s\in(0,l+\frac{\lambda}{2}),\;\xi\in S.
\end{eqnarray}
Therefore we have
\begin{eqnarray}\label{potential-bound}
\int_{-l-\frac{\lambda}{2}}^{l+\frac{\lambda}{2}}(W(w)-W(v))
\leq\int_{-l-\frac{\lambda}{2}}^{l+\frac{\lambda}{2}}W(w)\leq C,
\text{ for } \xi\in S.
\end{eqnarray}
We can write
\[w-\bar{u}=\frac{q^w}{q^v}(v-\bar{u}),\;\text{ for }\;s\in(0,l+\frac{\lambda}{2}),\;\xi\in S\]
therefore we have, using also (\ref{energy-density-bound})
\begin{equation}\label{computation}
\begin{split}
& w_s-\bar{u}_s=\frac{q^w}{q^v}(v_s-\bar{u}_s)\;\Rightarrow\;\vert w_s\vert\leq K e^{-k\vert s\vert},\\
& w_{\xi_j}=(\frac{q^w}{q^v})_{\xi_j}(v-\bar{u})+\frac{q^w}{q^v}v_{\xi_j}.
\end{split}
\end{equation}
From $q^v_{\xi_j}=\langle \nu^v,v_{\xi_j}\rangle_{l+\frac{\lambda}{2}}$ and (\ref{w}) it follows
\begin{equation}\label{computation1}
\begin{split}
& (\frac{q^w}{q^v})_{\xi_j}=\vert 1-2\frac{\vert\xi-\varsigma\vert-r}{\varrho}\vert_{\xi_j}(1-\frac{\bar{q}}{q^v})
-(1-\vert 1-2\frac{\vert\xi-\varsigma\vert-r}{\varrho}\vert)\frac{\bar{q}}{(q^v)^2}\langle \nu^v,v_{\xi_j}\rangle_{l+\frac{\lambda}{2}},\\
& \;\Rightarrow\;\vert (\frac{q^w}{q^v})_{\xi_j}\vert\leq\frac{2}{\varrho}+\frac{1}{q^v}\|v_{\xi_j}\|_{l+\frac{\lambda}{2}}.
\end{split}
\end{equation}
where we have also used $\frac{\bar{q}}{q^v}\leq 1$ for $\xi\in S$. From (\ref{computation}) and (\ref{computation1}) it follows
\[\vert w_{\xi_j}\vert\leq(\frac{2}{\varrho}+\frac{\|v_{\xi_j}\|_{l+\frac{\lambda}{2}}}{\bar{q}})\vert v-\bar{u}\vert+
\vert v_{\xi_j}\vert\leq K e^{-k\vert s\vert},\;\text{ for }\; s\in(-l-\frac{\lambda}{2},l+\frac{\lambda}{2}),\;\xi\in S,\]
where we have also used (\ref{energy-density-bound}).
From this and (\ref{computation}) we conclude
\begin{eqnarray}
\int_{-l-\frac{\lambda}{2}}^{l+\frac{\lambda}{2}}(\vert\nabla w\vert^2-\vert\nabla v\vert^2)\leq
\int_{-l-\frac{\lambda}{2}}^{l+\frac{\lambda}{2}}\vert\nabla w\vert^2\leq C,
\text{ for } \xi\in S.
\end{eqnarray}
This inequality together with (\ref{potential-bound}) conclude the proof.
\end{proof}
\begin{lemma}\label{lemma-w-q}
Let $w$ be the map constructed in Lemma \ref{lemma-2}. Define $w^{\bar{q}}$ by setting
\begin{eqnarray}
w^{\bar{q}}=\left\{\begin{array}{l}
\bar{u}+\bar{q}\nu^v, \text{ for } (s,\xi)\in\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma),\;\xi\in A_{\bar{q}},\\\\
w, \text{ for } (s,\xi)\in\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma),\;\xi\not\in A_{\bar{q}},
\text{ and for } (s,\xi)\not\in\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma).
\end{array}\right.
\end{eqnarray}
Then $w^{\bar{q}}\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ and
\begin{eqnarray}\label{quantitative-w-wq}
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(w^{\bar{q}})
-J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(w)\leq 0.
\end{eqnarray}
\end{lemma}
\begin{proof}
We have $w-\bar{u}=q^w\nu^w$ and $q^w>\bar{q}$ on $A_{\bar{q}}$. Therefore, recalling the definition of ${\bf e}_l$ and Lemma \ref{strict-minimizer} we have
\begin{eqnarray}\label{difference-w-wq}
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(w^{\bar{q}})
-J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(w)&=& \int_{\tilde{A}_{\bar{q}}}
({\bf e}_{l+\frac{\lambda}{2}}(\bar{q}\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}(q^w\nu^w))d\xi\\\nonumber
&&+
\frac{1}{2}
\sum_j\int_{\tilde{A}_{\bar{q}}}(\langle w^{\bar{q}}_{\xi_j},w^{\bar{q}}_{\xi_j}\rangle_{l+\frac{\lambda}{2}}-
\langle w_{\xi_j},w_{\xi_j}\rangle_{l+\frac{\lambda}{2}})d\xi\\\nonumber
&\leq&
\frac{1}{2}
\sum_j\int_{\tilde{A}_{\bar{q}}}(\langle w^{\bar{q}}_{\xi_j},w^{\bar{q}}_{\xi_j}\rangle_{l+\frac{\lambda}{2}}-
\langle w_{\xi_j},w_{\xi_j}\rangle_{l+\frac{\lambda}{2}})d\xi,
\end{eqnarray}
To conclude the proof we note that for $\xi\in\tilde{A}_{\bar{q}}$
\begin{equation}\label{wj-expressions}
\begin{split}
& w_{\xi_j}^{\bar{q}}=\bar{q}\nu_{\xi_j}^v,\;\Rightarrow\;\langle w_{\xi_j}^{\bar{q}},w_{\xi_j}^{\bar{q}}\rangle_{l+\frac{\lambda}{2}}=\bar{q}^2\langle \nu_{\xi_j}^v,\nu_{\xi_j}^v\rangle_{l+\frac{\lambda}{2}},\\
& w_{\xi_j}=q_{\xi_j}^w\nu^v+q^w\nu_{\xi_j}^v,\;\Rightarrow\;\langle w_{\xi_j},w_{\xi_j}\rangle_{l+\frac{\lambda}{2}}= (q_{\xi_j}^w)^2+(q^w)^2\langle \nu_{\xi_j}^v,\nu_{\xi_j}^v\rangle_{l+\frac{\lambda}{2}}
\end{split}
\end{equation}
where we have also used that $\langle \nu^v,\nu_{\xi_j}^v\rangle_{l+\frac{\lambda}{2}}=0$. From (\ref{wj-expressions}) it follows
\[\langle w^{\bar{q}}_{\xi_j},w^{\bar{q}}_{\xi_j}\rangle_{l+\frac{\lambda}{2}}-
\langle w_{\xi_j},w_{\xi_j}\rangle_{l+\frac{\lambda}{2}}=-(q_{\xi_j}^w)^2+(\bar{q}^2-(q^w)^2)\langle \nu_{\xi_j}^v,\nu_{\xi_j}^v\rangle_{l+\frac{\lambda}{2}}\leq 0,\]
for $\xi\in\tilde{A}_{\bar{q}}$. This and (\ref{difference-w-wq}) prove (\ref{quantitative-w-wq}).
\end{proof}
Next we show that we can associate to $w^{\bar{q}}$ a map $\omega$ which coincides with $w^{\bar{q}}$ on $\Omega\setminus \mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)$ and has less energy than $w^{\bar{q}}$. Moreover we derive a quantitative estimate of the energy difference. We follow closely the argument in \cite{fu}. First we observe that, if we define $q^\ast:=q^{w^{\bar{q}}}$, we can represent $J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(w^{\bar{q}})$ in the {\it polar} form
\begin{eqnarray}
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(w^{\bar{q}})-
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(\bar{u})
\hskip7cm\\\nonumber
\hskip2cm=
\int_{B_{\varsigma,r +\frac{\varrho}{2}}\cap\{q^\ast>0\}}\frac{1}{2}(\vert\nabla q^\ast\vert^2+{q^\ast}^2\sum_j\langle \nu_{\xi_j}^w,\nu_{\xi_j}^w\rangle_{l+\frac{\lambda}{2}})+{\bf e}_{l+\frac{\lambda}{2}}(q^\ast\nu^w).
\end{eqnarray}
This follows from $\nu^w=\nu^v$ and from $\langle \nu^v,\nu_{\xi_j}^v\rangle_{l+\frac{\lambda}{2}}=0$ that implies
\[\sum_j\langle w_{\xi_j}^{\bar{q}},w_{\xi_j}^{\bar{q}}\rangle_{l+\frac{\lambda}{2}}=\vert\nabla q^\ast\vert^2+{q^\ast}^2\sum_j\langle \nu_{\xi_j}^w,\nu_{\xi_j}^w\rangle_{l+\frac{\lambda}{2}}\]
and from the definition of ${\bf e}_l$ in Lemma \ref{strict-minimizer}.
We remark that the definition of $q^\ast \text{ and } w^{\bar{q}}$ imply
\begin{eqnarray}
q^\ast&\leq& \bar{q}, \text{ on } B_{\varsigma,r +\frac{\varrho}{2}},\\\nonumber
q^\ast&=& \bar{q}, \text{ on } A_{\bar{q}}\cap B_{\varsigma,r +\frac{\varrho}{2}}. \end{eqnarray}
\begin{lemma}\label{quantitative-estimate0}
Let $\varphi:B_{\varsigma,r +\frac{\varrho}{2}}\rightarrow\mathbb{R}$ be the solution of
\begin{eqnarray}\label{phi-comparison}
\left\{\begin{array}{l}
\Delta\varphi=c^2\varphi, \text{ in } B_{\varsigma,r +\frac{\varrho}{2}}\\
\varphi=\bar{q}, \text{ on } \partial B_{\varsigma,r +\frac{\varrho}{2}}.
\end{array}\right.
\end{eqnarray}
Then there is a map $\omega\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ with the following properties
\begin{eqnarray}
\left\{\begin{array}{l}
\omega=w^{\bar{q}},\; \text{ on }\; \Omega\setminus \mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma),\\\\
\omega=q^\omega\nu^w+ \bar{u},\; \text{ on }\; \mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma),\\\\
q^\omega\leq\varphi\leq\bar{q},\; \text{ on }\; \mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma).
\end{array}\right.
\end{eqnarray}
Moreover
\begin{eqnarray}\label{quantitative-estimate}
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}( w^{\bar{q}})-
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(\omega)\hskip7.5cm
\\\nonumber\hskip2cm\geq
\int_{B_{\varsigma,r +\frac{\varrho}{2}}\cap\{q^\ast>\varphi\}}
({\bf e}_{l+\frac{\lambda}{2}}(q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}(\varphi\nu^w)
-D_q{\bf e}_{l+\frac{\lambda}{2}}(\varphi\nu^w)(q^\ast-\varphi))d\xi.
\end{eqnarray}
\end{lemma}
\begin{proof}
Let $b>0$, $b\leq\min_{\xi\in B_{\varsigma,r +\frac{\varrho}{2}}}\varphi$ be fixed and let $A_b\subset B_{\varsigma,r +\frac{\varrho}{2}}$ the set $A_b:=\{\xi\in B_{\varsigma,r +\frac{\varrho}{2}}:q^\ast>b\}$. $A_b$ is an open set since $w^{\bar{q}}=\bar{u}+q^\ast\nu^w$ is continuous by construction. Let
\begin{eqnarray}\label{reduced-action}
\mathcal{J}_{A_b}(p)=\int_{A_b}(\frac{1}{2}\vert\nabla p\vert^2+{\bf e}_{l+\frac{\lambda}{2}}(\vert p\vert\nu^w))d\xi,
\end{eqnarray}
Since $A_b$ is open and $q^\ast\in L^\infty(A_b,\mathbb{R})$ there exists a minimizer $p^\ast\in q^\ast+W_0^{1,2}(A_b,\mathbb{R})$ of the problem
\begin{eqnarray}
\mathcal{J}_{A_b}(p^\ast)=\min_{q^\ast+W_0^{1,2}(A_b,\mathbb{R})}{\mathcal{J}_{A_b}}.
\end{eqnarray}
We also have
\begin{eqnarray}
0\,\leq\,p^\ast\,\leq\,\bar{q}.
\end{eqnarray}
This follows from (\ref{iota-properties}) that implies $\mathcal{J}_{A_b}(\frac{p^\ast+\vert p^\ast\vert}{2})\leq\mathcal{J}_{A_b}(p^\ast)$ and therefore $p^\ast\geq 0$. The other inequality is a consequence of $\mathcal{J}_{A_b}(\min\{p^\ast,\bar{q}\})\leq\mathcal{J}_{A_b}(p^\ast)$ which follows from $\int_{A_b}\vert\nabla(\min\{p^\ast,\bar{q}\})\vert^2\leq
\int_{A_b}\vert\nabla p^\ast\vert^2$ and from (\ref{iota-properties}).
Since the map $q\rightarrow {\bf e}_{l+\frac{\lambda}{2}}(\vert q\vert\nu^w)$ is a $C^1$ map, we can write the variational equation
\begin{eqnarray}\label{rho-variation0}
\int_{A_b}((\nabla p^\ast,\nabla\gamma)+D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)\gamma)d\xi=0,
\end{eqnarray}
for all $\gamma\in W_0^{1,2}(A_b,\mathbb{R})\cap L^\infty(A_b)$. In particular, if we define $A_b^*:=\{x\in A_b: p^\ast>\varphi\}$, we have
\begin{eqnarray}\label{rho-variation}
\int_{A_b^*}((\nabla p^\ast,\nabla\gamma)+D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)\gamma)d\xi=0,
\end{eqnarray}
for all $\gamma\in W_0^{1,2}(A_b,\mathbb{R})\cap L^\infty(A_b)$ that vanish on $A_b\setminus A_b^*$.
If we take $\gamma=(p^\ast-\varphi)^+$ in (\ref{rho-variation}) and use (\ref{iota-properties})$_1$, which implies $D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)\geq c^2 p^\ast$, we get
\begin{eqnarray}\label{rho-variation1}
\int_{A_b^*}((\nabla p^\ast,\nabla(p^\ast-\varphi))+c^2 p^\ast(p^\ast-\varphi))d\xi\leq 0,
\end{eqnarray}
This inequality and
\begin{eqnarray}\label{phi-variation1}
\int_{A_b^*}((\nabla\varphi,\nabla(p^\ast-\varphi))+c^2\varphi(p^\ast-\varphi))dx=0,
\end{eqnarray}
that follows from (\ref{phi-comparison}) imply
\begin{eqnarray}\label{rho-variation2}
\int_{A_b^*}(\vert\nabla(p^\ast-\varphi)\vert^2+c^2(p^\ast-\varphi)^2)d\xi\leq 0.
\end{eqnarray}
That is $\mathcal{H}^{n-1}(A_b^*)=0$
which together with $p^\ast\leq\varphi$ on $A_b\setminus A_b^*$ shows that
\begin{eqnarray}\label{rho-min-phi}
p^\ast\leq\varphi, \text{ for } \xi\in A_b.
\end{eqnarray}
Let $\omega$ be the map defined by setting
\begin{eqnarray}\label{v-definition}
\omega=\left\{\begin{array}{l}
w^{\bar{q}},\text{ for } (s,\xi)\in \Omega\setminus
(-l-\frac{\lambda}{2}, l+\frac{\lambda}{2})\times A_b,\\\\
\bar{u}+q^\omega\nu^w=\bar{u}+\min\{p^\ast,q^\ast\}\nu^w, \text{ for } \xi\in A_b.
\end{array}\right.
\end{eqnarray}
Note that this definition, the definition of $A_b$ and (\ref{rho-min-phi}) imply
\begin{eqnarray}\label{q-omega-min-phi}
q^\omega\leq\varphi, \text{ for } \xi\in B_{\varsigma,r+\frac{\varrho}{2}}.
\end{eqnarray}
From (\ref{v-definition}) we have
\begin{eqnarray}\label{diff-energy}
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}( w^{\bar{q}})-
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(\omega)
\hskip8cm\\\nonumber\geq
\int_{A_b\cap\{p^\ast<q^\ast\}}\Big(\frac{1}{2}\big(\vert\nabla q^\ast\vert^2-\vert\nabla p^\ast\vert^2+((q^\ast)^2-(p^\ast)^2)\sum_{j} \langle \nu_{\xi_j}^w, \nu_{\xi_j}^w \rangle_{l+\frac{\lambda}{2}}\big)\\\nonumber+{\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)\Big)d\xi\\\nonumber \geq\int_{A_b\cap\{p^\ast<q^\ast\}}\Big(\frac{1}{2}\big(\vert\nabla q^\ast\vert^2-\vert\nabla p^\ast\vert^2\big) +{\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)\Big)d\xi\\\nonumber \geq\int_{A_b\cap\{p^\ast<q^\ast\}}\Big(\frac{1}{2}\vert\nabla q^\ast-\nabla p^\ast\vert^2\hskip5.5cm\\\nonumber \hskip1.7cm+{\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)-D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)(q^\ast-p^\ast)\Big)d\xi\geq 0,
\end{eqnarray}
where we have used
\begin{eqnarray}
\frac{1}{2}(\vert\nabla q^\ast\vert^2-\vert\nabla p^\ast\vert^2)&=&\frac{1}{2}\vert\nabla q^\ast-\nabla p^\ast\vert^2 +(\nabla p^\ast,\nabla( q^\ast-p^\ast)),\\\nonumber\\\nonumber \text{ and }\quad\quad\quad\quad\quad&&\\\nonumber\\\nonumber
\int_{A_b\cap\{p^\ast<q^\ast\}}(\nabla p^\ast,\nabla(q^\ast-p^\ast)) &=&-\int_{A_b\cap\{p^\ast<q^\ast\}}D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)(q^\ast-p^\ast)d\xi,
\end{eqnarray}
which follows from (\ref{rho-variation0}) with $\gamma=(q^\ast-p^\ast)^+$.
From (\ref{iota-properties})$_4$ and (\ref{rho-min-phi}) we have
\begin{eqnarray}
{\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-\tilde{{\bf e}}_{l+\frac{\lambda}{2}}(p^\ast,q^\ast,\nu^w)\geq
{\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-\tilde{{\bf e}}_{l+\frac{\lambda}{2}}(\varphi, q^\ast,\nu^w).
\end{eqnarray}
From this and (\ref{q-omega-min-phi}) which implies
\begin{eqnarray}
B_{\varsigma,r+\frac{\varrho}{2}}\cap\{\varphi<q^\ast\}=
A_b\cap\{\varphi<q^\ast\}\subset
A_b\cap\{p^\ast<q^\ast\},
\end{eqnarray}
we have
\begin{eqnarray}
\int_{A_b\cap\{p^\ast<q^\ast\}} \big({\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)-D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)(q^\ast-p^\ast)\big)d\xi\\\nonumber
\geq
\int_{B_{\varsigma,r+\frac{\varrho}{2}}\cap\{\varphi<q^\ast\}} \big({\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)-D_q{\bf e}_{l+\frac{\lambda}{2}}( p^\ast\nu^w)(q^\ast-p^\ast)\big)d\xi\\\nonumber
\geq
\int_{B_{\varsigma,r+\frac{\varrho}{2}}\cap\{\varphi<q^\ast\}} \big({\bf e}_{l+\frac{\lambda}{2}}( q^\ast\nu^w)-{\bf e}_{l+\frac{\lambda}{2}}( \varphi\nu^w)-D_q{\bf e}_{l+\frac{\lambda}{2}}(\varphi\nu^w)(q^\ast-\varphi)\big)d\xi.
\end{eqnarray}
The inequality (\ref{quantitative-estimate}) follows from this and (\ref{diff-energy}).
\end{proof}
\begin{corollary}\label{corollary}
Let $w^{\bar{q}}$ be as before and let $\omega\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ be the map constructed in Lemma \ref{quantitative-estimate0}. Then there is a number $c_1>0$ independent from $l, r, \lambda$ and $\varrho$ such that
\begin{eqnarray}
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}( w^{\bar{q}})-
J_{\mathcal{C}_{l+\frac{\lambda}{2}}^{r+\frac{\varrho}{2}}(\varsigma)}(\omega)
\geq
c_1\mathcal{H}^{n-1}(A_{\bar{q}}\cap B_{\varsigma,r}).
\end{eqnarray}
\end{corollary}
\begin{proof}
Set $R=r+\frac{\varrho}{2}$, then we have $\varphi(\xi)=\bar{q}\phi(\vert \xi-\varsigma\vert,R)$ with $\phi(\cdot,R):[0,R]\rightarrow\mathbb{R}$ a positive function which is strictly increasing in $(0,R]$. Moreover we have $\phi(R,R)=1$ and
\begin{eqnarray}\label{phi-l}
R_1<R_2,\;t\in(0, R_1)\;\Rightarrow\;\;\phi(R_1-t,R_1)>\phi(R_2-t,R_2).
\end{eqnarray}
Note that $\xi\in B_{\varsigma,r}$ implies $\varphi(\xi)\leq \bar{q}\phi(r,r+\frac{\varrho}{2})$.
Therefore for $\xi\in B_{\varsigma,r}\cap A_{\bar{q}}$ we have
\begin{eqnarray}\label{diff-potential}\\\nonumber
\hskip.5cm{\bf e}_{l+\frac{\lambda}{2}}(\bar{q}\nu^w)
-{\bf e}_{l+\frac{\lambda}{2}}(\varphi\nu^w)
-D_q{\bf e}_{l+\frac{\lambda}{2}}(\varphi\nu^w)(\bar{q}-\varphi)\hskip5cm\\\nonumber
=\int_\varphi^{\bar{q}}(D_q{\bf e}_{l+\frac{\lambda}{2}}(s\nu^w)
-D_q{\bf e}_{l+\frac{\lambda}{2}}(\varphi\nu^w))ds\hskip4.5cm\\\nonumber
\hskip2cm\geq c^2\int_\varphi^{\bar{q}}(s-\varphi)ds=\frac{1}{2}c^2(\bar{q}-\varphi)^2\geq
\frac{1}{2}c^2\bar{q}^2(1-\phi(r,r+\frac{\varrho}{2}))^2,
\end{eqnarray}
where we have also used (\ref{iota-properties})$_1$.
The corollary follows from this inequality, from (\ref{quantitative-estimate}) and from the fact that, by (\ref{phi-l}), the last expression in (\ref{diff-potential}) is increasing with $r$. Therefore, for $r\geq r_0$, for some $r_0>0$, we can assume
\begin{eqnarray}
c_1=\frac{1}{2}c^2\bar{q}^2(1
-\phi(r_0,r_0+\frac{\varrho}{2}))^2.
\end{eqnarray}
\end{proof}
\subsection{Conclusion of the proof of Theorem \ref{main}}\label{proof-main}
Let $u$ be as in Theorem \ref{main} and $l_0,\,q^\circ$ be as in Lemma \ref{strict-minimizer} and assume that $\varsigma$ is such that
\begin{eqnarray}
\| u(\cdot,\varsigma)-\bar{u}\|_l\geq q^\circ,
\end{eqnarray}
for some $l\geq l_0$.
Then $u\in C_S^{0,1}(\overline{\Omega},\mathbb{R}^m)$ implies that there is $r_0>0$, independent from $l\geq l_0$, such that
\begin{eqnarray}\label{sigma-0}
\| u(\cdot,\xi)-\bar{u}\|_l\geq \bar{q},\, \text{ for } \vert \xi-\varsigma\vert\leq r_0.
\end{eqnarray}
Let $j_0\geq 0$ be the minimum value of $j$ that violates the inequality
\begin{eqnarray}\label{inequality}
c_1\frac{r_0^{n-1}}{2}(1+\frac{c_1}{C_1})^j\leq C_1((r_0+(j+1)\varrho)^{n-1}-(r_0+j\varrho)^{n-1}),
\end{eqnarray}
where $c_1 \text{ and } C_1$ are the constants in Corollary \ref{corollary} and Lemma \ref{lemma-2}.
Let $l^\circ\geq l_0$ be fixed so that
\begin{eqnarray}\label{l-0-sufficiently-large}
C_0(r_0+j_0\varrho)^{n-1}e^{-k l^\circ}\leq c_1\theta_{n-1}\frac{r_0^{n-1}}{2},
\end{eqnarray}
where $C_0$ is defined in Lemma \ref{lemma-1} and $\theta_n$ is the measure of the unit ball in $\mathbb{R}^n$.
\begin{proposition}\label{l-2-bound}
Let $\lambda, \varrho, \bar{q}\in(0,q^\circ) \text{ and } l^\circ\geq l_0$ fixed as before and let $r^\circ=r_0+j_0\varrho$ where $j_0\geq 0$ is the minimum value of $j$ that violates (\ref{inequality}). Assume $l\geq l^\circ$ and assume that $\mathcal{C}_{l+\lambda}^{r^\circ+2\varrho}(\varsigma)\subset\Omega$ satisfies
\begin{eqnarray}
d(\mathcal{C}_{l+\lambda}^{r^\circ+2\varrho}(\varsigma),\partial\Omega)\geq l+\lambda.
\end{eqnarray}
Then
\begin{eqnarray}
q^u(\varsigma)=\| u(\cdot,\varsigma)-\bar{u}\|_{l+\frac{\lambda}{2}}< q^\circ.
\end{eqnarray}
\end{proposition}
\begin{proof}
Suppose instead that
\begin{eqnarray}
\| u(\cdot,\varsigma)-\bar{u}\|_{l+\frac{\lambda}{2}} \geq q^\circ,
\end{eqnarray}
and set
\begin{eqnarray}\label{sigma-0-definition}
\sigma_0:=\theta_{n-1}\frac{r_0^{n-1}}{2}.
\end{eqnarray}
Then $l^\circ\geq l_0$ and (\ref{sigma-0}) imply
\begin{eqnarray}
\mathcal{H}^{n-1}(A_{\bar{q}}\cap B_{\varsigma,r_0})\geq 2\sigma_0.
\end{eqnarray}
For each $0\leq j\leq j_0$ let $r_j:=r_0+j\varrho$ and let $v_j,\,w_j,\,w_j^{\bar{q}} \text{ and } \omega_j$ the maps $v,\,w,\,w^{\bar{q}} \text{ and } \omega$ defined in Lemma \ref{lemma-1},\,Lemma \ref{lemma-2},\, Lemma \ref{lemma-w-q} and Lemma \ref{quantitative-estimate0} with $l\geq l^\circ \text{ and } r=r_j$. Then from these Lemmas and Corollary \ref{corollary} we have
\begin{eqnarray}
\left.\begin{array}{l}
J(u)_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}
-J(v_j)_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}\geq-C_0r_j^{n-1}e^{-k l^\circ},\\\\
J(v_j)_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}
-J(w_j)_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}\geq-C_1\mathcal{H}^{n-1}(A_{\bar{q}}\cap (\overline{B}_{\varsigma,r_{j+1}}\setminus B_{\varsigma,r_j})),\\\\
J(w_j)_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}
-J(w_j^{\bar{q}})_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}\geq 0,\\\\
J(w_j^{\bar{q}})_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}
-J(\omega_j)_{\mathcal{C}_{l+\lambda}^{r_j^\circ+2\varrho}(\varsigma)}\geq c_1\mathcal{H}^{n-1}(A_{\bar{q}}\cap \overline{B}_{\varsigma,r_j}).
\end{array}\right.
\end{eqnarray}
From this and the minimality of $u$ it follows
\begin{eqnarray}\label{inequality-1}
\hskip1.5cm 0\geq -C_0r_j^{n-1}e^{-k l^\circ}-C_1\mathcal{H}^{n-1}(A_{\bar{q}}\cap (\overline{B}_{\varsigma,r_{j+1}}\setminus B_{\varsigma,r_j}))+ c_1\mathcal{H}^{n-1}(A_{\bar{q}}\cap \overline{B}_{\varsigma,r_j}).
\end{eqnarray}
Define
\begin{eqnarray}
\sigma_j:=\mathcal{H}^{n-1}(A_{\bar{q}}\cap B_{\varsigma,r_j})-\sigma_0, \text{ for } j\geq 1.
\end{eqnarray}
If $j_0=0$ the inequality (\ref{inequality-1}), using also (\ref{l-0-sufficiently-large}), implies
\begin{eqnarray}\label{inequality-2}
0\geq -c_1\sigma_0-C_1\sigma_1+2C_1\sigma_0+2c_1\sigma_0\geq c_1\sigma_0-C_1(\sigma_1-\sigma_0).
\end{eqnarray}
If $j_0>0$ in a similar way we get
\begin{eqnarray}\label{inequality-3}
0\geq -c_1\sigma_0-C_1(\sigma_{j+1}-\sigma_j)+c_1(\sigma_j+\sigma_0)= c_1\sigma_j-C_1(\sigma_{j+1}-\sigma_j).
\end{eqnarray}
From (\ref{inequality-2}) and (\ref{inequality-3}) it follows
\begin{eqnarray}
\sigma_j\geq (1+\frac{c_1}{C_1})^j\sigma_0,
\end{eqnarray}
and therefore, using also (\ref{sigma-0-definition})
\begin{eqnarray}\label{inequality-4}
c_1(1+\frac{c_1}{C_1})^j\theta_{n-1}\frac{r_0^{n-1}}{2}\leq C_1(\sigma_{j+1}-\sigma_{j})\leq C_1\theta_{n-1}(r_{j+1}^{n-1}-r_j^{n-1}).
\end{eqnarray}
This inequality is equivalent to (\ref{inequality}). It follows that, on the basis of the definition of $j_0$, putting $j=j_0$ in (\ref{inequality-4}) leads to a contradiction with the minimality of $u$.
\end{proof}
\subsection{The exponential estimate}\label{sec-exp}
\begin{lemma}\label{lemma-case-2}
Assume $r>r^\circ+2\varrho$ and $l>l^\circ+\lambda$ and assume that $\mathcal{C}_l^r(\varsigma_0)\subset\Omega$ satisfies
\begin{eqnarray}
d(\mathcal{C}_l^r(\varsigma_0),\partial\Omega)\geq l.
\end{eqnarray}
Then there are constants $K_1 \text{ and } k_1>0$ independent of $r>r^\circ+2\varrho$ and $l>l^\circ+\lambda$ such that
\begin{eqnarray}
\| u(\cdot,\varsigma_0)-\bar{u}\|_l^\frac{1}{2}
\leq K_1e^{-k_1r}.
\end{eqnarray}
\end{lemma}
\begin{proof}
From $r>r^\circ+2\varrho$ it follows that $\vert\varsigma-\varsigma_0\vert\leq r-(r^\circ+2\varrho)$ implies
\begin{eqnarray}
d(\mathcal{C}_l^{r^\circ+2\varrho}(\varsigma),\partial\Omega)\geq l.
\end{eqnarray}
Therefore we can invoke Proposition \ref{l-2-bound} to conclude that
\begin{eqnarray}
\| u(\cdot,\varsigma)-\bar{u}\|\leq \bar{q}, \text{ for } \vert\varsigma-\varsigma_0\vert\leq r-(r^\circ+2\varrho).
\end{eqnarray}
Let $\varphi:B_{\varsigma_0,r -(r^\circ+2\varrho)}\rightarrow\mathbb{R}$ the solution of
\begin{eqnarray}\label{phi-comparison-1}
\left\{\begin{array}{l}
\Delta\varphi=c^2\varphi, \text{ in } B_{\varsigma_0,r -(r^\circ+2\varrho)}\\\\
\varphi=\bar{q}, \text{ on } \partial B_{\varsigma_0,r -(r^\circ+2\varrho)}.
\end{array}\right.
\end{eqnarray}
Then we have
\begin{eqnarray}\label{q-omega-min-phi-1}
\| u(\cdot,\varsigma)-\bar{u}\|\leq \varphi(\varsigma), \text{ for } \varsigma\in B_{\varsigma_0,r -(r^\circ+2\varrho)}.
\end{eqnarray}
This follows by the same argument leading to (\ref{q-omega-min-phi}) in the proof of Lemma \ref{quantitative-estimate0}. Indeed, if (\ref{q-omega-min-phi-1}) does not hold, then by proceeding as in the proof of Lemma \ref{quantitative-estimate0} we can construct a competing map $\omega$ that satisfies (\ref{q-omega-min-phi-1}) and has less energy than $u$ contradicting its minimality property. In particular (\ref{q-omega-min-phi-1}) implies
\begin{eqnarray}\label{q-omega-min-phi-2}
\|u(\cdot,\varsigma_0)-\bar{u}\|\leq \varphi(\varsigma_0).
\end{eqnarray}
On the other hand it can be shown, see Lemma 2.4 in \cite{flp}, that there is a constant $h_0>0$ such that
\[\phi(0,r)\leq e^{-h_0 r},\;\text{ for }\;r\geq r_0.\]
From this and (\ref{q-omega-min-phi-2}) we get
\begin{eqnarray}
\varphi(\varsigma_0)=\bar{q}\phi(0,r-(r^\circ+2\varrho))\leq
\bar{q}e^{h_0(r^\circ+2\varrho)}e^{-h_0r}=K_1e^{-k_1r}.
\end{eqnarray}
This concludes the proof with $K_1=\bar{q}e^{h_0(r^\circ+2\varrho)}$ and $k_1=h_0$.
\end{proof}
We are now in the position of proving the exponential estimate (i) in Theorem \ref{main}. We distinguish two cases:
\begin{description}
\item[Case $1$] $x=(s,\xi)\in\Omega \text{ satisfies } s>\frac{1}{2}d(x,\partial\Omega)$. In this case, taking also into account that $\Omega$ satisfies $\bf{(i)}$, we have
\begin{eqnarray}
d(x,\partial\Omega^+)\geq \frac{1}{2}d(x,\partial\Omega).
\end{eqnarray}
From this and Theorem \ref{theorem-1} it follows
\begin{eqnarray}\label{case-1-estimate}
\vert u(s,\xi)-\bar{u}(s)\vert\leq \vert u(s,\xi)-a\vert+\vert \bar{u}(s)-a\vert\hskip4.5cm\\\nonumber\hskip2.5cm\leq K_0e^{-k_0d(x,\partial\Omega^+)}+\bar{K}e^{-\bar{k}s}
\leq(K_0+\bar{K})e^{-\frac{1}{2}\min\{k_0,\bar{k}\}d(x,\partial\Omega)},
\end{eqnarray}
where we have also used
\begin{eqnarray}
\vert\bar{u}(s)-a\vert\leq\bar{K}e^{-\bar{k}s}.
\end{eqnarray}
\item[Case $2$] $x=(s,\xi)\in\Omega \text{ satisfies } 0\leq s\leq\frac{1}{2}d(x,\partial\Omega)$. In this case, elementary geometric considerations and the assumption $\bf{(i)}$ on $\Omega$ imply the existence of $\alpha\in(0,1)$ ($\alpha=\frac{1}{4}$ will do) such that
\begin{eqnarray}\label{case-2-distance}
\mathcal{C}_{s+\alpha d(x)}^{\alpha d(x)}(\xi)&\subset&\Omega\hskip.8cm \text{ and }\\\nonumber
d(\mathcal{C}_{s+\alpha d(x)}^{\alpha d(x)}(\xi),\partial\Omega)&\geq& s+\alpha d(x),
\end{eqnarray}
where we have set $d(x):=d(x,\partial\Omega)$.
From (\ref{case-2-distance}) and Lemma \ref{lemma-case-2} it follows
\begin{eqnarray}
\| u(\cdot,\xi)-\bar{u}\|_l
\leq K_1e^{-k_1\alpha d(x)},\, \text{ for } d(x)>r^\circ+2\varrho.
\end{eqnarray}
This and Lemma \ref{l-infinity-less-l-2} imply, recalling $d(x)=d(x,\partial\Omega)$,
\begin{eqnarray}\label{case-2-exponential}
\vert u(s,\xi)-\bar{u}(s)\vert\leq K_1^\frac{2}{3}e^{-\frac{2}{3}k_1\alpha d(x,\partial\Omega)}.
\end{eqnarray}
\end{description}
The exponential estimate follows from (\ref{case-1-estimate}) and (\ref{case-2-exponential}).
\subsection{The proof of Theorems \ref{main-1} and \ref{main-2}}\label{main-final}
If $\Omega=\mathbb{R}^n$ the proof of Theorem \ref{main} simplifies since we can avoid the technicalities needed in the case that $\Omega$ is bounded in the $s=x_1$ direction and assume $l=+\infty$. The possibility of working with $l=+\infty$ is based on the following lemma
\begin{lemma}\label{l-infty}
Let $u:\mathbb{R}^n\rightarrow\mathbb{R}^m$ be the symmetric minimizer in Theorem \ref{theorem-1}. Given a smooth open set $O\subset\mathbb{R}^{n-1}$ let $\mathbb{R}\times O$ be the cylinder $\mathbb{R}\times O=\{(s,\xi): s\in\mathbb{R},\;\xi\in O\}$. Then
\begin{equation}\label{min-infty}
J_{\mathbb{R}\times O}(u)=\min_{v\in u+W_{0 S}^{1,2}(\mathbb{R}\times O;\mathbb{R}^m)}J_{\mathbb{R}\times O}(v),
\end{equation}
where $W_{0 S}^{1,2}(\mathbb{R}\times O;\mathbb{R}^m)$ is the subset of $W_S^{1,2}(\mathbb{R}\times O;\mathbb{R}^m)$ of the maps that satisfy $v=0$ on $\mathbb{R}\times\partial O$.
\end{lemma}
\begin{proof}
Assume there are $\eta>0$ and $v\in W_{0 S}^{1,2}(\mathbb{R}\times O;\mathbb{R}^m)$ such that
\begin{equation}\label{cont-assum}
J_{\mathbb{R}\times O}(u)-J_{\mathbb{R}\times O}(v)\geq\eta.
\end{equation}
For each $l>0$ define $\tilde{v}\in W_{0 S}^{1,2}(\mathbb{R}\times O;\mathbb{R}^m)$ by
\[
\tilde{v}=\left\{\begin{array}{l} v,\quad\text{ for }\;s\in[0,l],\;\xi\in O,\\
(1+l-s)v+(s-l)u, \;s\in[l,l+1],\;\xi\in O,\\
u,\quad\text{ for } \;s\in[l,+\infty),\;\xi\in O.
\end{array}\right.\]
The minimality of $u$ implies
\begin{equation}\label{before-limit}
0\geq J_{[-l-1,l+1]\times O}(u)-J_{[-l-1,l+1]\times O}(\tilde{v})=J_{[-l-1,l+1]\times O}(u)-J_{[-l,l]\times O}(v)+
\mathrm{O}(e^{-k l}),
\end{equation}
where we have also used the fact that both $u$ and $v$ belong to $W_S^{1,2}(\mathbb{R}\times O;\mathbb{R}^m)$. Taking the limit for $l\rightarrow +\infty$ in (\ref{before-limit}) yields
\[0\geq J_{\mathbb{R}\times O}(u)-J_{\mathbb{R}\times O}(v)\]
in contradiction with (\ref{cont-assum}).
\end{proof}
Once we know that $u$ satisfies (\ref{min-infty}) the same arguments leading to Proposition \ref{l-2-bound} imply the existence of $r^\circ>0$ such that
\begin{equation}\label{stay-below-q0}
\mathbb{R}\times B_{r^\circ}(\xi)\subset\mathbb{R}^n\;\Rightarrow\;\|u(\cdot,\xi)-\bar{u}\|_{\infty}<q^\circ,
\end{equation}
where $B_{r^\circ}(\xi)\subset\mathbb{R}^{n-1}$ is the ball of center $\xi$ and radius $r^\circ$.
Since the condition $\mathbb{R}\times B_{r^\circ}(\xi)\subset\mathbb{R}^n$ is trivially satisfied for each $\xi\in\mathbb{R}^{n-1}$ we have
\[\|u(\cdot,\xi)-\bar{u}\|_{\infty}<q^\circ,\;\text{ for every }\;\xi\in\mathbb{R}^{n-1}.\]
To conclude the proof we observe that everything that has been said concerning $q^\circ$ can be repeated verbatim for each $q\in(0,q^\circ)$. It follows that for each $q\in(0,q^\circ]$ there is an $r(q)>0$ such that (\ref{stay-below-q0}) holds with $q$ in place of $q^\circ$ and $r(q)$ in place of $r^\circ$. Therefore we have
\[\|u(\cdot,\xi)-\bar{u}\|_{\infty}<q,\;\text{ for every }\;\xi\in\mathbb{R}^{n-1}.\]
Since this holds for each $q\in(0,q^\circ]$ we conclude
\[u(\cdot,\xi)=\bar{u},\;\text{ for every }\;\xi\in\mathbb{R}^{n-1}\]
which completes the proof of Theorem \ref{main-1}.
To prove Theorem \ref{main-2} we note that, if $\Omega=\{x\in\mathbb{R}^n: x_n>0\}$, then arguing as in the proof of Theorem \ref{main-1} above, we get that, given $q>0$ there exists $l_q>0$ such that
\[\xi_n> l_q,\quad\Rightarrow\quad\|u(\cdot,\xi)-\bar{u}\|_{L^\infty}<q.\]
From this, the boundary condition
\[\xi_n=0,\quad\Rightarrow\quad\|u(\cdot,\xi)-\bar{u}\|_{L^\infty}=0,\]
and the reasoning in the proof of Lemma \ref{lemma-w-q} it follows
\[\|u(\cdot,\xi)-\bar{u}\|_{L^\infty}<q,\;\text{ for each }\;\xi_n\geq 0,\,q>0.\]
The proof of Theorem \ref{main-2} is complete.
\section{The proof of Theorem \ref{triple}}\label{sec-triple}
From an abstract point of view the proof of Theorem \ref{triple} is essentially the same as the proof of Theorem \ref{main-1} after quantities like $q^u$ and $\nu^u$ are reinterpreted and properly redefined in the context of maps equivariant with respect to the group $G$ of the equilateral triangle. We divide the proof in steps pointing out the correspondence with the corresponding steps in the proof of Theorem \ref{main-1}. We write $x\in\mathbb{R}^n$ in the form $x=(s,\xi)$ with $s=(s_1,s_2)\in\mathbb{R}^2$ and $\xi=(x_2,\ldots,x_n)\in\mathbb{R}^{n-2}$.
\begin{description}
\item[Step 1]
\end{description}
From assumption (\ref{stay-away}) in Theorem \ref{triple} and equivariance it follows
\begin{equation}\label{stay-away-1}
\begin{split}
& \vert u(x)-a\vert\geq\delta,\;\vert u(x)-g_-a\vert>\delta,\;\text{ for }\;x\in g_+D,\;d(x,\partial g_+D)\geq d_0,\\
& \vert u(x)-a\vert\geq\delta,\;\vert u(x)-g_+a\vert>\delta,\;\text{ for }\;x\in g_-D,\;d(x,\partial g_-D)\geq d_0.
\end{split}
\end{equation}
From this and assumptions ${\bf H}^\prime_3$ and ${\bf H}^\prime_4$ it follows that we can apply Theorem \ref{main} with $\Omega=\mathbb{R}^n\setminus\overline{D}$ and $a_\pm=g_\pm a$ to conclude that there exist $k, K>0$ such that
\begin{equation}\label{exp-est-t}
\vert u(s_1,s_2,\xi)-\bar{u}(s_2)\vert\leq K e^{-k d(x,\partial(\mathbb{R}^n\setminus\overline{D}))},\;x\in\mathbb{R}^n\setminus\overline{D}.
\end{equation}
In exactly the same way we establish that
\begin{equation}\label{exp-est-2}
\vert \tilde{u}(s_1,s_2)-\bar{u}(s_2)\vert\leq K e^{-k d(s,\partial(\mathbb{R}^2\setminus\overline{D_2}))},\;s\in\mathbb{R}^2\setminus\overline{D_2},
\end{equation}
where $D_2=\{s\in\mathbb{R}^2:\vert s_2\vert<\sqrt{3}s_1,\;s_1>0\}$.
From (\ref{exp-est-t}), (\ref{exp-est-2}) and equivariance it follows
\begin{equation}\label{exp-est-3}
\vert u(s,\xi)- \tilde{u}(s)\vert\leq K e^{-k\vert s\vert},\;\text{ for }\;s\in\mathbb{R}^2,\;\xi\in\mathbb{R}^{n-2}.
\end{equation}
\begin{description}
\item[Step 2]
\end{description}
Let $C_G^{0,1}(\mathbb{R}^n;\mathbb{R}^m)$ be the set of Lipschitz maps $v:\mathbb{R}^n\rightarrow\mathbb{R}^m$ which are equivariant under $G$ and satisfy
\begin{equation}\label{exp-est-4}
\begin{split}
&\vert v(s,\xi)- \tilde{u}(s)\vert\leq K e^{-k\vert s\vert},\\
&\vert\nabla_s v(s,\xi)-\nabla_s\tilde{u}(s)\vert\leq K e^{-k\vert s\vert},\\
&\vert\nabla_\xi v(s,\xi)\vert\leq K e^{-k\vert s\vert},
\end{split}\;\text{ for }\;s\in\mathbb{R}^2,\;\xi\in\mathbb{R}^{n-2},
\end{equation}
We remark that from (\ref{exp-est-3}) we have $u\in C_G^{0,1}(\mathbb{R}^n;\mathbb{R}^m)$ for the minimizer $u$ in Theorem \ref{triple}. If $O\subset\mathbb{R}^{n-2}$ is an open bounded set with a Lipschitz boundary we let $C_G^{0,1}(\mathbb{R}^2\times O;\mathbb{R}^m)$ be the set of equivariant maps that satisfy (\ref{exp-est-4}) for $\xi\in O$. We denote by $C_{0,G}^{0,1}(\mathbb{R}^2\times O;\mathbb{R}^m)$ the subset of $C_G^{0,1}(\mathbb{R}^2\times O;\mathbb{R}^m)$ of the maps that vanish on the boundary of $\mathbb{R}^2\times O$. The spaces $W_G^{1,2}(\mathbb{R}^2\times O;\mathbb{R}^m)$ and $W_{0,G}^{1,2}(\mathbb{R}^2\times O;\mathbb{R}^m)$ are defined in the obvious way. The exponential estimates in the definition of these function spaces and the same argument in the proof of Lemma \ref{l-infty} imply
\begin{lemma}\label{l-infty-t}
Let $u:\mathbb{R}^n\rightarrow\mathbb{R}^m$ be the $G$-equivariant minimizer in Theorem \ref{triple}. Given an open bounded Lipschitz set $O\subset\mathbb{R}^{n-2}$ we have
\begin{equation}\label{min-infty-t}
J_{\mathbb{R}^2\times O}(u)=\min_{v\in u+W_{0,G}^{1,2}(\mathbb{R}^2\times O;\mathbb{R}^m)}J_{\mathbb{R}^2\times O}(v),
\end{equation}
\end{lemma}
\begin{description}
\item[Step 3]
\end{description}
In analogy with the definition of ${\bf e}(v)$ in Lemma \ref{strict-minimizer}, for $v\in W_G^{1,2}(\mathbb{R}^n;\mathbb{R}^m)$, we define the {\it effective} potential ${\bf E}(v)$ for the case at hand. We set
\begin{equation}\label{energy-t}
{\bf E}(v)=\frac{1}{2}(\langle\nabla_s\tilde{u}+\nabla_s v,\nabla_s\tilde{u}+\nabla_s v\rangle-\langle\nabla_s\tilde{u},\nabla_s\tilde{u}\rangle)+\int_{\mathbb{R}^2}(W(\tilde{u}+v)-W(\tilde{u}))ds,\;\xi\in\mathbb{R}^{n-2}.
\end{equation}
With this definition we can represent the energy $J_{\mathbb{R}^2\times O}(v)$ of a generic map $v\in W_G^{1,2}(\mathbb{R}^2\times O;\mathbb{R}^m)$ in the {\it polar} form
\begin{equation}\label{polar-form-t}
J_{\mathbb{R}^2\times O}(v)=\int_O\frac{1}{2}\big((\vert\nabla_\xi q^v\vert^2+(q^v)^2\sum_j\langle\nu_{\xi_j}^v,\nu_{\xi_j}^v\rangle)+
{\bf E}(q^v\nu^v)\big)d\xi,
\end{equation}
where $\langle,\rangle$ denotes the standard inner product in $L^2(\mathbb{R}^2;\mathbb{R}^m)$ and $q^v$ and $\nu^v$ are defined by
\begin{equation}\label{qn-nuv-t}
\begin{split}
& q^v(\xi)=\| v(\cdot,\xi)-\tilde{u}\|_{L^2(\mathbb{R}^2;\mathbb{R}^m)},\;\text{ for }\;\xi\in O\\
&\nu^v(s,\xi)=\frac{v(s,\xi)-\tilde{u}(s)}{q^v(\xi)},\;\text{ if }\;q^v(\xi)>0.
\end{split}
\end{equation}
From assumptions ${\bf H}^\prime_5$ and ${\bf H}^\prime_6$, arguing exactly as in the proof of Lemma \ref{strict-minimizer} we prove
\begin{lemma}\label{strict-minimizer-t}
Assume ${\bf H}^\prime_5$ and ${\bf H}^\prime_6$ hold.
Then there exist $q^\circ >0 \text{ and } c>0$ such that
\begin{eqnarray}\label{iota-properties-t}
\left\{\begin{array}{l}
D_{qq}{\bf E}(q\nu)\geq c^2,\quad \text{ for } q\in[0,q^\circ]\cap[0,q_\nu],\;\nu\in\mathbb{S},\\\\
{\bf E}(q\nu)\geq{\bf E}(q^\circ\nu),\;\, \text{ for } q^\circ\leq q\leq q_\nu,\;\nu\in\mathbb{S},\\\\
{\bf E}(q\nu)\geq \tilde{{\bf E}}(p,q,\nu):={\bf E}(p\nu)+D_q{\bf E}(p\nu)(q-p)
,\\\quad\hskip4cm \text{ for } 0\leq p<q\leq q_\nu\leq q^\circ,\;\nu\in\mathbb{S},\\\\
D_p\tilde{{\bf E}}(p,q,\nu)\geq 0 ,\quad \text{ for } 0\leq p<q\leq q_\nu\leq q^\circ,\;\nu\in\mathbb{S}.
\end{array}\right.
\end{eqnarray}
\end{lemma}
\begin{description}
\item[Step 4]
\end{description}
Based on this lemma and on the polar representation of the energy (\ref{polar-form-t}) we can follow step by step the arguments in Sec. 2 to establish the analogue of Proposition \ref{l-2-bound}. Actually the argument simplifies since by Lemma \ref{l-infty-t} we can work directly in $\mathbb{R}^2\times O$ rather than in bounded cylinders as in Sec. 2. For example the analogue of Lemma \ref{lemma-1} is not needed. In conclusion, by arguing as in Sec. 2, we prove that, given $q\in(0,q^\circ]$, there is $r(q)>0$ such that
\begin{equation}\label{t-t}
\mathbb{R}^2\times B_{r(q)}(\xi)\subset\mathbb{R}^n\;\;\Rightarrow\;\;q^u(\xi)=\|u(\cdot,\xi)-\tilde{u}\|_{L^2(\mathbb{R}^2;\mathbb{R}^m)}<q,
\end{equation}
where $B_{r(q)}(\xi)\subset\mathbb{R}^{n-2}$ is the ball of center $\xi$ and radius $r(q)$. Since the condition on the l.h.s. of (\ref{t-t}) is trivially satisfied for all $\xi\in\mathbb{R}^{n-2}$ and for all $q\in(0,q^\circ]$ we have
\[u(s,\xi)=\tilde{u}(s),\;\text{ for }\;s\in\mathbb{R}^2,\;\xi\in\mathbb{R}^{n-2}\]
which concludes the proof.
\vskip.2cm
Department of Mathematics, University of Athens, Panepistemiopolis, 15784 Athens, Greece; e-mail: {\texttt{[email protected]}}
\vskip.2cm
\noindent Universit\`a degli Studi dell'Aquila, Via Vetoio, 67010 Coppito, L'Aquila, Italy; e-mail:{\texttt{[email protected]}}
\end{document} |
\begin{document}
\title[Unitary groups and ramified extensions]{Unitary groups and ramified extensions}
\author{J. Cruickshank}
\address{School of Mathematics, Statistics and Applied Mathematics, National University of Ireland, Galway, Ireland}
\email{[email protected]}
\author{F. Szechtman}
\address{Department of Mathematics and Statistics, University of Regina, Canada}
\email{[email protected]}
\thanks{The second author was supported in part by an NSERC discovery grant}
\subjclass[2010]{15A21, 15A63, 11E39, 11E57, 20G25}
\keywords{unitary group; skew-hermitian form; local ring}
\begin{abstract} We classify all non-degenerate skew-hermitian forms defined over certain local rings, not necessarily commutative,
and study some of the fundamental properties of the associated unitary groups, including their orders when the ring in question is finite.
\end{abstract}
\maketitle
\section{Introduction}
More than half a century ago \cite{D} offered a systematic study
of unitary groups, as well as other classical groups, over fields
and division rings. Thirty five years later, \cite{HO} expanded
this study to fairly general classes of rings. In particular, the
normal structure, congruence subgroup, and generation problems for
unitary (as well as other classical) groups are addressed in
\cite{HO} in great generality. In contrast, the problem of
determining the order of a unitary group appears in \cite{HO} only
in the classical case of finite fields, as found in \cite[\S
6.2]{HO}.
General formulae for the orders of unitary groups defined over a
finite ring where 2 is a unit were given later in \cite{FH}. The proofs in \cite{FH} are
fairly incomplete and, in fact, the formulae in \cite[Theorem 3]{FH} are incorrect when the involutions induced on the given
residue fields are not the identity (even the order of the classical unitary groups defined over finite fields is wrong in this case).
The argument in \cite[Theorem 3]{FH} is primarily based on a reduction homomorphism, stated without
proof to be surjective.
Recently, the correct orders of unitary groups defined over a finite local
ring where 2 is invertible were given in \cite{CHQS}, including
complete proofs. It should be noted that the forms underlying
these groups were taken to be hermitian, which ensured the
existence of an orthogonal basis. Moreover, the unitary groups
from \cite{CHQS} were all extensions of orthogonal or unitary
groups defined over finite fields.
In the present paper we study the unitary group $U_n(A)$
associated to a non-degenerate skew-hermitian form $h:V\times V\to
A$ defined on a free right $A$-module $V$ of finite rank $n$,
where $A$ is a local ring, not necessarily commutative, endowed
with an involution $*$ that satisfies $a-a^*{\mathfrak i}n{{\mathfrak m}athfrak r}$ for all $a{\mathfrak i}n
A$, and ${{\mathfrak m}athfrak r}$ is the Jacobson radical of $A$. It is also assumed
that ${{\mathfrak m}athfrak r}$ is nilpotent and $2{\mathfrak i}n U(A)$, the unit group of $A$.
These conditions occur often, most commonly when dealing with
ramified quadratic extensions of quotients of local principal
ideal domains with residue field of characteristic not 2 (see
Example {{\mathfrak m}athfrak r}ef{tresdos} for more details). A distinguishing feature
of this case, as opposed to that of \cite{CHQS}, is that $h(v,v)$
is a non-unit for every $v{\mathfrak i}n V$. In particular, $V$ lacks an
orthogonal basis. As the existence of an orthogonal basis is the
building block of the theory developed in \cite{CHQS}, virtually
all arguments from \cite{CHQS} become invalid under the present
circumstances. Moreover, it turns out that now $n=2m$ must be even
and, when $A$ is finite, $U_{2m}(A)$ is an extension of the
{\varepsilon}mph{symplectic} group ${{\mathfrak m}athrm {Sp}}_{2m}(q)$ defined over the residue
field $F_q=A/{{\mathfrak m}athfrak r}$. In view of the essential differences between the
present case and that of \cite{CHQS}, we hereby develop, from the
beginning, the tools required to compute $|U_{2m}(A)|$ when $A$ is
finite and the above hypotheses apply. In particular, a detailed
and simple proof that the reduction homomorphism is surjective is
given.
The paper is essentially self-contained and its contents are as
follows. It is shown in \S{{\mathfrak m}athfrak r}ef{s1} that $n=2m$ must be even, and
that $V$ admits a basis relative to which the Gram matrix of $h$
is equal to
$$
J={\lambda}eft(
{{\mathfrak b}eta}gin{array}{cc}
0 & 1 \\
-1 & 0 \\
{\varepsilon}nd{array}
{{\mathfrak m}athfrak r}ight),
$$
where all blocks have size $m\times m$. Thus, $U_{2m}(A)$ consists of all $X{\mathfrak i}n {{\mathfrak m}athrm{ GL}}_{2m}(A)$ such that
$$
X^*JX=J,
$$
where $(X^*)_{ij}=(X_{ji})^*$. We prove in \S{{\mathfrak m}athfrak r}ef{s2} that
$U_{2m}(A)$ acts transitively on basis vectors of $V$ having the
same length. An important tool is found in \S{{\mathfrak m}athfrak r}ef{s3}, namely the
fact that the canonical reduction map $U_{2m}(A)\to U_{2m}(A/{\mathfrak i})$
is surjective, where ${\mathfrak i}$ is a $*$-invariant ideal of $A$ (the
proof of the corresponding result from \cite{CHQS} makes extensive
use of the fact that an orthogonal basis exists). The surjectivity
of the reduction map allows us to compute, in \S{{\mathfrak m}athfrak r}ef{s4}, the
order of $U_{2m}(A)$ when $A$ is finite, by means of a series of
reductions (a like method was used in \cite{CHQS}). We find that
{{\mathfrak b}eta}gin{equation}{\lambda}abel{or1}
|U_{2m}(A)|=|{{\mathfrak m}athfrak r}|^{2m^2-m}|{\mathfrak m}|^{2m} |{{\mathfrak m}athrm {Sp}}_{2m}(q)|,
{\varepsilon}nd{equation}
where ${\mathfrak m}=R\cap{{\mathfrak m}athfrak r}$ and $R$ is the additive group of all $a{\mathfrak i}n A$
such that $a^*=a$. We also obtain in~\S{{\mathfrak m}athfrak r}ef{s4} the order of the
kernel, say $U_{2m}({\mathfrak i})$, of the reduction map $U_{2m}(A)\to
U_{2m}(A/{\mathfrak i})$. Here $U_{2m}({\mathfrak i})$ consists of all $1+X{\mathfrak i}n
U_{2m}(A)$ such that $X{\mathfrak i}n M_{2m}({\mathfrak i})$. When ${\mathfrak i}{\mathfrak n}eq A$, we obtain
{{\mathfrak b}eta}gin{equation}{\lambda}abel{or3}
U_{2m}({\mathfrak i})=|{\mathfrak i}|^{2m^2-m}|{\mathfrak i}\cap{\mathfrak m}|^{2m}.
{\varepsilon}nd{equation}
A totally independent way of computing $|U_{2m}(A)|$ is offered in
\S{{\mathfrak m}athfrak r}ef{s5}, where we show that
{{\mathfrak b}eta}gin{equation}{\lambda}abel{or2}
|U_{2m}(A)|=\frac{|{{\mathfrak m}athfrak r}|^{m(m+1)}|A|^{m^2} (q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^2-1)}{|S|^{2m}}.
{\varepsilon}nd{equation}
Here $S$ stands for the additive group of all $a{\mathfrak i}n A$ such that
$a^*=-a$. It should be noted that ({{\mathfrak m}athfrak r}ef{or1})-({{\mathfrak m}athfrak r}ef{or2}) are
valid even when $A$ is neither commutative nor principal. We also
prove in \S{{\mathfrak m}athfrak r}ef{s5} that the number of basis vectors of $V$ of any
given length is independent of this length and equal~to
$$
(|A|^{2m}-|{{\mathfrak m}athfrak r}|^{2m})/|S|.
$$
In this regard, in \S{{\mathfrak m}athfrak r}ef{s6} we demonstrate that the order of the
stabilizer, say $S_v$, in $U_{2m}(A)$ of a basis vector $v$ is
independent of $v$ and its length, obtaining
$$
|S_v|=|U_{2(m-1)}(A)|\times |A|^{2m-1}/|S|.
$$
We end the paper in \S{{\mathfrak m}athfrak r}ef{s7}, where a refined version of ({{\mathfrak m}athfrak r}ef{or1}) and ({{\mathfrak m}athfrak r}ef{or2}) is given when $A$ is commutative and principal.
Virtually all the above material will find application in a forthcoming paper on the Weil representation of $U_{2m}(A)$.
{{\mathfrak m}athfrak s}ection{Non-degenerate skew-hermitian forms}{\lambda}abel{s1}
Let $A$ be a ring with $1{\mathfrak n}eq 0$. The Jacobson radical of $A$ will be denoted by ${{\mathfrak m}athfrak r}$ and the unit group of $A$ by $U(A)$.
We assume that $A$ is endowed with an involution $*$, which we interpret to mean an antiautomorphism of order ${\lambda}eq 2$. Note that if $*=1_A$
then $A$ is commutative. Observe also that ${{\mathfrak m}athfrak r}$ as well as all of its powers are $*$-invariant ideals of $A$.
We fix a right $A$-module $V$ and we view its dual $V^*$ as a right $A$-module via
$$
({{\mathfrak a}lpha}pha a)(v)=a^* {{\mathfrak a}lpha}pha(v),\quad v{\mathfrak i}n V,a{\mathfrak i}n A,{{\mathfrak a}lpha}pha{\mathfrak i}n V^*.
$$
We also fix a skew-hermitian form on $V$, that is, a function $h:V\times V\to A$ that is linear in the second variable
and satisfies
$$
h(v,u)=-h(u,v)^*,\quad u,v{\mathfrak i}n V.
$$
In particular, we have
$$
h(u+v,w)=h(u,w)+h(v,w)\text{ and }h(ua,v)=a^*h(u,v),\quad u,v,w{\mathfrak i}n V,a{\mathfrak i}n A.
$$
Associated to $h$ we have an $A$-linear map $T_h:V\to V^*$, given by
$$
T_h(v)=h(v,-),\quad v{\mathfrak i}n V.
$$
We assume that $h$ is non-degenerate, in the sense that $T_h$ is an isomorphism.
{{\mathfrak b}eta}gin{lemma}{\lambda}abel{deco} Suppose $V=U\perp W$, where $U,W$ are submodules of $V$. Then the restriction $h_U$ of $h$ to $U\times U$ is non-degenerate.
{\varepsilon}nd{lemma}
{{\mathfrak b}eta}gin{proof} Suppose $T_{h_U}(u)=0$ for some $u{\mathfrak i}n U$. Since $h(u,W)=0$ and $V=U+W$, it follows that $h(u,V)=0$, so $u=0$ by the non-degeneracy of $h$. This proves that $T_{h_U}$ is injective.
Suppose next that ${{\mathfrak a}lpha}{\mathfrak i}n U^*$. Extend ${{\mathfrak a}lpha}$ to ${{\mathfrak b}eta}{\mathfrak i}n V^*$ via ${{\mathfrak b}eta}(u+w)={{\mathfrak a}lpha}(u)$. Since $h$ is non-degenerate, there is $v{\mathfrak i}n V$ such that
${{\mathfrak b}eta}=T_h(v)$. Now $v=u+w$ for some $u{\mathfrak i}n U$ and $w{\mathfrak i}n W$. We claim that ${{\mathfrak a}lpha}=T_{h_U}(u)$. Indeed, given any $z{\mathfrak i}n U$, we have
$$
[T_{h_U}(u)](z)=h_U(u,z)=h(u,z)=h(u+w,z)=h(v,z)={{\mathfrak b}eta}(z)={{\mathfrak a}lpha}(z).
$$
{\varepsilon}nd{proof}
The Gram matrix $M{\mathfrak i}n M_k(A)$ of a list of vectors $v_1,\dots,v_k{\mathfrak i}n V$
is defined by $M_{ij}=(v_i,v_j)$.
{{\mathfrak b}eta}gin{lemma}{\lambda}abel{gram} Suppose the Gram matrix of $u_1,\dots,u_k{\mathfrak i}n V$, say $M$, is invertible.
Then $u_1,\dots,u_k$ are linearly independent.
{\varepsilon}nd{lemma}
{{\mathfrak b}eta}gin{proof} Suppose $u_1a_1+\cdots+u_ka_k=0$. Then $h(u_i,u_1)a_1+\cdots+h(u_i,u_k)a_k=0$ for every $1{\lambda}eq i{\lambda}eq k$.
Since $M$ is invertible, we deduce that $a_1=\cdots=a_k=0$.
{\varepsilon}nd{proof}
We make the following assumptions on $A$ for the remainder of the paper:
{\mathfrak m}edskip
(A1) $A$ is a local ring (this means that $A/{{\mathfrak m}athfrak r}$ is a division ring or, alternatively, that every element of $A$ is either in $U(A)$ or in ${{\mathfrak m}athfrak r}$).
(A2) $2{\mathfrak i}n U(A)$.
(A3) If $a{\mathfrak i}n A$ and $a^*=-a$ then $a{\mathfrak i}n{{\mathfrak m}athfrak r}$; in particular, $b-b^*{\mathfrak i}n{{\mathfrak m}athfrak r}$ for all $b{\mathfrak i}n A$.
(A4) ${{\mathfrak m}athfrak r}$ is nilpotent; the nilpotency degree of ${{\mathfrak m}athfrak r}$ will be denoted by $e$.
\begin{exa}\label{tresdos}{\rm Let $B$ be a local commutative ring with nilpotent Jacobson radical ${\mathfrak b}$. Suppose $2\in U(B)$ and let $\sigma$
be an automorphism of $B$ of order $\leq 2$. Consider
the twisted polynomial ring $C=B[t;\sigma]$. Then $C$ has a unique involution $*$ that sends $t$ to $-t$ and fixes every $b\in B$. Thus
$$
(b_0+b_1t+b_2 t^2+b_3 t^3+\cdots)^*=b_0-b_1^{\sigma} t+b_2 t^2-b_3^{\sigma} t^3+\cdots\quad\text{(finite sum)}.
$$
Given any $b\in{\mathfrak b}$, the ideal $(t^2-b)$ of $C$ is $*$-invariant, so the quotient ring $A=C/(t^2-b)$ inherits an involution from $C$ and satisfies all our requirements. Note that if $\sigma\neq 1_B$ then $A$ is not commutative. Two noteworthy special cases are the following:

$\bullet$ Let $D$ be a local principal ideal domain with finite residue field of odd characteristic and let $B$ be a quotient of $D$ by a positive power of its maximal ideal; take $\sigma=1_B$ and let $b$ be a generator of ${\mathfrak b}$. Then $A$ is a finite, commutative, principal ideal, local ring.

$\bullet$ Let $B$ be a finite, commutative, principal ideal, local ring with Jacobson radical ${\mathfrak b}$. Suppose $2\in U(B)$ and let $\sigma\neq 1$
be an automorphism of $B$ of order 2 (as an example, take $A$ and its involution,
as in the previous case). Take $b$ to be a generator of ${\mathfrak b}$ and let $a=t+(t^2-b)\in A$. Then $Aa=aA$ is the Jacobson radical of $A$. Moreover, every left
(resp. right) ideal of $A$ is
a power of $Aa$ (and hence an ideal). Furthermore, note that $A/Aa\cong B/Bb$.}
\end{exa}
We will also make the following assumption on $V$ for the remainder of the paper:
\medskip

(A5) $V$ is a free $A$-module of finite rank $n>0$ (reducing $V$ modulo ${\mathfrak r}$, we see that the rank of $V$ is well-defined).

In what follows we write $(u,v)$ instead of $h(u,v)$.
\begin{lemma}\label{basis} Any linearly independent list of vectors from $V$
is part of a basis; if the list has $n$ vectors, it is already a basis, and no list has more than $n$ vectors.
\end{lemma}

\begin{proof} Suppose $u_1,\dots,u_k\in V$ are linearly independent and
$v_1,\dots,v_n$ is a basis of $V$. By (A4) there is some $r\neq 0$
in ${\mathfrak r}^{e-1}$ such that ${\mathfrak r}r=0$. Write
$u_1=v_1a_1+\cdots+v_na_n$, where $a_i\in A$. If all $a_i\in{\mathfrak r}$
then $u_1r=0$, contradicting linear independence. By (A1), we may assume
without loss of generality that $a_1\in U(A)$, whence
$u_1,v_2,\dots,v_n$ is a basis of $V$. Next write
$u_2=u_1b_1+v_2b_2+\cdots+v_nb_n$, where $b_i\in A$. If $b_i\in{\mathfrak r}$
for all $i>1$ then $u_1(-b_1r)+u_2r=0$, contradicting linear
independence. This process can be continued and the result
follows.
\end{proof}
\begin{cor}\label{bas} If $v_1,\dots,v_n$ is a basis of $V$, ${\mathfrak i}$ is a proper ideal of $A$, and $v_i\equiv u_i\mod V{\mathfrak i}$ for all $1\leq i\leq n$, then $u_1,\dots,u_n$ is also a basis of $V$.
\end{cor}

\begin{proof} Let $M$ (resp. $N$) be the Gram matrix of $v_1,\dots,v_n$ (resp. $u_1,\dots,u_n$). Then, by assumption, $N=M+P$ for some
$P\in M_n({\mathfrak i})$, so $P\in M_n({\mathfrak r})$ by (A1). It is well-known (see
\cite[Theorem 1.2.6]{H}) that $M_n({\mathfrak r})$ is the Jacobson radical of $M_n(A)$. On the other hand, by assumption and Lemma \ref{gram}, $M\in\mathrm{GL}_n(A)$. It follows that $N\in\mathrm{GL}_n(A)$ as well.
\end{proof}
\begin{lemma}\label{basin} Let $v_1,\dots,v_n$ be a basis of $V$. Then, given any $1\leq i\leq n$, there exists $1\leq j\leq n$
such that $j\neq i$ and $(v_i,v_j)\in U(A)$.
\end{lemma}

\begin{proof} Suppose, if possible, that $(v_i,v_j)\notin U(A)$ for all $j\neq i$. Then by (A1), $(v_i,v_j)\in{\mathfrak r}$ for all $j\neq i$.
Since $(v_i,v_i)\in{\mathfrak r}$ by (A3), it follows that
$(v_i,V)\subseteq{\mathfrak r}$. As $v_1,\dots,v_n$ span $V$ and $T_h\colon V\to V^*$ is surjective, every linear functional $V\to A$
has values in ${\mathfrak r}$, a contradiction.
\end{proof}

\begin{cor}\label{basincor} If $v\in V$ is a basis vector, there is some $w\in V$ such that $(v,w)=1$.\qed
\end{cor>
\begin{lemma}\label{uno} Let ${\mathfrak i}$ be a proper ideal of $A$. Suppose $v\in V$ is a basis vector such that $(v,v)\in{\mathfrak i}$.
Then there is a basis vector $z\in V$ such that $v\equiv z\mod V{\mathfrak i}$ and $(z,z)=0$.
\end{lemma}

\begin{proof} It follows from (A1) and (A4) that ${\mathfrak i}$ is nilpotent, and we argue by induction on the nilpotency degree, say $f$, of ${\mathfrak i}$. If $f=1$ then ${\mathfrak i}=0$ and we take $z=v$. Suppose $f>1$
and the result is true for ideals of nilpotency degree less than $f$.
By Corollary \ref{basincor}, there is $w\in V$ such that $(v,w)=1$. Observing that the Gram matrix of $v,w$ is invertible,
Lemmas \ref{gram} and \ref{basis} imply that $v,w$ belong to a common basis of $V$.
Then, for any $b\in A$, $v+wb$ is also a basis vector and, moreover,
$$
(v+wb,v+wb)=(v,v)+b^*(w,w)b+b-b^*.
$$
Thanks to (A2), we may take $b=-(v,v)/2\in{\mathfrak i}$. Then $b^*=-b$, so $b-b^*=-(v,v)$ and
$$
(v+wb,v+wb)=b^*(w,w)b=-b(w,w)b\in{\mathfrak i}^2.
$$
As the nilpotency degree of ${\mathfrak i}^2$ is less than $f$, by the induction hypothesis
there is a basis vector $z\in V$ such that $v+wb\equiv z\mod V{\mathfrak i}^2$ and $(z,z)=0$. Since $v\equiv v+wb\equiv z\mod V{\mathfrak i}$, the result follows.
\end{proof}
\begin{lemma}\label{tres} Let ${\mathfrak i}$ be a proper ideal of $A$. Suppose $u,v\in V$ satisfy
$(u,v)\equiv 1\mod{\mathfrak i}$. Then there is $z\in V$ such that $z\equiv v\mod V{\mathfrak i}$ and $(u,z)=1$.
\end{lemma}

\begin{proof}
Since $(u,v)\equiv 1\mod{\mathfrak i}$, $(u,v)$ must be a unit. Moreover,
$(u,v)^{-1}\equiv 1\mod{\mathfrak i}$, so we can take $z=v(u,v)^{-1}$.
\end{proof}

\begin{lemma}\label{dos} Let ${\mathfrak i}$ be an ideal of $A$. Suppose $u,v\in V$ satisfy $(u,u)=0$, $(u,v)=1$ and $(v,v)\in{\mathfrak i}$.
Then there is $z\in V$ such that $z\equiv v\mod V{\mathfrak i}$, $(u,z)=1$ and $(z,z)=0$.
\end{lemma}

\begin{proof} Set $b=(v,v)/2\in{\mathfrak i}$ and $z=ub+v$. Then $b^*=-b$, so that $b^*-b=-(v,v)$ and
$$
(z,z)=(ub+v,ub+v)=b^*-b+(v,v)=0,\quad (u,z)=(u,u)b+(u,v)=1.
$$
\end{proof}
A symplectic basis of $V$ is a basis $u_1,\dots,u_m,v_1,\dots,v_m$ such that $(u_i,u_j)=0=(v_i,v_j)$ and
$(u_i,v_j)=\delta_{ij}$. A pair of vectors $u,v$ of $V$ is symplectic if $(u,v)=1$ and $(u,u)=0=(v,v)$.

\begin{lemma}\label{exte}
Suppose the Gram matrix, say $M$, of $v_1,\dots,v_s$ is invertible. If $s<n$ then
there is a basis $v_1,\dots,v_s,w_1,\dots,w_t$ of $V$ such that $(v_i,w_j)=0$ for all $i$ and $j$.
\end{lemma}

\begin{proof} By Lemmas \ref{gram} and \ref{basis}, there is a basis $v_1,\dots,v_s,u_1,\dots,u_t$ of $V$. Given $1\leq i\leq t$, we wish to find $a_1,\dots,a_s$
so that $w_i=u_i-(v_1a_1+\cdots+v_s a_s)$ is orthogonal to all $v_j$. This means
$$
(v_j,v_1)a_1+\cdots+(v_j,v_s)a_s=(v_j,u_i),\quad 1\leq j\leq s.
$$
This linear system can be solved by means of $M^{-1}$. Since $v_1,\dots,v_s,w_1,\dots,w_t$ is a basis of $V$,
the result follows.
\end{proof}
\begin{prop}\label{sp} $V$ has a symplectic basis; in particular $n=2m$ is even.
\end{prop}

\begin{proof} Let $w_1,\dots,w_n$ be a basis of $V$, whose existence is guaranteed by (A5). We argue by induction on $n$.
By (A3), $(w_1,w_1)\in{\mathfrak r}$. We infer from Lemma \ref{uno} the existence of a basis vector $u\in V$ such that $(u,u)=0$.
By Corollary \ref{basincor}, there is $w\in V$ such that $(u,w)=1$. By Lemma \ref{dos}, there
is $v\in V$ such that $(v,v)=0$ and $(u,v)=1$.
It follows from Lemmas \ref{gram} and \ref{basis} that $u,v$ is part of a basis of $V$.
If $n=2$ we are done. Suppose $n>2$ and the result is true for smaller ranks. By Lemma \ref{exte} there is a basis $u,v,w_1,\dots,w_{n-2}$ of $V$
such that every $w_i$ is orthogonal to both $u$ and $v$. Let $U$ be the span of $u,v$ and let $W$ be the span of $w_1,\dots,w_{n-2}$.
By Lemma \ref{deco}, the restriction of $h$ to $W$ is non-degenerate. By the inductive assumption, $n-2$ is even, say $n-2=2(m-1)$, and $W$ has a symplectic basis, say $u_2,\dots,u_m,v_2,\dots,v_m$. It follows that $u,u_2,\dots,u_m,v,v_2,\dots,v_m$ is a symplectic basis of $V$.
\end{proof}

\begin{cor} All non-degenerate skew-hermitian forms on $V$ are equivalent.\qed
\end{cor}
\section{The unitary group acts transitively on basis vectors of the same length}\label{s2}

By definition, the unitary group associated to $(V,h)$ is the subgroup, say $U(V,h)$, of $\mathrm{GL}(V)$ that preserves~$h$. Thus, $U(V,h)$ consists of all $A$-linear automorphisms
$g\colon V\to V$ such that $h(gu,gv)=h(u,v)$ for all $u,v\in V$.

\begin{theorem}\label{actra} Let $u,v\in V$ be basis vectors satisfying $(u,u)=(v,v)$. Then there exists $g\in U(V,h)$ such that $gu=v$.
\end{theorem}

\begin{proof} As $u$ is a basis vector, there is a vector $u'\in V$ such that $(u,u')=1$.
Let $W$ be the span of $u,u'$. By Lemma \ref{exte}, $V=W\oplus W^\perp$. The restrictions of $h$ to $W$ and $W^\perp$
are non-degenerate by Lemma \ref{deco}. A like decomposition exists for $v$. Thus, by means of Proposition \ref{sp},
we may restrict to the case $n=2$.

By Proposition \ref{sp}, there is a symplectic basis $x,y$ of $V$ and we have $u=xa+yb$ for some $a,b\in A$. Since $u$
is a basis vector, one of these coefficients, say $a$, is a unit. Replacing $x$ by $xa$ and $y$ by $y(a^*)^{-1}$,
we may assume that $u=x+yb$ for some $b\in A$, where $x,y$ is still a symplectic basis of $V$. We have
$b-b^*=(u,u)$. Likewise, there is a symplectic basis $w,z$ of $V$ such that
$v=w+zc$, where $c-c^*=(v,v)=(u,u)=b-b^*$. It follows that $c=b+r$ for some $r\in A$ such that $r^*=r$.
Replace $w,z$ by $w+zr,z$. This basis of $V$ is also symplectic, since $(w+zr,w+zr)=r-r^*=0$ (because $r^*=r$). Moreover, $v=(w+zr)+z(c-r)=(w+zr)+zb$. Thus, $u$ and $v$ have
exactly the same coordinates, namely $1,b$, relative to some symplectic bases of $V$. Let $g\in U(V,h)$ map one symplectic basis into the other one.
Then $gu=v$, as required.
\end{proof}
\section{Reduction modulo a $*$-invariant ideal}\label{s3}

\begin{lemma}\label{util} Let ${\mathfrak i}$ be a proper ideal of $A$. Suppose $w_1,\dots,w_m,z_1,\dots,z_m\in V$ satisfy
$$
(w_i,z_j)\equiv\delta_{ij}\mod{\mathfrak i},\; 1\leq i,j\leq m,\quad (w_i,w_j)\equiv 0\equiv (z_i,z_j)\mod{\mathfrak i},\; 1\leq i,j\leq m.
$$
Then there exists a symplectic basis $w'_1,\dots,w'_m,z'_1,\dots,z'_m$ of $V$ such that
$$
w_i\equiv w'_i\mod V{\mathfrak i},\quad z_i\equiv z'_i\mod V{\mathfrak i},\; 1\leq i\leq m.
$$
\end{lemma}

\begin{proof} By induction on $m$. Successively applying Lemmas \ref{uno}, \ref{tres} and \ref{dos} we obtain
$w'_1,z'_1\in V$ such that
$$
w_1\equiv w'_1\mod V{\mathfrak i},\quad z_1\equiv z'_1\mod V{\mathfrak i},\quad (w'_1,w'_1)=0=(z'_1,z'_1),\quad (w'_1,z'_1)=1.
$$
If $m=1$ we are done. Suppose $m>1$ and the result is true for smaller ranks. Applying Corollary \ref{bas}, we see that
$w'_1,z'_1,w_2,\dots,w_m,z_2,\dots,z_m$ is a basis of $V$. Applying the procedure of Lemma \ref{exte} we obtain a basis
$w'_1,z'_1,w^0_2,\dots,w^0_m,z^0_2,\dots,z^0_m$ of $V$ such that $w'_1,z'_1$ are orthogonal to all other vectors in this list.
Since $(x,y)\in{\mathfrak i}$ when $x\in\{w'_1,z'_1\}$ and $y\in\{w_2,\dots,w_m,z_2,\dots,z_m\}$, the proof of Lemma \ref{exte} shows that
$$
w^0_i\equiv w_i\mod V{\mathfrak i},\quad z^0_i\equiv z_i\mod V{\mathfrak i},\quad 2\leq i\leq m.
$$
Therefore
$$
(w^0_i,z^0_j)\equiv\delta_{ij}\mod{\mathfrak i},\; 2\leq i,j\leq m,\quad (w^0_i,w^0_j)\equiv 0\equiv (z^0_i,z^0_j)\mod{\mathfrak i},\; 2\leq i,j\leq m.
$$
By Lemma \ref{deco}, the restriction of $h$ to the span, say $W$, of $w^0_2,\dots,w^0_m,z^0_2,\dots,z^0_m$ is non-degenerate.
By the induction hypothesis, there is a symplectic basis $w'_2,\dots,w'_m,z'_2,\dots,z'_m$ of $W$ such that
$$
w'_i\equiv w^0_i\mod V{\mathfrak i},\quad z'_i\equiv z^0_i\mod V{\mathfrak i},\; 2\leq i\leq m.
$$
Then $w'_1,\dots,w'_m,z'_1,\dots,z'_m$ is a basis of $V$ satisfying all our requirements.
\end{proof}
Let ${\mathfrak i}$ be a $*$-invariant ideal of $A$. Then $\overline{A}=A/{\mathfrak i}$ inherits an involution, also denoted by $*$, from $A$ by declaring
$(a+{\mathfrak i})^*=a^*+{\mathfrak i}$. This is well-defined, since ${\mathfrak i}$ is $*$-invariant. Set $\overline{V}=V/V{\mathfrak i}$ and consider the skew-hermitian
form $\overline{h}\colon\overline{V}\times\overline{V}\to\overline{A}$, given by $\overline{h}(u+V{\mathfrak i},v+V{\mathfrak i})=h(u,v)+{\mathfrak i}$. We see that
$\overline{h}$ is well-defined and non-degenerate. We then have a group homomorphism $U(V,h)\to U(\overline{V},\overline{h})$, given by $g\mapsto \overline{g}$, where $\overline{g}(u+V{\mathfrak i})=g(u)+V{\mathfrak i}$.

\begin{theorem}\label{sur} Let ${\mathfrak i}$ be a proper $*$-invariant ideal of $A$. Then the canonical group homomorphism $U(V,h)\to U(\overline{V},\overline{h})$ is surjective.
\end{theorem}

\begin{proof} Let $f\in U(\overline{V},\overline{h})$. By Proposition \ref{sp}, $V$ has a symplectic basis $u_1,\dots,u_m,v_1,\dots,v_m$.
We have $f(u_i+V{\mathfrak i})=w_i+V{\mathfrak i}$ and $f(v_i+V{\mathfrak i})=z_i+V{\mathfrak i}$ for some $w_i,z_i\in V$ and $1\leq i\leq m$.
Since $f$ preserves $\overline{h}$, we must have
$$
(w_i,z_j)\equiv\delta_{ij}\mod{\mathfrak i},\; 1\leq i,j\leq m,\quad (w_i,w_j)\equiv 0\equiv (z_i,z_j)\mod{\mathfrak i},\; 1\leq i,j\leq m.
$$
By Lemma \ref{util} there is a symplectic basis $w'_1,\dots,w'_m,z'_1,\dots,z'_m$ of $V$ such that
$$
w_i\equiv w'_i\mod V{\mathfrak i},\quad z_i\equiv z'_i\mod V{\mathfrak i},\; 1\leq i\leq m.
$$
Let $g\in U(V,h)$ map $u_1,\dots,u_m,v_1,\dots,v_m$ into $w'_1,\dots,w'_m,z'_1,\dots,z'_m$. Then $\overline{g}=f$.
\end{proof}
\section{Computing the order of $U_{2m}(A)$ by successive reductions}\label{s4}

We refer to an element $a$ of $A$ as hermitian (resp. skew-hermitian) if $a=a^*$ (resp. $a=-a^*$).
Let $R$ (resp. $S$) be the subgroup of the additive group of $A$
consisting of all hermitian (resp. skew-hermitian) elements. We know by (A3) that
\begin{equation}
\label{sr}
S\subseteq{\mathfrak r}.
\end{equation}
Moreover, it follows from (A2) that
\begin{equation}
\label{ars}
A=R\oplus S.
\end{equation}
Letting $${\mathfrak m}=R\cap{\mathfrak r},$$
we have a group imbedding $R/{\mathfrak m}\hookrightarrow A/{\mathfrak r}$. In fact, we deduce from (\ref{sr}) and (\ref{ars}) that
\begin{equation}
\label{ars2}
A/{\mathfrak r}\cong R/{\mathfrak m}.
\end{equation}
\begin{lem}
\label{cuad}
Suppose ${\mathfrak i}$ is a $*$-invariant ideal of $A$ satisfying ${\mathfrak i}^2=0$. Let $\{v_1,\dots,v_n\}$ be a basis of $V$
and let $J$ be the Gram matrix of $v_1,\dots,v_n$. Then, relative to $\{v_1,\dots,v_n\}$, the
kernel of the canonical epimorphism $U(V,h)\to U(\overline{V},\overline{h})$ consists
of all matrices $1+M$ such that $M\in M_n({\mathfrak i})$ and
\begin{equation}
\label{anr}
M^*J+JM=0.
\end{equation}
\end{lem}

\begin{proof} By definition, the kernel of $U(V,h)\to U(\overline{V},\overline{h})$ consists of all matrices of the form $1+M$,
where $M\in M_n({\mathfrak i})$ and
$$
(1+M)^*J(1+M)=J.
$$
Expanding this equation and using ${\mathfrak i}^2=0$ yields (\ref{anr}).
\end{proof}
Let $\{u_1,\dots,u_m,v_1,\dots,v_m\}$ be a symplectic basis of $V$. We write $U_{2m}(A)$ for the image of $U(V,h)$ under the group isomorphism $\mathrm{GL}(V)\to\mathrm{GL}_{2m}(A)$ relative to $\{u_1,\dots,u_m,v_1,\dots,v_m\}$.

We make the following assumption on $A$ for the remainder of the paper:
\medskip

(A6) $A$ is a finite ring.
\medskip

We deduce from (\ref{ars}) that
$$|A|=|R||S|.$$
On the other hand, it follows from (A1), (A2) and (A6) that $F_q=A/{\mathfrak r}$ is a finite field of odd characteristic. By (A3), $a+{\mathfrak r}=a^*+{\mathfrak r}$ for all $a\in A$, so the involution that $*$ induces
on $F_q$ is the identity. Taking ${\mathfrak i}={\mathfrak r}$ in Theorem \ref{sur}, we have $U_{2m}(\overline{A})=\mathrm{Sp}_{2m}(q)$,
the symplectic group of rank $2m$ over $F_q$. Recall \cite[Chapter 8]{T}
that
$$
|\mathrm{Sp}_{2m}(q)|=(q^{2m}-1)q^{2m-1}(q^{2(m-1)}-1)q^{2m-3}\cdots (q^2-1)q=q^{m^2}(q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^{2}-1).
$$
\begin{cor}
\label{cuad2} Suppose ${\mathfrak i}$ is a $*$-invariant ideal of $A$ satisfying ${\mathfrak i}^2=0$. Then the kernel of
$U(V,h)\to U(\overline{V},\overline{h})$ has order $|{\mathfrak i}|^{2m^2-m}|{\mathfrak i}\cap{\mathfrak m}|^{2m}$.
\end{cor}

\begin{proof} Let $\{u_1,\dots,u_m,v_1,\dots,v_m\}$ be a symplectic basis of $V$. Thus, the Gram matrix, say $J\in M_{2m}(A)$, of
$u_1,\dots,u_m,v_1,\dots,v_m$ is
$$
J=\left(
\begin{array}{cc}
0 & 1 \\
-1 & 0 \\
\end{array}
\right),
$$
where all blocks are in $M_{m}(A)$. According to Lemma \ref{cuad}, the kernel of $U(V,h)\to U(\overline{V},\overline{h})$ consists of all $1+M$, where
$$
M=\left(
\begin{array}{cc}
P & Q \\
T & S \\
\end{array}
\right),
$$
and $S=-P^*$, $Q=Q^*$ and $T=T^*$, which yields the desired result.
\end{proof}
\begin{thm}
\label{zxz} Let $A$ be a finite local ring, not necessarily commutative, with Jacobson radical~${\mathfrak r}$ and residue field $F_q$ of odd characteristic. Suppose $A$ has
an involution $*$ such that $a-a^*\in{\mathfrak r}$ for all $a\in A$. Let ${\mathfrak m}$ be the group of all $a\in{\mathfrak r}$ such that
$a=a^*$. Then
$$
|U_{2m}(A)|=|{\mathfrak r}|^{2m^2-m}|{\mathfrak m}|^{2m} q^{m^2}(q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^2-1).
$$
\end{thm}

\begin{proof} Consider the rings
$$
A=A/{\mathfrak r}^{e},\ A/{\mathfrak r}^{e-1},\dots,A/{\mathfrak r}^2,\ A/{\mathfrak r}.
$$
Each of them is a factor of $A$, so is local and inherits an
involution from $*$. Each successive pair is of the form
$C=A/{\mathfrak r}^k$, $D=A/{\mathfrak r}^{k-1}$, where the kernel of the canonical
epimorphism $C\to D$ is ${\mathfrak j}={\mathfrak r}^{k-1}/{\mathfrak r}^k$, so that ${\mathfrak j}^2=0$. We
may thus apply Theorem \ref{sur} and Corollary \ref{cuad2} $e-1$
times to obtain the desired result, as follows. We have
$$
|{\mathfrak r}|=|{\mathfrak r}^{e-1}/{\mathfrak r}^e|\cdots |{\mathfrak r}/{\mathfrak r}^2|
$$
and
$$
|{\mathfrak m}|=|{\mathfrak m}\cap{\mathfrak r}^{e-1}/{\mathfrak m}\cap{\mathfrak r}^e|\cdots |{\mathfrak m}\cap{\mathfrak r}^{k-1}/{\mathfrak m}\cap{\mathfrak r}^{k}|\cdots |{\mathfrak m}\cap{\mathfrak r}/{\mathfrak m}\cap{\mathfrak r}^2|,
$$
where the group of hermitian elements in the kernel of $C\to D$
has $|{\mathfrak m}\cap{\mathfrak r}^{k-1}/{\mathfrak m}\cap{\mathfrak r}^{k}|$ elements. Indeed, these
elements are those $a+{\mathfrak r}^k$ such that $a\in{\mathfrak r}^{k-1}$ and
$a-a^*\in{\mathfrak r}^k$. But $a+a^*$ is hermitian, so
$a+a^*\in{\mathfrak m}\cap{\mathfrak r}^{k-1}$. Thus $$a=(a-a^*)/2+(a+a^*)/2\in
{\mathfrak r}^k+{\mathfrak m}\cap{\mathfrak r}^{k-1}.$$ Hence the group of hermitian elements in
the kernel of $C\to D$ is
$$
({\mathfrak m}\cap{\mathfrak r}^{k-1}+{\mathfrak r}^k)/{\mathfrak r}^k\cong {\mathfrak m}\cap{\mathfrak r}^{k-1}/({\mathfrak m}\cap{\mathfrak r}^{k-1}\cap{\mathfrak r}^k)\cong {\mathfrak m}\cap{\mathfrak r}^{k-1}/{\mathfrak m}\cap{\mathfrak r}^{k}.
$$
\end{proof}
\begin{thm} Given a $*$-invariant proper ideal ${\mathfrak i}$ of $A$, the kernel of
$U(V,h)\to U(\overline{V},\overline{h})$ has order $|{\mathfrak i}|^{2m^2-m}|{\mathfrak i}\cap{\mathfrak m}|^{2m}$.
\end{thm}

\begin{proof} By Theorems \ref{sur} and \ref{zxz}, the alluded kernel has order
$$
\frac{|U(V,h)|}{|U(\overline{V},\overline{h})|}=\frac{|{\mathfrak r}|^{2m^2-m}|{\mathfrak m}|^{2m}}{|{\mathfrak r}/{\mathfrak i}|^{2m^2-m}|({\mathfrak m}+{\mathfrak i})/{\mathfrak i}|^{2m}}=|{\mathfrak i}|^{2m^2-m}|{\mathfrak i}\cap{\mathfrak m}|^{2m}.
$$
\end{proof}
\section{Computing the order of $U_{2m}(A)$ by counting symplectic pairs}\label{s5}

The following easy observation will prove useful. Given $s\in S$ and $y\in A$, we have
\begin{equation}
\label{yx}
y-y^*=s\text{ if and only if }y\in s/2+R.
\end{equation}
By the length of a vector $v\in V$ we understand the element $(v,v)\in S$. Given $s\in S$, the number of basis vectors
of $V$ of length $s$ will be denoted by $N(m,s)$.

\begin{lemma}\label{numbers} Given $s\in S$, we have
$$
N(1,s)=(|A|-|{\mathfrak r}|)(|R|+|{\mathfrak m}|)=(|A|^2-|{\mathfrak r}|^2)/|S|,\quad s\in S.
$$
In particular, $N(1,s)$ is independent of $s$.
\end{lemma}
\begin{proof} Let $u,v$ be a symplectic basis of $V$. Given $(a,b)\in A^2$, the length of $w=ua+vb$ is
$$
(w,w)=a^*b-b^*a.
$$
Thus, we need to count the number of pairs $(a,b)\in A^2\setminus{\mathfrak r}^2$ such that
\begin{equation}
\label{lens}
a^*b-b^*a=s.
\end{equation}
For this purpose, suppose first $a\in U(A)$. Setting $y=a^*b$ and using (\ref{yx}), we see that (\ref{lens}) holds
if and only if $b\in (a^*)^{-1}(s/2+R)$. Thus, the number of solutions $(a,b)\in A^2$ to (\ref{lens}) such that $a\in U(A)$ is
$(|A|-|{\mathfrak r}|)|R|$. Suppose next that $a\notin U(A)$. Then $b\in U(A)$.
Rewriting (\ref{lens}) in the form
\begin{equation}
\label{lens2}
b^*a-a^*b=-s
\end{equation}
and setting $y=b^*a$, we see as above that (\ref{lens2}) holds if and only if $a\in (b^*)^{-1}(-s/2+R)$. Recalling that $a\in{\mathfrak r}$, we
are thus led to calculating
$$
|[(b^*)^{-1}(-s/2+R)]\cap{\mathfrak r}|=|(-s/2+R)\cap b^*{\mathfrak r}|=|(-s/2+R)\cap{\mathfrak r}|=|R\cap{\mathfrak r}|,
$$
the last two equalities holding because $b\in U(A)$ and $s\in{\mathfrak r}$. Recalling that ${\mathfrak m}=R\cap{\mathfrak r}$, it follows that
$N(1,s)=(|A|-|{\mathfrak r}|)(|R|+|{\mathfrak m}|)$. Since this is independent of $s$, we infer $N(1,s)=(|A|^2-|{\mathfrak r}|^2)/|S|$.
\end{proof}
Note that the identity $(|A|-|{\mathfrak r}|)(|R|+|{\mathfrak m}|)=(|A|^2-|{\mathfrak r}|^2)/|S|$ also follows from $|R|=|A|/|S|$ and $|{\mathfrak m}|=|{\mathfrak r}|/|S|$.

\begin{prop}\label{igual} Given $s\in S$, we have
$$
N(m,s)=(|A|^{2m}-|{\mathfrak r}|^{2m})/|S|,\quad s\in S.
$$
In particular, $N(m,s)$ is independent of $s$.
\end{prop}

\begin{proof} The first assertion follows from the second. We prove the latter by induction on $m$. The case $m=1$ is done in Lemma
\ref{numbers}. Suppose that $m>1$ and $N(m-1,s)$ is independent of $s$. Set $N=N(1,0)$ and $M=N(m-1,0)$. Decompose $V$ as $U\perp W$, where $U$ has rank 2. Thus $N(m,s)$ is the number of pairs $(u,w)\in U\times W$
such that either $w$ is an arbitrary element of $W$ and $u$ is a basis vector of $U$ of length $s-(w,w)$, or
$u$ is an arbitrary non-basis vector of $U$ and $w$ is a basis vector of $W$ of length $s-(u,u)$. These two
possibilities are mutually exclusive. It follows, using the inductive hypothesis, that
$$
N(m,s)=N|A|^{2(m-1)}+|{\mathfrak r}|^2 M,
$$
which is independent of $s$.
\end{proof}
\begin{cor}\label{nusy} The number of symplectic pairs in $V$ is
$$
\frac{(|A|^{2m}-|{\mathfrak r}|^{2m})|A|^{2m-1}}{|S|^2}.
$$
\end{cor}

\begin{proof} By Proposition \ref{igual}, the number of basis vectors of length 0 is
$(|A|^{2m}-|{\mathfrak r}|^{2m})/|S|$. Given any such vector, say $u$, Lemma \ref{dos} ensures the existence of a vector $v\in V$ of length 0
such that $(u,v)=1$. Then, a vector $w\in V$ satisfies $(u,w)=1$ if and only if
$w=ua+v+z$, where $z$ is orthogonal to $u,v$. Moreover, given any such $z$, we see that $w$ has length 0 if and only if
$$
0=(w,w)=(ua+v,ua+v)+(z,z)=a^*-a+(z,z).
$$
It follows from (\ref{yx}) that the number of solutions $a\in A$ to this equation is $|R|$.
We infer that the number of symplectic pairs is
$$
\frac{(|A|^{2m}-|{\mathfrak r}|^{2m})}{|S|}\times |A|^{2m-2}|R|=\frac{(|A|^{2m}-|{\mathfrak r}|^{2m})|A|^{2m-1}}{|S|^2}.
$$
\end{proof}
It follows from Lemma \ref{exte} and Proposition \ref{sp} that $U_{2m}(A)$ acts transitively on symplectic pairs. Moreover,
we readily see that the stabilizer of a given symplectic pair is isomorphic to $U_{2(m-1)}(A)$. We infer from Corollary
\ref{nusy} that
$$
|U_{2m}(A)|=\frac{(|A|^{2m}-|{\mathfrak r}|^{2m})|A|^{2m-1}}{|S|^2}\times \frac{(|A|^{2(m-1)}-|{\mathfrak r}|^{2(m-1)})|A|^{2m-3}}{|S|^2}\times\cdots\times
\frac{(|A|^{2}-|{\mathfrak r}|^{2})|A|}{|S|^2}.
$$
We have proven the following result.

\begin{thm}
\label{zxz2} Let $A$ be a finite local ring, not necessarily commutative, with Jacobson radical~${\mathfrak r}$ and residue field $F_q$ of odd characteristic. Suppose $A$ has
an involution $*$ such that $a-a^*\in{\mathfrak r}$ for all $a\in A$. Let $S$ be the group of all $a\in A$ such that
$a=-a^*$. Then
$$
|U_{2m}(A)|=\frac{|{\mathfrak r}|^{m(m+1)}|A|^{m^2} (q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^2-1)}{|S|^{2m}}.\qed
$$
\end{thm}

\begin{note}{\rm We readily verify, by means of (\ref{ars}) and (\ref{ars2}), the equivalence of the formulae given in Theorems \ref{zxz} and \ref{zxz2}.}
\end{note}
\section{The order of the stabilizer of a basis vector}\label{s6}

\begin{thm}\label{lasta} Let $v\in V$ be a basis vector and let $S_v$ be the stabilizer of $v$ in $U(V,h)$. Then
$$
|S_v|=|U_{2(m-1)}(A)|\times |A|^{2m-1}/|S|.
$$
In particular, the order of $S_v$ is independent of $v$ and its length.
\end{thm}

\begin{proof} By Theorem \ref{actra}, the number of basis vectors of length $(v,v)$ is equal to $|U_{2m}(A)|/|S_v|$.
It follows from Proposition \ref{igual} that
\begin{equation}\label{idw}
|U_{2m}(A)|/|S_v|=(|A|^{2m}-|{\mathfrak r}|^{2m})/|S|.
\end{equation}
On the other hand, the above discussion shows that
\begin{equation}\label{idw2}
|U_{2m}(A)|=\frac{(|A|^{2m}-|{\mathfrak r}|^{2m})|A|^{2m-1}}{|S|^2}\times |U_{2(m-1)}(A)|.
\end{equation}
Combining (\ref{idw}) and (\ref{idw2}) we obtain the desired result.
\end{proof}
\section{The case when $A$ is commutative and principal}\label{s7}

We make the following assumptions on $A$ until further notice:
\medskip

(A7) There is $a\in{\mathfrak r}$ such that $Aa=aA={\mathfrak r}$.
\medskip

(A8) The elements of $R$ commute among themselves.
\medskip

Using (A7), we see that $|A|=q^e$ and $|{\mathfrak r}|=q^{e-1}$. Moreover, from $Aa=aA$, we get $a^*A=Aa^*$. Since $a=(a-a^*)/2+(a+a^*)/2$, not both $a-a^*$ and $a+a^*$ can be in ${\mathfrak r}^2$. Thus,
${\mathfrak r}$ has a generator $x$ that is hermitian or skew-hermitian and satisfies $Ax=xA$. In any case, $x^2$ is hermitian. We claim
that
$$
A=R+Rx.
$$
Note first of all that, because of (A8), $R$ is a subring of $A$. Clearly, $R$ is a local ring with maximal ideal ${\mathfrak m}=R\cap{\mathfrak r}$
and residue field $R/{\mathfrak m}\cong A/{\mathfrak r}$. Secondly, from $A=R+S$ and $S\subseteq{\mathfrak r}=Ax$, we deduce
\begin{equation}
\label{der}
A=R+Ax.
\end{equation}
Repeatedly using (\ref{der}) as well as (A8), we obtain
$$
\begin{aligned}
A &=R+(R+Ax)x=R+Rx+Ax^2=R+Rx+(R+Ax)x^2\\
&=R+Rx+Ax^3=R+Rx+(R+Ax)x^3=R+Rx+Ax^4=\dots=R+Rx.
\end{aligned}
$$
If $*=1_A$ then $A=R$ and ${\mathfrak r}={\mathfrak m}$ has $q^{e-1}$ elements. We make the following assumptions on $A$ until further notice:
\medskip

(A9) $*\neq 1_A$.
\medskip

(A10) $R\cap Rx=(0)$.
\medskip

It follows from (A9) and $A=R+Rx$ that $x$ cannot be hermitian. Therefore $x$ is skew-hermitian. Note that $R$ is a principal ring
with maximal ideal ${\mathfrak m}=Rx^2$, since
$$
{\mathfrak m}=R\cap Ax=R\cap (R+Rx)x=R\cap (Rx+Rx^2)=Rx^2+(R\cap Rx)=Rx^2.
$$

\begin{lemma}\label{evod} The group epimorphism $f\colon R\to Rx$, given by $f(r)=rx$, is injective if $e$ is even, whereas
the kernel of $f$ is $Rx^{e-1}$ and has $q$ elements if $e$ is odd.
\end{lemma}

\begin{proof} Note that every non-zero element of $A$ is of the form $cx^i$ for some unit $c\in U(A)$ and a unique $0\leq i<e$.
It follows that the annihilator of $x$ in $A$ is equal to $Ax^{e-1}$. From $A=R+Rx$, we infer $Ax^{e-1}=Rx^{e-1}$. Thus,
the kernel of $f$ is $R\cap Rx^{e-1}$. If $e$ is even then $R\cap Rx^{e-1}\subseteq R\cap Rx=(0)$, while if $e$ is odd
$$R\cap Rx^{e-1}=Rx^{e-1}=Ax^{e-1}$$ is a 1-dimensional vector space over $F_q=A/{\mathfrak r}$.
\end{proof}
\begin{cor} We have
$$
|A|=|R|^2\text{ if }e\text{ is even and }|A|=\frac{|R|^2}{q}\text{ if }e\text{ is odd.}
$$
Thus, either $e=2\ell$ is even and
$$
|{\mathfrak r}|=q^{2\ell-1},\;|{\mathfrak m}|=q^{\ell-1}
$$
or $e=2\ell -1$ is odd and
$$
|{\mathfrak r}|=q^{2\ell-2},\; |{\mathfrak m}|=q^{\ell-1}.
$$
\end{cor}
\begin{proof} This follows from $A=R\oplus Rx$, Lemma \ref{evod} and the group isomorphism $R/{\mathfrak m}\cong F_q$.
\end{proof}
We now resume the general discussion and note that if $A$ is a commutative, principal ideal ring and $*\neq 1_A$ then conditions (A7)-(A10)
are automatically satisfied, for in this case we have $R\cap Rx\subseteq R\cap S=(0)$. It is clear that $A\cong R[t]/(t^2-x^2)$ if $e=2\ell$ is even, and $A\cong R[t]/(t^2-x^2,t^{2\ell-1})$ if $e=2\ell-1$ is odd. Using part of the above information together with Theorem \ref{zxz}, we obtain the following result.
\begin{thm}
\label{ords} Let $A$ be a finite, commutative, principal, local ring with Jacobson radical~${\mathfrak r}$ and residue field $A/{\mathfrak r}\cong F_q$ of odd characteristic. Let $e$ be the nilpotency degree of ${\mathfrak r}$. Suppose $A$ has
an involution $*$ such that $a-a^*\in{\mathfrak r}$ for all $a\in A$.
(a) If $*=1_A$ then
$$
|U_{2m}(A)|=|{\mathrm{Sp}}_{2m}(A)|=q^{(e-1)(2m^2+m)+m^2}(q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^2-1).
$$
(b) If $*\neq 1_A$ and $e=2\ell$ is even then
$$
|U_{2m}(A)|=q^{(2\ell-1)(2m^2-m)}q^{2(\ell-1)m} q^{m^2}(q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^2-1).
$$
(c) If $*\neq 1_A$ and $e=2\ell-1$ is odd then
$$
|U_{2m}(A)|=q^{(2\ell-2)(2m^2-m)}q^{2(\ell-1)m} q^{m^2}(q^{2m}-1)(q^{2(m-1)}-1)\cdots (q^2-1).\qed
$$
\end{thm}
\medskip
\begin{note}{\rm Our initial conditions on $A$ do not force $R$ to be a subring of $A$ or $R\cap Rx=(0)$. Indeed, let $A$ be as indicated in the parenthetical remark of the second case of Example \ref{tresdos}, and set $x=a$, $r=b$. Then $rx\in R\cap Rx$, so $R\cap Rx\neq (0)$, and $rxr=-r^2x$
with $(-r^2 x)^*=r^2 x$, so $R$ is not a subring of $A$. It is also clear that $A$ need not be principal, even if so is $R$, as can be seen
by taking $b=0$ and $B$ not a field in the general construction of Example \ref{tresdos} (e.g. $A=Z_{p^2}[t]/(t^2)$).}
\end{note}
\noindent{\bf Acknowledgement.} We are very grateful to the referee for a thorough reading of the paper and valuable suggestions.
\begin{thebibliography}{RBMW}
\bibitem[CHQS]{CHQS} J. Cruickshank, A. Herman, R. Quinlan, F. Szechtman, \emph{Unitary groups over local rings}, J. Algebra Appl. 13 (2014) 1350093.
\bibitem[D]{D} J. Dieudonn\'e, \emph{La G\'eom\'etrie des Groupes
Classiques}, Springer-Verlag, Berlin, 1955.
\bibitem[FH]{FH} H. Feng, \emph{Orders of classical groups over finite rings}, J. Math.
Res. Exposition 18 (1998) 507--512.
\bibitem[H]{H} I.N. Herstein, \emph{Noncommutative rings}, The Mathematical
Association of America, 1968.
\bibitem[HO]{HO} A. Hahn and O.T. O'Meara, \emph{The classical groups and $K$-theory}, Springer-Verlag, Berlin, 1989.
\bibitem[T]{T} D.E. Taylor, \emph{The geometry of the classical groups}, Heldermann Verlag, Berlin, 1992.
\end{thebibliography}
\end{document}
\begin{document}
\title{Growth of solutions for QG and 2D Euler equations}
\author{Diego Cordoba \\
{\small Department of Mathematics} \\
{\small University of Chicago} \\
{\small 5734 University Av, Il 60637} \\
{\small Telephone: 773 702-9787, e-mail: [email protected]} \\
{\small and} \\
Charles Fefferman\thanks{Partially supported by NSF grant DMS 0070692.}\\
{\small Princeton University} \\
{\small Fine Hall, Washington Road, NJ 08544} \\
{\small Phone: 609-258 4205, e-mail: [email protected]} \\
}
\date{January 17 2001}
\maketitle
\markboth{QG and 2D Euler equations}{D.Cordoba and C.Fefferman}
\newtheorem {Thm}{Theorem}
\newtheorem {Def}{Definition}
\newtheorem {Lm}{Lemma}
\newtheorem {prop}{Proposition}
\newtheorem {Rem}{Remark}
\newtheorem {Cor}{Corollary}
\def\mathcal{\mathcal}
\newtheorem {Ack*}{Acknowledgments}
\section{Abstract}
We study the rate of growth of sharp fronts of the Quasi-geostrophic equation and 2D incompressible Euler equations. The development of sharp fronts is due to a mechanism that piles up level sets very fast. Under a semi-uniform collapse, we obtain a lower bound on the minimum distance between the level sets.
\section{Introduction}
The work of Constantin-Majda-Tabak [1] developed an analogy between the Quasi-geostrophic and 3D Euler equations. Constantin, Majda and Tabak proposed a candidate for a singularity for the Quasi-geostrophic equation. Their numerics showed evidence of a blow-up for a particular initial data, where the level sets of the temperature contain a hyperbolic saddle. The arms of the saddle tend to close in finite time, producing a sharp front. Numerical studies done later by Ohkitani-Yamada [8] and Constantin-Nie-Sch\"orghofer [2], with the same initial data, suggested that instead of a singularity the derivatives of the temperature were increasing as a double exponential in time.
Collapse on a curve was first studied in [1] for the Quasi-geostrophic equation, where the authors considered a simplified ansatz for classical frontogenesis with trivial topology. At the time of collapse, the scalar $\theta$ is discontinuous across the curve $x_2 = f(x_1)$ with different limiting values for the temperature on each side of the front. They show that under this topology the directional field remains smooth up to the collapse, which contradicts the following theorem proven in [1]:
\begin{eqnarray*}
\text{If locally the direction field remains smooth as t}\ \ \ \ \\
\text{ approaches $T_*$, then no finite singularity is possible}\ \ \ \ \\
\text{ as t approaches $T_*$.}\ \ \ \ \quad \quad \quad \quad \quad \quad \quad \quad
\end{eqnarray*}
The simplified ansatz with trivial topology studied in [1] does not describe a hyperbolic saddle.
Under the definition of a simple hyperbolic saddle, in [3], it was shown that the angle of the saddle can not decrease faster than a double exponential in time.
The criterion obtained in [5] for a sharp front formation for a general two dimensional incompressible flow is :
\begin{eqnarray*}
\text{A necessary condition to have a sharp front at time T is}\\
\int_{0}^{T}|u|_{L^{\infty}}(s) ds = \infty\ \quad \quad \quad \quad \ \ \ \ \quad
\end{eqnarray*}
For the Quasi-geostrophic equation it is not known if the quantity $\int_{0}^{T}|u|_{L^{\infty}}(s) ds$ diverges or not. And the criterion does not say how fast the arms of a saddle can close.
In this paper we do not assume anything on the velocity field, and we show that under a semi-uniform collapse the distance between two level curves cannot decrease faster than a double exponential in time. The semi-uniform collapse assumption greatly weakens the assumptions made in [1] for an ansatz for classical frontogenesis, and the simple hyperbolic saddle in [3].
In the case of 2D incompressible Euler equation we are interested in the large time behavior of solutions.
The two equations we discuss in this paper have in common the property that a scalar function is convected by the flow, which implies that the level curves are transported by the flow. The possible singular scenario is due to level curves approaching each other very fast, which will lead to a fast growth of the gradient of the scalar function. Below we study the semi-uniform collapse of two level sets on a curve. By semi-uniform collapse we mean that the distances of the two curves at any point are comparable.
The equations we study are as follows:
\bc
{\underline{The Quasi-geostrophic (QG) Equation}}
\ec
Here the unknowns are a scalar $\theta(x,t)$ and a velocity field $u(x,t) = (u_1(x,t), u_2(x,t)) \in R^2$, defined for $t\in[0,T^*)$ with $T^*\leq \infty$, and for $x \in \Omega$ where $\Omega = R^2$ or $R^2/Z^2$. The equations for $\theta$, u are as follows
\begin{eqnarray}
\left (\partial_t + u\cdot\nabla_x \right ) \theta = 0 \\
u = \nabla_{x}^{\perp}\psi\ \ and \ \ \psi = (-\triangle_x)^{-\frac{1}{2}}\theta, \nonumber
\end{eqnarray}
where $\nabla_{x}^{\perp} f = (-\frac{\partial f}{\partial x_2}, \frac{\partial f}{\partial x_1})$ for scalar functions f. The initial condition is $\theta(x,0) = \theta_0(x)$ for a smooth initial datum $\theta_0$.
\bc
{\underline{The Two-Dimensional Euler Equation}}
\ec
The unknown is an incompressible velocity field u(x,t) as above with vorticity denoted by $\omega$. The 2D Euler equation may be written in the form
\begin{eqnarray}
\left (\partial_t + u\cdot\nabla_x \right ) \omega = 0 \\
u = \nabla_{x}^{\perp}\psi\ \ and \ \ \psi = (-\triangle_x)^{-1}\omega, \nonumber
\end{eqnarray}
with u(x,0) equal to a given smooth divergence free $u_0(x)$.
\section{Results}
Assume that q = q(x,t) is a solution to (1) or (2), and that a level curve of q can be parameterized by
\begin{eqnarray}
x_2=\phi_{\rho}(x_1,t)\ \ for\ \ x_1\in[a,b] \label{eq:1}
\end{eqnarray}
with $\phi_{\rho}\in C^1([a,b]\cap [0,T^*))$, in the sense that
\begin{eqnarray}
q(x_1,\phi_{\rho}(x_1,t), t) = G(\rho)\ \ for\ \ x_1\in[a,b],\label{eq:2}
\end{eqnarray}
and for certain $\rho$ to be specified below.
The stream function $\psi$ satisfies
\begin{eqnarray}
\nabla^{\perp}\psi=u.
\end{eqnarray}
From (3) and (4), we have
\begin{eqnarray}
\frac{\partial q}{\partial x_1} + \frac{\partial q}{\partial x_2} \frac{\partial\phi_{\rho}}{\partial x_1} = 0 \label{eq:3}
\end{eqnarray}
\begin{eqnarray}
\frac{\partial q}{\partial t} + \frac{\partial q}{\partial x_2} \frac{\partial\phi_{\rho}}{\partial t} = 0 \label{eq:4}
\end{eqnarray}
By (1), (2), (5), (6) and (7) we obtain
\begin{eqnarray*}
\frac{\partial\phi_{\rho}}{\partial t} & = & -\frac{\frac{\partial q}{\partial t}}{\frac{\partial q}{\partial x_2}} = \frac{<-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <\frac{\partial q}{\partial x_1},\frac{\partial q}{\partial x_2} >}{\frac{\partial q}{\partial x_2}} \\
& = & <-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <\frac{\frac{\partial q}{\partial x_1}}{\frac{\partial q}{\partial x_2}}, 1> \\
& = & <-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <-\frac{\partial\phi_{\rho}}{\partial x_1}, 1>
\end{eqnarray*}
Next
\begin{eqnarray*}
\frac{\partial}{\partial x_1}\left (\psi(x_1,\phi_{\rho}(x_1,t), t)\right ) & = & \frac{\partial\psi}{\partial x_1} + \frac{\partial\psi}{\partial x_2} \frac{\partial\phi_{\rho}}{\partial x_1} \\
& = & <-\frac{\partial\psi}{\partial x_2},\frac{\partial\psi}{\partial x_1}>\cdot <-\frac{\partial\phi_{\rho}}{\partial x_1}, 1>
\end{eqnarray*}
Therefore
\begin{eqnarray}
\frac{\partial\phi_{\rho}}{\partial t} = \frac{\partial}{\partial x_1}\left (\psi(x_1,\phi_{\rho}(x_1,t), t)\right ) \label{eq:5}
\end{eqnarray}
With this formula we can write an explicit equation for the change in time of the area between two fixed points a, b and two level curves $(\phi_{\rho_1}, \phi_{\rho_2})$;
\begin{eqnarray}
\frac{d }{d t}\left ( \int_{a}^{b} [\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)] dx_1 \right ) \nonumber \\
= \psi(b,\phi_{\rho_2}(b,t), t) - \psi(a,\phi_{\rho_2}(a,t), t) \nonumber \\
+ \psi (a,\phi_{\rho_1}(a,t), t) - \psi(b,\phi_{\rho_1}(b,t), t) \label{eq:6}
\end{eqnarray}
Assume that two level curves $\phi_{\rho_1}$ and $\phi_{\rho_2}$ collapse when t tends to $T^*$ uniformly in $a\leq x_1\leq b$ i.e.
$$
\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t) \sim \frac{1}{b - a} \int_{a}^{b} [\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)] dx_1
$$
In other words, the distance between the two level sets is comparable for $a \leq x_1 \leq b$.
Let
$$
\delta(x_1,t) = |\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)|
$$
be the thickness of the front.
We define semi-uniform collapse on a curve if (3) and (4) holds and there exists a constant $c$, independent of t, such that
$$
min \delta (x_1,t) \geq c\cdot max \delta (x_1,t)
$$
for $a\leq x_1 \leq b$, and for all $t\in [0,T^*)$.
We call the length b-a of the interval [a,b] the length of the front.
Now we can state the following theorem
\begin{Thm}
For a QG solution with a semi-uniform front, the thickness $\delta(t)$ satisfies
\begin{eqnarray*}
\delta(t) > e^{-e^{At + B}} \ \ for\ \ all\ \ t\in[0,T^*).
\end{eqnarray*}
Here, the constants A and B may be taken to depend only on the length of the front, the semi-uniformity constant, the initial thickness $\delta(0)$, and the norm of the initial datum $\theta_0(x)$ in $L^1\cap L^{\infty}$.
\end{Thm}
Proof: From (9) we have
\begin{eqnarray}
|\frac{d}{d t} A(t)| < \frac{C}{b - a} \sup_{a\leq x_1\leq b} |\psi(x_1,\phi_{\rho_2}(x_1,t), t) - \psi(x_1,\phi_{\rho_1}(x_1,t), t)|
\end{eqnarray}
where
\begin{eqnarray*}
A(t) = \frac{1}{b - a} \int_{a}^{b} [\phi_{\rho_2}(x_1,t) & - &\phi_{\rho_1}(x_1,t)] dx_1,
\end{eqnarray*}
and C is determined by the semi-uniformity constant c.
The estimate of the difference of the value of the stream function at two different points that are close to each other is obtained by writing the stream function as follows;
\begin{eqnarray*}
\psi(x,t) = - \int_{\Omega}\frac{\theta(x + y,t)}{|y|} dy,
\end{eqnarray*}
and this is because $\psi = (-\triangle_x)^{-\frac{1}{2}}\theta$.
Therefore
\begin{eqnarray*}
\psi(z_1, t) - \psi(z_2,t) & = & \int_{\Omega}\theta(y)(\frac{1}{|y - z_1|} - \frac{1}{|y - z_2|}) dy \\ & = & \int_{|y - z_1| \leq 2\tau} + \int_{2\tau < |y - z_2| \leq k} + \int_{k < |y - z_1| } \\ & \equiv & I_{1} + I_{2} + I_{3}.\end{eqnarray*}
where $\tau = |z_1 - z_2| $.
Furthermore
\begin{eqnarray*}
|I_{1}| & \leq & ||\theta||_{L^{\infty}} \cdot\int_{|y - z_1| \leq 2\tau}(\frac{1}{|y - z_1|} + \frac{1}{|y - z_2|}) dy \\ & \leq & C\tau
\end{eqnarray*}
We define s to be a point in the line between $z_1$ and $z_2$, then $|y - z_1|
\leq 2|y - s|$ and $I_{2}$ can be estimated by
\begin{eqnarray*}
|I_{2}| &\leq& C\tau \cdot \int_{2\tau < |y - z_1| \leq
k}max_{s}|\nabla(\frac{1}{|y - s|})| dy \\ &\leq& C\tau \cdot
\int_{2\tau < |y - z_1| \leq k}max_{s}\frac{1}{|y - s|^{2}} dy \\ &\leq&
C\tau \cdot |\log \tau|
\end{eqnarray*}
We use the conservation of energy to estimate $I_{3}$ by
\begin{eqnarray*}
|I_{3}| \leq C \cdot \tau
\end{eqnarray*}
Finally, by choosing $\tau = |z_1 - z_2|$ we obtain
\begin{eqnarray}
|\psi(z_1, t) - \psi(z_2,t)| \leq M|z_1 - z_2||\log |z_1 - z_2||
\end{eqnarray}
where M is a constant that depends on the initial data $\theta_0$. (See details in [3].)
Then we have
\begin{eqnarray*}
|\frac{d}{d t} A(t)| & \leq & \frac{M}{b - a} \sup_{a\leq x_1\leq b}|\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)||\log |\phi_{\rho_2}(x_1,t) - \phi_{\rho_1}(x_1,t)|| \\
& \leq & \frac{C\cdot M}{b - a} |A(t)||\log A(t)|
\end{eqnarray*}
and therefore
\begin{eqnarray*}
A(t) >> A(0)e^{-e^{\frac{C\cdot M}{b - a} t}}
\end{eqnarray*}
\begin{Thm}
For a 2D Euler solution with a semi-uniform front, the thickness $\delta(t)$ satisfies
\begin{eqnarray*}
\delta(t) > e^{-[At + B]} \ \ for\ \ all\ \ t\in[0,T^*).
\end{eqnarray*}
\end{Thm}
Here, the constants A and B may be taken to depend only on the length of the front, the semi-uniformity constant, the initial thickness $\delta(0)$, and the norm of the initial vorticity in $L^1\cap L^{\infty}$.
The proof of Theorem 2 is similar to that of Theorem 1, with the difference that instead of the estimate (11), we have
\begin{eqnarray*}
|\psi(z_1, t) - \psi(z_2,t)|\leq M|z_1 - z_2|
\end{eqnarray*}
where M is a constant that depends on the initial data $u_0$. (See details in [3].)
Similar estimates can be obtained for the 2D ideal Magneto-hydrodynamics (MHD) equation, with the extra assumption that $\int_{0}^{T^*}|u|_{L^{\infty}}(s) ds$ is bounded up to the time of the blow-up. These estimates are a consequence of applying the Mean value theorem in (10). Nevertheless, in the case of MHD these estimates improve the results obtained in [6].
\begin{Ack*}
This work was initially supported by the American Institute of Mathematics.
\end{Ack*}
\end{document} |
\begin{document}
\title{Nash equilibrium with Sugeno payoff}
\author{Taras Radul}
\maketitle
Institute of Mathematics, Casimirus the Great University, Bydgoszcz, Poland;
\newline
Department of Mechanics and Mathematics, Lviv National University,
Universytetska st.,1, 79000 Lviv, Ukraine.
\newline
e-mail: tarasradul@yahoo.co.uk
\textbf{Key words and phrases:} Nash equilibrium, game in capacities, Sugeno integral
\begin{abstract} This paper is devoted to Nash equilibrium for games in capacities. Such games with payoff expressed by Choquet
integral were considered in \cite{KZ} and existence of Nash equilibrium was proved. We also consider games in capacities but with expected payoff expressed by Sugeno
integral. We prove existence of Nash equilibrium using categorical methods and abstract convexity theory.
\end{abstract}
\section{Introduction}
The classical Nash equilibrium theory is based on fixed point theory and was developed in frames of linear convexity. The mixed strategies of a player are probability (additive) measures on a set of pure strategies. But an interest to Nash equilibria in more general frames is rapidly growing in last decades. There are also results about Nash equilibrium for non-linear convexities. For instance, Briec and Horvath proved in \cite{Ch} existence of Nash equilibrium point for $B$-convexity and MaxPlus convexity. Let us remark that MaxPlus convexity is
related to idempotent (Maslov) measures in the same sense as linear convexity is related to probability measures.
We can use additive measures only when we know precisely probabilities of all events considered in a game. However it is not the case
in many modern economic models. The decision theory under uncertainty considers a model when probabilities of states are either not known or imprecisely specified. Gilboa \cite{Gil} and Schmeidler \cite{Sch} axiomatized expectations expressed by Choquet
integrals attached to non-additive measures called capacities, as a formal approach to decision-making under uncertainty. Dow and Werlang \cite{DW} generalized this approach for two players game where belief of each player about a choice of the strategy by the other player is a capacity. This result was extended onto games with arbitrary finite number of players \cite{EK}.
Kozhan and Zaricznyi introduced in \cite{KZ} a formal mathematical generalization of Dow and Werlang's concept of Nash equilibrium of a game where players are allowed to form non-additive beliefs about opponent's decision but also to play their mixed non-additive strategies. Such game is called by authors game in capacities. The expected payoff function was there defined using a Choquet integral. Kozhan and Zaricznyi proved existence theorem using a linear convexity on the space of capacities which is preserved by Choquet integral. There was stated a problem of existence of Nash equilibrium for another functors \cite{KZ}.
An alternative to so-called Choquet expected utility model is the qualitative decision theory. The corresponding expected utility is expressed by Sugeno integral. See for example papers \cite{DP}, \cite{DP1}, \cite{CH1}, \cite{CH} and others. Sugeno integral chooses a median value of utilities which is qualitative counterpart of the averaging operation by Choquet integral.
Following \cite{KZ} we introduce in this paper the general mathematical concept of Nash equilibrium of a game in capacities. However, motivated by the qualitative approach, we consider expected payoff function defined by Sugeno integral. To prove existence theorem for this concrete case, we consider more general framework which could unify all mentioned before situations and give us a method to prove theorems about existence of Nash equilibrium in different contexts. We use categorical methods and abstract convexity theory.
The notion of convexity considered in this paper is
considerably broader than the classic one; specifically, it is not
restricted to the context of linear spaces. Such convexities
appeared in the process of studying different structures like
partially ordered sets, semilattices, lattices, superextensions
etc. We base our approach on the notion of topological convexity
from \cite{vV} where the general convexity theory is covered from axioms
to application in different areas. In particular, a Kakutani fixed point theorem for abstract convexity is proved there.
Above mentioned constructions of the spaces of probability measures, idempotent measures and capacities are functorial and could be completed to monads (see \cite{RZ}, \cite{Z} and \cite{NZ} for more details). There was introduced in \cite{R1} a convexity structure on
each $\mathbb F$-algebra for any monad $\mathbb F$ in the category of compact Hausdorff spaces and continuous maps. Particularly, topological properties of monads with binary convexities were investigated.
We prove a counterpart of the Nash theorem for an abstract convexity in this paper. In particular, we consider binary convexities. We use these results to obtain a Nash theorem for algebras of any L-monad with binary convexity. Since the capacity monad is an L-monad with binary convexity \cite{R2}, we obtain as a corollary the corresponding result for capacities.
\section{Games in capacities} By $\mathsf{Comp}$ we denote the category of compact Hausdorff
spaces (compacta) and continuous maps. For each compactum $X$ we denote by $C(X)$ the Banach space of all
continuous functions on $X$ with the usual $\sup$-norm. In what follows, all
spaces and maps are assumed to be in $\mathsf{Comp}$ except for $\mathbb R$ and
maps in sets $C(X)$ with $X$ compact Hausdorff.
We need the definition of capacity on a compactum $X$. We follow a terminology of \cite{NZ}.
A function $c$ which assign each closed subset $A$ of $X$ a real number $c(A)\in [0,1]$ is called an {\it upper-semicontinuous capacity} on $X$ if the three following properties hold for each closed subsets $F$ and $G$ of $X$:
1. $c(X)=1$, $c(\emptyset)=0$,
2. if $F\subset G$, then $c(F)\le c(G)$,
3. if $c(F)<a$, then there exists an open set $O\supset F$ such that $c(B)<a$ for each compactum $B\subset O$.
We extend a capacity $c$ to all open subsets $U\subset X$ by the formula $c(U)=\sup\{c(K)\mid K$ is a closed subset of $X$ such that $K\subset U\}$.
It was proved in \cite{NZ} that the space $MX$ of all upper-semicontinuous capacities on a compactum $X$ is a compactum as well, if a topology on $MX$ is defined by a subbase that consists of all sets of the form $O_-(F,a)=\{c\in MX\mid c(F)<a\}$, where $F$ is a closed subset of $X$, $a\in [0,1]$, and $O_+(U,a)=\{c\in MX\mid c(U)>a\}$, where $U$ is an open subset of $X$, $a\in [0,1]$. Since all capacities we consider here are upper-semicontinuous, in the following we call elements of $MX$ simply capacities.
There is considered in \cite{KZ} a tensor product for capacities, which is a continuous map $\otimes:MX_1\times\dots\times MX_n\to M(X_1\times\dots\times X_n)$. Note that, despite the space of capacities contains the space of probability measures, the tensor product of capacities does not extend tensor product of probability measures.
Due to Zhou \cite{Zh} we can identify the set $MX$ with some set of functionals defined on the space $C(X)$ using the
Choquet integral. We consider for each $\mu\in MX$ its value on a function $f\in C(X)$ defined by the formulae
$$\mu(f)=\int fd\mu=\int_0^\infty\mu\{x\in X|f(X)\ge t\}dt+\int^0_{-\infty}(\mu\{x\in X|f(X)\ge t\}-1)dt$$
Let us remember the definition of Nash equilibrium. We consider an $n$-player game $f:X=\prod_{i=1}^n X_i\to\mathbb R^n$ with compact Hausdorff spaces of strategies $X_i$. The coordinate function $f_i:X\to \mathbb R$ we call the payoff function of the $i$-th player. For $x\in X$ and $t_i\in X_i$ we use the notation $(x;t_i)=(x_1,\dots,x_{i-1},t_i,x_{i+1},\dots,x_n)$. A point $x\in X$ is called a Nash equilibrium point if for each $i\in\{1,\dots,n\}$ and for each $t_i\in X_i$ we have $f_i(x;t_i)\le f_i(x)$. Kozhan and Zarichnyj proved in \cite{KZ} existence
of Nash equilibrium for a game in capacities $ef:\prod_{i=1}^n MX_i\to\mathbb R^n$ with expected payoff functions defined by $$ef_i(\mu_1,\dots,\mu_n)=\int_{X_1\times\dots\times X_n}f_id(\mu_1\otimes\dots\otimes\mu_n)$$
Let us remark that the Choquet functional representation of capacities preserves the natural linear convexity structure on $MX$ which was used in the proof of existence of Nash equilibrium \cite{KZ}. However this representation does not preserve the capacity monad structure. (We will introduce the monad notion in Section 4).
There was introduced \cite{R2} another functional representation of capacities using Sugeno integral (see also \cite{NR} for similar result). This representation preserves the capacity monad structure. Let us describe such representation. Fix any increasing homeomorphism $\psi:(0,1)\to\mathbb R$. We put additionally $\psi(0)=-\infty$, $\psi(1)=+\infty$ and assume $-\infty<t<+\infty$ for each $t\in\mathbb R$. We consider for each $\mu\in MX$ its value on a function $f\in C(X)$ defined by the formulae
$$\mu(f)=\int_X^{Sug} fd\mu=\max\{t\in\mathbb R\mid \mu(f^{-1}([t,+\infty)))\ge\psi^{-1}(t)\}$$
Let us remark that we use some modification of the Sugeno integral. The original Sugeno integral \cite{Su} ``ignores'' function values outside the interval $[0,1]$ and we introduce a ``correction'' homeomorphism $\psi$ to avoid this problem. Now, following \cite{KZ}, we consider a game in capacities $sf:\prod_{i=1}^n MX_i\to\mathbb R^n$, but motivated by \cite{DP}, we consider Sugeno expected payoff functions defined by $$sf_i(\mu_1,\dots,\mu_n)=\int^{Sug}_{X_1\times\dots\times X_n}f_id(\mu_1\otimes\dots\otimes\mu_n)$$
The main goal of this paper is to prove existence of Nash equilibrium for such game. Since Sugeno integral does not preserve linear convexity on $MX$ we can not use methods from \cite{KZ}. We will use some another natural convexity structure which has the binarity property (has Helly number 2). We will obtain some general result for such convexities which could be useful to investigate existence of Nash equilibrium for diverse construction. Finally, we will obtain the result for capacities as a corollary of these general results.
\section{Binary convexities}
A family $\mathcal C$ of closed subsets of a compactum $X$ is
called a {\it convexity} on $X$ if $\mathcal C$ is stable for intersection
and contains $X$ and the empty set. Elements of $\mathcal C$ are called
$\mathcal C$-convex (or simply convex). Although we follow general concept of abstract convexity from \cite{vV}, our definition is different.
We consider only closed convex sets. Such structure is called closure structure in \cite{vV}. The whole family of convex
sets in the sense of \cite{vV} could be obtained by the operation of
union of up-directed families. In what follows, we assume that each convexity contains all singletons.
A convexity $\mathcal C$ on $X$ is called $T_2$ if for each distinct $x_1$, $x_2\in
X$ there exist $S_1$, $S_2\in\mathcal C$ such that $S_1\cup S_2=X$,
$x_1\notin S_2$ and $x_2\notin S_1$. Let us remark that if a convexity $\mathcal C$ on a compactum $X$ is $T_2$, then $\mathcal C$ is a subbase for closed sets.
A convexity $\mathcal C$ on $X$ is called $T_4$ (normal) if for each disjoint $C_1$, $C_2\in
\mathcal C$ there exist $S_1$, $S_2\in\mathcal C$ such that $S_1\cup S_2=X$,
$C_1\cap S_2=\emptyset$ and $C_2\cap S_1=\emptyset$.
Let $(X,\mathcal C)$, $(Y,\mathcal D)$ be two
compacta with convexity structures. A continuous map $f:X\to Y$ is
called {\it CP-map} (convexity preserving map) if $f^{-1}(D)\in\mathcal C$
for each $D\in\mathcal D$; $f$ is
called {\it CC-map} (convex-to-convex map) if $f(C)\in\mathcal D$
for each $C\in\mathcal C$.
By a multimap (set-valued map) of a set $X$ into a set $Y$ we mean a map $F:X\to 2^Y$. We use the notation $F:X\multimap Y$. If $X$ and $Y$ are topological spaces, then a multimap $F:X\multimap Y$ is called upper semi-continuous (USC) provided for each open set $O\subset Y$ the set $\{x\in X\mid F(x)\subset O\}$ is open in $X$. It is well-known that a multimap is USC iff its graph is closed in $X\times Y$.
Let $F:X\multimap X$ be a multimap. We say that a point $x\in X$ is a fixed point of $F$ if $x\in F(x)$.
The following counterpart of the Kakutani theorem for abstract convexity is a partial case of Theorem 3 from \cite{W} (it also could be obtained by combining Theorem 6.15, Ch.IV and Theorem 4.10, Ch.III from \cite{vV}).
\begin{theorem}\label{KA} Let $\mathcal C$ be a normal convexity on a compactum $X$ such that all convex sets are connected and $F:X\multimap X$ is a USC multimap with values in $\mathcal C$. Then $F$ has a fixed point.
\end{theorem}
Let $\mathcal C$ be a family of subsets of a compactum $X$. We say that $\mathcal C$ is {\it linked} if the intersection of every two elements is non-empty. A convexity $\mathcal C$ is called {\it binary} if the intersection of every linked subsystem of $\mathcal C$ is non-empty.
\begin{lemma}\label{BC} Let $\mathcal C$ be a $T_2$ binary convexity on a continuum $X$. Then $\mathcal C$ is normal and all convex sets are connected.
\end{lemma}
\begin{proof} The first assertion of the lemma is proved in Lemma 3.1 \cite{RZ}. Let us prove the second one. Consider any $A\in\mathcal C$. There was defined in \cite{MV} a retraction $h_A:X\to A$ by the formula $h_A(x)=\cap\{C\in\mathcal C\mid x\in C$ and $C\cap A\ne\emptyset\}$. Hence $A$ is connected and the lemma is proved.
\end{proof}
Now we can reformulate Theorem \ref{KA} for binary convexities.
\begin{theorem}\label{KB} Let $\mathcal C$ be a $T_2$ binary convexity on a continuum $X$ and $F:X\multimap X$ is a USC multimap with values in $\mathcal C$. Then $F$ has a fixed point.
\end{theorem}
Now, let $\mathcal C_i$ be a convexity on $X_i$. We say that the function $f_i:X\to\mathbb R$ is quasi concave by $i$-th coordinate if we have $(f_i^x)^{-1}([t;+\infty))\in\mathcal C_i$ for each $t\in\mathbb R$ and $x\in X$ where $f_i^x:X_i\to\mathbb R$ is a function defined as follows $f_i^x(t_i)=f_i(x;t_i)$ for $t_i\in X_i$.
\begin{theorem}\label{NN} Let $f:X=\prod_{i=1}^n X_i\to\mathbb R^n$ be a game with a normal convexity $\mathcal C_i$ defined on each compactum $X_i$ such that all convex sets are connected, the function $f$ is continuous and the function $f_i:X\to\mathbb R$ is quasi concave by $i$-th coordinate for each $i\in\{1,\dots,n\}$. Then there exists a Nash equilibrium point.
\end{theorem}
\begin{proof} Fix any $x\in X$. For each $i\in\{1,\dots,n\}$ consider a set $M_i^x\subset X_i$ defined as follows $M_i^x=\{t\in X_i\mid f_i^x(t)=\max_{s\in X_i}f_i^x(s)\}$. We have that $M_i^x$ is a closed subset of $X_i$. Since the function $f_i:X\to\mathbb R$ is quasi concave by $i$-th coordinate, we have that $M_i^x\in\mathcal C_i$. Define a multimap $F:X\multimap X$ by the formula $F(x)=\prod_{i=1}^n M_i^x$ for $x\in X$.
Let us show that $F$ is USC. Consider any point $(x,y)\in X\times X$ such that $y\notin F(x)$. Then there exists $i\in\{1,\dots,n\}$ such that $f_i^x(y_i)<\max_{s\in X_i}f_i^x(s)$. Hence we can choose $t_i\in X_i$ such that $f_i(x;y_i)<f_i(x;t_i)$. Since $f_i$ is continuous, there exists a neighborhood
$O_x$ of $x$ in $X$ and a neighborhood $O_{y_i}$ of $y_i$ in $X_i$ such that for each $x'\in O_x$ and $y_i'\in O_{y_i}$ we have $f_i(x';y_i')<f_i(x';t_i)$. Put $O_y=(\mathrm{pr}_i)^{-1}(O_{y_i})$. Then for each $(x',y')\in O_x\times O_y$ we have $y'\notin F(x')$. Thus the graph of $F$ is closed in $X\times X$, hence $F$ is upper semicontinuous.
We consider on $X$ the family $\mathcal C=\{\prod_{i=1}^n C_i\mid C_i\in\mathcal C_i\}$. It is easy to see that $\mathcal C$ forms a normal convexity on the compactum $X$ such that all convex sets are connected.
Then by Theorem \ref{KA} $F$ has a fixed point which is a Nash equilibrium point.
\end{proof}
Now, the following corollary follows from the previous theorem and Lemma \ref{BC}.
\begin{corollary}\label{NB} Let $f:X=\prod_{i=1}^n X_i\to\mathbb R^n$ be a game such that there is defined a $T_2$ binary convexity $\mathcal C_i$ on each continuum $X_i$, the function $f$ is continuous and the function $f_i:X\to\mathbb R$ is quasi concave by $i$-th coordinate for each $i\in\{1,\dots,n\}$. Then there exists a Nash equilibrium point.
\end{corollary}
\section{L-monads and its algebras}
We apply Corollary \ref{NB} to study games defined on algebras of binary L-monads. We recall some categorical notions (see \cite{Mc} and \cite{TZ}
for more details). We define them only for the
category $\mathsf{Comp}$. Let $F:\mathsf{Comp}\to\mathsf{Comp}$ be a covariant functor. A functor $F$ is called continuous if it preserves the limits of inverse
systems.
In what follows, all functors are assumed to preserve
monomorphisms, epimorphisms, and the weight of infinite compacta. We also assume that our functors are continuous.
For a functor $F$ which preserves
monomorphisms and an embedding
$i:A\to X$ we shall identify the space $FA$ and the subspace
$F(i)(FA)\subset FX$.
A {\it monad} $\mathbb T=(T,\eta,\mu)$ in the category
$\mathsf{Comp}$ consists of an endofunctor $T:{\mathsf{Comp}}\to{\mathsf{Comp}}$ and
natural transformations $\eta:\mathrm{Id}_{\mathsf{Comp}}\to T$ (unity),
$\mu:T^2\to T$ (multiplication) satisfying the relations $\mu\circ
T\eta=\mu\circ\eta T=${\bf 1}$_T$ and $\mu\circ\mu T=\mu\circ
T\mu$. (By $\mathrm{Id}_{\mathsf{Comp}}$ we denote the identity functor on the
category ${\mathsf{Comp}}$ and $T^2$ is the superposition $T\circ T$ of
$T$.)
Let $\mathbb T=(T,\eta,\mu)$ be a monad in the category ${\mathsf{Comp}}$. The
pair $(X,\xi)$, where $\xi:TX\to X$ is a map, is called a $\mathbb T$-{\it
algebra} if $\xi\circ\eta X=\mathrm{id}_X$ and $\xi\circ\mu X=\xi\circ
T\xi$. Let $(X,\xi)$, $(Y,\xi')$ be two $\mathbb T$-algebras. A map
$f:X\to Y$ is called a $\mathbb T$-algebras morphism if $\xi'\circ
Tf=f\circ\xi$.
Let $(X,\xi)$ be an $\mathbb F$-algebra for a monad $\mathbb F=(F,\eta,\mu)$ and
let $A$ be a closed subset of $X$. Denote by $f_A$ the quotient map
$f_A:X\to X/A$ (the classes of equivalence are the one-point sets $\{x\}$ for $x\in X\setminus A$ and the set $A$) and put $a=f_A(A)$. Denote $A^+=(Ff_A)^{-1}(\eta(X/A)(a))$. Define the $\mathbb F$-{\it convex
hull} $C_\mathbb F(A)$ of $A$ as follows:
$C_\mathbb F(A)=\xi(A^+)$. Put additionally
$C_\mathbb F(\emptyset)=\emptyset$. We define the family
$\mathcal C_\mathbb F(X,\xi)=\{A\subset X\mid A $ is closed and $C_\mathbb F(A)=A\}$.
Elements of the family $\mathcal C_\mathbb F(X,\xi)$ we call $\mathbb F$-{\it convex}. It was shown in \cite{R1} that the family $\mathcal C_\mathbb F(X,\xi)$ forms a
convexity on $X$; moreover, each morphism of $\mathbb F$-algebras is a $CP$-map. Let us remark that one-point sets are always $\mathbb F$-convex.
We don't know if the convexities we have introduced
are $T_2$. We consider in this section a
class of monads generating convexities which have this
property. The class of $L$-monads was introduced in \cite{R1} and it contains many well-known monads in
$\mathsf{Comp}$ like superextension, hyperspace, probability measure, capacity, idempotent measure etc.
For $\phi\in C(X)$ by $\max\phi$ ($\min\phi$) we denote $\max_{x\in
X}\phi(x)$ ($\min_{x\in X}\phi(x)$), and $\pi_\phi$ or $\pi(\phi)$
denotes the corresponding projection $\pi_\phi:\prod_{\psi\in
C(X)}[\min\psi,\max\psi]\to[\min\phi,\max\phi]$. It was shown in
\cite{R3} that for each L-monad $\mathbb F=(F,\eta,\mu)$ we can consider $FX$
as a subset of the product $\prod_{\phi\in C(X)}[\min\phi,\max\phi]$;
moreover, we have $\pi_\phi\circ \eta X=\phi$, $\pi_\phi\circ \mu
X=\pi(\pi_\phi)$ for all $\phi\in C(X)$ and $\pi_\psi\circ
Ff=\pi_{\psi\circ f}$ for all $\psi\in C(Y)$, $f:X\to Y$. We could consider these properties of $L$-monads as a definition \cite{R3}.
We say that an L-monad $\mathbb F=(F,\eta,\mu)$ weakly
preserves preimages if for each map $f:X\to Y$ and each closed
subset $A\subset Y$ we have
$\pi_\phi(\nu)\in[\min\phi(f^{-1}(A)),$ $\max\phi(f^{-1}(A))]$ for
each $\nu\in (Ff)^{-1}(A)$ and $\phi\in C(X)$ \cite{R1}. It was shown in \cite{R1} that for each L-monad $\mathbb F$ which weakly
preserves preimages the convexity $\mathcal C_\mathbb F(FX,\mu X)$ is $T_2$.
\begin{lemma}\label{CC} Let $(X,\xi)$ be an $\mathbb F$-algebra for an $L$-monad $\mathbb F=(F,\eta,\mu)$ which weakly preserves preimages. Then the map $\xi:FX\to X$ is a CC-map for the convexities $\mathcal C_\mathbb F(FX,\mu)$ and $\mathcal C_\mathbb F(X,\xi)$ respectively.
\end{lemma}
\begin{proof} Consider any $B\in \mathcal C_\mathbb F(FX,\mu)$. We should show that $\xi(B)\in\mathcal C_\mathbb F(X,\xi)$. Denote by $\chi:X\to X/\xi(B)$ the quotient map and put $b=\chi(\xi(B))$. Consider any $\mathcal A\in FX$ such that $F\chi(\mathcal A)=\eta(X/\xi(B))(b)$. We should show that $\xi(\mathcal A)\in\xi(B)$.
Consider the quotient map $\chi_1:FX\to FX/B$ and put $b_1=\chi_1(B)$. There exists a (unique) continuous map $\xi':FX/B\to X/\xi(B)$ such that $\xi'(b_1)=b$ and $\xi'\circ \chi_1=\chi\circ \xi$. Put $\mathcal D=F(\eta X)(\mathcal A)$. We have $F\xi(\mathcal D)=\mathcal A$, hence $F\xi'\circ F\chi_1(\mathcal D)=F\chi\circ F\xi(\mathcal D)=F\chi(\mathcal A)=\eta(X/\xi(B))(b)$. Since $F$ weakly preserves preimages, we have $F\chi_1(\mathcal D)=\eta(FX/B)(b_1)$. Since $B\in \mathcal C_\mathbb F(FX,\mu)$, we have $\mu X(\mathcal D)\in B$. Hence $\xi(\mathcal A)=\xi\circ F\xi(\mathcal D)=\xi\circ \mu X(\mathcal D)\in\xi(B)$. The lemma is proved.
\end{proof}
We call a monad $\mathbb F$ binary if
$\mathcal C_\mathbb F(X,\xi)$ is binary for each $\mathbb F$-algebra $(X,\xi)$.
\begin{lemma}\label{BT} Let $\mathbb F=(F,\eta,\mu)$ be a binary L-monad which weakly preserves preimages. Then for each $\mathbb F$-algebra $(X,\xi)$ the convexity $\mathcal C_\mathbb F(X,\xi)$ is $T_2$.
\end{lemma}
\begin{proof} Consider any two distinct points $x$, $y\in X$. Since $\xi$ is a morphism of the $\mathbb F$-algebras $(FX,\mu X)$ and $(X,\xi)$, it is a CP-map and we have $\xi^{-1}(x)$, $\xi^{-1}(y)\in \mathcal C_\mathbb F(FX,\mu)$. Since $\mathcal C_\mathbb F(FX,\mu)$ is $T_2$ and binary, it is normal by Lemma \ref{BC}. Hence we can choose $L_1$, $L_2\in \mathcal C_\mathbb F(FX,\mu)$ such that $L_1\cup L_2=FX$ and $L_1\cap\xi^{-1}(x)=\emptyset$, $L_2\cap\xi^{-1}(y)=\emptyset$. Then we have $\xi(L_1)$, $\xi(L_2)\in\mathcal C_\mathbb F(X,\xi)$ by Lemma \ref{CC}, $\xi(L_1)\cup\xi(L_2)=X$, $x\notin \xi(L_1)$ and $y\notin \xi(L_2)$. The lemma is proved.
\end{proof}
Consider any L-monad $\mathbb F=(F,\eta,\mu)$. It is easy to check that for each segment $[a,b]\subset\mathbb R$ the pair $([a,b],\xi_{[a,b]})$ is an $\mathbb F$-algebra where $\xi_{[a,b]}=\pi_{\mathrm{id}_{[a,b]}}$.
Consider a game $f:X=\prod_{i=1}^n X_i\to\mathbb R^n$ where for each compactum $X_i$ there exists a map $\xi_i:FX_i\to X_i$ such that the pair $(X_i,\xi_i)$ is an $\mathbb F$-algebra. We say that the function $f_i:X\to\mathbb R$ is an $\mathbb F$-algebras morphism by $i$-th coordinate if for each $x\in X$ the function $f_i^x:X_i\to\mathbb R$ is a morphism of the $\mathbb F$-algebras $(X_i,\xi_i)$ and $([\min f_i^x,\max f_i^x],\xi_{[\min f_i^x,\max f_i^x]})$.
\begin{theorem}\label{NA} Let $\mathbb F=(F,\eta,\mu)$ be a binary L-monad which weakly preserves preimages. Let $f:X=\prod_{i=1}^n X_i\to\mathbb R^n$ be a game such that there is defined an $\mathbb F$-algebra map $\xi_i:FX_i\to X_i$ on each continuum $X_i$, the function $f$ is continuous and the function $f_i:X\to\mathbb R$ is an $\mathbb F$-algebras morphism by $i$-th coordinate for each $i\in\{1,\dots,n\}$. Then there exists a Nash equilibrium point.
\end{theorem}
\begin{proof} Since for each $x\in X$ the function $f_i^x:X_i\to\mathbb R$ is an $\mathbb F$-algebras morphism, it is a CP-map, hence quasi concave. Now, our theorem follows from Lemma \ref{BT} and Corollary \ref{NB}.
\end{proof}
\section{Pure and mixed strategies}
Let $\mathbb F=(F,\eta,\mu)$ be a binary L-monad which weakly preserves preimages.
We consider Nash equilibrium for free algebras $(FX,\mu X)$ in this section. Points of a compactum $X$ we call pure strategies and points of $FX$ we call mixed strategies. Such an approach is a natural generalization of the model from \cite{KZ} where spaces of capacities $MX$ were considered.
We consider a game $u:X=\prod_{i=1}^n X_i\to\mathbb R^n$ with compact Hausdorff spaces of pure strategies $X_1,\dots,X_n$ and continuous payoff functions $u_i:\prod_{j=1}^n X_j\to\mathbb R$.
It is well known how to construct the tensor product of two (or a finite number of) probability measures. This operation was generalized in \cite{TZ} for each monad in the category $\mathsf{Comp}$. More precisely, there was constructed for all compacta $X_1,\dots,X_n$ a continuous map $\otimes:\prod_{i=1}^n F X_i\to F(\prod_{i=1}^n X_i)$ which is natural by each argument and for each $i$ we have $F(p_i)\circ\otimes= \mathrm{pr}_i$ where $p_i:\prod_{j=1}^nX_j\to X_i$ and $\mathrm{pr}_i:\prod_{j=1}^n FX_j\to FX_i$ are the natural projections.
We define the payoff functions $eu_i:FX_1\times\dots\times FX_n\to\mathbb R$ by the formula $eu_i=\pi_{u_i}\circ\otimes$. Evidently, $eu_i$ is continuous. Consider any $t\in\mathbb R$ and $\nu\in FX_1\times\dots\times FX_n$. Then we have $(eu_i^\nu)^{-1}[t;+\infty)=\{\mu_i\in FX_i\mid eu_i(\nu;\mu_i)\ge t\}=l^{-1}(\pi_{u_i}^{-1}[t;+\infty)\cap\{\nu_1\}\times\dots\times FX_i\times\dots\times\{\nu_n\})$, where $l:FX_i\to\prod_{j=1}^n FX_j$ is the embedding defined by $l(\mu_i)=(\nu;\mu_i)$ for $\mu_i\in FX_i$. A structure of $\mathbb F$-algebra on the product $\prod_{j=1}^n FX_j$ of the $\mathbb F$-algebras $(FX_i,\mu X_i)$ is given by the map $\xi:F(\prod_{i=1}^n FX_i)\to\prod_{i=1}^n FX_i$ defined by the formula $\xi=(\mu X_i\circ F(p_i))_{i=1}^n$. It is easy to check that a product of convex in $FX_i$ sets is convex in $\prod_{i=1}^n FX_i$. Since $\mathbb F$ weakly preserves preimages, $\pi_{u_i}^{-1}[t;+\infty)$ is convex in $\prod_{i=1}^n FX_i$. It is easy to see that $l$ is a CP-map, hence the map $eu_i$ is quasi concave by $i$-th coordinate.
Hence, using Corollary \ref{NB}, we obtain the following theorem.
\begin{theorem} The game with payoff functions $eu_i$ has a Nash equilibrium point provided each $FX_i$ is connected.
\end{theorem}
Now, consider a game in capacities with Sugeno payoff functions introduced in the beginning of the paper.
The assignment $M$ extends to the capacity functor $M$ in the category of compacta if the map $Mf:MX\to MY$ for a continuous map of compacta $f:X \to Y$ is defined by the formula $Mf(c)(F)=c(f^{-1}(F))$ where $c\in MX$ and $F$ is a closed subset of $Y$. This functor was completed to the monad $\mathbb M=(M,\eta,\mu)$ \cite{NZ}, where the components of the natural transformations are defined as follows: $\eta X(x)(F)=1$ if $x\in F$ and $\eta X(x)(F)=0$ if $x\notin F$;
$\mu X(\mathcal C)(F)=\sup\{t\in[0,1]\mid \mathcal C(\{c\in MX\mid c(F)\ge t\})\ge t\}$, where $x\in X$, $F$ is a closed subset of $X$ and $\mathcal C\in M^2(X)$.
Since the capacity monad $\mathbb M$ is a binary L-monad which weakly preserves preimages with $\pi_\varphi(\nu)=\int_X^{Sug} \varphi\, d\nu$ for any $\nu\in MX$ and $\varphi\in C(X)$ \cite{R2}, we obtain as a consequence
\begin{corollary}\label{NC} A game in capacities $sf:\prod_{i=1}^n MX_i\to\mathbb R^n$ with Sugeno payoff functions has a Nash equilibrium point.
\end{corollary}
\begin{thebibliography}{}
\bibitem{Ch} W.Briec, Ch.Horvath, {\em Nash points, Ku Fan inequality and equilibria of abstract economies in Max-Plus and $\mathbb B$-convexity}, J. Math. Anal. Appl. {\bf 341} (2008), 188--199.
\bibitem{CH1} A. Chateauneuf, M. Grabisch, A. Rico, {\em Modeling attitudes toward uncertainty through the use
of the Sugeno integral}, Journal of Mathematical Economics {\bf 44} (2008) 1084--1099.
\bibitem{DW} J.Dow, S.Werlang, {\em Nash equilibrium under Knightian uncertainty: breaking down backward induction}, J. Econ. Theory {\bf 64} (1994) 205--224.
\bibitem{DP} D.Dubois, H.Prade, R.Sabbadin, {\em Qualitative decision theory with Sugeno integrals}, arxiv.org 1301.7372
\bibitem{DP1} D. Dubois, J.-L. Marichal, H. Prade, M. Roubens, R. Sabbadin, {\em The use of the discrete Sugeno integral
in decision making: a survey}, Internat. J. Uncertainty, Fuzziness Knowledge-Based Systems {\bf 9} (5) (2001)
539--561.
\bibitem{EK} J.Eichberger, D.Kelsey, {\em Non-additive beliefs and strategic equilibria}, Games Econ Behav {\bf 30} (2000) 183--215.
\bibitem{Gil} I.Gilboa, {\em Expected utility with purely
subjective non-additive probabilities}, J. of Mathematical
Economics {\bf 16} (1987) 65--88.
\bibitem{KZ} R.Kozhan, M.Zarichnyi, {\em Nash equilibria for games in capacities}, Econ. Theory {\bf 35} (2008) 321--331.
\bibitem{Mc} S.MacLane, {\em Categories for the Working
Mathematician}, Springer Verlag, 1976.
\bibitem{MV} J.van Mill, M.van de Vel, {\em Convexity preserving mappings in subbase convexity theory}, Proc. Kon. Ned. Acad. Wet. {\bf 81} (1978) 76--90.
\bibitem{NR} O.R.Nykyforchyn, {\em The Sugeno integral and functional representation of the monad of lattice-valued capacities}, Topology {\bf 48} (2009) 137--148.
\bibitem{NZ} O.R.Nykyforchyn, M.M.Zarichnyi, {\em Capacity functor in the category of compacta}, Mat. Sb. {\bf 199} (2008) 3--26.
\bibitem{R1} T.Radul, {\em Convexities generated by L-monads}, Applied Categorical Structures {\bf 19} (2011) 729--739.
\bibitem{R2} T.Radul, {\em A functional representation of capacity monad}, Topology {\bf 48} (2009) 100--104.
\bibitem{R3} T.Radul, {\em On strongly Lawson and I-Lawson monads}, Boletin de Matematicas {\bf 6} (1999) 69--76.
\bibitem{RZ} T.N.Radul, M.M.Zarichnyi, {\em Monads in the category of
compacta}, Uspekhi Mat. Nauk {\bf 50} (1995) 83--108.
\bibitem{CH} A. Rico, M. Grabisch, Ch. Labreuchea, A. Chateauneuf, {\em Preference modeling on totally ordered sets by the
Sugeno integral}, Discrete Applied Mathematics {\bf 147} (2005) 113--124.
\bibitem{Sch} D.Schmeidler, {\em Subjective probability
and expected utility without additivity}, Econometrica {\bf 57} (1989) 571--587.
\bibitem{Su} M.Sugeno, {\em Fuzzy measures and fuzzy integrals: a survey}, in Fuzzy Automata and Decision
Processes, North-Holland, Amsterdam: M. M. Gupta, G. N. Saridis and B. R. Gaines, editors, 89--102, 1977.
\bibitem{TZ} A.Teleiko, M.Zarichnyi, {\em Categorical Topology
of Compact Hausdorff Spaces}, VNTL Publishers, Lviv, 1999.
\bibitem{vV} M.van de Vel, {\em Theory of convex structures}, North-Holland, 1993.
\bibitem{W} A.Wieczorek, {\em The Kakutani property and the fixed point property of topological spaces with abstract convexity}, J. Math. Anal. Appl. {\bf 168} (1992), 483--499.
\bibitem{Z} M.Zarichnyi, {\em Spaces and mappings of idempotent measures}, Izv. Ross. Akad. Nauk Ser. Mat. {\bf 74} (2010),
45--64.
\bibitem{Zh} L.Zhou, {\em Integral representation of continuous comonotonically additive functionals}, Trans. Am. Math. Soc. {\bf 350} (1998),
1811--1822.
\end{thebibliography}
\end{document}
\begin{document}
\title{On Entire Solutions of an Elliptic System Modeling Phase Separations}
\date{}
\begin{abstract}
We study the qualitative properties of a limiting elliptic system arising in phase separation for Bose-Einstein condensates with multiple states:
\[
\begin{cases} \Delta u=u v^2\ \ \mbox{in} \ {\mathbb R}^n, \\
\Delta v= v u^2 \ \ \mbox{in} \ {\mathbb R}^n, \\
u, v>0\quad \ \mbox{in} \ {\mathbb R}^n.
\end{cases}
\]
When $n=1$, we prove uniqueness of the one-dimensional profile. In dimension $2$,
we prove that stable solutions with linear growth must be
one-dimensional. Then we construct entire solutions in ${\mathbb R}^2$
with polynomial growth $|x|^d$ for any positive integer $d \geq 1$. For $d\geq 2$, these solutions are not one-dimensional. The construction is also extended to multi-component elliptic
systems.
\end{abstract}
\noindent {\sl Keywords:} {\small Stable solutions, elliptic systems, phase separations, Almgren's monotonicity formulae.}\
\vskip 0.2cm
\noindent {\sl AMS Subject Classification (2000):} {\small 35B45 .}
\vskip 0.2cm
\section{Introduction and Main Results}
\setcounter{equation}{0}
Consider the following two-component Gross-Pitaevskii system
\begin{align}
& -\Delta u + \alpha u^3 + \Lambda v^2 u = \lambda_1 u &&\text{in }\Omega, \label{1}\\
& -\Delta v +\beta v^3 + \Lambda u^2 v = \lambda_2 v &&\text{in }\Omega, \label{2}\\
& u>0,\quad v>0 && \text{in }\Omega, \label{37}\\
& u=0,\quad v=0 && \text{on }\partial\Omega\,, \label{3}\\
& \int_\Omega u^2=N_1,\quad\int_\Omega v^2=N_2\, , \label{301} &&
\end{align}
where $\alpha, \beta, \Lambda >0$ and $\Omega$ is a bounded smooth domain in ${\mathbb R}^n$. Solutions of (\ref{1})-(\ref{301})
can be regarded as critical points of the energy functional
\begin{equation}\label{5.1}
E_\Lambda(u,v)=\int_\Omega\,\left(|\nabla u|^2+|\nabla
v|^2\right)+\frac{\alpha}{2}u^4+\frac{\beta}{2}v^4+\frac{\Lambda}{2}
u^2v^2\,,\end{equation} on the space $(u,v)\in H^1_0(\Omega)\times
H^1_0(\Omega)$ with constraints
\begin{equation}
\label{302}
\int_\Omega u^2 dx=N_1, \int_\Omega v^2 dx=N_2.
\end{equation}
The eigenvalues $\lambda_j$'s are Lagrange multipliers with
respect to~(\ref{302}). Both eigenvalues
$\lambda_j=\lambda_{j,\Lambda}, j=1,2$, and eigenfunctions $u=u_\Lambda,
v=v_\Lambda$ depend on the parameter $\Lambda$. As the parameter $\Lambda$ tends to infinity, the two components tend to separate their supports. In order to investigate the basic rules of phase separations in this system one needs to understand the asymptotic behavior of $(u_\Lambda, v_\Lambda)$ as $ \Lambda \to +\infty$.
We shall assume that the solutions $(u_\Lambda,
v_\Lambda)$ of (\ref{1})-(\ref{301}) are such that the associated
eigenvalues $\lambda_{j,\Lambda}$'s are uniformly bounded, together with their energies $ E_\Lambda (u_\Lambda, v_\Lambda)$.
Then, as $\Lambda \to +\infty$, there is weak convergence (up to a subsequence) to a limiting profile $(u_\infty, v_\infty)$ which formally satisfies
\begin{equation}
\label{eq:limit-equation1}
\begin{cases}
-\Delta u_{\infty} +\alpha u_{\infty}^3 =\lambda_{1,\infty}
u_{\infty} \qquad & \text{in $\Omega_u$}\,,\\
-\Delta v_{\infty} +\beta v_{\infty}^3 =\lambda_{2,\infty}
v_{\infty} \qquad &\text{in $\Omega_v$}\,,\\
\end{cases}
\end{equation} where $\Omega_u=\{x\in\Omega: u_\infty(x)>0\}$ and
$\Omega_v=\{x\in\Omega: v_\infty(x)>0\}$ are positivity domains
composed of finitely disjoint components with positive Lebesgue
measure, and each $\lambda_{j,\infty}$ is the limit of
$\lambda_{j,\Lambda}$'s as $\Lambda\to\infty$ (up to a
subsequence).
There is a large literature about this type of questions. Effective numerical simulations for
(\ref{eq:limit-equation1}) can be
found in~\cite{B}, \cite{BaD} and~\cite{CLLL}.
Chang-Lin-Lin-Lin ~\cite{CLLL} proved pointwise convergence of
$(u_\Lambda, v_\Lambda)$ away from the interface
$\Gamma\equiv\{x\in\Omega: u_\infty(x)=v_\infty(x)=0\}$. In Wei-Weth
\cite{ww} the uniform equicontinuity of $(u_\Lambda,
v_\Lambda)$ is established, while Noris-Tavares-Terracini-Verzini~\cite{NTTV} proved
the uniform-in-$\Lambda$ H\"older continuity of $(u_\Lambda, v_\Lambda)$. The regularity of the nodal set of the
limiting profile has been investigated in \cite{C-L 2, TT2011} and in \cite{DWZ2011}: it turns out that
the limiting pair $(u_\infty(x),v_\infty(x))$ is the positive and negative pair $(w^+,w^-)$ of a solution of the equation $-\Delta w+\alpha (w^{+})^3-\beta (w^{-})^3 =\lambda_{1,\infty}w^+-\lambda_{2,\infty}w^-$.
To derive the asymptotic behavior of $(u_\Lambda, v_\Lambda)$
near the interface $\Gamma=\{x\in\Omega:
u_\infty(x)=v_\infty(x)=0\}$, one is led to considering the points
$x_\Lambda \in \Omega$ such that $ u_\Lambda
(x_\Lambda)=v_\Lambda (x_\Lambda)= m_\Lambda\to 0$ and $x_\Lambda
\to x_\infty \in \gamma\subset\Omega$ as $\Lambda \to +\infty$
(up to a subsequence). Assuming that
\begin{equation}
\label{mainas}
m_\Lambda^4 \Lambda \to C_0>0,
\end{equation}
(without loss of generality we may assume that $ C_0=1$), then, by blowing up, we find the following nonlinear
elliptic system
\begin{equation}\label{maineqn}
\Delta u= u v^2\,, \quad \Delta v= v u^2\,, \quad u,
v > 0 \quad \mbox{in} \quad {\mathbb R}^n\,.
\end{equation}
Problem (\ref{maineqn}) has been studied in Berestycki-Lin-Wei-Zhao \cite{blwz}, and Noris-Tavares-Terracini-Verzini \cite{NTTV}. It has been proved in \cite{blwz} that, in the one-dimensional case, (\ref{mainas}) always holds. In addition, the authors showed the existence, symmetry and nondegeneracy of the
solution to one-dimensional limiting system
\begin{equation}
\label{1D}
u^{''}= uv^2, v^{''}=v u^2, u, v>0 \ \mbox{in} \ {\mathbb R}.
\end{equation}
In particular they showed that entire solutions are reflectionally symmetric, i.e., there exists $x_0$ such that $ u(x-x_0)= v(x_0-x)$. They also established a two-dimensional version of the De Giorgi Conjecture in this framework. Namely, under the growth condition
\begin{equation}
\label{bd1}
u(x)+v(x)\leq C (1+|x|),
\end{equation}
every monotone solution is one-dimensional.
On the other hand, in \cite{NTTV}, it was proved that the linear growth is the lowest possible for solutions to (\ref{maineqn}). In other words, if there exists $\alpha \in (0,1)$ such that
\begin{equation}
\label{bd2}
u(x)+v(x)\leq C (1+|x|)^{\alpha},
\end{equation}
then $u, v \equiv 0$.
In this paper we address three problems left open in \cite{blwz}. First, we
prove the uniqueness of (\ref{1D}) (up to translations and scaling). This answers
the question stated in Remark 1.4 of \cite{blwz}. Second, we prove that the De Giorgi conjecture still holds in the two dimensional case, when we replace the monotonicity assumption by the stability condition. A third open question of (\ref{maineqn}) is whether all solutions to (\ref{maineqn}) necessarily satisfy the growth bound (\ref{bd1}). We shall answer this question negatively in this paper.
We first study the one-dimensional problem (\ref{1D}).
Observe that problem (\ref{1D}) is invariant under the translations $ (u(x), v(x)) \to ( u(x+t), v(x+t)), \forall t \in {\mathbb R}$ and scalings $ (u(x), v(x)) \to ( \lambda u(\lambda x), \lambda v(\lambda x)), \forall \lambda >0$. The following theorem classifies all entire solutions to (\ref{1D}).
\begin{thm}
\label{thm0}
The solution to (\ref{1D}) is unique, up to translations and scaling.
\end{thm}
Next, we want to classify the stable solutions in ${\mathbb R}^2$. We recall that a {\em stable} solution $(u, v)$ to (\ref{maineqn}) is such that the linearization is weakly positive definite. That is, it satisfies
\[
\int_{{\mathbb R}^n} [|\nabla \varphi|^2+|\nabla \psi |^2 + v^2 \varphi^2+u^2 \psi^2 +4 uv \varphi \psi] \geq 0, \qquad \forall \varphi, \psi \in C_0^\infty ({\mathbb R}^n).
\]
In \cite{blwz}, it was proved that the one-dimensional solution is stable in ${\mathbb R}^n$. Our first result states that the only stable solution in ${\mathbb R}^2$, among those growing at most linearly, is the one-dimensional family.
\begin{thm}
\label{thm1}
Let $(u,v)$ be a stable solution to (\ref{maineqn}) in ${\mathbb R}^2$. Furthermore, we assume that the growth bound (\ref{bd1}) holds. Then $(u, v)$ is one-dimensional, i.e., there exists $a \in {\mathbb R}^2, |a|=1$, such that $(u, v)= (U (a \cdot x), V (a \cdot x))$ where $(U, V)$ are functions of one variable and satisfy (\ref{1D}).
\end{thm}
Our third result shows that there are solutions to (\ref{maineqn})
with polynomial growth $|x|^d$ that are not one dimensional. The construction depends on the following harmonic polynomial $\Phi$ of degree
$d$:
$$\Phi:=\mbox{Re}(z^d).$$
Note that $\Phi$ has some dihedral symmetry; indeed, let us take its
$d$ nodal lines $L_1, \cdots, L_d$ and denote the corresponding
reflection with respect to these lines by $T_1,\cdots, T_d$. Then there holds
\begin{equation}\label{reflectional symmetry}
\Phi(T_i z)=-\Phi(z).
\end{equation}
The third result of this paper is the following one.
\begin{thm}\label{main result}
For each positive integer $d \geq 1$, there exists a solution $(u,v)$ to problem \eqref{maineqn}, satisfying
\begin{enumerate}
\item $u-v>0$ in $\{\Phi>0\}$ and $u-v<0$ in $\{\Phi<0\}$;
\item $u \geq\Phi^+$ and $v\geq\Phi^-$;
\item $\forall i=1,\cdots, d$, $u(T_iz)=v(z)$;
\item $\forall r>0$, the Almgren frequency function satisfies
\begin{equation}
\label{nr} N(r):=\frac{r\int_{B_r(0)}|\nabla u|^2+|\nabla
v|^2+u^2v^2}{\int_{\partial B_r(0)}u^2+v^2}\leq d;
\end{equation}
\item
\begin{equation}\label{nr 2}
\lim_{r \to +\infty} N(r) =d.
\end{equation}
\end{enumerate}
\end{thm}
Note that the one-dimensional solution constructed in \cite{blwz} can be viewed as corresponding to the case $d=1$. For $d\geq 2$, the solutions of Theorem \ref{main result} will be obtained by a minimization argument under symmetric variations
$(\varphi,\psi)$ (i.e. satisfying $\varphi\circ T_i=\psi$ for every
reflection $T_i$). The first four claims will be derived from the
construction. See Theorem \ref{thm existence on bounded set}.
Regarding the claim 5, we note that by Almgren's monotonicity formula,
(see Proposition \ref{monotonocity} below), the Almgren frequency quotient $N(r)$
is increasing in $r$. Hence $ \lim_{r \to +\infty} N(r)$ exists.
To understand the asymptotics at infinity of the solutions,
one way is to study the
blow-down sequence defined by:
$$(u_R(x), v_R(x)):=\left(\frac{1}{L(R)}u(Rx),\frac{1}{L(R)}v(Rx)\right),$$
where $L(R)$ is chosen so that
$$\int_{\partial
B_1(0)}u_R^2+v_R^2=1.$$
In Section 6, we will prove
\begin{thm}\label{thm asymptotics at infinity}
Let $(u,v)$ be a solution of \eqref{maineqn} such that
\[d:=\lim\limits_{r\rightarrow+\infty}N(r)<+\infty.\]
Then $d$ is a positive integer.
As $R\to\infty$, $(u_R, v_R)$ defined above (up to a subsequence)
converges to $(\Psi^+,\Psi^-)$ uniformly on any compact set of
$\mathbb{R}^n$, where $\Psi$ is a homogeneous harmonic polynomial of
degree $d$. If $d=1$ then $(u,v)$ is asymptotically flat at infinity.
\end{thm}
In particular
this applies to the solutions found by Theorem \ref{main result} to yield the following property
\begin{coro}
Let $(u,v)$ be a solution of \eqref{maineqn} given by Theorem \ref{main result}. Then
$$(u_R(x), v_R(x)):=\left(\frac{1}{R^d}u(Rx),\frac{1}{R^d}v(Rx)\right)$$
converges uniformly on compact subsets of $\mathbb R^2$ to a multiple of $(\Phi^+,\Phi^-)$, where
$\Phi:=\mbox{Re}(z^d)$.
\end{coro}
\par
Theorem \ref{thm asymptotics at infinity} roughly says that $(u,v)$
is asymptotic to $(\Psi^+,\Psi^-)$ at infinity for some homogeneous harmonic polynomial. The extra information we have in the setting of Theorem \ref{main result} is that $\Psi\equiv\Phi=\mbox{Re}(z^d)$. This can be inferred from the symmetries of the solution (property $3$ in Theorem \ref{main result}).
For another elliptic system with a similar form,
\begin{equation}
\label{uvnew}
\left\{ \begin{aligned}
&\Delta u=uv, u>0 \ \mbox{in} \ {\mathbb R}^n,\\
&\Delta v=vu, v>0 \ \mbox{in} \ {\mathbb R}^n \end{aligned} \right.
\end{equation}
the same result has been proved by Conti-Terracini-Verzini in \cite{C-T-V 3}.
In fact, their results hold for any dimension $n\geq 1$ and any
harmonic polynomial function on $\mathbb{R}^n$. Note however that the problem here is different from (\ref{uvnew}). Actually, equation (\ref{uvnew}) can be reduced to a single equation: indeed, the difference $u-v$ is a harmonic function ($\Delta (u-v)=0$) and thus we can write $v= u-\Phi $ where $\Phi$ is a harmonic function. By restricting to certain symmetry classes, (\ref{uvnew}) can then be solved by the sub-super solution method. However, this reduction does not work for system (\ref{maineqn}) that we study here.
For the proof of Theorem \ref{main result}, we first construct solutions to (\ref{maineqn}) in any bounded ball $B_R(0)$ satisfying appropriate boundary conditions:
\begin{equation}\label{equation100}
\left\{ \begin{aligned}
&\Delta u=uv^2, ~~\mbox{in}~~B_R(0),\\
&\Delta v=vu^2,~~\mbox{in}~~B_R(0), \\
& u=\Phi^+, v=\Phi^- \ \mbox{ on} \ \partial B_R(0).
\end{aligned} \right.
\end{equation}
This is done by variational method and using heat flow. The next natural step is to let
$R\rightarrow+\infty$ and obtain some convergence result. This requires
some uniform (in $R$) upper bound for solutions to
(\ref{equation100}). In order to prove
this, we will exploit a new monotonicity formula for symmetric functions (Proposition \ref{prop:upperbound}).
We also need to exclude the possibility of
degeneracy, that is that the limit could be $0$ or a solution with lower
degree such as a one dimensional solution. To this end, we will give some lower
bound using the Almgren monotonicity formula.
Lastly, we observe that the same construction works also for a system with many
components. Let $d$ be an integer or a half-integer and $2d=hk$ be a
multiple of the number of components $k$, and $G$ denote the
rotation of order $2d$. In this way we prove the following result
\begin{thm}\label{thm:maini}
There exists a positive solution to the system
\begin{equation}\label{eq:system}
\left\{ \begin{aligned}
&\Delta u_i=u_i\sum_{j\neq i,j=1}^ku_j^2, ~~\mbox{in}~~\mathbb C={\mathbb R}^2, i=1,\dots, k,\\
& u_i>0, i=1,\ldots, k,
\end{aligned} \right.
\end{equation}
having the following symmetries (here $\overline{z}$ is the complex conjugate of $z$)
\begin{equation}
\label{eqn2_i}
\begin{aligned}
u_{i}(z)&=u_i(G^hz), \qquad \ &\mbox{ on} \ &\mathbb C\,,i=1,\dots,k,\\
u_i(z)&=u_{i+1}(Gz), \qquad \ &\mbox{ on} \ &\mathbb C\,,i=1,\dots,k,\\
u_{k+1}(z)&=u_1(z), \ &\mbox{ on} \ &\mathbb C\\
u_{k+2-i}(z)&=u_i(\overline{z}), \qquad \ &\mbox{ on} \ &\mathbb C\,,i=1,\dots,k.\\
\end{aligned}
\end{equation}
Furthermore,
\[\lim_{r\to\infty} \dfrac{1}{r^{1+2d}}\int_{\partial B_r(0)}\sum_1^k u_{i}^2=b\in(0,+\infty)\;;\]
and
\[\lim_{r\to\infty} \frac{r\int_{B_r(0)}\sum_1^k |\nabla u_{i}|^2+\sum_{i<j}u_i^2u_j^2}
{\int_{\partial B_r(0)}\sum_1^k u_{i}^2}=d\;.\]
\end{thm}
The problem of the full classification of solutions to
\eqref{maineqn} is largely open. In view of our results, one can formulate several open questions.
\noindent
{\bf Open problem 1.} We recall from \cite{blwz} that it is still an open problem to know in which dimension it is true that every monotone solution is one-dimensional. A similar open question is in which dimension it is true that every stable solution is one-dimensional. We refer to \cite{A-C}, \cite{GG}, \cite{dkw}, \cite{pacard}, and \cite{savin} for results of this kind for the Allen-Cahn equation.
\noindent
{\bf Open problem 2.} Let us recall that in one space
dimension, there exists a unique solution to (\ref{1D}) (up to translations and scalings). Such solutions have linear growth at infinity and, in the Almgren
monotonicity formula, they satisfy
\begin{equation}
\label{2.1n}
\lim\limits_{r\rightarrow+\infty}N(r)=1.
\end{equation}
It is natural to conjecture that, in any space dimension, a solution of (\ref{maineqn}) satisfying (\ref{2.1n}) is actually one dimensional, that is, there is a unit vector
$a$ such that $(u(x),v(x))=(U (a \cdot x ), V (a \cdot x))$ for
$x \in\mathbb{R}^n$, where $(U,V)$ solves (\ref{1D}). However this result seems to be difficult to obtain at this stage.
\noindent
{\bf Open problem 3.} A further step would be to prove uniqueness of the (family of) solutions having polynomial asymptotics given by Theorem \ref{main result} in two space dimension. A more challenging question is to classify all solutions with
\begin{equation}
\label{2.1nn}
\lim\limits_{r\rightarrow+\infty}N(r)=d.
\end{equation}
\noindent
{\bf Open problem 4.} For the Allen-Cahn equation $ \Delta u+u-u^3=0$ in ${\mathbb R}^2$, solutions similar to Theorem \ref{main result} were first constructed in \cite{dfp} for $d=2$ and in \cite{acm} for $ d\geq 3$. (However all solutions to the Allen-Cahn equation are bounded.) On the other hand, it was also proved in \cite{dkpw} that the Allen-Cahn equation in ${\mathbb R}^2$ admits solutions with multiple fronts. An open question is whether a similar result holds for (\ref{maineqn}). Namely, are there solutions to (\ref{maineqn}) such that the set $\{ u=v\}$ contains disjoint multiple curves?
\noindent
{\bf Open problem 5.} This question is related to extension of Theorem \ref{main result} to higher dimensions. We recall that for the Allen-Cahn equation $ \Delta u+u-u^3=0$ in ${\mathbb R}^{2m}$ with $m\geq 2$, saddle-like solutions were constructed in \cite{cabre} by employing properties of Simons cone. Stable solutions to Allen-Cahn equation in ${\mathbb R}^8$ with non planar level set were found in \cite{pacard}, using minimal cones. We conjecture that all these results should have analogues for (\ref{maineqn}).
\section{Uniqueness of solutions in ${\mathbb R}$: Proof of Theorem \ref{thm0}}
\numberwithin{equation}{section}
\setcounter{equation}{0}
In this section we prove Theorem \ref{thm0}. Without loss of generality, we assume that
\begin{equation}
\lim_{x \to +\infty} u(x)= +\infty, \lim_{x \to +\infty} v(x)=0.
\end{equation}
The existence of such entire solutions has been proved in \cite{blwz}. By symmetry property of solutions to (\ref{1D}) (Theorem 1.3 of \cite{blwz}), we may consider the following problem
\begin{equation}\label{entire problem}
\left\{ \begin{aligned}
&u^{''}=uv^2,v^{''}=vu^2, u,v>0~~\text{in}~~\mathbb{R},\\
&\lim\limits_{x\to+\infty}u^{'}(x)=-\lim\limits_{x\to-\infty}v^{'}(x)=a
\end{aligned} \right.
\end{equation}
where $a>0$ is a constant. We now prove that there exists a unique
solution $(u, v)$ to (\ref{entire problem}), up to translations. We
will prove it using the method of moving planes.
First we observe that for any solution $(u,v)$ of \eqref{entire problem}, $u^{''}$ and
$v^{''}$ decay exponentially at infinity. Integration shows that as
$x\to+\infty$, $|u^{'}(x)-a|$ decays exponentially. (See also \cite{blwz}.) This implies the
existence of a positive constant $A$ such that
\begin{equation}\label{uniform deviation}
|u(x)-ax^+|+|v(x)-ax^-|\leq A.
\end{equation}
Moreover, the limits
\[\lim\limits_{x\to+\infty}(u(x)-ax^+),\lim\limits_{x\to-\infty}(v(x)-ax^-)\]
exist.
Now assume $(u_1,v_1)$ and $(u_2,v_2)$ are two solutions of
\eqref{entire problem}. For $t>0$, denote
\[u_{1,t}(x):=u_1(x+t),v_{1,t}(x):=v_1(x+t).\]
We want to prove that there exists an optimal $t_0$ such that for all $t\geq
t_0$,
\begin{equation}\label{sliding}
u_{1,t}(x)\geq u_2(x),v_{1,t}(x)\leq
v_2(x)~~\text{in}~~\mathbb{R}.
\end{equation}
Then we will show that when $t=t_0$ these inequalities are identities. This will
imply the uniqueness result.
Without loss of generality, assume $(u_1,v_1)$ and $(u_2,v_2)$
satisfy the estimate \eqref{uniform deviation} with the same
constant $A$.
\noindent
{\bf Step 1.} For $t\geq \frac{16A}{a}$ ($A$ as in \eqref{uniform deviation}), \eqref{sliding} holds.
Firstly, in the region $\{x\geq -t+\frac{2A}{a}\}$, by \eqref{uniform deviation} we have
\begin{equation}\label{1n}
u_{1,t}(x)\geq a(x+t)-A\geq ax^++A\geq u_2(x);
\end{equation}
while in the region $\{x\leq -t+\frac{2A}{a}\}$, we have
\begin{equation}\label{2n}
v_{1,t}(x)\leq a(x+t)^-+A\leq ax^--A\leq v_2(x).
\end{equation}
On the interval $\{x<-t+\frac{2A}{a}\}$, we have
\begin{equation}
\label{new1}
\left\{ \begin{aligned}
&u_{1,t}^{''}=u_{1,t}v_{1,t}^2\leq u_{1,t}v_2^2,\\
&u_2^{''}=u_2v_2^2.\\
\end{aligned} \right.
\end{equation}
With the right boundary conditions
\[u_{1,t}(-t+\frac{2A}{a})\geq u_2(-t+\frac{2A}{a}),
\lim\limits_{x\to-\infty}u_{1,t}(x)=\lim\limits_{x\to-\infty}u_2(x)=0,\]
a direct application of the
maximum principle implies
\[\inf_{\{x<-t+\frac{2A}{a}\}}(u_{1,t}-u_2)\geq 0.\]
The same type of argument also shows that
\[\sup_{\{x>-t+\frac{2A}{a} \}}(v_{1,t}-v_2)\leq 0.\]
Therefore, we have shown that for $ t\geq \frac{16A}{a}, u_{1,t}\geq u_2$ and $ v_{1,t}\leq v_2 $.
\noindent
{\bf Step 2.} We now decrease $t$ to the optimal value for which (\ref{sliding}) still holds:
\[t_0=\inf\{t^{'} \ | \ \eqref{sliding}~~\text{holds} ~~\text{for all} ~~ t \geq t^{'} \}.\]
Thus $t_0$ is well defined by Step 1. Since $ -(u_{1,t_0}-u_2)^{''} +v_{1,t_0}^2 (u_{1,t_0}-u_2) \geq 0,\ -(v_{2}-v_{1,t_0})^{''} +u_{1,t_0}^2 (v_2- v_{1,t_0}) \geq 0,$ by the strong maximum principle, either
\[u_{1,t_0}(x)\equiv u_2(x),v_{1,t_0}(x)\equiv
v_2(x)~~\text{in}~~\mathbb{R},\] or
\begin{equation}
\label{new23}
u_{1,t_0}(x)> u_2(x),v_{1,t_0}(x)<
v_2(x)~~\text{in}~~\mathbb{R}.
\end{equation}
We argue by contradiction: suppose that (\ref{new23}) holds. By the definition of $t_0$, there
exists a sequence of $t_k<t_0$ such that
$\lim\limits_{k\to+\infty}t_k=t_0$ and either
\begin{equation}
\label{2.0n}
\inf_{\mathbb{R}}(u_{1,t_k}-u_2)<0,
\end{equation}
or
\[\sup_{\mathbb{R}}(v_{1,t_k}-v_2)>0.\]
Let us only consider the first case.
Define $w_{1,k}:=u_{1,t_k}-u_2$ and $w_{2,k}:=v_2-v_{1,t_k}$. Direct
calculations show that they satisfy
\begin{equation}\label{2.9}
\left\{ \begin{aligned}
&-w_{1,k}^{''}+v_{1,t_k}^2w_{1,k}=u_2(v_2+v_{1,t_k})w_{2,k}~~\mbox{in}~~\mathbb{R}, \\
&-w_{2,k}^{''}+u_{1,t_k}^2w_{2,k}=v_2(u_2+u_{1,t_k})w_{1,k}~~\mbox{in}~~\mathbb{R}.
\end{aligned} \right.
\end{equation}
We use the auxiliary function $g(x)=\log(|x|+3)$ as in \cite{cl}. Note that
$$g\geq 1,~~g^{''}<0~~\mbox{in}~~\{x\neq 0\}.$$
Define $\widetilde{w}_{1,k}:=w_{1,k}/g$ and $\widetilde{w}_{2,k}:=w_{2,k}/g$. For $x \not = 0$ we have
\begin{equation}\label{2.10}
\left\{ \begin{aligned}
&-\widetilde{w}_{1,k}^{''}-2\frac{g^{'}}{g}\widetilde{w}_{1,k}^{'}
+[v_{1,t_k}^2-\frac{g^{''}}{g}]\widetilde{w}_{1,k}=u_2(v_2+v_{1,t_k})\widetilde{w}_{2,k},
~~\mbox{in}~~\mathbb{R}, \\
&-\widetilde{w}_{2,k}^{''}-2\frac{g^{'}}{g}\widetilde{w}_{2,k}^{'}
+[u_{1,t_k}^2-\frac{g^{''}}{g}]\widetilde{w}_{2,k}=v_2(u_2+u_{1,t_k})\widetilde{w}_{1,k},~~\mbox{in}~~\mathbb{R}.
\end{aligned} \right.
\end{equation}
By definition, $w_{1,k}$ and $w_{2,k}$ are bounded in $\mathbb{R}$, and hence
\[\widetilde{w}_{1,k},\widetilde{w}_{2,k}\to 0~~\text{as}~~|x|\to\infty.\]
In particular, in view of (\ref{2.0n}), we know that $\inf_{{\mathbb R}} (\widetilde{w}_{1,k})<0$ is attained at some point
$x_{k,1}$.
Note that $|x_{k,1}|$ must be unbounded, for if $ x_{k,1} \to x_{\infty}, t_k\to t_0$, then $ w_{1,k} (x_{k,1}) \to u_{1,t_0} (x_\infty)- u_2 (x_\infty) =0$. But this violates the assumption (\ref{new23}).
Since $|x_{k,1}|$ is unbounded, at $x=x_{k,1}$ there holds
$$\widetilde{w}_{1,k}^{''}\geq 0~~\mbox{and}~~\widetilde{w}_{1,k}^{'}=0.$$
Substituting this into the first equation of \eqref{2.10}, we get
\begin{equation}\label{2.11}
[v_{1,t_k}(x_{k,1})^2-\frac{ g^{''}(x_{k,1})}{g(x_{k,1})}]
\widetilde{w}_{1,k}(x_{k,1})\geq
u_2(x_{k,1})(v_2(x_{k,1})+v_{1,t_k}(x_{k,1}))\widetilde{w}_{2,k}(x_{k,1})
\end{equation}
which implies that $\widetilde{w}_{2,k}(x_{k,1})<0$. Thus we also have
$\inf\limits_{\mathbb{R}}\widetilde{w}_{2,k}<0$. Assume it is attained
at $x_{k,2}$. Same argument as before shows that $|x_{k,2}|$ must also be unbounded. Similar to \eqref{2.11}, we have
\begin{equation}\label{2.12}
[u_{1,t_k}(x_{k,2})^2-\frac{g^{''}(x_{k,2})}{g(x_{k,2})}]
\widetilde{w}_{2,k} (x_{k,2})\geq
v_2(x_{k,2})(u_2(x_{k,2})+u_{1,t_k}(x_{k,2}))\widetilde{w}_{1,k} (x_{k,2}).
\end{equation}
Observe that
$$\widetilde{w}_{2,k} (x_{k,2})=\inf\limits_{\mathbb{R}}\widetilde{w}_{2,k} \leq\widetilde{w}_{2, k} (x_{k,1}),
$$
$$\widetilde{w}_{1, k} (x_{k,1})=\inf\limits_{\mathbb{R} }\widetilde{w}_{1,k} \leq\widetilde{w}_{1, k} (x_{k,2}).
$$
Substituting these into \eqref{2.11} and \eqref{2.12}, we obtain
\begin{equation}
\label{2.13n}
\widetilde{w}_{1, k} (x_{k,1})\geq
\frac{u_2(x_{k,1})[v_2(x_{k,1})+v_{1,t_k}(x_{k,1})]}
{v_{1,t_k}(x_{k,1})^2-\frac{g^{''}(x_{k,1})}{g(x_{k,1})}}
\frac{v_2(x_{k,2})[u_2(x_{k,2})+u_{1,t_k}(x_{k,2})]}
{u_{1,t_k}(x_{k,2})^2-\frac{g^{''}(x_{k,2})}{g(x_{k,2})}}
\widetilde{w}_{1,k}(x_{k,1}).
\end{equation}
Since $\widetilde{w}_{1,k} (x_{k,1}) <0$, we conclude from (\ref{2.13n}) that
\begin{equation}
\frac{u_2(x_{k,1})[v_2(x_{k,1})+v_{1,t_k}(x_{k,1})]}
{v_{1,t_k}(x_{k,1})^2-\frac{g^{''}(x_{k,1})}{g(x_{k,1})}}
\frac{v_2(x_{k,2})[u_2(x_{k,2})+u_{1,t_k}(x_{k,2})]}
{u_{1,t_k}(x_{k,2})^2-\frac{g^{''}(x_{k,2})}{g(x_{k,2})}} \geq 1
\end{equation}
where $|x_{k,1}|\to +\infty, |x_{k,2}| \to +\infty$. This is impossible since $ \frac{ g^{''} (x)}{g (x)} \sim -\frac{1}{|x|^2 \log (|x|+3)}$ as $|x| \to +\infty$, and we also use the decaying as well as the linear growth properties of $u$ and $v$ at $\infty$.
We have thus reached a contradiction, and the proof of Theorem \ref{thm0} is thereby completed.
\section{Stable solutions: Proof of Theorem \ref{thm1}}
\numberwithin{equation}{section}
\setcounter{equation}{0}
In this section, we prove Theorem \ref{thm1}. The proof follows an idea from Berestycki-Caffarelli-Nirenberg \cite{BCN}---see also Ambrosio-Cabr\'{e} \cite{A-C} and Ghoussoub-Gui \cite{GG}. First, by the stability, we have the following
\begin{lem}
There exist a constant $\lambda\geq 0$ and two functions $\varphi>0$
and $\psi<0$, smoothly defined in $\mathbb{R}^2$ such that
\begin{equation}\label{entire eigenfunction}
\left\{ \begin{aligned}
&\Delta\varphi=v^2\varphi+2uv\psi-\lambda\varphi,\\
&\Delta\psi= 2uv\varphi+u^2\psi-\lambda\psi.
\end{aligned} \right.
\end{equation}
\end{lem}
\begin{proof}
For any $R<+\infty$ the stability assumption reads
$$\lambda(R):=\min\limits_{\varphi,\psi\in H_0^1(B_R(0))\setminus\{0\}}
\frac{\int_{B_R(0)}|\nabla\varphi|^2+|\nabla\psi|^2+v^2\varphi^2+u^2\psi^2+4uv\varphi\psi}
{\int_{B_R(0)}\varphi^2+\psi^2}\geq0.$$ It is well known that the
corresponding minimizer is the first eigenfunction. That is, letting
$(\varphi_R,\psi_R)$ be a minimizer realizing $\lambda(R)$, we have
\begin{equation}\label{first eigenfunction R}
\left\{ \begin{aligned}
&\Delta\varphi_R= v^2\varphi_R+2uv\psi_R-\lambda(R)\varphi_R, \ \mbox{in} \ B_R (0),\\
&\Delta\psi_R= 2uv\varphi_R+u^2\psi_R-\lambda(R)\psi_R, \ \mbox{in} \ B_R (0), \\
& \varphi_R =\psi_R=0 \ \mbox{on} \ \partial B_R (0).
\end{aligned} \right.
\end{equation}
By possibly replacing $(\varphi_R,\psi_R)$ with $(|\varphi_R|,-|\psi_R|)$, we
can assume $\varphi_R\geq 0$ and $\psi_R\leq 0$. After a
normalization, we also assume
\begin{equation}\label{normalization condition 2}
|\varphi_R(0)|+|\psi_R(0)|=1.
\end{equation}
$\lambda(R)$ is
decreasing in $R$, thus uniformly bounded as $R\rightarrow+\infty$.
Let $$\lambda:=\lim\limits_{R\rightarrow+\infty}\lambda(R).$$ The
equation for $\varphi_R$ and $-\psi_R$ (both of them are
nonnegative functions) forms a cooperative system, thus by the Harnack
inequality (\cite{A-G-M} or \cite{BS}), $\varphi_R$ and $\psi_R$ are uniformly
bounded on any compact set of $\mathbb{R}^2$. By letting
$R\rightarrow+\infty$, we can obtain a converging subsequence and the
limit $(\varphi,\psi)$ satisfies \eqref{entire eigenfunction}.
\par
We also have $\varphi\geq 0$ and $\psi\leq 0$ by passing to the
limit. Hence
$$-\Delta\varphi+(v^2-\lambda)\varphi\geq 0.$$
Applying the strong maximum principle, either $\varphi>0$ strictly
or $\varphi\equiv 0$. If $\varphi\equiv 0$, substituting this into
the first equation in \eqref{entire eigenfunction}, we obtain
$\psi\equiv 0$. This contradicts the normalization condition
\eqref{normalization condition 2}. Thus, it holds true that $\varphi>0$ and
similarly $\psi<0$.
\end{proof}
Fix a unit vector $\xi$. Differentiating the equation (\ref{maineqn}) yields the following equation for $(u_{\xi},v_{\xi})$
\begin{equation}\label{linearized equation}
\left\{ \begin{aligned}
&\Delta u_{\xi}=v^2u_{\xi}+2uvv_{\xi},\\
&\Delta v_{\xi}=2uvu_{\xi}+u^2v_{\xi}.
\end{aligned} \right.
\end{equation}
Let
$$w_1=\frac{u_{\xi}}{\varphi},w_2=\frac{v_{\xi}}{\psi}.$$
Direct calculations using \eqref{entire eigenfunction} and
\eqref{linearized equation} show
\begin{equation}
\left\{ \begin{aligned}
&\text{div}(\varphi^2\nabla
w_1)=2uv\varphi\psi(w_2-w_1)+\lambda\varphi^2w_1,\\\nonumber
&\text{div}(\psi^2\nabla w_2)=2uv\varphi\psi(w_1-w_2)+\lambda\psi^2w_2.
\end{aligned} \right.
\end{equation}
For any $\eta\in C_0^{\infty}(\mathbb{R}^2)$, testing these two
equations with $w_1\eta^2$ and $w_2\eta^2$ respectively, we obtain
\begin{equation}
\left\{ \begin{aligned}
&-\int\varphi^2|\nabla w_1|^2\eta^2-2\varphi^2w_1\eta\nabla w_1\nabla\eta
=\int
2uv\varphi\psi(w_2-w_1)w_1\eta^2+\lambda\varphi^2w_1\eta^2,\\\nonumber
&-\int\psi^2|\nabla w_2|^2\eta^2-2\psi^2w_2\eta\nabla w_2\nabla\eta
=\int 2uv\varphi\psi(w_1-w_2)w_2\eta^2+\lambda\psi^2w_2\eta^2.
\end{aligned} \right.
\end{equation}
Adding these two and applying the Cauchy-Schwarz inequality, we infer that
\begin{equation}\label{5.6}
\int\varphi^2|\nabla w_1|^2\eta^2+\psi^2|\nabla w_2|^2\eta^2\leq 16
\int\varphi^2w_1^2|\nabla\eta|^2+\psi^2w_2^2|\nabla\eta|^2\leq 16
\int (u_{\xi}^2+v_{\xi}^2)|\nabla\eta|^2.
\end{equation}
Here we have taken away the positive term in the right hand side and used
the fact that
$$2uv\varphi\psi(w_2-w_1)w_1\eta^2+2uv\varphi\psi(w_1-w_2)w_2\eta^2
=-2uv\varphi\psi(w_1-w_2)^2\eta^2\geq0,$$ because $\varphi>0$ and
$\psi<0$.
On the other hand, testing the equation $\Delta u \geq 0$ with $u\eta^2$ ($\eta$ as
above) and integrating by parts, we get
$$\int|\nabla u|^2\eta^2\leq 16\int u^2|\nabla\eta|^2.$$
The same estimate also holds for $v$. For any $r>0$, take
$\eta\equiv 1$ in $B_r(0)$, $\eta\equiv0$ outside $B_{2r}(0)$ and
$|\nabla\eta|\leq 2/r$. By the linear growth of $u$ and $v$, we obtain
a constant $C$ such that
\begin{equation}\label{5.7}
\int_{B_r(0)}|\nabla u|^2+|\nabla v|^2\leq Cr^2.
\end{equation}
Now for any $R>0$, in \eqref{5.6}, we take $\eta$ to be
\begin{equation}
\eta(z)= \left\{
\begin{array}{ll}
1, & x\in B_R(0), \\\nonumber
0, & x\in B_{R^2}(0)^c,\\\nonumber
1-\frac{\log(|z|/R)}{\log R} & x\in B_{R^2}(0)\setminus B_R(0).
\end{array}
\right. \end{equation} With this $\eta$, we infer from (\ref{5.6})
\begin{eqnarray*}
&&\int_{B_R(0)}\varphi^2|\nabla w_1|^2+\psi^2|\nabla w_2|^2\\
&\leq&\frac{C}{(\log R)^2}\int_{B_{R^2}(0)\setminus
B_R(0)}\frac{1}{|z|^2}(|\nabla u|^2+|\nabla v|^2)\\
&\leq &\frac{C}{(\log R)^2}\int_R^{R^2}r^{-2}(\int_{\partial
B_r(0)}|\nabla u|^2+|\nabla
v|^2)dr\\
&=&\frac{C}{(\log
R)^2}\int_R^{R^2}r^{-2}(\frac{d}{dr}\int_{B_r(0)}|\nabla
u|^2+|\nabla
v|^2)dr\\
&=&\frac{C}{(\log R)^2}[r^{-2}(\int_{B_r(0)}|\nabla
u|^2+|\nabla v|^2)|_R^{R^2}+2\int_R^{R^2}r^{-3}(\int_{B_r(0)}|\nabla
u|^2+|\nabla v|^2)dr]\\
&\leq& \frac{C}{\log R}.
\end{eqnarray*}
By letting $R\rightarrow+\infty$, we see $\nabla w_1\equiv 0$ and
$\nabla w_2\equiv 0$ in $\mathbb{R}^2$. Thus, there is a constant $c$
such that
$$(u_{\xi},v_{\xi})=c(\varphi,\psi).$$
Because $\xi$ is an arbitrary unit vector, from this we actually know that after
changing the coordinates suitably,
$$u_y\equiv 0,v_y\equiv 0\ \ \text{in}~\mathbb{R}^2.$$
That is, $u$ and $v$ depend on $x$ only and they are one
dimensional.
\section{Existence in bounded balls}
\numberwithin{equation}{section}
\setcounter{equation}{0}
In this section we first construct a solution $(u,v)$ to the problem
\begin{equation}\label{equation}
\left\{ \begin{aligned}
&\Delta u=uv^2 ~~\mbox{in}~~B_R(0),\\
&\Delta v=vu^2 ~~\mbox{in}~~B_R(0),
\end{aligned} \right.
\end{equation}
satisfying the boundary condition
\begin{equation}
\label{eqn2}
u=\Phi^+, v=\Phi^- \ \mbox{ on} \ \partial B_R(0)\subset\mathbb{R}^2.
\end{equation}
More precisely, we prove
\begin{thm}\label{thm existence on bounded set}
There exists a solution $(u_R,v_R)$ to problem \eqref{equation},
satisfying
\begin{enumerate}
\item $u_R-v_R>0$ in $\{\Phi>0\}$ and $u_R-v_R<0$ in $\{\Phi<0\}$;
\item $u_R\geq\Phi^+$ and $v_R\geq\Phi^-$;
\item $\forall i=1,\cdots, d$, $u_R(T_iz)=v_R(z)$;
\item $\forall r\in(0,R)$,
$$N(r;u_R,v_R):=\frac{r\int_{B_r(0)}|\nabla u_R|^2+|\nabla v_R|^2+u_R^2v_R^2}
{\int_{\partial B_r(0)}u_R^2+v_R^2}\leq d.$$
\end{enumerate}
\end{thm}
\begin{proof}
Let us denote $\mathcal U\subset H^1(B_R(0))^2$ the set of pairs
satisfying the boundary condition \eqref{eqn2}, together with
conditions $(1,2,3)$ of the statement of the Theorem (with the
strict inequality $<$ replaced by $\leq$, and so now $\mathcal{U}$ is a
closed set).
The desired solution will be a minimizer of the energy functional
$$E_R(u,v):=\int_{B_R(0)}|\nabla u|^2+|\nabla v|^2+u^2v^2$$
over $\mathcal U$. Existence of at least one minimizer follows easily from the direct method of the Calculus of Variations. To prove that the minimizer also satisfies equation (\ref{equation}), we use the heat flow method.
More precisely, we consider the following parabolic problem
\begin{equation}\label{parabolic equation}
\left\{ \begin{aligned}
&U_t-\Delta U=-UV^2, ~~\mbox{in}~~[0,+\infty)\times B_R(0),\\
&V_t-\Delta V=-VU^2,~~\mbox{in}~~[0,+\infty)\times B_R(0),
\end{aligned} \right.
\end{equation}
with the boundary conditions $U=\Phi^+$ and $V=\Phi^{-}$ on
$(0,+\infty)\times \partial B_R(0)$ and initial conditions in
$\mathcal U$.
By the standard parabolic theory, there exists a unique local solution
$(U,V)$. Then by the maximum principle, $0\leq U\leq
\sup_{B_R(0)}\Phi^+, \ \ 0\leq V \leq \sup_{B_R(0)} \Phi^{-}$,
hence the solution can be extended to a
global one, for all $t\in(0,+\infty)$. By noting the energy inequality
\begin{equation}
\label{ert}
\frac{d}{dt}E_R(U(t),V(t))=-\int_{B_R(0)}|\frac{\partial U}{\partial t}|^2
+|\frac{\partial V}{\partial t}|^2
\end{equation}
and the fact that $E_R\geq 0$, standard parabolic theory implies that for any sequence $t_i\to+\infty$, there
exists a subsequence of $t_i$ such that $(U(t_i),V(t_i))$ converges to
a solution $(u,v)$ of \eqref{equation}.
Next we show that $\mathcal U$ is positively invariant by the parabolic flow.
First of all, by the symmetry of initial and boundary data, $(V(t,T_iz),U(t,T_iz))$ is also a solution to the problem
\eqref{parabolic equation}. By the uniqueness of solutions to the
parabolic system \eqref{parabolic equation}, $(U,V)$ inherits the
symmetry of $(\Phi^+,\Phi^-)$. That is, for all $t\in[0,+\infty)$
and $i=1,\cdots, d$,
$$U(t,z)=V(t,T_iz).$$
This implies
$$U-V=0~~\mbox{on}~~\{\Phi=0\}.$$
Thus, in the open set $D_R:=B_R(0)\cap\{\Phi>0\}$, we have, for any initial datum $(u_0,v_0)\in\mathcal U$,
\begin{equation}\label{eq:difference}
\left\{ \begin{aligned}
&(U-V)_t-\Delta (U-V)=UV(U-V), ~~\mbox{in}~~[0,+\infty)\times D_R(0),\\
&U-V\geq 0,~~\mbox{on}~~[0,+\infty)\times \partial D_R(0),\\
&U-V\geq 0,~~\mbox{on}~~\{0\}\times D_R(0).
\end{aligned} \right.
\end{equation}
The strong maximum principle implies $U-V>0$ in $(0,+\infty)\times
D_R(0)$. By letting $t\to+\infty$, we obtain that the limit satisfies
\begin{equation}\label{4.10}
u-v\geq 0~~\mbox{in}~~D_R(0).
\end{equation}
$(u,v)$ also has the symmetry, $\forall i=1,\cdots, d$
$$u (T_i z)=v (z).$$
Similar to \eqref{eq:difference}, noting \eqref{4.10}, we have
\begin{equation}
\left\{ \begin{aligned}
&-\Delta (u-v)\geq 0, ~~\mbox{in}~~D_R(0),\\
&u-v=\Phi^+,~~\mbox{on}~~\partial D_R(0).
\end{aligned} \right.
\end{equation}
Comparing with $\Phi^+$ on $D_R(0)$, we obtain
\begin{equation}\label{2m}
u-v>\Phi^+>0,~~\mbox{in}~~D_R(0).
\end{equation}
Because $u>0$ and $v>0$ in $B_R(0)$, we in
fact have
\begin{equation}\label{3n}
u>\Phi^+,~~\mbox{in}~~B_R(0).
\end{equation}
In conclusion, $(u,v)$ satisfies conditions $(1,2,3)$ in the
statement of the theorem.
Let $(u_R, v_R)$ be a minimizer of $E_{R}$ over ${\mathcal U}$. Now we consider the parabolic equation (\ref{parabolic equation}) with the initial condition
\begin{equation}
U(x, 0)= u_R (x), \qquad V (x, 0) = v_R (x).
\end{equation}
By (\ref{ert}), we deduce that
$$ E_R (u_R, v_R) \leq E_{R} (U, V) \leq E_R (u_R, v_R) $$
and hence $ (U(x, t), V(x, t) )\equiv (u_R(x), v_R (x))$ for all $ t \geq 0$. By the arguments above, we see that $ (u_R, v_R)$ satisfies (\ref{equation}) and conditions $(1,2,3)$ in the statement of the theorem.
In order to prove (4), we firstly note that, as $(u_R,v_R)$ minimizes the energy and $(\Phi^+,\Phi^-)\in\mathcal U$, there holds
\[\int_{B_R(0)}|\nabla u_R|^2+|\nabla v_R|^2+u_R^2v_R^2\leq
\int_{B_R(0)}|\nabla \Phi|^2.
\]
Now by the Almgren
monotonicity formula (Proposition \ref{monotonocity} below) and the boundary conditions, $\forall
r\in(0,R)$, we derive
$$N(r;u_R,v_R)\leq N(R;u_R,v_R)\leq \frac{R\int_{B_R(0)}|\nabla\Phi|^2}{\int_{\partial B_R(0)}|\Phi|^2}=d.$$
This completes the proof of Theorem \ref{thm existence on bounded set}.
\end{proof}
Let us now turn to the system with many components.
In a similar way we shall prove the existence on bounded sets. Let $d$ be an integer or a
half-integer and $2d=hk$ be a multiple of the number of components
$k$, and $G$ denote the rotation of order $2d$. Take the fundamental
domain $F$ of the rotations group of degree $2d$, that is
$F=\{z\in\mathbb{C}\;:\;
\theta=\mbox{arg}(z)\in(-\pi/{2d},\pi/{2d})\}$. Define
\begin{equation}
\Psi(z)=\begin{cases}
r^{d}\cos(d\theta)\qquad&\text{if $z\in \cup _{i=0}^{h-1}G^{ik}(F)$,}\\
0 & \text{otherwise in $\mathbb C$.}
\end{cases}
\end{equation}
Note that $\Psi(z)$ is positive whenever it is not zero. Next we construct a solution $(u_1,\dots,u_k)$ to the system
\begin{equation}\label{equation_i}
\Delta u_i=u_i\sum_{j\neq i,j=1}^ku_j^2, ~~\mbox{in}~~B_R(0), i=1,\dots, k
\end{equation}
satisfying the symmetry and boundary condition (here $\overline{z}$ is the complex conjugate of $z$)
\begin{equation}
\label{eqn2_in}
\left\{\begin{array}{l}
u_{i}(z)= u_i(G^hz), \qquad \ \ \ \ \mbox{ on} \ B_R(0)\,,i=1,\dots,k,\\
u_i (z)= u_{i+1}(Gz), \qquad \ \ \mbox{ on} \ B_R(0)\,,i=1,\dots,k,\\
u_{k+2-i}(z)= u_i(\overline{z}), \qquad \ \mbox{ on} \ B_R(0)\,,i=1,\dots,k,\\
u_{k+1}(z)= u_1(z), \ \ \ \ \ \ \ \ \ \mbox{ on} \ B_R(0),
\end{array}
\right.
\end{equation}
\begin{equation}
\label{eqn2_ibc}
u_{i+1}(z)=\Psi(G^i(z)), \qquad \mbox{ on} \ \partial B_R(0)\,,i=0,\dots,k-1.
\end{equation}
More precisely, we prove the following.
\begin{thm}\label{thm existence on bounded seti}
For every $R>0$, there exists a solution $(u_{1,R},\dots,u_{k,R})$ to the system \eqref{equation_i} with symmetries \eqref{eqn2_in} and boundary conditions \eqref{eqn2_ibc},
satisfying,
$$N(r):=\frac{r\int_{B_r(0)}\sum_1^k|\nabla u_{i,R}|^2+\sum_{i<j}u_{i,R}^2u_{j,R}^2}
{\int_{\partial B_r(0)}\sum_1^k u_{i,R}^2}\leq d, \ \forall r\in(0,R).$$
\end{thm}
\begin{proof}
Let us denote by $\mathcal U\subset H^1(B_R(0))^k$ the set of pairs satisfying the symmetry and boundary condition \eqref{eqn2_in}, \eqref{eqn2_ibc}. The desired solution will be the minimizer of the energy functional
$$\int_{B_R(0)}\sum_1^k|\nabla u_{i,R}|^2+\sum_{i<j}u_{i,R}^2u_{j,R}^2$$
over $\mathcal U$. Once more, to deal with the constraints, we may take advantage of the positive invariance of the associated heat flow:
\begin{equation}\label{parabolic equation_i}
\left\{ \begin{aligned}
&\dfrac{\partial U_i}{\partial t}-\Delta U_i=-U_i\sum_{j\neq i}U_j^2, ~~\mbox{in}~~[0,+\infty)\times B_R(0),\\
\end{aligned} \right.\end{equation}
which can be solved under conditions \eqref{eqn2_in}, \eqref{eqn2_ibc} and initial conditions in $\mathcal U$.
Thus, the minimizer of the energy $(u_{1,R},\dots,u_{k,R})$ solves the differential system.
In addition, using the test function $(\Psi_1,\dots,\Psi_k)$, where $\Psi_i=\Psi\circ G^{i-1}$, $i=1,\dots,k$, we have
\[\int_{B_R(0)}\sum_1^k|\nabla u_{i,R}|^2+\sum_{i<j}u_{i,R}^2u_{j,R}^2\leq
k\int_{B_R(0)}|\nabla \Psi|^2.
\]
Now by the Almgren
monotonicity formula below (Proposition \ref{monotonocity}) and the boundary conditions, we get
$$N(r)\leq N(R)\leq \frac{R\int_{B_R(0)}|\nabla\Psi|^2}{\int_{\partial B_R(0)}|\Psi|^2}=d, \ \forall r \in (0, R).$$
\end{proof}
In order to conclude the proof of Theorems \ref{thm existence on bounded set} and \ref{thm existence on bounded seti}, we need to find upper and lower bounds for the solutions, uniform with respect to $R$ on bounded subsets of $\mathbb C$. That
is, we will prove that for any $ r>0$, there exist positive constants $0<c(r)<C(r)$ (independent of $R$) such
that
\begin{equation}\label{uniform upper bound}c(r)<
\sup\limits_{B_r(0)}u_R\leq C(r).
\end{equation}
Once we have this estimate, then by letting $R\rightarrow+\infty$, a
subsequence of $(u_R,v_R)$ will converge to a solution $(u,v)$ of
problem (\ref{maineqn}), uniformly on any compact set of
$\mathbb{R}^2$. It is easily seen that properties (1), (2), (3) and
(4) in Theorem \ref{thm existence on bounded set} can be derived by passing to
the limit, and we obtain the main results stated in Theorem \ref{main result} and \ref{thm:maini}. It then remains to establish the bound (\ref{uniform upper bound}). In the next section, we shall obtain this estimate by using the monotonicity formula.
\section{Monotonicity formula}
Let us start by stating some monotonicity formulae for solutions to
(\ref{maineqn}), for any dimension $n\geq 2$. The first two are
well-known and we include them here for completeness. But we will also require some refinements.
\begin{prop}\label{monotonocity 1}
For $r>0$ and $x\in\mathbb{R}^n$,
$$E(r)=r^{2-n}\int_{B_r(x)}\sum_1^k|\nabla u_i|^2+\sum_{i<j}u_i^2u_j^2$$
is nondecreasing in $r$.
\end{prop}
For a proof, see \cite{C-L 2}. The next statement is an Almgren-type
monotonicity formula with remainder.
\begin{prop}\label{monotonocity}
For $r>0$ and $x\in\mathbb{R}^n$, let us define
\[H(r)=r^{1-n}\int_{\partial B_r(x)}\sum_1^k u_i^2.\] Then
$$N(r;x):=\frac{E(r)}{H(r)}$$
is nondecreasing in $r$. In addition there holds
\begin{equation}\label{eq:remainder}
\int_{0}^r \dfrac{2\int_{B_s}\sum_{i<j}^ku_i^2u_j^2}{\int_{\partial B_s}\sum_1^ku_i^2}ds\leq N(r)\;.
\end{equation}
\end{prop}
\begin{proof}
For simplicity, take $x$ to be the origin $0$ and let $k=2$. We have
\[H(r)= r^{1-n}\int_{\partial B_r}u^2+v^2\;,\qquad E(r)=r^{2-n}\int_{B_r}|\nabla u|^2+|\nabla v|^2+u^2v^2\;.
\]
Then, direct calculations show that
\begin{equation}\label{4.2}
\frac{d}{dr} H(r)
=2r^{1-n}\int_{B_r}|\nabla u|^2+|\nabla v|^2+2u^2v^2.
\end{equation}
By the proof of
Proposition \ref{monotonocity 1}, we have
\begin{equation}\label{4.1}
\frac{d}{dr} E(r)
=2r^{2-n}\int_{\partial B_r}[u_r^2+v_r^2] +2r^{1-n}\int_{B_r}u^2v^2.
\end{equation}
With these two identities, we obtain
\begin{multline*}
\frac{d}{dr}\frac{E}{H}(r)
=\dfrac{H [2r^{2-n}\int_{\partial B_r}(u_r^2+v_r^2)
+2r^{1-n}\int_{B_r}u^2v^2]
-E[2r^{1-n}\int_{\partial B_r} uu_r+vv_r]}{H^2}\\
\geq \dfrac{2r^{3-2n}\int_{\partial B_r}(u^2+v^2)
\int_{\partial B_r}(u_r^2+v_r^2)
-2r^{3-2n}\left[\int_{\partial B_r} uu_r+vv_r\right]^2}{H^{2}}+\\
+\dfrac{2r^{1-n}\int_{B_r}u^2v^2}{H}
\geq \dfrac{2r^{1-n}\int_{B_r}u^2v^2}{H}.
\end{multline*}
Here we have used the following inequality
$$E(r)\leq r^{2-n}\int_{B_r}|\nabla u|^2+|\nabla v|^2+2u^2v^2
=r^{2-n}\int_{\partial B_r} uu_r+vv_r.$$
Hence this yields monotonicity of the Almgren quotient. In addition, by integrating the above
inequality we obtain
\begin{equation*}
\int_{r_0}^r \dfrac{2\int_{B_s}u^2v^2}{\int_{\partial B_s}u^2+v^2}ds\leq N(r)\;.
\end{equation*}
\end{proof}
If $x=0$, we simply denote $N(r;x)$ as $N(r)$. Assuming an upper
bound on $N(r)$, we establish a doubling property by the Almgren
monotonicity formula.
\begin{prop}\label{doubling property}
Let $R>1$ and let $(u_1,\dots,u_k)$ be a solution of \eqref{eq:system} on $B_R$. If $N(R)\leq d$, then for any $1<r_1\leq r_2\leq R$
\begin{equation}\label{eq:h_monotone}
\dfrac{H(r_2)}{H(r_1)}\leq e^{d}\dfrac{r_2^{2d}}{r_1^{2d}}.
\end{equation}
\end{prop}
\begin{proof}
For simplicity of notation, we expose the proof for the case of two components. By direct calculation using \eqref{4.2}, we obtain
\begin{eqnarray*}
\frac{d}{dr}\log\Bigg[r^{1-n} (\int_{\partial B_r(0)}u^2+v^2)\Bigg]
&=&\frac{2\int_{B_r}|\nabla u|^2+|\nabla v|^2+2u^2v^2}{\int_{\partial B_r(0)}u^2+v^2}\\
&\leq& \frac{2N(r)}{r}+\frac{2\int_{B_r}u^2v^2}{\int_{\partial B_r(0)}u^2+v^2}\\
&\leq& \frac{2d}{r}+\frac{2\int_{B_r}u^2v^2}{\int_{\partial B_r(0)}u^2+v^2}\\
\end{eqnarray*}
Thanks to \eqref{eq:remainder}, by integrating, we find that, if $1<r_1\leq r_2\leq R$, then
\begin{equation}\label{eq:h_monotone1}
\dfrac{H(r_2)}{H(r_1)}\leq e^{d}\dfrac{r_2^{2d}}{r_1^{2d}}.
\end{equation}
\end{proof}
An immediate consequence of Proposition \ref{doubling property} is the lower bound on bounded sets for the solutions found in Theorems \ref{thm existence on bounded set} and \ref{thm existence on bounded seti}.
\begin{prop}\label{prop:lowerbound}
Let $(u_{1,R},\dots,u_{k,R})$ be a family of solutions to \eqref{eq:system} such that $N(R)\leq d$ and $H(R)= CR^{2d}$. Then, for every fixed $r<R$, there holds
\[H(r)\geq Ce^{-d}r^{2d}.\]
\end{prop}
Another byproduct of the monotonicity formula with the remainder \eqref{eq:remainder} is the existence of the limit of $H(r)/r^{2d}$.
\begin{coro}\label{existencelimitH}
Let $R>1$ and let $(u_1,\dots,u_k)$ be a solution of \eqref{eq:system} on $\mathbb C$ such that $\lim_{r\to+\infty}N(r)\leq d$, then there exists
\begin{equation}\label{eq:Hhaslimit}
\lim_{r\to+\infty}\dfrac{H(r)}{r^{2d}}<+\infty\;.
\end{equation}
\end{coro}
Now we prove the optimal lower bound on the growth of the solution. To this aim, we need a fine estimate on the asymptotics of the
lowest eigenvalue as the competition term diverges. The following result is an extension of Theorem 1.6 in \cite{blwz}, where
the estimate was proved in case of two components.
\begin{thm}\label{thm:lambda}
Let $d$ be a fixed integer and let us consider
\begin{multline}\label{eq:min_eigenvalue}
\mathcal{L}(d,\Lambda)=\min\left\{ \int_0^{2\pi}\sum_i^d|u^\prime_i|^2+\Lambda\sum_{i<j}^d u_i^2u_j^2\; \Bigg| \; \begin{array}{l} \int_0^{2\pi}\sum_i u_i^2=1, \
u_{i+1}(x)=u_i(x-2\pi/d),\\
u_1(-x)=u_1(x)\;,u_{d+1}=u_1\;
\end{array}
\right\}.
\end{multline}
Then, there exists a constant $C$ such that for all $\Lambda>1$ we have
\begin{equation}\label{eq:est_l}
d^2-C \Lambda^{-1/4}\leq\mathcal{L}(d,\Lambda)\leq d^2\;.
\end{equation}
\end{thm}
\begin{proof}
Any minimizer $(u_{1,\Lambda},\dots,u_ {d,\Lambda})$ solves the system of ordinary differential equations
\begin{equation}u_i^{''}=\Lambda u_i\sum_{j\neq i}u_j^2-\lambda u_i\;,\qquad i=1,\dots,d,
\end{equation}
together with the associated energy conservation law
\begin{equation}
\sum_1^d(u_i^{'})^2+\lambda u_i^2-\Lambda\sum_{i<j}^du_i^2u_j^2=h\;.
\end{equation}
Note that the Lagrange multiplier satisfies
\[\lambda=\int_0^{2\pi}\sum_i^d|u^\prime_i|^2+2\Lambda\sum_{i<j}^d u_i^2u_j^2=\mathcal{L}(d,\Lambda)+\int_0^{2\pi}\Lambda\sum_{i<j}^d u_i^2u_j^2\;.\]
As $\Lambda\to\infty$, we see convergence of the eigenvalues $\lambda\simeq \mathcal{L}(d,\Lambda)\to d^2$, together with the energies $h\to 2d^2$. Moreover, the solutions remain bounded in Lipschitz norm and converge in Sobolev and H\"older spaces (see \cite{blwz} for more details).
Now, let us focus on the interval $I=(a,a+2\pi/d)$ where the $i$-th component is active.
The symmetry constraints imply
\begin{multline*}u_{i-1}(a)=u_i(a)\;,u_{i-1}^{'}(a)=-u_i^{'}(a)\;,\\
u_{i+1}(a+2\pi/d)=u_i(a+2\pi/d)\;,u_{i+1}^{'}(a+2\pi/d)=-u_i^{'}(a+2\pi/d)\;.\end{multline*}
We observe that there is interaction only with the two nearest neighboring components, while the others are exponentially small (in $\Lambda$) on $I$.
Close to the endpoint $a$, the component $u_i$ is increasing and convex, while $u_{i-1}$ is decreasing and again convex. Similarly to \cite{blwz} we have that
\begin{equation}\label{eq:iv}
u_i(a)= u_{i-1}(a)\simeq K\Lambda^{-1/4}\;, u'_i(a)= -u^{'}_{i-1}(a)\simeq H=(h+K)/2\;.
\end{equation}
Hence, in a right neighborhood of $a$, there holds
$u_i(x)\geq u_i(a)$, and therefore, as $u_{i-1}^{''}\geq\Lambda u_i^2(a)u_{i-1}$, from the initial value problem \eqref{eq:iv} we infer
\[u_{i-1}(x)\leq C u_i(a)e^{-\Lambda^{1/2}u_i(a)(x-a)}\;,\forall x\in [a,b].\]
On the other hand, on the same interval we have
\[u_{i}(x)\leq u_i(a)+C(x-a)\;,\forall x\in [a,b].\]
(here and below $C$ denotes a constant independent of $\Lambda$). Consequently, there holds
\begin{equation}\label{eq:allterms}
\Lambda \int_I u_{i-1}^2 u_{i}^2+u_{i-1}^3 u_{i}+u_{i-1} u_{i}^2\leq C\Lambda^{-1/2}u_i(a)^{-1}\simeq C\Lambda^{-1/4}\;.
\end{equation}
In particular, this yields
\begin{equation}\label{eq:lambda}
\mathcal L(d,\Lambda)\geq \lambda -C\Lambda^{-1/4}\;.
\end{equation}
In order to estimate $\lambda$, let us consider $\widehat u_i=\left(u_i-\sum_{j=i\pm 1}u_j\right)^+$.
Then, as $u_i(a)=u_{i-1}(a)$ and $u_i(a+2\pi/d)=u_{i+1}(a+2\pi/d)$, $\widehat u_i\in H^1_0(I)$. By testing the differential equation for $u_i-\sum_{j=i\pm 1}u_j$ with $\widehat u_i$ on $I$ we find
\[\int_I |\widehat u_i^{'}|^2\leq \lambda \int_I |\widehat u_i|^2+C\Lambda^{-1/4}\;,\]
where in the last term we have majorized all the integrals of mixed fourth order monomials with \eqref{eq:allterms}.
As $|I|=2\pi/d$, using Poincar\'e inequality and \eqref{eq:lambda} we obtain the desired estimate on $\mathcal L(d,\Lambda)$.
\end{proof}
We are now ready to apply the estimate from below on $\mathcal L$ to derive a lower bound on the energy growth. We recall that there holds
\[
\widehat E(r):=\int_{B_r(x)}\sum_1^k|\nabla u_i|^2+2\sum_{i<j}u_i^2u_j^2=\int_{\partial B_r(x)}\sum_1^k u_i\dfrac{\partial u_i}{\partial r}
\]
\begin{prop}\label{prop:upperbound}
Let $(u_{1,R},\dots,u_{k,R})$ be a solution of \eqref{eq:system} having the symmetries \eqref{eqn2_i} on $B_R$. There exists a constant $C$ (independent of $R$) such that for all $\;1\leq r_1\leq r_2\leq R$ there holds
\begin{equation}
\dfrac{\widehat E(r_2)}{\widehat E(r_1)}\geq C\dfrac{r_2^{2d}}{r_1^{2d}}
\end{equation}
\end{prop}
\begin{proof}
Let us compute,
\begin{multline*}\dfrac{d}{dr}\log\left(r^{-2d}\widehat E(r)\right)=-\dfrac{2d}{r}+\dfrac{\int_{\partial B_r(x)}{\sum_1^k|\nabla u_i|^2}+2\sum_{i<j}u_i^2u_j^2}{\int_{\partial B_r(x)}\sum_1^k u_i\dfrac{\partial u_i}{\partial r}}\\
=-\dfrac{2d}{r}+\dfrac{\int_{\partial B_r(x)}\sum_1^k\left(\dfrac{\partial u_i}{\partial r}\right)^2+\dfrac{1}{r^2}\left[\sum_1^k\left(\dfrac{\partial u_i}{\partial \theta}\right)^2+2r^2\sum_{i<j}u_i^2u_j^2\right]}{\int_{\partial B_r(x)}\sum_1^k u_i\dfrac{\partial u_i}{\partial r}}\\
=-\dfrac{2d}{r}+\dfrac{\int_{0}^{2\pi}\sum_1^k\left(\dfrac{\partial u_i}{\partial r}\right)^2+\dfrac{1}{r^2}\left[\sum_1^k\left(\dfrac{\partial u_i}{\partial \theta}\right)^2+2r^2\sum_{i<j}u_i^2u_j^2\right]}{\int_{0}^{2\pi}\sum_1^k u_i\dfrac{\partial u_i}{\partial r}}\end{multline*}
Now we use Theorem \ref{thm:lambda} and we continue the chain of inequalities:
\begin{multline}\label{eq:Emonotone}\dfrac{d}{dr}\log\left(r^{-2d}\widehat E(r)\right)\geq -\dfrac{2d}{r}+\dfrac{\int_{0}^{2\pi}\sum_1^k\left(\dfrac{\partial u_i}{\partial r}\right)^2+\dfrac{\mathcal L(d,2r^2)}{r^2}\int_0^{2\pi}\sum_1^k u_i^2}{\int_{0}^{2\pi}\sum_1^k u_i\dfrac{\partial u_i}{\partial r}}\\
\geq -\dfrac{2d-2 \sqrt{\mathcal L(d,2r^2)}}{r}\geq -\dfrac{C}{r^{3/2}}\;,
\end{multline}
where in the last line we have used H\"older inequality. By integration we easily obtain the assertion.
\end{proof}
A direct consequence of the above inequalities is the non vanishing of the quotient $E/r^{2d}$:
\begin{coro}\label{existencelimitE}
Let $R>1$ and let $(u_1,\dots,u_k)$ be a solution of \eqref{eq:system} on $\mathbb C$ satisfying \eqref{eqn2_i}; then there exists
\begin{equation}\label{eq:hatEhaslimit}
\lim_{r\to+\infty}\dfrac{\widehat E(r)}{r^{2d}}=b\in (0,+\infty]\;.
\end{equation}
If, in addition, $\lim_{r\to+\infty}N(r)\leq d$, then we have that $b<+\infty$ and
\begin{equation}\label{eq:Ehaslimit}
\lim_{r\to+\infty}N(r)=d,\quad\text{and}\quad\lim_{r\to+\infty}\dfrac{E(r)}{r^{2d}}=b\;.
\end{equation}
\end{coro}
\begin{proof}
Note that \eqref{eq:hatEhaslimit} is a straightforward consequence of the monotonicity formula \eqref{eq:Emonotone}. To prove \eqref{eq:Ehaslimit}, we first notice that
\[\lim_{r\to+\infty}\dfrac{E(r)}{r^{2d}}=\lim_{r\to+\infty}N(r)\dfrac{H(r)}{r^{2d}}.\]
So the limit of $E(r)/r^{2d}$ exists finite. Now we use \eqref{eq:remainder}
\[
\int_{0}^{+\infty} \dfrac{2\int_{B_s}\sum_{i<j}^ku_i^2u_j^2}{\int_{\partial B_s}\sum_1^ku_i^2}ds<+\infty
\]
and we infer
\[\liminf_{r\to+\infty}\dfrac{r\int_{B_{r}}\sum_{i<j}^ku_i^2u_j^2}{\int_{\partial B_{r}}\sum_1^ku_i^2}=0.\]
Next, using Corollary \ref{existencelimitH} we can compute
\[\liminf_{r\to+\infty}\dfrac{\int_{B_{r}}\sum_{i<j}^ku_i^2u_j^2}{r^{2d}}=
\liminf_{r\to+\infty}\dfrac{\int_{B_{r}}\sum_{i<j}^ku_i^2u_j^2}{H(r)} \dfrac{H(r)}{r^{2d}}
=0,\]
and finally
\[\liminf_{r\to +\infty}\dfrac{\widehat E(r)-E(r)}{r^{2d}}=0\;.\]
If the limit of $N(r)$ were strictly less than $d$, the growth of $H(r)$ would be in contradiction with that of $E(r)$.
\end{proof}
Now we can combine the upper and lower estimates to obtain convergence
of the approximating solutions on compact sets and complete the
proof of Theorem \ref{thm:maini}.
\begin{proof}[Proof of Theorem \ref{thm:maini}.]
Let $(u_{1,R},\dots,u_{k,R})$ be a family of solutions to \eqref{eq:system} such that $N_R(R)\leq d$ and $H_R(R)= CR^{2d}$.
Since $H_R(R)=CR^{2d}$, by Proposition \ref{doubling property}
we deduce that, for every fixed $1<r<R$, there holds
\[H_R(r)\geq Ce^{-d}r^{2d}\;. \]
Assume first that there holds a uniform bound for some $r>1$,
\begin{equation}\label{eq:boundonH}
H_R(r)\leq C\;.
\end{equation}
Then $H_R(r)$ and $E_R(r)$ are uniformly bounded on $R$. This implies a uniform bound on
the $H^1(B_{r})$ norm. As the components are subharmonic, standard elliptic estimates (Harnack inequality) yield actually a $\mathcal C^2$ bound on $B_{r/2}$, which is independent on $R$. Note that, by Proposition \ref{prop:lowerbound}, $H_R(r)$ is bounded away from zero, so the weak limit cannot be zero. By the doubling Property \ref{doubling property} the uniform bound on $H_R(r_2)\leq C r_2^{2d}$ holds for every $r_2\in\mathbb R$ larger than $r$. Thus, a diagonal procedure yields existence of a nontrivial limit solution of the differential system, defined on the whole of $\mathbb C$. It is worthwhile noticing that this solution inherits all the symmetries of the approximating solutions together with the upper bound on the Almgren's quotient.
Finally, from Corollaries \ref{existencelimitH} and \ref{existencelimitE} we infer the limit
\begin{equation}\label{right growth rate}
\lim_{r\to +\infty}\dfrac{H(r)}{r^{2d}}=\lim_{r\to
+\infty}\dfrac{1}{N(r)},
\lim_{r\to
+\infty}\dfrac{E(r)}{r^{2d}}=\dfrac{b}{d}\in(0,+\infty)\;.
\end{equation}
Let us now show that $H_R (r)$ is uniformly bounded with respect to $R$ for fixed $r$. We argue by contradiction and assume that, for a sequence $R_n\to+\infty$, there holds
\begin{equation}\label{eq:Hunbounded}
\lim_{n\to+\infty}H_{R_n}(r)=+\infty\;.
\end{equation}
Denote $u_{i,n}=u_{i,R_n}$ and $H_n$, $E_n$, $N_n$ the corresponding functions. Note that, as $E_n$ is bounded, we must have $N_n(r)\to 0$. For each $n$, let $\lambda_n\in(0,r)$ such that
\[\lambda^2_nH_n(\lambda_n)=1\;\]
(such $\lambda_n$ exist precisely because of \eqref{eq:Hunbounded}) and scale
\[\tilde u_{i,n}(z)=\lambda_n u_{i,n}(\lambda_n z)\;, \quad |z|<R_n/\lambda_n\;.\]
Note that the $(\tilde u_{i,n})_i$ still solve system \eqref{eq:system} on the disk $B(0,R_n/\lambda_n)$ and enjoy all the symmetries \eqref{eqn2_i}. Let us denote $\tilde H_n$, $\tilde E_n$, $\tilde N_n$ the corresponding quantities. We have
\[\begin{aligned}
\tilde H_n(1)&=\lambda^2_nH_n(\lambda_n)=1, \\
\tilde E_n(1)&=\lambda^2_nE_n(\lambda_n) \to 0 \\
\tilde N_n(1)&=N_n(\lambda_n)\to 0
\end{aligned}\]
In addition there holds $\tilde N_n(s)\leq d$ for $s<R_n/\lambda_n$. By the compactness argument exposed above, we can extract a subsequence converging in the compact-open topology of $\mathcal C^2$ to a nontrivial symmetric solution of \eqref{eq:system} with Almgren quotient vanishing constantly. Thus, such a solution should be a nonzero constant in each component, but constant solutions are not compatible with the system of PDEs \eqref{eq:system}.
\end{proof}
\section{Asymptotics at infinity}
\numberwithin{equation}{section}
\setcounter{equation}{0}
We now come to the proof of Theorem \ref{thm asymptotics at
infinity}. Note that by Proposition \ref{doubling property}, the
condition on $N(r)$ implies that $u$ and $v$ have a polynomial
growth. (In fact, with more effort we can show the reverse also holds. Namely, if $u$ and $v$ have polynomial growth, then $N(r)$ approaches a positive integer as $r\to +\infty$. We leave out the proof.)
Recall the blow down
sequence is defined by
$$(u_R(x), v_R(x)):=(\frac{1}{L(R)}u(Rx),\frac{1}{L(R)}v(Rx)),$$
where $L(R)$ is chosen so that
\begin{equation}\label{normalization condition}
\int_{\partial B_1(0)}u_R^2+v_R^2=\int_{\partial B_1(0)}\Phi^2.
\end{equation}
For the solutions in Theorem \ref{main result}, by \eqref{right
growth rate}, we have
\begin{equation}\label{eq:L(R)}
L(R)\sim R^d.
\end{equation}
\par
We
will now analyze the limit of $(u_R,v_R)$ as $R\rightarrow+\infty$.
\par
Because for any $r\in(0,+\infty)$, $N(r)\leq d$, $(u, v)$ satisfies
Proposition \ref{doubling property} for any $r\in(1,+\infty)$. After
rescaling, we see that Proposition \ref{doubling property} holds for
$(u_R,v_R)$ as well. Hence, there exists a constant $C>0$, such that for
any $R$ and $r\in(1,+\infty)$,
\begin{equation}\label{4.3}
\int_{\partial B_r(0)}u_R^2+v_R^2\leq C e^{d}r^d.
\end{equation}
Next, $(u_R,v_R)$ satisfies the equation
\begin{equation}\label{4.4}
\left\{ \begin{aligned}
&\Delta u_R=L(R)^2R^2u_Rv_R^2,\\
&\Delta v_R=L(R)^2R^2v_Ru_R^2,\\
&u_R,v_R>0~~\mbox{in}~~\mathbb{R}^2.
\end{aligned} \right.
\end{equation}
Here we need to observe that, by \eqref{eq:L(R)},
$$\lim\limits_{R\rightarrow+\infty}L(R)^2R^2=+\infty.$$
By \eqref{4.3}, as $R\rightarrow+\infty$, $u_R$ and $v_R$ are
uniformly bounded on any compact set of $\mathbb{R}^2$. Then by the
main result in \cite{DWZ2011}, \cite{NTTV} and \cite{TT2011}, there is a harmonic
function $\Psi$ defined in $\mathbb{R}^2$, such that (a subsequence
of) $(u_R,v_R)\rightarrow(\Psi^+,\Psi^-)$ in $H^1$ and in H\"older spaces on any compact
set of $\mathbb{R}^2$. By \eqref{normalization condition},
$$\int_{\partial B_1(0)}\Psi^2=\int_{\partial B_1(0)}\Phi^2,$$
so $\Psi$ is nonzero. Because $L(R)\rightarrow+\infty$, $u_R(0)$ and
$v_R(0)$ go to $0$, hence
\begin{equation}\label{4.6}
\Psi(0)=0.
\end{equation}
\par
After rescaling in Proposition \ref{monotonocity}, we obtain a
corresponding monotonicity formula for $(u_R,v_R)$,
$$N(r;u_R,v_R):=\frac{r\int_{B_r(0)}|\nabla u_R|^2+|\nabla v_R|^2+L(R)^2R^2u_R^2v_R^2}
{\int_{\partial B_r(0)}u_R^2+v_R^2}=N(Rr)$$ is nondecreasing in $r$.
By (4) in Theorem \ref{main result} and from Corollary \ref{existencelimitE},
\begin{equation}\label{4.7}
N(r;u_R,v_R)\leq d=\lim_{r\to+\infty} N(r;u_R,v_R)\;\;, \forall\; r\in(0,+\infty).
\end{equation}
In \cite{DWZ2011}, it is also proved that
$(u_R,v_R)\rightarrow(\Psi^+,\Psi^-)$ in $H^1_{loc}$ and for any
$r<+\infty$,
$$\lim\limits_{R\rightarrow+\infty}\int_{B_r(0)}L(R)^2R^2u_R^2v_R^2=0.$$
After letting $R\rightarrow+\infty$ in \eqref{4.7}, we get
\begin{equation}\label{convergence of degree}
N(r;\Psi):=\frac{r\int_{B_r(0)}|\nabla \Psi|^2} {\int_{\partial
B_r(0)}\Psi^2}=\lim\limits_{R\rightarrow+\infty}N(r;u_R,v_R)=\lim\limits_{R\rightarrow+\infty}N(Rr)=
d.
\end{equation}
In
particular, $N(r;\Psi)$ is a constant for all $r\in(0,+\infty)$. So $\Psi$ is a homogeneous polynomial of degree $d$. Actually the number
$d$ is the vanishing order of $\Psi$ at $0$, which must therefore be a positive integer.
Now it remains to prove that $\Psi\equiv\Phi$: this is easily done by exploiting the symmetry conditions on $\Psi$ (point $(3)$ of Theorem \ref{main result}).
\noindent {\bf Acknowledgment.} Part of this work was carried out while Henri Berestycki was visiting the
Department of Mathematics at the University of Chicago. He was supported
by an NSF FRG grant DMS-1065979 and by the French ``Agence Nationale de la
Recherche'' within the project PREFERED (ANR 08-BLAN-0313). Juncheng Wei was supported by a GRF grant
from RGC of Hong Kong. Susanna Terracini was partially supported by the
Italian PRIN2009 grant ``Critical Point Theory and Perturbative
Methods for Nonlinear Differential Equations''. Kelei Wang was
supported by the Australian Research Council.
\addcontentsline{toc}{section}{References}
\end{document} |
\begin{document}
\title{Corrigendum to ``A Discrete Method to Solve Fractional Optimal Control Problems''}
\author{Ricardo Almeida\\
{\tt [email protected]}
\and
Delfim F. M. Torres\\
{\tt [email protected]}}
\date{Center for Research and Development in Mathematics and Applications (CIDMA)\\
Department of Mathematics, University of Aveiro, 3810--193 Aveiro, Portugal}
\maketitle
\begin{abstract}
We clarify the applicability of the method proposed
in [Nonlinear Dynam. {\bf 80} (2015), no.~4, 1811--1816].
\end{abstract}
In \cite{MR3343434}, a discrete method was presented
to solve the following optimal control problem:
minimize the functional
$$
J(x,u)=\int_a^b L(t,x(t),u(t))\,dt
$$
subject to the fractional dynamic constraint
$$
M \dot{x}(t) + N{_a^CD_t^\alpha} x(t)= f\left(t,x(t),u(t)\right), \quad t\in[a,b],
$$
and the initial condition
$$
x(a)=x_a,
$$
where $(M,N)\not=(0,0)$ and $x_a$ is a fixed real number.
The first step of the method consists in replacing the fractional
derivative by an approximation, obtaining a new dynamic constraint:
$$
\dot{x}(t)=\frac{f(t,x(t),u(t))-NA(t-a)^{-\alpha}x(t)+\displaystyle\sum_{p=2}^K NC_p(t-a)^{1-p-\alpha}V_p(t)+\displaystyle\frac{Nx_a(t-a)^{-\alpha}}{\Gamma(1-\alpha)}}{M+NB(t-a)^{1-\alpha}}.
$$
Since $\alpha\in(0,1)$, one gets that $t=a$ is a singularity
for the optimal control problem. In order to proceed with the method,
the initial condition appearing in Eq. (7) of \cite{MR3343434} must
be also approximated by a new one. Instead of considering the constraint
$x(a)=x_a$, one should replace it by $x(a+\epsilon)=x_a$, where $0<\epsilon\approx0$,
eliminating in this way the problem with respect to the initial point.
Later, when one applies the Euler discretization method, it is also necessary
to replace the initial condition $x_0=x_a$ by a new one, $x_i=x_a$, with $i>0$.
In the example of \cite[Section 4]{MR3343434} with $n=100$, one has $x_5=0$.
\section*{Acknowledgments}
Research supported by Portuguese funds through the
Center for Research and Development in Mathematics and Applications (CIDMA)
and the Portuguese Foundation for Science and Technology (FCT),
within project UID/MAT/04106/2013.
\end{document} |
\begin{document}
\textsc{In memory of my Grandmother}
\begin{Titul}
{\large \bf ON AN INEQUALITY OF DIFFERENT METRICS\\[2mm] FOR ALGEBRAIC POLYNOMIALS }\\[3ex]
{{\bf Roman~A.~Veprintsev} \\[5ex]}
\end{Titul}
\begin{Anot}
{\bf Abstract.} We establish an inequality of different metrics for algebraic polynomials.
{\bf Key words and phrases:} inequality of different metrics, algebraic polynomials, generalized Jacobi weight
{\bf MSC 2010:} 41A17, 42A05
\end{Anot}
\section{Introduction and preliminaries}
In this section, we give some notation used in the article.
Consider the generalized Jacobi weight
\begin{equation*}
\omega_{\alpha,\beta,\gamma}(x)=(1-x)^\alpha(1+x)^\beta|x|^\gamma,\qquad x\in[-1,1],
\end{equation*}
where $\alpha,\,\beta,\,\gamma>-1$. Given $1\leq p\leq\infty$, we denote by $L_p(\omega_{\alpha,\beta,\gamma})$ the space of complex-valued Lebesgue measurable functions $f$ on $[-1,1]$ with finite norm
\begin{equation*}
\begin{array}{lr}
\|f\|_{L_p(\omega_{\alpha,\beta,\gamma})}=\Bigl(\int\nolimits_{-1}^1 |f(x)|^p\,\omega_{\alpha,\beta,\gamma}(x)\,dx\Bigr)^{1/p},&\quad 1\leq p<\infty,\\[1.0em]
\|f\|_{L_\infty(\omega_{\alpha,\beta,\gamma})}=\esssup\limits_{x\in[-1,1]} |f(x)|.&
\end{array}
\end{equation*}
Define the uniform norm of a continuous function $f$ on $[-1,1]$ by
\begin{equation*}
\|f\|_{\infty}=\max\limits_{-1\leq x\leq 1} |f(x)|.
\end{equation*}
The maximum and the minimum of two real numbers $x$ and $y$ are denoted by $\max(x,y)$ and $\min(x,y)$, respectively. For $\alpha,\,\mu\geq0$, $p\in[1,\infty)$, and $n=1,\,2,\,\ldots$, let
\begin{equation*}
l_{\alpha,\mu}=\frac{\alpha}{\alpha+\mu}\quad (l_{0,0}=0),\qquad l_{\alpha,\mu}^{\max}=\frac{\max(\alpha,\mu)}{\alpha+\mu}\quad (l_{0,0}^{\max}=0),
\end{equation*}
\begin{equation}\label{special_constant}
C(\alpha,\mu,p,n)=\Bigl(1-\frac{1}{\pi n}\Bigr)^{-\frac{\min(\alpha,\mu)}{p}}\, 2^{\,1+\frac{1}{p}}\,(\max(\alpha,\mu)+1)^{\frac{1}{p}}\,\pi^{\frac{\max(\alpha,\mu)}{p}}.
\end{equation}
Note that $l_{\alpha,\mu}^{\max}\in\bigl[\frac{1}{2},1\bigr]$ if $\max(\alpha,\mu)>0$.
The aim of the paper is to establish an inequality of different metrics for algebraic polynomials. In order to realize this aim, we prove a generalization of a lemma by N.\,K.~Bari.
\section{Auxiliary results}
In this section, we establish some lemmas that will be used to prove our main results.
\begin{lemen}\label{first_lemma_for_segment}
Let $\alpha\geq\mu\geq0$. Suppose that $\Delta\subset[0,1]$ is any segment of length $l$ with $l\leq l_{\alpha,\mu}$. Then the following inequality holds:
\begin{equation*}
\int\nolimits_0^{l} x^\alpha(1-x)^\mu\,dx\leq \int\nolimits_{\Delta}x^\alpha(1-x)^\mu\,dx.
\end{equation*}
\end{lemen}
\proofen The claim is obviously true when $\mu=0$. We now prove the claim for $\mu>0$.
Note that $l_{\alpha,\mu}\in\bigl[\frac{1}{2},1\bigr)$ when $\mu>0$.
The function $x^\alpha(1-x)^\mu$ is increasing on $\bigl[0,\frac{\alpha}{\alpha+\mu}\bigr]$ and is decreasing on $\bigl[\frac{\alpha}{\alpha+\mu},1\bigr]$, because the derivative of this function is positive on $\bigl(0,\frac{\alpha}{\alpha+\mu}\bigr)$ and is negative on $\bigl(\frac{\alpha}{\alpha+\mu},1\bigr)$.
Let us consider the following cases:
\begin{itemize}
\item[I)] $\Delta\subset[0,l_{\alpha,\mu}]$;
\item[II)] $\Delta\subset[l_{\alpha,\mu},1]$;
\item[III)] $\Delta=[a,b]$ and $l_{\alpha,\mu}\in(a,b)$.
\end{itemize}
Case I). Because the function $x^\alpha(1-x)^\mu$ is increasing on $\bigl[0,\frac{\alpha}{\alpha+\mu}\bigr]$, we have, for any segments $\Delta_1=[a_1,b_1]\subset\bigl[0,\frac{\alpha}{\alpha+\mu}\bigr]$ and $\Delta_2=[a_2,b_2]\subset\bigl[0,\frac{\alpha}{\alpha+\mu}\bigr]$ of equal length with $a_1\leq a_2$ (or, equivalently, $b_1\leq b_2$),
\begin{equation}\label{equation_for_first_case}
\int\nolimits_{\Delta_1} x^\alpha(1-x)^\mu\,dx\leq \int\nolimits_{\Delta_2} x^\alpha(1-x)^\mu\,dx.
\end{equation}
Putting $\Delta_1=[0,l]$ and $\Delta_2=\Delta$ in \eqref{equation_for_first_case}, we obtain the desired inequality.
Case II). Note that in this case $l\leq 1-l_{\alpha,\mu}\leq\frac{1}{2}$. Because the function $x^\alpha(1-x)^\mu$ is decreasing on $\bigl[\frac{\alpha}{\alpha+\mu},1\bigr]$,
\begin{equation}\label{first_equation_for_second_case}
\int\nolimits_{\Delta} x^\alpha(1-x)^\mu\,dx\geq\int\nolimits_{1-l}^1 x^\alpha(1-x)^\mu\,dx.
\end{equation}
Since $x^\alpha(1-x)^\mu\leq (1-x)^\alpha x^\mu$ on $\bigl[0,\frac{1}{2}\bigr]$, we get
\begin{equation}\label{second_equation_for_second_case}
\int\nolimits_{1-l}^1 x^\alpha(1-x)^\mu\,dx=\int\nolimits_0^l (1-x)^\alpha x^\mu\,dx\geq \int\nolimits_{0}^l x^\alpha(1-x)^\mu\,dx.
\end{equation}
Now the desired inequality follows from the inequalities \eqref{first_equation_for_second_case} and \eqref{second_equation_for_second_case}.
Case III). We have
\begin{equation}\label{first_equation_for_third_case}
\int\nolimits_0^l x^\alpha(1-x)^\mu\,dx=\int\nolimits_{0}^{b-l_{\alpha,\mu}} x^\alpha(1-x)^\mu\,dx+\int\nolimits_{b-l_{\alpha,\mu}}^l x^\alpha(1-x)^\mu\,dx
\end{equation}
and
\begin{equation}\label{second_equation_for_third_case}
\int\nolimits_{\Delta} x^\alpha(1-x)^\mu\,dx=\int\nolimits_a^{l_{\alpha,\mu}} x^\alpha(1-x)^\mu\,dx+\int\nolimits_{l_{\alpha,\mu}}^b x^\alpha(1-x)^\mu\,dx.
\end{equation}
Applying \eqref{equation_for_first_case}--\eqref{second_equation_for_second_case} in the appropriate settings, we can obtain
\begin{equation*}
\int\nolimits_{b-l_{\alpha,\mu}}^l x^\alpha(1-x)^\mu\,dx\leq\int\nolimits_a^{l_{\alpha,\mu}} x^\alpha(1-x)^\mu\,dx,
\end{equation*}
\begin{equation*}
\int\nolimits_{0}^{b-l_{\alpha,\mu}} x^\alpha(1-x)^\mu\,dx\leq \int\nolimits_{l_{\alpha,\mu}}^b x^\alpha(1-x)^\mu\,dx.
\end{equation*}
Using \eqref{first_equation_for_third_case}, \eqref{second_equation_for_third_case}, and the above inequalities, we get the desired inequality.
$\square$
\begin{coren}\label{first_corollary_for_segment}
Let $\mu\geq\alpha\geq0$. Suppose that $\Delta\subset[0,1]$ is any segment of length $l$ with $l\leq l_{\mu,\alpha}$. Then the following inequality holds:
\begin{equation*}
\int\nolimits_{1-l}^{1} x^\alpha(1-x)^\mu\,dx\leq \int\nolimits_{\Delta}x^\alpha(1-x)^\mu\,dx.
\end{equation*}
\end{coren}
\begin{coren}\label{second_corollary_for_segment}
Let $\alpha,\,\mu\geq0$. Suppose that $\Delta\subset[0,1]$ is any segment of length $l$ with $l\leq l_{\alpha,\mu}^{\max}$ and $l<1$.
Then the following inequality holds:
\begin{equation*}
\int\nolimits_{\Delta} x^\alpha(1-x)^\mu\,dx\geq (1-l)^{\min(\alpha,\mu)}\frac{l^{\max(\alpha,\mu)+1}}{\max(\alpha,\mu)+1}.
\end{equation*}
\end{coren}
\proofen If $\alpha\geq\mu$, then, by Lemma~\ref{first_lemma_for_segment}, we get
\begin{equation}\label{first_estimate_for_second_corollary}
\int\nolimits_{\Delta}x^\alpha(1-x)^\mu\,dx\geq \int\nolimits_0^{l} x^\alpha(1-x)^\mu\,dx\geq (1-l)^\mu\int\nolimits_0^l x^\alpha\,dx=(1-l)^\mu \frac{l^{\alpha+1}}{\alpha+1}.
\end{equation}
If $\mu\geq\alpha$, then, by Corollary~\ref{first_corollary_for_segment}, we get
\begin{equation}\label{second_estimate_for_second_corollary}
\int\nolimits_{\Delta}x^\alpha(1-x)^\mu\,dx\geq \int\nolimits_{1-l}^{1} x^\alpha(1-x)^\mu\,dx\geq (1-l)^\alpha\int\nolimits_{1-l}^1 (1-x)^\mu\,dx=(1-l)^\alpha \frac{l^{\mu+1}}{\mu+1}.
\end{equation}
Combining \eqref{first_estimate_for_second_corollary} and \eqref{second_estimate_for_second_corollary}, we obtain the desired estimate.
$\square$
\begin{lemen}\label{additional_lemma}Let $\alpha,\,\mu\geq0$. Suppose that $\Delta\subset\bigl[0,\frac{\pi}{2}\bigr]$ is any segment of length $l$ with $l\leq \frac{\pi}{2}\,l_{\alpha,\mu}^{\max}$ and $l<\frac{\pi}{2}$. Then
\begin{equation*}
\int\nolimits_{\Delta} |\sin t|^\alpha|\cos t|^\mu\,dt\geq \Bigl(1-\frac{2l}{\pi}\Bigr)^{\min(\alpha,\mu)} \frac{2^{\,\max(\alpha,\mu)}}{\pi^{\max(\alpha,\mu)}}\,\cdot\,\frac{l^{\max(\alpha,\mu)+1}}{\max(\alpha,\mu)+1}.
\end{equation*}
\end{lemen}
\proofen It is well known that, for $t\in\bigl[0,\frac{\pi}{2}\bigr]$,
\begin{equation*}
\sin t\geq \frac{2t}{\pi},\qquad
\cos t\geq 1-\frac{2t}{\pi}.
\end{equation*}
Let $\Delta=[a,b]$. Note that $b-a=l$, $\bigl(\frac{2b}{\pi}-\frac{2a}{\pi}\bigr)\leq l_{\alpha,\mu}^{\max}$, and $\bigl(\frac{2b}{\pi}-\frac{2a}{\pi}\bigr)<1$. Using the above inequalities and Corollary~\ref{second_corollary_for_segment}, we obtain
\begin{equation*}
\begin{split}
\int_a^b |\sin t|^\alpha|\cos t|^\mu\,dt&\geq \int\nolimits_a^b \Bigl(\frac{2}{\pi}\,t\Bigr)^\alpha \Bigl(1-\frac{2}{\pi}\,t\Bigr)^\mu\,dt=\frac{\pi}{2}\,\int\nolimits_{(2a)/\pi}^{(2b)/\pi} x^\alpha(1-x)^\mu\,dx\geq\\
&\geq \frac{\pi}{2}\,\Bigl(1-\frac{2l}{\pi}\Bigr)^{\min(\alpha,\mu)}\,\cdot\,\frac{\bigl(\frac{2}{\pi}\,l\bigr)^{\max(\alpha,\mu)+1}}{\max(\alpha,\mu)+1}=\\
&=\Bigl(1-\frac{2l}{\pi}\Bigr)^{\min(\alpha,\mu)} \frac{2^{\,\max(\alpha,\mu)}}{\pi^{\max(\alpha,\mu)}}\,\cdot\,\frac{l^{\max(\alpha,\mu)+1}}{\max(\alpha,\mu)+1}.
\end{split}
\end{equation*}
$\square$
\section{Main results}
The following lemma generalizes Lemma~1 in \cite{bari_article_generalization_of_inequalities_1954}.
\begin{lemen}\label{generalization_of_Bari}Let $\alpha,\,\mu\geq0$, $p\geq1$, and let $n$ be a positive integer. For any trigonometric polynomial $T_n$ of degree $n$, we have
\begin{equation*}
\max\limits_{-\pi\leq t\leq\pi} |T_n(t)|\leq C(\alpha,\mu,p,n)\,n^{\frac{\max(\alpha,\mu)+1}{p}}\,\Bigl(\int\nolimits_{-\pi}^{\pi} |T_n(t)|^p |\sin t|^{\alpha}|\cos t|^\mu\,dt\Bigr)^{1/p},
\end{equation*}
where the constant $C(\alpha,\mu,p,n)$ is defined in \eqref{special_constant}.
\end{lemen}
\proofen Let
\begin{equation}\label{eq_1_Bari}
\nu=|T_n(t_0)|=\max\limits_{-\pi\leq t\leq\pi} |T_n(t)|,
\end{equation}
\begin{equation*}
\Delta_0=\Bigl[t_0-\frac{1}{2n},t_0+\frac{1}{2n}\Bigr].
\end{equation*}
From Bernstein's inequality it follows that
\begin{equation}\label{eq_2_Bari}
|T'_n(t)|\leq n\nu,\quad t\in[-\pi,\pi].
\end{equation}
It is known that, for any $h\geq0$, there exists a $\theta\in(0,1)$ such that
\begin{equation}\label{eq_3_Bari}
\bigl||T_n(t_0+h)|-|T_n(t_0)|\bigr|\leq|T_n(t_0+h)-T_n(t_0)|=|h||T'_n(t_0+\theta h)|.
\end{equation}
Using \eqref{eq_1_Bari}--\eqref{eq_3_Bari}, we get $\bigl||T_n(t_0+h)|-\nu\bigr|\leq n\nu|h|$. Hence, for $|h|\leq\frac{1}{2n}$,
\begin{equation*}
|T_n(t)|\geq\frac{\nu}{2},\qquad t\in\Delta_0.
\end{equation*}
Thus, we have
\begin{equation}\label{eq_4_Bari}
\begin{split}
\int\nolimits_{-\pi}^{\pi} |T_n(t)|^p |\sin t|^{\alpha}|\cos t|^\mu\,dt&\geq\int\nolimits_{\Delta_0} |T_n(t)|^p |\sin t|^{\alpha}|\cos t|^\mu\,dt\geq\\&\geq\Bigl(\frac{\nu}{2}\Bigr)^p \int\nolimits_{\Delta_0} |\sin t|^{\alpha}|\cos t|^\mu\,dt.
\end{split}
\end{equation}
Since $|\sin t|^{\alpha}|\cos t|^\mu$ is an even function of period $\pi$, we can assume, without loss of generality, that the centre of $\Delta_0$ belongs to $\bigl[0,\frac{\pi}{2}\bigr]$. Then there exists a segment $\Delta$ of length $\frac{1}{2n}$ such that $\Delta\subset\bigl[0,\frac{\pi}{2}\bigr]$ and $\Delta\subset\Delta_0$. Note that
$\frac{1}{2n}<\frac{\pi}{4}\leq \frac{\pi}{2} l_{\alpha,\mu}^{\max}$. Hence, using Lemma~\ref{additional_lemma}, we get
\begin{equation}\label{eq_5_Bari}
\begin{split}
\int\nolimits_{\Delta_0} |\sin t|^{\alpha}|\cos t|^{\mu}\,dt&\geq \int\nolimits_{\Delta} |\sin t|^{\alpha}|\cos t|^{\mu}\,dt\geq\\
&\geq \Bigl(1-\frac{1}{\pi n}\Bigr)^{\min(\alpha,\mu)}\,\cdot\,\frac{1}{2(\max(\alpha,\mu)+1) \pi^{\max(\alpha,\mu)} n^{\max(\alpha,\mu)+1}}.
\end{split}
\end{equation}
From \eqref{eq_4_Bari}, \eqref{eq_5_Bari} it follows that
\begin{equation*}
\begin{split}
\Bigl(\int\nolimits_{-\pi}^{\pi} &|T_n(t)|^p |\sin t|^{\alpha}|\cos t|^\mu\,dt\Bigr)^{1/p}\geq\\
&\geq\frac{\nu}{2}\,\cdot\,\Bigl\{\Bigl(1-\frac{1}{\pi n}\Bigr)^{\min(\alpha,\mu)}\,\cdot\,\frac{1}{2(\max(\alpha,\mu)+1) \pi^{\max(\alpha,\mu)} n^{\max(\alpha,\mu)+1}}\Bigr\}^{1/p}=\\
&=\Bigl\{\Bigl(1-\frac{1}{\pi n}\Bigr)^{-\frac{\min(\alpha,\mu)}{p}} 2^{\,1+\frac{1}{p}}(\max(\alpha,\mu)+1)^{\frac{1}{p}}\pi^{\frac{\max(\alpha,\mu)}{p}} n^{\frac{\max(\alpha,\mu)+1}{p}}\Bigr\}^{-1}\,\cdot\,\nu.
\end{split}
\end{equation*}
$\square$
Now we list some properties of $C(\alpha,\mu,p,n)$:
\begin{itemize}
\item[$(1)$] $C(\alpha,\mu,p,n)\leq C(\alpha,\mu,p,1)$, $n=1,\,2,\,\ldots$.
\item[$(2)$] $C(\alpha,\mu,p,n)\to 2^{1+\frac{1}{p}}\,(\max(\alpha,\mu)+1)^{\frac{1}{p}}\,\pi^{\frac{\max(\alpha,\mu)}{p}}$, $n\to\infty$.
\item[$(3)$] If $\max(\alpha,\mu)\leq p$, then $C(\alpha,\mu,p,1)\leq\frac{8\pi^2}{\pi-1}$ and $C(\alpha,0,p,1)\leq8\pi$.
\end{itemize}
The following theorem generalizes Lemma (an inequality of different metrics for polynomials) in \cite{kamzolov_article_Foirier-Jacobi_series_2007}.
\begin{teoen}\label{main_theorem}Let $\alpha\geq\beta\geq-\frac{1}{2}$, $\mu\geq0$, $1\leq p<q\leq\infty$, and let $n$ be a positive integer.
If $P_n$ is an algebraic polynomial of degree $n$, then
\begin{equation*}
\|P_n\|_{L_q(\omega_{\alpha,\beta,\mu})}\leq B(\alpha,\beta,\mu,p,n)^{\left(\frac{1}{p}-\frac{1}{q}\right)} n^{\max(2(\alpha+1),\mu+1)\left(\frac{1}{p}-\frac{1}{q}\right)} \|P_n\|_{L_p(\omega_{\alpha,\beta,\mu})},
\end{equation*}
where
\begin{equation*}
B(\alpha,\beta,\mu,p,n)=2^{\,2p+1+\alpha-\beta} \Bigl(1-\frac{1}{\pi n}\Bigr)^{-\min(2\alpha+1,\mu)} \max(2(\alpha+1),\mu+1) \, \pi^{\max(2\alpha+1,\mu)}.
\end{equation*}
\end{teoen}
\proofen Note that
\begin{equation*}
B(\alpha,\beta,\mu,p,n)=\Bigl\{2^{1+\frac{\alpha-\beta}{p}} C(2\alpha+1,\mu,p,n)\Bigr\}^p.
\end{equation*}
Using Lemma~\ref{generalization_of_Bari}, we get
\begin{equation*}
\begin{split}
\|P_n\|_\infty&=\max\limits_{-\pi\leq t\leq\pi} |P_n(\cos t)|\leq\\&\leq C(2\alpha+1,\mu,p,n) \, n^{\frac{\max(2\alpha+1,\mu)+1}{p}} \Bigl(\int\nolimits_{-\pi}^{\pi} |P_n(\cos t)|^p |\sin t|^{2\alpha+1}|\cos t|^{\mu}\,dt\Bigr)^{1/p}=\\
&=2\, C(2\alpha+1,\mu,p,n) \, n^{\frac{\max\left(2(\alpha+1),\mu+1\right)}{p}}\, \Bigl(\int\nolimits_{0}^{\pi} |P_n(\cos t)|^p (\sin t)^{2\alpha+1}|\cos t|^{\mu}\,dt\Bigr)^{1/p}=\\
&=2^{1+\frac{2\alpha+1}{p}} \, C(2\alpha+1,\mu,p,n) \, n^{\frac{\max\left(2(\alpha+1),\mu+1\right)}{p}} \times\\&\quad\times \Bigl(\int\nolimits_{0}^{\pi} |P_n(\cos t)|^p \Bigl(\sin \frac{t}{2}\Bigr)^{2\alpha+1}\Bigl(\cos \frac{t}{2}\Bigr)^{2\alpha+1}|\cos t|^{\mu}\,dt\Bigr)^{1/p}=\\
&=2 \, C(2\alpha+1,\mu,p,n) \, n^{\frac{\max\left(2(\alpha+1),\mu+1\right)}{p}}\times\\&\quad\times \Bigl(\int\nolimits_{0}^{\pi} |P_n(\cos t)|^p (1-\cos t)^{\alpha+1/2}(1+\cos t)^{\alpha+1/2}|\cos t|^{\mu}\,dt\Bigr)^{1/p}=\\
&=2^{\,1+\frac{\alpha-\beta}{p}} \, C(2\alpha+1,\mu,p,n) \, n^{\frac{\max\left(2(\alpha+1),\mu+1\right)}{p}}\times\\&\quad\times \Bigl(\int\nolimits_{0}^{\pi} |P_n(\cos t)|^p (1-\cos t)^{\alpha+1/2}(1+\cos t)^{\beta+1/2}|\cos t|^{\mu}\,dt\Bigr)^{1/p}=\\
&=2^{\,1+\frac{\alpha-\beta}{p}} \, C(2\alpha+1,\mu,p,n) \, n^{\frac{\max\left(2(\alpha+1),\mu+1\right)}{p}}\times\\&\quad\times
\Bigl(\int\nolimits_{-1}^1 |P_n(x)|^p\,(1-x)^{\alpha}(1+x)^{\beta}|x|^{\mu}\,dx\Bigr)^{1/p}=\\
&=2^{\,1+\frac{\alpha-\beta}{p}} \, C(2\alpha+1,\mu,p,n) \, n^{\frac{\max\left(2(\alpha+1),\mu+1\right)}{p}}\,\|P_n\|_{L_p(\omega_{\alpha,\beta,\mu})}.
\end{split}
\end{equation*}
Consequently,
\begin{equation*}
\begin{split}
\|P_n\|_{L_q(\omega_{\alpha,\beta,\mu})}&=\Bigl(\int\nolimits_{-1}^1 |P_n(x)|^q\,(1-x)^{\alpha}(1+x)^{\beta}|x|^{\mu}\,dx\Bigr)^{1/q}\leq\\
&\leq \Bigl(\int\nolimits_{-1}^1 \|P_n\|_{\infty}^{q-p} |P_n(x)|^p\,(1-x)^{\alpha}(1+x)^{\beta}|x|^{\mu}\,dx\Bigr)^{1/q}=\\
&=\|P_n\|_{\infty}^{1-\frac{p}{q}} \|P_n\|_{L_p(\omega_{\alpha,\beta,\mu})}^{\frac{p}{q}}\leq\\
&\leq \Bigl\{\Bigl(2^{\,1+\frac{\alpha-\beta}{p}} \, C(2\alpha+1,\mu,p,n)\Bigr)^p\Bigr\}^{\frac{1}{p}-\frac{1}{q}} n^{\max(2(\alpha+1),\mu+1)\left(\frac{1}{p}-\frac{1}{q}\right)} \|P_n\|_{L_p(\omega_{\alpha,\beta,\mu})}.
\end{split}
\end{equation*}
$\square$
Now we list some properties of $B(\alpha,\beta,\mu,p,n)$:
\begin{itemize}
\item[$(a)$] $B(\alpha,\beta,\mu,p,n)\leq B(\alpha,\beta,\mu,p,1)$, $n=1,\,2,\,\ldots$.
\item[$(b)$] $B(\alpha,\beta,\mu,p,n)\to2^{\,2p+1+\alpha-\beta}\, \max(2(\alpha+1),\mu+1)\, \pi^{\max(2\alpha+1,\mu)}$, $n\to\infty$.
\item[$(c)$] If $\max(2\alpha+1,\mu)\leq p$, then $B(\alpha,\beta,\mu,p,1)\leq2^{p+\alpha-\beta} \bigl(\frac{8\pi^2}{\pi-1}\bigr)^{p}$.
\end{itemize}
\section{Conclusion}
Our next aim is to prove based on Lemma~2.2 in \cite{fejzullahu_article_2013} that the inequality in Theorem~\ref{main_theorem} is precise in order.
\begin{Biblioen}
\bibitem{bari_article_generalization_of_inequalities_1954}N.\,K.~Bari, A generalization of the inequalities of S.\,N.~Bernstein and A.\,A.~Markov, \textit{Izv. Akad. Nauk SSSR Ser. Mat.} \textbf{18}\,(2)~(1954), 159--176.
\bibitem{fejzullahu_article_2013}B.\,Xh.~Fejzullahu, On orthogonal expansions with respect to the generalized Jacobi weight, \textit{Results. Math.} \textbf{63}~(2013), 1177--1193.
\bibitem{kamzolov_article_Foirier-Jacobi_series_2007}A.\,I.~Kamzolov, A norm of partial sums of Fourier\,--\,Jacobi series for functions from $L_p^{(\alpha,\beta)}$, \textit{Moscow University Mathematics Bulletin}, \textbf{62}\,(6)~(2007), 228--236.
\end{Biblioen}
\noindent \textsc{Independent researcher, Uzlovaya, Russia}
\noindent \textit{E-mail address}: \textbf{[email protected]}
\end{document} |
\begin{document}
\title{Quantum sensing to suppress systematic errors with measurements
in the Zeno regime
}
\author{Alisa Shimada}
\affiliation{Research Center for Emerging Computing Technologies, National institute of Advanced Industrial Science and Technology (AIST), Central2, 1-1-1 Umezono, Tsukuba, Ibaraki 305-8568, Japan}
\author{Hideaki Hakoshima}
\affiliation{Research Center for Emerging Computing Technologies, National institute of Advanced Industrial Science and Technology (AIST), Central2, 1-1-1 Umezono, Tsukuba, Ibaraki 305-8568, Japan}
\author{Suguru Endo}
\affiliation{NTT Secure Platform Laboratories, NTT Corporation, Musashino 180-8585, Japan}
\author{Kaoru Yamamoto}
\affiliation{NTT Secure Platform Laboratories, NTT Corporation, Musashino 180-8585, Japan}
\author{Yuichiro Matsuzaki}
\email{[email protected]}
\affiliation{Research Center for Emerging Computing Technologies, National institute of Advanced Industrial Science and Technology (AIST), Central2, 1-1-1 Umezono, Tsukuba, Ibaraki 305-8568, Japan}
\begin{abstract}
Quantum magnetic field sensing is an important technology for material science and biology. Although experimental imperfections affect the sensitivity, repetitions of the measurements decrease the estimation uncertainty by a square root of the total number of measurements
if there are only statistical errors.
However, it is difficult to precisely characterize the coherence time of the system because it fluctuates in time in realistic conditions, which induces systematic errors. In this case, due to residual bias of the measured values, the estimation uncertainty cannot be lowered below a finite value even in the limit of the infinite number of measurements. On the basis of the fact that the decoherence dynamics in the so-called Zeno regime are not significant compared to other regimes, we propose a novel but very simple protocol to use measurements in the Zeno regime for reducing systematic errors. Our scheme allows the estimation uncertainty $\delta ^2 \omega$ to scale as $L^{-1/2}$, where $L$ denotes the number of measurements, even when we cannot precisely characterize the coherence time.
\end{abstract}
\maketitle
\section{Introduction}
The measurement of small magnetic fields is an essential technique in several fields such as biology and material science. Recently, quantum systems have been used for sensitive magnetic field sensing. External magnetic fields shift the resonant frequency of the quantum system, and measurements of a phase shift allow us to detect magnetic fields via the frequency shift \cite{degen2017quantum}. Nitrogen vacancies in diamond \cite{taylor2008high,maze2008nanoscale,balasubramanian2008nanoscale}, superconducting flux qubits \cite{bal2012ultrasensitive,toida2019electron,budoyo2020electron}, and optically pumped atoms \cite{kominis2003subfemtotesla}
have been used for such quantum magnetic field sensing.
Experimental imperfections such as decoherence typically deteriorate the sensitivity \cite{degen2017quantum}. For the detection of the small magnetic fields, it is essential to decrease the estimation uncertainty. When experimental parameters such as g factor and coherence time of the qubits are known, we can estimate the magnetic fields from the experimental results without bias.
In this case, the estimation uncertainty decreases as the total number of measurements increases, because there are only statistical errors.
However, in the actual experiments, the coherence time of the system generally fluctuates in time \cite{yan2016flux,abdurakhimov2019long}, which makes it difficult to know the accurate value of the coherence time. In such a case, the lack of
knowledge of the coherence time induces systematic errors in the estimation. Under the effect of the systematic errors, the estimation uncertainty is lower bounded by a specific value even in the limit of the large number of measurements \cite{wolf2015subpicotesla,budoyo2020electron}. This could be a
problem to measure small magnetic fields.
On the other hand, quantum mechanics predicts that
frequent measurements with short time intervals can suppress the evolution of quantum states \cite{misra1977zeno,facchi2008quantum}. In the so-called Zeno regime, the `survival probability' that the state remains in the initial state after a short time $t$ scales as $P_{\rm{suv}}\simeq 1-\Gamma ^2 t^2$, where $\Gamma$ denotes a decay rate \cite{schulman1994characteristic,nakazato1996temporal,erez2004correcting,facchi2008quantum,koshino2005quantum}. This phenomenon has been experimentally demonstrated
in several systems \cite{itano1990quantum,kwiat1995interaction,fischer2001observation,streed2006continuous,helmer2009quantum,wolters2013quantum,kakuyanagi2015observation,kondo2016using,kalb2016experimental}.
Here, we propose to use measurements in the Zeno regime for quantum sensing
to suppress the systematic errors when we cannot characterize the precise value of the coherence time. In the Zeno regime, the system coupled with the environment shows a quadratic decay, and the decay dynamics can be much slower for a short time region than the other regions. This means that the qubit state is not significantly affected by the decoherence in the short time regime. We show that, by performing the measurements in the Zeno regime, we can suppress the effect of the systematic errors induced by the lack of
knowledge of the coherence time.
Moreover, in our scheme,
the estimation uncertainty
can be arbitrarily small by increasing the number of measurements, even
when we do not know the precise value of the coherence time.
Although the measurements in the Zeno regime were discussed in entanglement-enhanced sensing \cite{Matsuzaki2011NMmetro,ChinNM2012PRL,Zeno2015}, we are the first to utilize this concept to suppress the systematic errors.
\section{quantum sensing}
Let us review the standard quantum sensing, focusing on the case of a qubit.
We explain two cases, without and with systematic errors.
\subsection{Quantum sensing without systematic errors}
First, we review the sensing without systematic errors.
Here, for simplicity, we ignore any experimental imperfection such as decoherence.
We consider the Hamiltonian as follows
\begin{eqnarray}
H=\sum _{j=1}^L \frac{\omega}{2} \hat{\sigma}^{(j)}_z,
\end{eqnarray}
where $\omega$ denotes a frequency of the qubit and
$\hat{\sigma}_z$
denotes the Pauli matrix spanned by $|1\rangle $ and $|-1\rangle$. We assume that $\omega$ has a linear dependence on
applied magnetic fields $B$. This means that, if we know the value of the $\omega$, we can determine the value of the magnetic fields.
The sensing protocol can be performed as follows.
First, we prepare the state of $\bigotimes _{j=1}^L|+\rangle _j=\frac{1}{\sqrt{2}}(|1\rangle _j+|-1\rangle _j)$.
Second, let the state evolve by the Hamiltonian to obtain $\bigotimes _{j=1}^L|\psi(t) \rangle_j $
where $|\psi(t) \rangle_j=\frac{1}{\sqrt{2}}(|1\rangle _j+e^{i\omega t}|-1\rangle _j)$. Third, perform a projective measurement with $\hat{\mathcal{P}}_j=(\openone +\hat{\sigma }^{(j)}_y)/2$ for $j=1,2,\cdots L$
where the probability to have this projection is $P'=(1+\sin \omega t)/2\simeq (1+\omega t)/2$ for weak magnetic fields with $|\omega| t \ll 1$.
Fourth, repeat these three steps $N$ times.
Finally, we obtain $L$ measurement results $\{s_m\}_{m=1}^{L}$
from $L$ qubits, and estimate the frequency $\omega$ based on the measurement results.
More specifically, we obtain the estimate value as $\omega _{\rm{est}}=(2(\sum _{m=1}^{L} \frac{s_m}{LN})-1)/t$.
We can calculate the uncertainty of the estimation
as follows.
\begin{eqnarray}
\delta ^2\omega &=& \overline{(\omega -\omega _{\rm{est}})^2} \nonumber \\
&=&\frac{P'(1-P')}{|\frac{dP'}{d\omega }|^2LN}\nonumber \\
&=&\frac{1}{t^2LN}
\end{eqnarray}
where the overline denotes the statistical average.
In our paper, we basically consider a case of $N=1$.
The uncertainty scales as $\delta ^2 \omega =\Theta(L^{-1})$, and so we can decrease the uncertainty by increasing the number of qubits.
\subsection{Quantum sensing with systematic errors}
We review the sensing with systematic errors.
Suppose that there are some unknown experimental imperfections such as systematic errors, and the actual probability $P'$ to have a projection of $\hat{\mathcal{P}}_j=(\openone +\hat{\sigma }^{(j)}_y)/2$ for $j=1,2,\cdots L$
is different from what an experimentalist expected to be true.
Let us define such a wrong (true) probability
as $P$ ($P'$). We assume that these probabilities
have a linear
dependence on $\omega$ such as $P=x + y \omega $ and $P'=x'+y'\omega $.
In this case, the uncertainty of the estimation for a small $\omega$
can be calculated as follows \cite{sugiyama2015precision,takeuchi2019quantum}.
\begin{eqnarray}
\delta ^2 \omega = \frac{1}{y^2}\left[\frac{P'(1-P')}{L}+(x-x')^2\right]\label{takeuchi}
\end{eqnarray}
It is worth mentioning that, even in a limit of large $L$, the uncertainty has a finite non-zero value as follows:
\begin{eqnarray}
\lim _{L \rightarrow \infty}\delta ^2 \omega =\frac{(x-x')^2}{y^2}.
\end{eqnarray}
This finite value comes from systematic errors due to the lack of
knowledge of the coherence time.
The goal of this paper is to suppress such systematic errors.
\section{Quantum sensing with measurements in Zeno regime}
In this section, we explain our scheme to suppress the systematic errors with measurements in Zeno regime.
\subsection{Setup}
Let us explain the setup of our scheme.
We consider a three-level system (or a spin-1 system), which describes
an NV center in diamond \cite{taylor2008high,maze2008nanoscale,balasubramanian2008nanoscale}
or a capacitively shunted flux qubit \cite{you2007low,yan2016flux}.
Although there are three levels such as $|1\rangle $, $|-1\rangle $, and $|0\rangle $, we mainly use $|1\rangle $ and $|-1\rangle $ for magnetic field sensing.
However, due to an energy relaxation, we have unwanted population of the state $|0\rangle $.
Throughout this paper, although this system is not a qubit because it has a small population of the third level, we call this a qubit because we mainly
use $|1\rangle $ and $|-1\rangle $.
In the magnetic field sensing scheme, there is a state preparation step to have an initial state of $\bigotimes _j^L|+\rangle _j=\bigotimes _j^L\frac{1}{\sqrt{2}}(|1\rangle _j+|-1\rangle _j)$, an exposure step to interact the spin-1 systems with target magnetic fields, and a readout step to measure the state.
We assume that the state preparation time and readout time are much longer than the exposure time.
After interacting the $L$ qubits
with the magnetic fields for sensing, we have the following state.
\begin{eqnarray}
\rho &=& \bigotimes _{j=1}^L \rho _j \label{quadstate}
\nonumber \\
\rho _j&=&(1-\epsilon )|\psi (t)\rangle _j \langle \psi(t)|_j+\epsilon |0\rangle_j \langle 0|_j
\label{rhostate}
\\
|\psi (t)\rangle _j&=& \frac{1}{\sqrt{2}}(|1\rangle_j +e^{i\omega t}|-1\rangle _j)
\end{eqnarray}
where $\epsilon$ denotes an error rate due to the energy relaxation.
Since every unstable system shows a quadratic decay for a short time scale
\cite{schulman1994characteristic,nakazato1996temporal}, the error can be approximately described by $1-\epsilon \simeq 1- (t/T'_1)^2 $, where $T'_1$ denotes an energy relaxation time
as long as we are interested in a short time scale.
We assume that the experimentalists do not know the precise value of $T'_1$.
In the experiment, the energy relaxation time can
fluctuate
in time
\cite{yan2016flux,abdurakhimov2019long}. In this case, we could not estimate the exact value of $T'_1$ because it changes before a precise estimation of $T'_1$.
In our paper, we assume that, $T'_1$ is constant during the interaction between the spin-1 system and magnetic fields. However, it is worth mentioning that
$T'_1$ could change if we consider a longer time scale such as an initialize time, a readout time, or a repetition time (when we have $N\geq 2$), which makes it difficult to estimate the exact value of the
$T_1'$.
\subsection{Uncertainty of the estimation for a Gaussian decay}
Let us calculate the uncertainty of the estimation when we have a Gaussian decay such as
$\epsilon =1- e^{-(t/T'_1)^2} $.
We perform
projective measurement with $\hat{\mathcal{P}}_j=(\openone +\hat{\sigma }^{(j)}_y)/2$ for $j=1,2,\cdots L$ on the state described by Eq.~(\ref{rhostate}).
Here, $\hat{\sigma }^{(j)}_y$ is the Pauli matrix spanned by $|1\rangle $ and $|-1\rangle $.
The probability to obtain this projection is as follows.
\begin{eqnarray}
P'\simeq \frac{e^{-(t/T'_1)^2}}{2}(1+ \omega t)
\end{eqnarray}
where we assume $|\omega t|\ll 1$.
Since we do not know the precise value of $T'_1$, we use $T_1 (\neq T_1')$ to estimate the projection probability.
This means that we consider the projection probability as $P\simeq \frac{e^{-(t/T_1)^2}}{2}(1+ \omega t)$, which is different from the true probability $P'$.
In this case, the imperfect knowledge of the energy relaxation time induces the systematic errors. We can calculate the estimation uncertainty
from Eq.~(\ref{takeuchi}) as follows.
\begin{eqnarray}
\delta ^2 \omega &\simeq&\frac{e^{2(t/T_1)^2}}{t^2}\Bigl[\frac{e^{-(t/T'_1)^2}(2-e^{-(t/T'_1)^2})}{L}\Bigr.\nonumber \\
&+& \Bigl.(e^{-(t/T_1)^2}-e^{-(t/T'_1)^2})^2\Bigr],\label{gaussw}
\end{eqnarray}
where the first term comes from the statistical error while the second term comes from the systematic errors due to the lack of
the
knowledge of the coherence time.
Here, we try to minimize the uncertainty by optimizing the interaction time $t$.
In the conventional strategy,
since
we believe that $T_1 \simeq T_1'$,
we obtain the following ``wrong'' uncertainty by substituting $T_1'=T_1$ in Eq.~(\ref{gaussw}):
\begin{eqnarray}
\delta ^2 \omega _{\rm{est}}= \frac{2e^{(t/T_1)^2}-1}{(t/T_1)^2LT_1^2}.
\end{eqnarray}
Actually, we can minimize $\delta ^2 \omega _{\rm{est}}$ with $t= c T_1$ and $c\simeq 0.876$.
However, since the actual
coherence time $T_1'$ is different from $T_1$, the optimal $t$ obtained above induces the bias. To check this,
we calculate the ``actual'' uncertainty under $T_1'\neq T_1$ for
$t= c T_1$
using
Eq.~(\ref{gaussw})
and obtain
\begin{eqnarray}
\delta ^2 \omega _{\rm{conv}}&\simeq &
\frac{e^{2c^2}}{t^2}\Bigl[\frac{e^{-(cT_1/T_1')^2}(2-e^{-(cT_1/T_1')^2})}{L}\Bigr.\nonumber \\
&+& \Bigl.(e^{-c^2}-e^{-(cT_1/T_1')^2})^2\Bigr] .
\end{eqnarray}
We can confirm that
even in the limit of large $L$, the uncertainty approaches $\lim _{L\rightarrow \infty }\delta ^2 \omega _{\rm{conv}} =\frac{e^{2c^2}}{c^2T_1^2}\,(e^{-c^2}-e^{-(cT_1/T_1')^2})^2$, and so the uncertainty is lower bounded due to the systematic errors. This could be a serious problem to use the quantum sensor for practical purposes.
\begin{figure}
\caption{The uncertainty of the estimation against the number of qubits with systematic errors.
We set $T_1=1$ s, $T_1'=1.4$ s, and $t =2L^{-1/4}$ s.}
\label{fig:gauss}
\end{figure}
On the other hand, in our strategy, we set the interaction time $t$ in the Zeno regime to suppress the systematic errors. We consider a case with $t/T'_1 \ll 1$. To satisfy this condition, we remark that it is not necessary to know the exact value of $T_1'$, but just necessary to know the order of $T_1'$.
In this case, we can perform a Taylor expansion on the estimation uncertainty
for $t/T'_1 \ll 1$, and we obtain the following.
\begin{eqnarray}
\delta ^2 \omega &\simeq&\frac{1}{Lt^2}+\frac{1}{t^2} \left[\left(\frac{t}{T_1}\right)^2-\left(\frac{t}{T'_1}\right)^2\right]^2\nonumber \\
&\geq&\frac{2}{\sqrt{L}}\left| \left(\frac{1}{T_1}\right)^2-\left(\frac{1}{T'_1}\right)^2\right|
\label{newgaussw}
\end{eqnarray}
where we use the inequality of arithmetic and geometric means. The interaction time that minimizes $\delta^2\omega$ is given as follows:
\begin{eqnarray}
\frac{1}{Lt^2}&=&\frac{1}{t^2} \left[\left(\frac{t}{T_1}\right)^2-\left(\frac{t}{T'_1}\right)^2\right]^2\nonumber \\
\Leftrightarrow t&=&t_{\rm{opt}}=L^{-1/4}\left| \left(\frac{1}{T_1}\right)^2-\left(\frac{1}{T'_1}\right)^2\right|^{-1/2} .
\end{eqnarray}
It is noteworthy that, since we do not know the precise value of $T'_1$, we cannot choose the
optimal
time $t_{\rm{opt}}=L^{-1/4}|(1/T_1)^2-(1/T'_1)^2|^{-1/2}$.
Instead, we can take $t=\tau L^{-1/4}$ where $\tau$ can be determined by a rough estimation of $T_1'$.
Therefore, by taking $t=\Theta(L^{-1/4})$, we obtain $\delta ^2\omega =\Theta(L^{-1/2})$ regardless
of
the choice of $\tau$.
This means that, by increasing the number of qubits, we can decrease the estimation uncertainty as small as we want, which is in stark contrast to the conventional scheme to have a lower bound of the estimation uncertainty.
Note that, although such measurements in the Zeno regime were utilized
in entanglement enhanced sensing
\cite{Matsuzaki2011NMmetro,ChinNM2012PRL,Zeno2015}, we are the first to utilize this concept to suppress the systematic errors of quantum sensing.
We plot the uncertainty of the estimation with the systematic errors
to
compare the uncertainty of our scheme with that of the conventional scheme, as shown in Fig.~\ref{fig:gauss}.
The uncertainty of the conventional scheme approaches a finite non-zero value, while the uncertainty of our scheme decreases as we increase the number of qubits.
In our scheme, the statistical error is larger than that of the conventional scheme due to the short interaction time between the qubits and magnetic fields. So, for the small number of the qubits where the statistical error is more dominant than the systematic errors, the uncertainty of our scheme is larger than that of the conventional scheme.
However, as we increase the number of qubits, the systematic errors become more dominant than the statistical error, and our scheme shows a better performance than the conventional scheme.
\subsection{Uncertainty of the estimation for a realistic decay}
Here, we calculate the estimation uncertainty for a more realistic decay.
We consider a spin-boson model with a Lorentzian form factor, and this is a typical noise model when the spin-1 system is coupled with a leaky cavity \cite{koshino2005quantum}.
(See the details in Appendix \ref{general}).
The error rate can be described as follows:
\begin{eqnarray}
\epsilon&=&1-|f(t)|^2\nonumber \\
f(t)&=&\frac{1+\sqrt{1-2\gamma /\Delta}
}{2e^{i \lambda _1 t}\sqrt{1-2\gamma /\Delta}}
-
\frac{1-\sqrt{1-2\gamma /\Delta}
}{2e^{i \lambda _2 t}\sqrt{1-2\gamma /\Delta}}\ \ \ \ \label{ffunction}
\end{eqnarray}
where $\gamma$ denotes the strength of the noise,
and $\Delta$ denotes the linewidth of the form factor.
Also, $\lambda _1$ and $\lambda _2$ are the solutions of the following equation:
\begin{eqnarray}
\lambda (\lambda +i\Delta) - \gamma \Delta/2=0 .
\end{eqnarray}
We can find the coherence time of the system as $\tilde{T}'_1=\sqrt{2/(\gamma \Delta)}$ by approximating the error rate as $\epsilon \simeq 1- \frac{\gamma \Delta t^2}{2}$.
The $f(t)$ depends on $\gamma$ and $\tilde{T}'_1$. We assume that, while we know the value of $\gamma$, we do not know the exact value of $\tilde{T}'_1$, and we have $\tilde{T}_1$ as an expected coherence time where $\tilde{T}'_1\neq \tilde{T}_1$. By substituting $\Delta = \frac{2}{\gamma (\tilde{T}_1)^2}$ into Eq.~(\ref{ffunction}), we obtain $f_{\rm{est}}(t)$ that we expect, while the true value is $f_{\rm{true}}(t)$, where $\Delta = \frac{2}{\gamma (\tilde{T}'_1)^2}$ is substituted into Eq.~(\ref{ffunction}).
Let us calculate the uncertainty of the estimation when we have the decay such as
$\epsilon =1-|f(t)|^2$.
We perform
projective measurement with $\hat{\mathcal{P}}_j=(\openone +\hat{\sigma }^{(j)}_y)/2$ for $j=1,2,\cdots L$
on the state
described
by Eq.~(\ref{rhostate}).
The probability to obtain this projection is as follows:
\begin{eqnarray}
P\simeq \frac{|f(t)|^2}{2}(1+ \omega t),
\end{eqnarray}
where we assume $|\omega t|\ll 1$.
If the true value of the coherence time is given, we have
$P'\simeq \frac{|f_{\rm{true}}(t)|^2}{2}(1+ \omega t)$, while we have
$P\simeq \frac{|f_{\rm{est}}(t)|^2}{2}(1+ \omega t)$ when we have $f_{\rm{est}}(t)$.
By using Eq.~(\ref{takeuchi}), we obtain
\begin{eqnarray}
\delta ^2 \omega &=& \frac{1}{t^2|f_{\rm{est}}(t)|^4}\Bigl[\frac{|f_{\rm{true}}(t)|^2(2-|f_{\rm{true}}(t)|^2)}{L}
\Bigr.\nonumber \\
&+&\Bigl.(|f_{\rm{est}}(t)|^2-|f_{\rm{true}}(t)|^2)^2\Bigr].\label{genedw}
\end{eqnarray}
As long as we are interested in a short time region, we can obtain an approximate form of the survival probability such as $|f_{\rm{true}}(t)|^2 \simeq 1- t^2/(\tilde{T}'_1)^2\simeq e^{- (t/\tilde{T}'_1)^2}$ and $|f_{\rm{est}}(t)|^2 \simeq e^{- (t/\tilde{T}_1)^2}$.
In this case, the uncertainty of Eq.~(\ref{genedw})
has the same form as Eq.~(\ref{gaussw}), and
we can adopt the same strategy as the case of the Gaussian decay.
Therefore, we can take
$t=\Theta(L^{-1/4})$ to obtain the scaling of $\delta ^2\omega =\Theta(L^{-1/2})$.
\begin{figure}
\caption{The uncertainty of the estimation against the number of
qubits
with systematic errors.
We set $\tilde{T}_1=1$ s, $\tilde{T}'_1=1.4$ s, and $t=\tau L^{-1/4}$.}
\label{fig:koshino}
\end{figure}
To quantify the performance of our scheme, we plot the uncertainty of the estimation of our scheme, and also compare it with that of the conventional scheme in Fig.~\ref{fig:koshino}.
In the conventional scheme, similar to the Gaussian decay case, we substitute $\tilde{T}'_1=\tilde{T}_1$ in
Eq.~(\ref{genedw}), and obtain $\delta ^2 \omega_{\rm{conv}}$. After we fix the values of $\gamma$, $\tilde{T}_1$, and $\tilde{T}'_1$, we minimize $\delta ^2 \omega_{\rm{conv}}$ by choosing the optimal $t$, and use this $t$ for the plot. On the other hand, in our scheme, we take $t=\tau L^{-1/4}$.
Similar to the case of the Gaussian decay, the statistical error is relevant for the small number of the qubits, and the uncertainty of our scheme is larger than that of the conventional scheme.
However, the uncertainty of the estimation of the conventional scheme has a lower bound even in the large $L$ due to the systematic errors. On the other hand, in our scheme, the uncertainty decreases as we increase the number of qubits. Therefore, for the large number of the qubits, our scheme shows better performance than the conventional one.
\section{Conclusion}
In conclusion, we propose a scheme to use measurements in the Zeno regime for the suppression of systematic errors in quantum magnetic field sensing. When we do not know a precise value of the coherence time of the quantum system, such a lack of
knowledge induces the systematic errors for quantum sensing. In this case, the uncertainty of the estimation of the target magnetic field is lower bounded even in the limit of the infinite number of the repetitions of the measurements.
To suppress such systematic errors, we utilized the fact that every decoherence process shows a quadratic decay in a short time region in the Zeno regime where the effect of the decoherence does not significantly affect the dynamics. We show that, by taking the interaction time (between the system and magnetic fields) as $\Theta (L^{-1/4})$, we obtain a scaling of $\delta ^2 \omega =\Theta (L^{-1/2})$ where $L$ is the number of measurements.
Therefore, by increasing the number of
the
repetition, we can decrease the uncertainty as small as we want even under the effect of the systematic errors due to the imperfect knowledge of the coherence time.
Our results pave the way for the practically useful quantum magnetic fields sensors.
\begin{acknowledgments}
We are grateful to Shiro Kawabata for useful discussions.
This work was supported by Leading Initiative for Excellent Young Researchers MEXT Japan and JST presto (Grant No. JPMJPR1919) Japan.
\end{acknowledgments}
\appendix
\section{Decay dynamics under the effect of the decoherence}\label{general}
Here, we discuss the decay dynamics of the three level system
under the effect of the decoherence.
Especially, we consider an NV center for the three level system.
The Hamiltonian of the NV center coupled with an environment
is described as follows.
\begin{eqnarray}
H&=&H_{nv}+H_{I}+H_{E} \nonumber \\
H_{nv}&=&D_0 \hat{S}_z^2 + g\mu _bB \hat{S}_z \nonumber \\
H_{I}&=& \hat{S}_x \int _{\mu} g_{\mu}(\hat{b}_{\mu} +\hat{b}_{\mu}^{\dagger})d \mu \nonumber \\
H_E&=& \int _{\mu} \omega _{\mu} \hat{b}^{\dagger}_{\mu} \hat{b}_{\mu} d\mu
\end{eqnarray}
where $D_0$ denotes a zero field splitting,
$g$ denotes a g factor, $\mu _b$ denotes a Bohr magneton, $g_\mu$ denotes the coupling strength
between the NV center and environment, $\omega _\mu$ denotes the frequency of the environment,
$\hat{S}_x$ ($\hat{S}_y$) denotes the spin-$1$ operator for $x$ ($y$), and $\hat{b}_\mu$ ($\hat{b}_\mu^{\dagger}$) denotes an annihilation (creation) operator of the environmental mode.
In the interaction picture,
we have
\begin{eqnarray}
H_I(t)=
\int _{\mu}g_{\mu} (|B\rangle \langle 0| \hat{b}_{\mu}e^{-i \Delta \omega _{\mu} t} +|0\rangle \langle B|\hat{b}_{\mu}^{\dagger}e^{i \Delta \omega _{\mu} t})d\mu \nonumber
\end{eqnarray}
where $\Delta \omega _{\mu} = \omega_{\mu} - D_0-g\mu _bB$ and $|B\rangle =\frac{1}{\sqrt{2}}(|1\rangle +|-1\rangle )$.
Here, we can consider a subspace spanned by $|B\rangle $ and $|0\rangle $
because
the dark state $|D\rangle=\frac{1}{\sqrt{2}}(|1\rangle -|-1\rangle ) $ is not involved in the dynamics.
In this case, we can adopt the standard results of the open quantum system for a qubit. We consider the following Lorentzian form factor
\cite{koshino2005quantum}.
\begin{eqnarray}
|g_{\mu}|^2=\frac{\gamma}{2\pi}\frac{\Delta ^2}{(\mu -\mu _0)^2 + \Delta ^2}
\end{eqnarray}
where $\gamma$ denotes the strength of the noise, $\mu _0$ denotes a central frequency of the environment, and $\Delta$ denotes the linewidth of the form factor. In our paper, we set $\mu _0 = D_0 \simeq D_0 +g \mu _b B$ where $g \mu _b B$ is assumed to be much smaller than any other parameters.
We consider an initial state as $|B\rangle$, and we can calculate the survival probability as follows \cite{koshino2005quantum}:
\begin{eqnarray}
P_{\rm{suv}}&=&|f(t)|^2 \nonumber \\
f(t)&=&\frac{1+\sqrt{1-2\gamma /\Delta}
}{2\sqrt{1-2\gamma /\Delta}}e^{-i \lambda _1 t}
-
\frac{1-\sqrt{1-2\gamma /\Delta}
}{2\sqrt{1-2\gamma /\Delta}}e^{-i \lambda _2 t}
\nonumber \\
\end{eqnarray}
Also, $\lambda _1$ and $\lambda _2$ are the solutions of the following equation:
\begin{eqnarray}
\lambda (\lambda +i\Delta) - \gamma \Delta/2=0 .
\end{eqnarray}
The density matrix in the interaction picture
after a time $t$ is described as follows:
\begin{eqnarray}
\rho_I(t)= P_{\rm{suv}}|B\rangle \langle B| + (1-P_{\rm{suv}})|0\rangle \langle 0| .
\end{eqnarray}
Therefore, by going back to the Schr\"{o}dinger picture,
we have
\begin{eqnarray}
\rho(t)= P_{\rm{suv}}|\psi (t)\rangle \langle \psi (t)| + (1-P_{\rm{suv}})|0\rangle \langle 0|,
\end{eqnarray}
where $|\psi (t)\rangle = \frac{1}{\sqrt{2}}(|1\rangle +e^{i\omega t}|-1\rangle )
$ and $\omega =2g \mu _bB$.
We can thus
define an error rate as $\epsilon =1- P_{\rm{suv}}$.
\begin{thebibliography}{32}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{http://dx.doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Degen}\ \emph {et~al.}(2017)\citenamefont {Degen},
\citenamefont {Reinhard},\ and\ \citenamefont
{Cappellaro}}]{degen2017quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.~L.}\ \bibnamefont
{Degen}}, \bibinfo {author} {\bibfnamefont {F.}~\bibnamefont {Reinhard}}, \
and\ \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Reviews of modern
physics}\ }\textbf {\bibinfo {volume} {89}},\ \bibinfo {pages} {035002}
(\bibinfo {year} {2017})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Taylor}\ \emph {et~al.}(2008)\citenamefont {Taylor},
\citenamefont {Cappellaro}, \citenamefont {Childress}, \citenamefont {Jiang},
\citenamefont {Budker}, \citenamefont {Hemmer}, \citenamefont {Yacoby},
\citenamefont {Walsworth},\ and\ \citenamefont {Lukin}}]{taylor2008high}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Taylor}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Cappellaro}},
\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Childress}}, \bibinfo
{author} {\bibfnamefont {L.}~\bibnamefont {Jiang}}, \bibinfo {author}
{\bibfnamefont {D.}~\bibnamefont {Budker}}, \bibinfo {author} {\bibfnamefont
{P.}~\bibnamefont {Hemmer}}, \bibinfo {author} {\bibfnamefont
{A.}~\bibnamefont {Yacoby}}, \bibinfo {author} {\bibfnamefont
{R.}~\bibnamefont {Walsworth}}, \ and\ \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Lukin}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Nature Physics}\ }\textbf {\bibinfo {volume} {4}},\ \bibinfo
{pages} {810} (\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Maze}\ \emph {et~al.}(2008)\citenamefont {Maze},
\citenamefont {Stanwix}, \citenamefont {Hodges}, \citenamefont {Hong},
\citenamefont {Taylor}, \citenamefont {Cappellaro}, \citenamefont {Jiang},
\citenamefont {Dutt}, \citenamefont {Togan}, \citenamefont {Zibrov} \emph
{et~al.}}]{maze2008nanoscale}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont
{Maze}}, \bibinfo {author} {\bibfnamefont {P.~L.}\ \bibnamefont {Stanwix}},
\bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Hodges}}, \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Hong}}, \bibinfo {author}
{\bibfnamefont {J.~M.}\ \bibnamefont {Taylor}}, \bibinfo {author}
{\bibfnamefont {P.}~\bibnamefont {Cappellaro}}, \bibinfo {author}
{\bibfnamefont {L.}~\bibnamefont {Jiang}}, \bibinfo {author} {\bibfnamefont
{M.~G.}\ \bibnamefont {Dutt}}, \bibinfo {author} {\bibfnamefont
{E.}~\bibnamefont {Togan}}, \bibinfo {author} {\bibfnamefont
{A.}~\bibnamefont {Zibrov}}, \emph {et~al.},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {455}},\
\bibinfo {pages} {644} (\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Balasubramanian}\ \emph {et~al.}(2008)\citenamefont
{Balasubramanian}, \citenamefont {Chan}, \citenamefont {Kolesov},
\citenamefont {Al-Hmoud}, \citenamefont {Tisler}, \citenamefont {Shin},
\citenamefont {Kim}, \citenamefont {Wojcik}, \citenamefont {Hemmer},
\citenamefont {Krueger} \emph {et~al.}}]{balasubramanian2008nanoscale}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Balasubramanian}}, \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont
{Chan}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Kolesov}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Al-Hmoud}}, \bibinfo
{author} {\bibfnamefont {J.}~\bibnamefont {Tisler}}, \bibinfo {author}
{\bibfnamefont {C.}~\bibnamefont {Shin}}, \bibinfo {author} {\bibfnamefont
{C.}~\bibnamefont {Kim}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Wojcik}}, \bibinfo {author} {\bibfnamefont {P.~R.}\ \bibnamefont {Hemmer}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Krueger}}, \emph
{et~al.},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Nature}\ }\textbf {\bibinfo {volume} {455}},\ \bibinfo {pages} {648}
(\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bal}\ \emph {et~al.}(2012)\citenamefont {Bal},
\citenamefont {Deng}, \citenamefont {Orgiazzi}, \citenamefont {Ong},\ and\
\citenamefont {Lupascu}}]{bal2012ultrasensitive}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Bal}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Deng}}, \bibinfo
{author} {\bibfnamefont {J.-L.}\ \bibnamefont {Orgiazzi}}, \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Ong}}, \ and\ \bibinfo {author}
{\bibfnamefont {A.}~\bibnamefont {Lupascu}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo
{volume} {3}},\ \bibinfo {pages} {1} (\bibinfo {year} {2012})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Toida}\ \emph {et~al.}(2019)\citenamefont {Toida},
\citenamefont {Matsuzaki}, \citenamefont {Kakuyanagi}, \citenamefont {Zhu},
\citenamefont {Munro}, \citenamefont {Yamaguchi},\ and\ \citenamefont
{Saito}}]{toida2019electron}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Toida}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Matsuzaki}},
\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Kakuyanagi}}, \bibinfo
{author} {\bibfnamefont {X.}~\bibnamefont {Zhu}}, \bibinfo {author}
{\bibfnamefont {W.~J.}\ \bibnamefont {Munro}}, \bibinfo {author}
{\bibfnamefont {H.}~\bibnamefont {Yamaguchi}}, \ and\ \bibinfo {author}
{\bibfnamefont {S.}~\bibnamefont {Saito}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Communications Physics}\ }\textbf {\bibinfo
{volume} {2}},\ \bibinfo {pages} {1} (\bibinfo {year} {2019})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Budoyo}\ \emph {et~al.}(2020)\citenamefont {Budoyo},
\citenamefont {Kakuyanagi}, \citenamefont {Toida}, \citenamefont
{Matsuzaki},\ and\ \citenamefont {Saito}}]{budoyo2020electron}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.~P.}\ \bibnamefont
{Budoyo}}, \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Kakuyanagi}},
\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Toida}}, \bibinfo
{author} {\bibfnamefont {Y.}~\bibnamefont {Matsuzaki}}, \ and\ \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Saito}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Applied Physics Letters}\
}\textbf {\bibinfo {volume} {116}},\ \bibinfo {pages} {194001} (\bibinfo
{year} {2020})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kominis}\ \emph {et~al.}(2003)\citenamefont
{Kominis}, \citenamefont {Kornack}, \citenamefont {Allred},\ and\
\citenamefont {Romalis}}]{kominis2003subfemtotesla}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont
{Kominis}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Kornack}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Allred}}, \ and\ \bibinfo
{author} {\bibfnamefont {M.~V.}\ \bibnamefont {Romalis}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo
{volume} {422}},\ \bibinfo {pages} {596} (\bibinfo {year}
{2003})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Yan}\ \emph {et~al.}(2016)\citenamefont {Yan},
\citenamefont {Gustavsson}, \citenamefont {Kamal}, \citenamefont {Birenbaum},
\citenamefont {Sears}, \citenamefont {Hover}, \citenamefont {Gudmundsen},
\citenamefont {Rosenberg}, \citenamefont {Samach}, \citenamefont {Weber}
\emph {et~al.}}]{yan2016flux}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Yan}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Gustavsson}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Kamal}}, \bibinfo
{author} {\bibfnamefont {J.}~\bibnamefont {Birenbaum}}, \bibinfo {author}
{\bibfnamefont {A.~P.}\ \bibnamefont {Sears}}, \bibinfo {author}
{\bibfnamefont {D.}~\bibnamefont {Hover}}, \bibinfo {author} {\bibfnamefont
{T.~J.}\ \bibnamefont {Gudmundsen}}, \bibinfo {author} {\bibfnamefont
{D.}~\bibnamefont {Rosenberg}}, \bibinfo {author} {\bibfnamefont
{G.}~\bibnamefont {Samach}}, \bibinfo {author} {\bibfnamefont
{S.}~\bibnamefont {Weber}}, \emph {et~al.},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo
{volume} {7}},\ \bibinfo {pages} {1} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Abdurakhimov}\ \emph {et~al.}(2019)\citenamefont
{Abdurakhimov}, \citenamefont {Mahboob}, \citenamefont {Toida}, \citenamefont
{Kakuyanagi},\ and\ \citenamefont {Saito}}]{abdurakhimov2019long}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.~V.}\ \bibnamefont
{Abdurakhimov}}, \bibinfo {author} {\bibfnamefont {I.}~\bibnamefont
{Mahboob}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Toida}},
\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Kakuyanagi}}, \ and\
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Saito}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Applied Physics Letters}\
}\textbf {\bibinfo {volume} {115}},\ \bibinfo {pages} {262601} (\bibinfo
{year} {2019})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Wolf}\ \emph {et~al.}(2015)\citenamefont {Wolf},
\citenamefont {Neumann}, \citenamefont {Nakamura}, \citenamefont {Sumiya},
\citenamefont {Ohshima}, \citenamefont {Isoya},\ and\ \citenamefont
{Wrachtrup}}]{wolf2015subpicotesla}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Wolf}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Neumann}},
\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Nakamura}}, \bibinfo
{author} {\bibfnamefont {H.}~\bibnamefont {Sumiya}}, \bibinfo {author}
{\bibfnamefont {T.}~\bibnamefont {Ohshima}}, \bibinfo {author} {\bibfnamefont
{J.}~\bibnamefont {Isoya}}, \ and\ \bibinfo {author} {\bibfnamefont
{J.}~\bibnamefont {Wrachtrup}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Physical Review X}\ }\textbf {\bibinfo {volume} {5}},\
\bibinfo {pages} {041001} (\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Misra}\ and\ \citenamefont
{Sudarshan}(1977)}]{misra1977zeno}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Misra}}\ and\ \bibinfo {author} {\bibfnamefont {E.~G.}\ \bibnamefont
{Sudarshan}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Journal of Mathematical Physics}\ }\textbf {\bibinfo {volume} {18}},\
\bibinfo {pages} {756} (\bibinfo {year} {1977})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Facchi}\ and\ \citenamefont
{Pascazio}(2008)}]{facchi2008quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Facchi}}\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Pascazio}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Journal of Physics A: Mathematical and Theoretical}\ }\textbf {\bibinfo
{volume} {41}},\ \bibinfo {pages} {493001} (\bibinfo {year}
{2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Schulman}\ \emph {et~al.}(1994)\citenamefont
{Schulman}, \citenamefont {Ranfagni},\ and\ \citenamefont
{Mugnai}}]{schulman1994characteristic}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Schulman}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Ranfagni}},
\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Mugnai}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Physica Scripta}\
}\textbf {\bibinfo {volume} {49}},\ \bibinfo {pages} {536} (\bibinfo {year}
{1994})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Nakazato}\ \emph {et~al.}(1996)\citenamefont
{Nakazato}, \citenamefont {Namiki},\ and\ \citenamefont
{Pascazio}}]{nakazato1996temporal}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.}~\bibnamefont
{Nakazato}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Namiki}}, \
and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Pascazio}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {International
Journal of Modern Physics B}\ }\textbf {\bibinfo {volume} {10}},\ \bibinfo
{pages} {247} (\bibinfo {year} {1996})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Erez}\ \emph {et~al.}(2004)\citenamefont {Erez},
\citenamefont {Aharonov}, \citenamefont {Reznik},\ and\ \citenamefont
{Vaidman}}]{erez2004correcting}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.}~\bibnamefont
{Erez}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Aharonov}},
\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Reznik}}, \ and\ \bibinfo
{author} {\bibfnamefont {L.}~\bibnamefont {Vaidman}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {69}},\ \bibinfo {pages} {062315} (\bibinfo {year}
{2004})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Koshino}\ and\ \citenamefont
{Shimizu}(2005)}]{koshino2005quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Koshino}}\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Shimizu}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physics reports}\ }\textbf {\bibinfo {volume} {412}},\ \bibinfo {pages}
{191} (\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Itano}\ \emph {et~al.}(1990)\citenamefont {Itano},
\citenamefont {Heinzen}, \citenamefont {Bollinger},\ and\ \citenamefont
{Wineland}}]{itano1990quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {W.~M.}\ \bibnamefont
{Itano}}, \bibinfo {author} {\bibfnamefont {D.~J.}\ \bibnamefont {Heinzen}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Bollinger}}, \ and\
\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Wineland}},\ }\href@noop
{} {\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {41}},\ \bibinfo {pages} {2295} (\bibinfo {year}
{1990})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kwiat}\ \emph {et~al.}(1995)\citenamefont {Kwiat},
\citenamefont {Weinfurter}, \citenamefont {Herzog}, \citenamefont
{Zeilinger},\ and\ \citenamefont {Kasevich}}]{kwiat1995interaction}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Kwiat}}, \bibinfo {author} {\bibfnamefont {H.}~\bibnamefont {Weinfurter}},
\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Herzog}}, \bibinfo
{author} {\bibfnamefont {A.}~\bibnamefont {Zeilinger}}, \ and\ \bibinfo
{author} {\bibfnamefont {M.~A.}\ \bibnamefont {Kasevich}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review Letters}\
}\textbf {\bibinfo {volume} {74}},\ \bibinfo {pages} {4763} (\bibinfo {year}
{1995})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Fischer}\ \emph {et~al.}(2001)\citenamefont
{Fischer}, \citenamefont {Guti{\'e}rrez-Medina},\ and\ \citenamefont
{Raizen}}]{fischer2001observation}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Fischer}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Guti{\'e}rrez-Medina}}, \ and\ \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Raizen}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Physical review letters}\ }\textbf {\bibinfo {volume} {87}},\
\bibinfo {pages} {040402} (\bibinfo {year} {2001})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Streed}\ \emph {et~al.}(2006)\citenamefont {Streed},
\citenamefont {Mun}, \citenamefont {Boyd}, \citenamefont {Campbell},
\citenamefont {Medley}, \citenamefont {Ketterle},\ and\ \citenamefont
{Pritchard}}]{streed2006continuous}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.~W.}\ \bibnamefont
{Streed}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Mun}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Boyd}}, \bibinfo {author}
{\bibfnamefont {G.~K.}\ \bibnamefont {Campbell}}, \bibinfo {author}
{\bibfnamefont {P.}~\bibnamefont {Medley}}, \bibinfo {author} {\bibfnamefont
{W.}~\bibnamefont {Ketterle}}, \ and\ \bibinfo {author} {\bibfnamefont
{D.~E.}\ \bibnamefont {Pritchard}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Physical review letters}\ }\textbf {\bibinfo {volume}
{97}},\ \bibinfo {pages} {260402} (\bibinfo {year} {2006})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Helmer}\ \emph {et~al.}(2009)\citenamefont {Helmer},
\citenamefont {Mariantoni}, \citenamefont {Solano},\ and\ \citenamefont
{Marquardt}}]{helmer2009quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Helmer}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Mariantoni}},
\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Solano}}, \ and\ \bibinfo
{author} {\bibfnamefont {F.}~\bibnamefont {Marquardt}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {79}},\ \bibinfo {pages} {052115} (\bibinfo {year}
{2009})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Wolters}\ \emph {et~al.}(2013)\citenamefont
{Wolters}, \citenamefont {Strau{\ss}}, \citenamefont {Schoenfeld},\ and\
\citenamefont {Benson}}]{wolters2013quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Wolters}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Strau{\ss}}},
\bibinfo {author} {\bibfnamefont {R.~S.}\ \bibnamefont {Schoenfeld}}, \ and\
\bibinfo {author} {\bibfnamefont {O.}~\bibnamefont {Benson}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {88}},\ \bibinfo {pages} {020101} (\bibinfo {year}
{2013})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kakuyanagi}\ \emph {et~al.}(2015)\citenamefont
{Kakuyanagi}, \citenamefont {Baba}, \citenamefont {Matsuzaki}, \citenamefont
{Nakano}, \citenamefont {Saito},\ and\ \citenamefont
{Semba}}]{kakuyanagi2015observation}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Kakuyanagi}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Baba}},
\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Matsuzaki}}, \bibinfo
{author} {\bibfnamefont {H.}~\bibnamefont {Nakano}}, \bibinfo {author}
{\bibfnamefont {S.}~\bibnamefont {Saito}}, \ and\ \bibinfo {author}
{\bibfnamefont {K.}~\bibnamefont {Semba}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf {\bibinfo
{volume} {17}},\ \bibinfo {pages} {063035} (\bibinfo {year}
{2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kondo}\ \emph {et~al.}(2016)\citenamefont {Kondo},
\citenamefont {Matsuzaki}, \citenamefont {Matsushima},\ and\ \citenamefont
{Filgueiras}}]{kondo2016using}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont
{Kondo}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Matsuzaki}},
\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Matsushima}}, \ and\
\bibinfo {author} {\bibfnamefont {J.~G.}\ \bibnamefont {Filgueiras}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {New Journal of
Physics}\ }\textbf {\bibinfo {volume} {18}},\ \bibinfo {pages} {013033}
(\bibinfo {year} {2016})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kalb}\ \emph {et~al.}(2016)\citenamefont {Kalb},
\citenamefont {Cramer}, \citenamefont {Twitchen}, \citenamefont {Markham},
\citenamefont {Hanson},\ and\ \citenamefont
{Taminiau}}]{kalb2016experimental}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.}~\bibnamefont
{Kalb}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Cramer}},
\bibinfo {author} {\bibfnamefont {D.~J.}\ \bibnamefont {Twitchen}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Markham}}, \bibinfo {author}
{\bibfnamefont {R.}~\bibnamefont {Hanson}}, \ and\ \bibinfo {author}
{\bibfnamefont {T.}~\bibnamefont {Taminiau}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo
{volume} {7}},\ \bibinfo {pages} {1} (\bibinfo {year} {2016})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Matsuzaki}\ \emph {et~al.}(2011)\citenamefont
{Matsuzaki}, \citenamefont {Benjamin},\ and\ \citenamefont
{Fitzsimons}}]{Matsuzaki2011NMmetro}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont
{Matsuzaki}}, \bibinfo {author} {\bibfnamefont {S.~C.}\ \bibnamefont
{Benjamin}}, \ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Fitzsimons}},\ }\href {\doibase 10.1103/PhysRevA.84.012103} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{84}},\ \bibinfo {pages} {012103} (\bibinfo {year} {2011})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Chin}\ \emph {et~al.}(2012)\citenamefont {Chin},
\citenamefont {Huelga},\ and\ \citenamefont {Plenio}}]{ChinNM2012PRL}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~W.}\ \bibnamefont
{Chin}}, \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont {Huelga}}, \
and\ \bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\
}\href {\doibase 10.1103/PhysRevLett.109.233601} {\bibfield {journal}
{\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {109}},\
\bibinfo {pages} {233601} (\bibinfo {year} {2012})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Macieszczak}(2015)}]{Zeno2015}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Macieszczak}},\ }\href {\doibase 10.1103/PhysRevA.92.010102} {\bibfield
{journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume}
{92}},\ \bibinfo {pages} {010102} (\bibinfo {year} {2015})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Sugiyama}(2015)}]{sugiyama2015precision}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Sugiyama}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo {pages}
{042126} (\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Takeuchi}\ \emph {et~al.}(2019)\citenamefont
{Takeuchi}, \citenamefont {Matsuzaki}, \citenamefont {Miyanishi},
\citenamefont {Sugiyama},\ and\ \citenamefont {Munro}}]{takeuchi2019quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont
{Takeuchi}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Matsuzaki}},
\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Miyanishi}}, \bibinfo
{author} {\bibfnamefont {T.}~\bibnamefont {Sugiyama}}, \ and\ \bibinfo
{author} {\bibfnamefont {W.~J.}\ \bibnamefont {Munro}},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {99}},\ \bibinfo {pages} {022325} (\bibinfo {year}
{2019})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {You}\ \emph {et~al.}(2007)\citenamefont {You},
\citenamefont {Hu}, \citenamefont {Ashhab},\ and\ \citenamefont
{Nori}}]{you2007low}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{You}}, \bibinfo {author} {\bibfnamefont {X.}~\bibnamefont {Hu}}, \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Ashhab}}, \ and\ \bibinfo {author}
{\bibfnamefont {F.}~\bibnamefont {Nori}},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Physical Review B}\ }\textbf {\bibinfo
{volume} {75}},\ \bibinfo {pages} {140515} (\bibinfo {year}
{2007})}\BibitemShut {NoStop}
\end{thebibliography}
\end{document} |
\begin{document}
\title{On a conjecture in second-order optimality conditions}
\author{Roger Behling\footnote{Federal University of Santa Catarina, Blumenau-SC, Brazil. Email: [email protected]}\and
Gabriel Haeser\footnote{Department of Applied Mathematics, University of S\~ao Paulo, S\~ao Paulo-SP, Brazil. This research was partially conducted while holding a Visiting Scholar position at Department of Management Science and Engineering, Stanford University, Stanford CA, USA. Email: [email protected] \Letter} \and Alberto Ramos\footnote{Department of Mathematics,
Federal University of Paran\'a, Curitiba, PR, Brazil.
e-mail: [email protected].}
\and Daiana S. Viana\footnote{Federal University of Acre, Center of Exact and Technological Sciences, Rio Branco-AC, Brazil. PhD student at Department of Applied Mathematics, University of S\~ao Paulo-SP, Brazil. Email: [email protected]}
}
\date{June 16th, 2016. Last reviewed on June 23rd, 2017.}
\maketitle
\begin{abstract}
In this paper we deal with optimality conditions that can be verified by a nonlinear optimization algorithm,
where only a single Lagrange multiplier is available.
In particular, we deal with a conjecture formulated in
[{\it R. Andreani, J.M. Mart\'{\i}nez, M.L. Schuverdt, ``On second-order optimality conditions for nonlinear programming'', Optimization, 56:529--542, 2007}],
which states that whenever a local minimizer of a nonlinear optimization problem
fulfills the Mangasarian-Fromovitz Constraint Qualification and the
rank of the set of gradients of active constraints increases at most by one in a neighborhood of the minimizer, a second-order optimality condition that depends on one single Lagrange multiplier is satisfied.
This conjecture generalizes previous results under a constant rank assumption or under a rank deficiency of at most one.
In this paper we prove the conjecture under the additional
assumption that the Jacobian matrix has a smooth singular value decomposition, which is weaker than previously considered assumptions. We also review previous literature related to the conjecture.
\end{abstract}
\noindent {\bf Keywords:}
Nonlinear optimization, Constraint qualifications, Second-order optimality
conditions, Singular value decomposition.
\noindent {\bf AMS Classification:} 90C46, 90C30
\pagestyle{myheadings}
\thispagestyle{plain}
\markboth{}{R. Behling, G. Haeser, A. Ramos, D. S. Viana, On a conjecture in second-order
optimality conditions}
\section{Introduction}
This paper considers a conjecture about second-order necessary optimality conditions for
constrained optimization. Our interest in such conjecture comes from
practical considerations. Numerical optimization
deals with the design of algorithms with the aim of finding a point with the lowest possible value of a certain function over a constraint set.
Useful tools for the design of algorithms are the necessary optimality conditions, i.e.,
conditions satisfied by every local minimizer.
Not all necessary optimality conditions serve that purpose.
Optimality conditions must be
computable with the information provided by the algorithm,
where its fulfillment indicates that the considered point is an acceptable solution.
For constrained optimization problems,
the Karush-Kuhn-Tucker (KKT) conditions are the basis for most optimality conditions.
In fact, most algorithms for constrained optimization are iterative and in their
implementation,
the KKT conditions serve as a theoretical guide for developing suitable
stopping criteria.
For more details, see \cite[Framework 7.13, page 513]{nocedal}, \cite[Chapter 12]{fletcher} and \cite{amrs}.
Necessary optimality conditions can be of first- or second-order
depending on whether the first- or second-order derivatives are used in the formulation.
When the second-order information is available,
one can formulate second-order conditions. Such
conditions are much stronger than first-order ones and hence are mostly desirable, since they allow us to rule out possible non-minimizers accepted as solution when we only use first-order information.
Global convergence proofs of second-order algorithms are based on a second-order necessary optimality condition of the form: If a local
minimizer satisfies some constraint qualification, then the WSOC condition holds,
where WSOC stands for the Weak Second-order Optimality Condition, that states that the Hessian of
the Lagrangian at a KKT point, for some Lagrange multiplier, is positive semidefinite on the subspace orthogonal to the gradients of
active constraints, see Definition \ref{def:wsoc}.
Thus, we are interested in assumptions guaranteeing that local minimizers satisfy WSOC, given its implications to numerical algorithms.
The conjecture comes along these lines. In order to precisely state the conjecture, we need some definitions.
Consider the nonlinear constrained optimization problem
\begin{equation}
\label{problem}
\begin{array}{lll}
\mbox{minimize } & f(x), \\
\mbox{subject to } & h_{i}(x) = 0 \ \ \forall i \in \mathcal{E}:=\{1,\dots, m\},\\
& g_{j}(x) \leq 0 \ \ \forall j \in \mathcal{I}:=\{1,\dots,p\}, \\
\end{array}
\end{equation}
where $f$, $h_{i}$, $g_{j}: \mathbb{R}^{n} \rightarrow \mathbb{R}$ are assumed to be,
at least, twice continuously differentiable functions.
Denote by $\Omega$ the feasible set of \eqref{problem}.
For a point $x \in \Omega$, we define $A(x):=\{j \in \mathcal{I}: g_{j}(x)=0\}$ to denote the set of indices of active inequalities.
A feasible point $x^*$ satisfies the Mangasarian-Fromovitz Constraint Qualification (MFCQ)
if $\{\nabla h_i(x^*):i\in\mathcal{E}\}$
is a linearly independent set and there is a direction
$d\in{\mathbb R}^n$ such that $\nabla h_i(x^*)^\mathtt{T}d=0, i \in \mathcal{E}$ and $\nabla g_j(x^*)^\mathtt{T}d<0$,
$j\in A(x^*)$.
Define by $J(x)$
the matrix whose first $m$ rows are formed by $\nabla h_i(x)^\mathtt{T}$, $i\in\mathcal{E}$ and the
remaining rows by $\nabla g_j(x)^\mathtt{T}, j\in A(x^*)$.
In \cite{ams2}, with the aim of
stating
a verifiable condition guaranteeing global convergence of a second-order
augmented Lagrangian method to a second-order stationary point,
the authors proposed a new condition \cite[Section 3]{ams2} suitable for that purpose.
Furthermore, based on \cite{baccaritrad} and their recently proposed condition,
they stated the following conjecture, see \cite[Section 5]{ams2}:
\begin{conjecture}
Let $x^*$ be a local minimizer of (\ref{problem}). Assume that:
\begin{enumerate}
\item MFCQ holds at $x^{*}$,
\item the
rank of
$\{\nabla h_i(x), \nabla g_j(x): i \in \mathcal{E}; j \in A(x^*)\}$
is at most $r+1$ in a neighborhood of $x^*$,
where $r$ is the rank of
$\{\nabla h_i(x^*), \nabla g_j(x^*): i \in \mathcal{E}; j \in A(x^*)\}$.
\end{enumerate}
Then, there exists a Lagrange multiplier $(\lambda,\mu)\in{\mathbb R}^m\times{\mathbb R}^p_+$ such that
\begin{equation}
\label{eqn:wsoc1}
\nabla f(x^{*})+
\sum_{i=1}^{m}\lambda_{i}\nabla h_{i}(x^{*})+
\sum_{j=1}^{p}\mu_{j}\nabla g_{j}(x^{*})=0 \ \ \text{ with } \ \ \mu_{j} g_{j}(x^{*})=0, \forall j,
\end{equation}
and for every $d \in \mathbb{R}^{n}$ such that
$\nabla h_i(x^*)^\mathtt{T}d=0$, $\forall i$; $\nabla g_j(x^*)^\mathtt{T}d=0$, $\forall j \in A(x^*)$,
we have
\begin{equation}
\label{eqn:wsoc2}
d^{\mathtt{T}}
(
\nabla^{2} f(x^{*})+
\sum_{i=1}^m \lambda_{i} \nabla^{2} h_{i}(x^{*})+
\sum_{j=1}^p \mu_j \nabla^{2} g_{j}(x^{*})
) d\geq 0.
\end{equation}
\end{conjecture}
Note that \eqref{eqn:wsoc1}-\eqref{eqn:wsoc2} is the WSOC condition.
We are aware of two previous attempts of solving this conjecture.
A proof of it under an additional technical condition has appeared,
recently, in \cite{chineses}.
Also, a counter-example appeared in \cite{minch}.
As we will see later in Section 3, these results are incorrect. Also, the recent paper \cite{conjnino2} proved the conjecture for a special form of quadratically-constrained problems.
Our approach is different from the ones mentioned above and
it is based on an additional assumption of smoothness of the singular value decomposition of $J(x)$ around the basis point $x^*$.
As we have mentioned, WSOC has two important features that make the Conjecture relevant in practical algorithms, which is our main motivation for pursuing it.
The first one is that it does not rely on the whole set of Lagrange multipliers,
in contrast with other second-order conditions
in the literature, and
the second one is that positive semi-definiteness of the
Hessian of the Lagrangian must be verified in a subspace (a more tractable task)
rather than at a pointed cone.
This is compatible with the
implementation of an algorithm that globally converges to a point $x^*$ fulfilling WSOC.
At each iteration, one has available an approximation $x^k$ to a solution and a single
Lagrange multiplier approximation $(\lambda^k, \mu^k)$ and one may check
if WSOC is approximately satisfied at the current point $(x^k,\lambda^k,\mu^k)$ if one wishes to declare
convergence to a second-order stationary point (see details in \cite{akkt2} and references therein). Of course, this is still a non-trivial computational task, so this only makes sense when most of the effort to check WSOC was already done as part of the computation of the iterate. This is the case of algorithms that try to compute a descent direction and a negative curvature direction \cite{abms2,moguerza}. Near a KKT point, once the procedure for computing the negative curvature direction fails, WSOC is approximately satisfied.
This is an important difference with respect to other conditions that we review in the next section.
In order to verify an optimality condition that relies on the whole set of Lagrange multipliers,
one needs an algorithm that generates all multipliers, which may be difficult.
Even more, in classical second-order conditions, one must check if a matrix is positive semi-definite on a pointed cone, which
is a far more difficult problem than
checking it on a subspace (see \cite{murtykabadi}).
Finally, we are not aware of any
reasonable iterative algorithm that generates subsequences that converges to a point that satisfies a
classical, more accurate, second-order optimality condition based on a pointed cone.
The discussion in \cite{tointexample} indicates that such algorithms probably do not exist.
In Section \ref{sec:opt2} we briefly review some related results on
second-order optimality conditions.
In Section 3 we prove the Conjecture under the additional assumption that the singular value decomposition of $J(x)$ is smooth in a neighborhood of $x^*$.
In Section 4 we present some conclusions and future directions of research on this topic.
\section{Second-order optimality conditions}
\label{sec:opt2}
In this section, we review some classical and some recent results on second-order optimality conditions.
Several second-order optimality conditions have been proposed in the literature, both
from a theoretical and practical point of view, see
\cite{bazaraa, rwets, nocedal, fletcher, fiaccomc, bertsekasnl,
bshapiro, penotsot, casast, bcshapiro, arupereira, baccaritrad, baccariwcr, gfrererd, bomze}
and references therein.
First, we start with the basic notation.
$\mathbb{R}^{n}$ stands for the $n$-dimensional real Euclidean space, $n \in
\mathbb{N}$. $\mathbb{R}_{+}^{n}\subset\mathbb{R}^{n}$
is the set of vectors whose components are nonnegative.
The canonical basis of ${\mathbb R}^n$ is denoted by $e_1,\dots,e_{n}$.
A set $\mathcal{R}\subset \mathbb{R}^{n}$ is a ray if
$\mathcal{R}:=\{rd_{0}: r \geq 0\}$ for some $d_{0}\in \mathbb{R}^{n}$.
Given a convex cone $\mathcal{K} \subset \mathbb{R}^{n}$,
we define the lineality set of $\mathcal{K}$ as $\mathcal{K}\cap-\mathcal{K}$, which is the largest subspace contained in $\mathcal{K}$.
We say that $\mathcal{K}$ is a first-order cone if $\mathcal{K}$ is the direct sum of a subspace and a ray.
We denote the Lagrangian function by
$L(x,\lambda,\mu)=f(x)+\sum_{i=1}^m\lambda_i h_i(x)+\sum_{j=1}^p\mu_j g_j(x)$
where $(x, \lambda, \mu)$ is in $\mathbb{R}^{n}\times \mathbb{R}^{m}\times \mathbb{R}_{+}^{p}$
and the generalized Lagrangian function as
$L^{g}(x,\lambda_0,\lambda,\mu)=\lambda_0f(x)+\sum_{i=1}^m\lambda_i h_i(x)+\sum_{j=1}^p\mu_j g_j(x)$
where $(x, \lambda_{0}, \lambda, \mu) \in \mathbb{R}^{n}\times \mathbb{R}_{+}\times\mathbb{R}^{m}\times \mathbb{R}_{+}^{p}$.
Clearly, $L^{g}(x,1,\lambda,\mu)=L(x,\lambda,\mu)$.
The symbols $\nabla_{x} L^{g}(x,\lambda_0,\lambda,\mu)$
and $\nabla^{2}_{xx} L^{g}(x,\lambda_0,\lambda,\mu)$ stand for
the gradient and the Hessian of
$L^{g}(x,\lambda_0,\lambda,\mu)$ with respect to $x$, respectively.
Similar notation holds for $L(x,\lambda,\mu)$.
The generalized first-order optimality condition at the feasible point $x^{*}$ is
\begin{equation}
\label{eqn:kkt}
\nabla_{x} L^{g}(x^*,\lambda_0,\lambda,\mu)=0 \ \ \text{ with } \ \ \mu^{\mathtt{T}}g(x^{*})=0, \ \ \lambda_0 \geq0,\ \ \mu\geq0,\ \
(\lambda_{0},\lambda, \mu)\neq(0,0,0).
\end{equation}
The set of vectors $(\lambda_0,\lambda,\mu) \in \mathbb{R}_{+}\times\mathbb{R}^{m}\times \mathbb{R}_{+}^{p}$
satisfying \eqref{eqn:kkt}
is the set of generalized Lagrange multipliers (or Fritz John multipliers), denoted by $\Lambda_0(x^*)$.
Note that \eqref{eqn:kkt} with $\lambda_0=1$ corresponds to the Karush-Kuhn-Tucker (KKT) conditions,
the standard first-order condition in numerical optimization. We denote by
$\Lambda(x^*):=\{(\lambda,\mu) \in \mathbb{R}^{m}\times \mathbb{R}_{+}^{p}: (1,\lambda,\mu) \in \Lambda_0(x^*)\}$, the set of all Lagrange multipliers.
At every minimizer, there are Fritz John multipliers such that \eqref{eqn:kkt} holds, that is, $\Lambda_0(x^*)\neq\emptyset$. In order to get existence of true Lagrange multipliers, additional assumptions have to be required. Assumptions on the analytic description of the feasible set that guarantee the validity of the KKT conditions at local minimizers are called constraint qualification (CQ).
Thus, under any CQ, the KKT conditions are necessary for optimality.
When the second-order information is available, we can consider second-order conditions.
In order to describe second-order conditions (in a dual form),
we introduce some important sets. We start with the
cone of critical directions (critical cone), defined as follows:
\begin{equation}
\label{eqn:cone}
C(x^*):=\{d\in{\mathbb R}^n\mid \nabla f(x^*)^\mathtt{T}d=0; \nabla h_i(x^*)^\mathtt{T}d=0, i\in\mathcal{E};
\nabla g_j(x^*)^\mathtt{T}d\leq0, j \in A(x^*)\}.
\end{equation}
Obviously, $C(x^{*})$ is a non-empty closed convex cone.
When $\Lambda(x^{*})\neq \emptyset$, the critical cone $C(x^*)$ can be written as
\begin{equation}\label{eqn:scone}
\left \{d \in \mathbb{R}^{n} :
\begin{array}{lll}
& \nabla h_{i}(x^{*})^{\mathtt{T}}d =0,\text{ for } i \in\mathcal{E},
\nabla g_{j}(x^{*})^{\mathtt{T}}d =0,\text{ if } \mu_{j}>0\\
& \nabla g_{j}(x^{*})^{\mathtt{T}}d \leq 0, \text{ if } \mu_{j}=0, j \in A(x^{*})
\end{array}
\right \},
\end{equation}
for every $(\lambda, \mu) \in \Lambda(x^*)$.
From the algorithmic point of view, an important set is the critical subspace (or weak critical cone), given by:
\begin{equation}
\label{eq:wcone}
S(x^*):=\{d\in{\mathbb R}^n\mid \nabla h_i(x^*)^\mathtt{T}d=0, i\in\mathcal{E}; \nabla g_j(x^*)^\mathtt{T}d=0, j \in A(x^*)\}.
\end{equation}
In the case when $\Lambda(x^{*})\neq \emptyset$, a simple inspection shows that the critical subspace $S(x^{*})$ is the lineality space of the critical cone $C(x^*)$. Under strict complementarity, $S(x^*)$ coincides with $C(x^*)$.
Now, we are able to define the
classical second-order conditions.
\begin{definition}
\label{def:wsoc}
Let $x^{*}$ be a feasible point with $\Lambda(x^{*})\neq\emptyset$. We have the following definitions
\begin{enumerate}
\item
We say that the {\it strong second-order optimality condition} (SSOC) holds at $x^{*}$ if
there is a $(\lambda,\mu)\in\Lambda(x^*)$ such that
$d^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d\geq0$ for every $d \in C(x^{*})$.
\item
We say that the {\it weak second-order optimality condition} (WSOC) holds at $x^{*}$ if
there is a $(\lambda,\mu)\in\Lambda(x^*)$ such that
$d^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d\geq0$ for every $d \in S(x^{*})$.
\end{enumerate}
\end{definition}
The classical second-order condition SSOC is particularly important from the point of view of passing from
necessary to sufficient optimality conditions. In this case, strengthening the sign of the inequality from ``$\geq0$'' to ``$>0$'' in the definition of SSOC, that is,
instead of positive semi-definiteness of the Hessian of the Lagrangian on the critical cone,
we require its positive definiteness
on the same cone (minus the origin), we get a sufficient optimality condition, see \cite{bazaraa, bshapiro}.
Furthermore, this sufficient condition also ensures that the local minimizer $x^*$ is isolated.
Besides these nice properties, from the practical point of view, SSOC has some disadvantages.
In fact, to verify the validity of SSOC at a given point is, in general, an NP-hard problem,
\cite{murtykabadi, pardalos}. Also, it is well known that very simple second-order methods fail to generate sequences in which SSOC holds at its accumulation points, see \cite{tointexample}.
From this point of view, WSOC seems to be the
most adequate second-order condition
when dealing with global convergence of second-order methods. In fact, all second-order algorithms known by the authors only guarantee
convergence to points satisfying WSOC, see
\cite{abms2, bss, cly, conntoint2, dennisv, dennisalem, dipillo, facchinei, grSQP, moguerza} and references therein.
This situation in which a most desirable theoretical property
is not suitable in an algorithmic framework is not particular only to the second-order case.
Even in the first-order case, it is known, for example,
that the Guignard constraint qualification is the
weakest possible assumption to yield KKT conditions at a local minimizer \cite{gould}.
In other words, a good first-order necessary optimality condition is of the form ``KKT or not-Guignard''.
But this is too strong for practical purposes, since no algorithm is
known to fulfill such condition at limit points of sequences generated by it,
in fact, the convergence assumptions of algorithms require stronger constraint qualifications \cite{rcpld,cpg,amrs2,amrs}.
For second-order algorithms, the situation is quite similar,
with the peculiarity that the difficulty is not only on the required constraint qualification,
but also in the verification of the
optimality condition, since,
numerically, we can only guarantee a partial second-order property, that is, for directions in the critical subspace,
which is a subset of the desirable critical cone of directions.
As the KKT conditions, SSOC and WSOC hold at minimizers only if some additional condition is valid.
As we will explore in the next section,
only MFCQ is not enough to ensure the existence of some Lagrange multiplier where SSOC holds.
Even WSOC cannot be assured to hold under MFCQ alone.
Under MFCQ, we have the following result, \cite{bshapiro, bental}:
\begin{theorem}
\label{teo-bs}
Let $x^{*}$ be a local minimizer of \eqref{problem}. Assume that MFCQ holds at $x^{*}$. Then,
\begin{equation}
\label{bonnans-shapiro1}
\mbox{for each } d\in C(x^*),\mbox{ there is a multiplier }(\lambda,\mu)\in\Lambda(x^*)\mbox{ such that }
d^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d\geq0.
\end{equation}
\end{theorem}
Note that for each critical direction, we have an associated Lagrange multiplier
$(\lambda,\mu)\in\Lambda(x^*)$, in opposition to
SSOC or WSOC, where we require {\it the same} Lagrange multiplier for all critical directions.
Observe that \eqref{bonnans-shapiro1} does not imply WSOC (nor SSOC).
Observe also that since $\Lambda(x^{*})$ is a compact set (by MFCQ),
\eqref{bonnans-shapiro1} can be written in a more compact form, namely,
$$
\forall d\in C(x^*), \ \ \sup \{d^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d : (\lambda,\mu)\in\Lambda(x^*)\}\geq0.
$$ Although this optimality condition relies on the whole Lagrange multiplier set $\Lambda(x^*)$,
hence it is not suitable for our practical considerations, it will play a crucial role in our analysis.
Even when no constraint qualification is assumed,
a second-order optimality condition can be formulated, relying on Fritz John multipliers \eqref{eqn:kkt}:
\begin{theorem}
\label{teo-aru}
Let $x^*$ be a local minimizer of \eqref{problem}. Then, for every $d$ in the critical cone $C(x^*)$, there is a Fritz John
multiplier $(\lambda_0,\lambda,\mu)\in\Lambda_0(x^*)$ such that
\begin{equation}
\label{arutyunov}
d^\mathtt{T}\nabla^2_{xx} L^{g}(x^*,\lambda_0,\lambda,\mu)d\geq0.
\end{equation}
\end{theorem}
The optimality condition of Theorem \ref{teo-aru}
has been studied a lot over the years, \cite{dubo, bental, levitin, bshapiro, aru}.
An important property is that it can be transformed into a sufficient optimality condition
by simply replacing the non-negative sign ``$\geq0$'' by ``$>0$'' (except at the origin), without any additional assumption.
For this reason, this condition is said to be a ``no-gap'' optimality condition.
Note that this is different from the case of SSOC,
since an additional assumption must be made for the necessary condition to hold.
Note that Theorem \ref{teo-bs} can be derived from Theorem \ref{teo-aru}, since under MFCQ,
there is no Fritz John multiplier with $\lambda_0=0$.
We emphasize that even though optimality conditions
given by Theorems \ref{teo-bs} and \ref{teo-aru} have nice theoretical properties, they do not suit our framework since
their verification requires the knowledge of the whole set of (generalized) Lagrange multipliers
at the basis point, whereas in practice,
we only have access to (an approximation of) a single Lagrange multiplier.
In the case of the optimality condition given by Theorem \ref{teo-aru}, one could argue that the possibility of verifying it with $\lambda_0=0$, and hence independently of the objective function, is not useful at all as an optimality condition. This is arguably the case for the first-order Fritz John optimality condition, but since Theorem \ref{teo-aru} gives a ``no-gap'' optimality condition, this argument is not convincing in the second-order case. In fact, one could show that if the sufficient optimality condition associated to Theorem \ref{teo-aru} is fulfilled with $\lambda_0=0$ for all critical directions, then the basis point is an isolated feasible point, and hence a local solution independently of the objective function. We take the point of view that algorithms naturally treat differently the objective function and the constraint functions, in a way that a multiplier associated to the objective function is not present, hence our focus on Lagrange multipliers, rather than on Fritz John multipliers.
As we have mentioned, known practical methods are only guaranteed to converge to points satisfying WSOC, and hence, we focus our attention, from now on, on conditions ensuring it at local minimizers.
We start with \cite{baccaritrad}, where the authors investigate the issue of verifying (\ref{bonnans-shapiro1})
for the same Lagrange multiplier:
\begin{theorem}[\cite{baccaritrad}]
\label{bttheorem}
Let $x^*$ be a local minimizer of (\ref{problem}). Assume that MFCQ holds at $x^*$
and that $\Lambda(x^*)$ is a line segment.
Then, for every first-order cone $K\subset C(x^*)$,
there is a $(\lambda^K,\mu^K)\in\Lambda(x^*)$ such that
\begin{equation}
\label{bt}
\forall d\in K, \ \ d^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda^K,\mu^K)d\geq0.
\end{equation}
\end{theorem}
We are interested only in the special case $K:=S(x^*)$. Thus,
(\ref{bt}) holds at a local minimizer $x^*$
when $\Lambda(x^*)$ is a line segment and MFCQ holds at $x^{*}$ (or, equivalently, $\Lambda(x^*)$ is a bounded line segment).
Note that in this case, \eqref{bt} is equivalent to WSOC.
In order to prove Theorem \ref{bttheorem} a crucial result is Yuan's Lemma \cite{yuan},
which was generalized for first-order cones in \cite{baccaritrad}. For further applications of
Yuan's Lemma, see \cite{martinezyuan, crouzeixyuan}.
\begin{lemma}[Yuan \cite{yuan,baccaritrad}]
\label{yuan}
Let $P,Q\in{\mathbb R}^{n\times n}$ be two symmetric matrices and $K\subset{\mathbb R}^n$ a first-order cone.
Then the following conditions are equivalent:
\begin{itemize}
\item $\max\{d^\mathtt{T}Pd,d^\mathtt{T}Qd\}\geq0, \ \ \forall d\in K$;
\item There exist $\alpha\geq0$ and $\beta\geq0$ with $\alpha+\beta=1$ such that $d^\mathtt{T}(\alpha P+\beta Q)d\geq0$, $\forall d\in K$.
\end{itemize}
\end{lemma}
A sufficient condition to guarantee that $\Lambda(x^*)$ is a line segment, is to require that the
rank of the Jacobian matrix $J(x^*)$ is row-deficient by at most one, that is, the rank is at most one less than the number of rows.
The fact that the rank assumption yields the one-dimensionality of $\Lambda(x^*)$ is a simple consequence of
the rank-nullity theorem.
Thus, we have the following result:
\begin{theorem}[Baccari and Trad \cite{baccaritrad}]
\label{bttheo}
Let $x^*$ be a local minimizer of \eqref{problem} such that MFCQ holds
and the rank of the Jacobian matrix $J(x^*)\in{\mathbb R}^{(m+q)\times n}$ is $m+q-1$,
where $q$ is the number of active inequality constraints at $x^*$.
Then, there exists a Lagrange multiplier $(\lambda,\mu)\in{\mathbb R}^m\times{\mathbb R}^p_+$ such that
WSOC holds.
\end{theorem}
Another line of reasoning in order to arrive at second-order optimality conditions
is to use Janin's version of the classical Constant Rank theorem (\cite{spivak}, Theorem 2.9).
See \cite{janin, aes2, param}.
\begin{theorem}[Constant Rank]
\label{ranktheo}
Let $x^*\in\Omega$ and $d\in C(x^*)$.
Let $E\subset\{1,\dots,p\}$ be the set of indices $j$
such that $\nabla g_j(x^*)^\mathtt{T}d=0, j\in A(x^*)$.
If $\{\nabla h_i(x), i\in\mathcal{E}; \nabla g_j(x), j\in E\}$
has constant rank in a neighborhood of $x^*$, then, there are $\varepsilon>0$ and a twice continuously differentiable
function $\xi:(-\varepsilon,\varepsilon)\to{\mathbb R}^n$
such that $\xi(0)=x^*, \xi'(0)=d, h_i(\xi(t))=0, i\in\mathcal{E}; g_j(\xi(t))=0, j\in E$ for
$t\in(-\varepsilon,\varepsilon)$ and $g(\xi(t))\leq0$ for $t\in[0,\varepsilon)$.
\end{theorem}
The proof that the function $\xi$ is twice continuously differentiable was done in \cite{param}.\\
Using a constant rank assumption jointly with MFCQ, in \cite{ams2},
Andreani, Mart\'{\i}nez and Schuverdt have proved the existence of multipliers satisfying WSOC
at a local minimizer as stated below. This joint condition was also used in the convergence
analysis of a second-order augmented Lagrangian method.
\begin{theorem}[Andreani, Mart\'{\i}nez and Schuverdt \cite{ams2}]
\label{ams2}
Let $x^*$ be a local minimizer of \eqref{problem} with MFCQ holding at $x^*$.
Assume that the rank of the Jacobian matrix $J(x)\in{\mathbb R}^{(m+q)\times n}$
is constant around $x^*$, where $q$ is the number of active inequality constraints at $x^*$.
Then, WSOC holds at $x^{*}$.
\end{theorem}
The proof can be done using Theorem \ref{ranktheo} for $d\in S(x^*)$
and $E=\{1,\dots,p\}$, using the fact that $t=0$ is a local minimizer of $f(\xi(t)), t\geq0$.
This result was further improved in \cite{aes2}, where they noticed that MFCQ can be replaced by the non-emptiness of $\Lambda(x^*)$. This was also done independently in \cite{jye}. In fact, WSOC can be proved to hold {\it for all} Lagrange multipliers:
\begin{theorem}[Andreani, Echag\"ue and Schuverdt \cite{aes2}]
\label{aes}
Let $x^*$ be a local minimizer of \eqref{problem} such that the rank of the Jacobian matrix $J(x)\in{\mathbb R}^{(m+q)\times n}$
is constant
around $x^*$, where $q$ is the number of active inequality constraints at $x^*$.
Then, every Lagrange multiplier $(\lambda,\mu)\in{\mathbb R}^m\times{\mathbb R}^p_+$ (if any exists) is such that WSOC holds.
\end{theorem}
This same technique can be employed under the Relaxed Constant Rank CQ (RCRCQ, \cite{minchenko}), that is,
$\{\nabla h_i(x),i\in\mathcal{E}; \nabla g_j(x), j\in E\}\mbox{ has constant rank around }x^*\mbox{ for every }E\subset A(x^*),$
to prove the stronger result that all Lagrange multipliers satisfy SSOC. See \cite{aes2,param}.
These results can be strengthened by replacing the use of the Constant Rank theorem by the assumption that the critical cone is a subset of the Tangent cone of a modified feasible set (Abadie-type assumptions). See details in \cite{abadie2,bomze}.
\section{The conjecture}
In this section we prove the conjecture under an additional assumption based on the smoothness of the singular value decomposition of the Jacobian matrix.
In view of Theorems \ref{bttheo} and \ref{ams2},
that arrives at the same second-order optimality condition under MFCQ and
row-rank deficiency of at most one or under MFCQ and the constant rank assumption of the Jacobian $J(x)$,
it is natural to conjecture that the same result would hold under MFCQ and
assuming that the rank increases at most by one in a neighborhood of a local minimizer.
This was conjectured in \cite{ams2}.
Although a unification of both results would be interesting,
this was a bold conjecture since the theorems have completely different proofs.
Let us first show that Baccari and Trad's result can be generalized
in order to consider column-rank deficiency.
The proof is a simple application of the rank-nullity theorem.
\begin{theorem}
\label{genbac}
Let $x^*$ be a local minimizer of \eqref{problem} such that MFCQ holds and the
rank of the Jacobian matrix $J(x^*)\in{\mathbb R}^{(m+q)\times n}$ is $n-1$, where $q$ is the number of active inequality constraints at $x^*$.
Then, there exists a Lagrange multiplier $(\lambda,\mu)\in{\mathbb R}^m\times{\mathbb R}^p_+$ such that WSOC holds.
\end{theorem}
\begin{proof}
Applying the rank-nullity theorem to $S(x^*)=\text{Ker}(J(x^*))$, we get that
$\mbox{dim}(S(x^*))=1$.
Hence, there is $d_0\in S(x^*)$ such that $S(x^*)=\{td_0, t\in{\mathbb R}\}$.
Since MFCQ holds, Theorem \ref{teo-bs} yields (\ref{bonnans-shapiro1}).
In particular, for $d=d_{0}$, there is a Lagrange multiplier $(\lambda,\mu)$ such that $d_0^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d_0\geq0$.
This same Lagrange multiplier can be used for all other directions $d=td_0\in S(x^*)$,
since $d^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d=t^2d_0^\mathtt{T}\nabla^2_{xx} L(x^*,\lambda,\mu)d_0\geq0$.
Thus, WSOC holds at $x^{*}$.
\end{proof}
The previous results show that the Conjecture is true in dimension less than or equal to two,
or when there are at most two active constraints.
In ${\mathbb R}^3$, the remarkable example by Arutyunov \cite{aru}/Anitescu \cite{ani} shows that if the rank increases by more than one around $x^*$,
WSOC may fail for all Lagrange multipliers (also, SSOC fails).
We describe below a modification of the example, given in \cite{Baccari2004},
since it gives nice insights about the Conjecture.
\begin{example}
$$\begin{array}{ll}
\mbox{Minimize }&x_3,\\
&x_3\geq 2\sqrt{3}x_1x_2-2x_2^2,\\
&x_3\geq x_2^2-3x_1^2,\\
&x_3\geq -2\sqrt{3}x_1x_2-2x_2^2.\\
\end{array}$$
\end{example}
Here $x^*=(0,0,0)$ is a global minimizer. The critical subspace is the whole plane $x_3=0$ and $\Lambda(x^*)$ is
the simplex $\mu_1+\mu_2+\mu_3=1, \mu_j\geq0, j=1,2,3$.
Figure~\ref{baccari} shows the graph of the right-hand side of each constraint,
where the feasible set is the set of points above all surfaces.
Note that along every direction in the critical cone,
there is a convex combination of the constraints that moves upwards and (\ref{bonnans-shapiro1}) holds,
but for any convex combinations of the constraints,
there exists a direction in the critical cone that moves downwards.
This means that WSOC fails for all Lagrange multipliers. Since in this
example $C(x^*)=S(x^*)$, this means that SSOC also fails for all Lagrange multipliers.
Note also in Figure~\ref{baccari} that around $x^*$ there is no feasible curve such that all
constraints are active along this curve, which is the main property allowing
the proof of WSOC under constant rank assumptions (Theorem \ref{ranktheo}).
\begin{figure}
\caption{MFCQ alone is not enough to ensure the validity of SSOC or WSOC.}
\label{baccari}
\end{figure}
Before describing our proof, we briefly point out previous attempts of solving the Conjecture.
In \cite{chineses}, the authors stated the validity of the Conjecture with the additional following assumption:\\
{\bf Assumption (A3) \cite{chineses}}: \\
If there exists a sequence $\{x^k\}$ converging to $x^*$
such that the rank of $J(x^k)$ is $r+1$ for all $k$, then for any $x^k$ and any subset $E\subset A(x^*)$,
the rank of $\{\nabla h_i(x), i\in\mathcal{E}; \nabla g_j(x), j\in E\}$ is constant around $x^k$.\\
The proof of the Conjecture under (A3) in \cite{chineses} is based on the following incorrect Lemma:\\
{\bf Lemma 3.4 from \cite{chineses}}: Under (A3) and the assumptions of the Conjecture,
whenever there exists a sequence $\{x^k\}$ converging to $x^*$ such that the rank of $J(x^k)$ is $r+1$
for all $k$, there exists some index set $E\subset A(x^*)$ such that the rank of $\{\nabla g_j(x^k), j\in E\}$
is equal to $1$ and the rank of $\{\nabla h_i(x), i\in\mathcal{E}; \nabla g_j(x), j\in A(x^*)\backslash E\}$
is $r$ for an infinite number of indices $k$.
The following counter-example shows that it is incorrect.
\begin{counter}
$$\begin{array}{ll}
\mbox{Minimize }&0,\\
\mbox{subject to }&x_1\leq0,\\
&x_1+x_2x_1^2+x_2^3/3\leq0,\\
&2x_1+x_2x_1^2+x_2^3/3\leq0,
\end{array}$$
at $x^*=(0,0)$.
\end{counter}
Clearly, $x^*=(0,0)$ is a local minimizer that fulfills MFCQ,
the rank of the Jacobian is $1$ at $x^*$ and at most $2$ at every other point.
Also, for any subset of $A(x^*)=\{1,2,3\}$, the rank of the associated gradients
is constant in the neighborhood of every point $x$ different from $x^*$, hence all
assumptions of Lemma 3.4 from \cite{chineses} are fulfilled.
A simple inspection shows that for every point $x$ different from $x^*$ one can not separate the three
gradients into two subsets of rank $1$ as the lemma states. In fact, every subset with two
gradients will have rank $2$.
Another attempt to solve the Conjecture is given in \cite{minch}.
Here, the following problem is presented as a counter-example for the Conjecture:
\begin{equation*}
\begin{array}{lll}
\mbox{minimize } & -x_1^2-x_2, \\
\mbox{subject to } & 2x_1^2+x_2\leq0,\\
& -x_1^2+x_2\leq0,\\
& x_2\leq0.
\end{array}
\end{equation*}
The point $x^*:=(0,0)$ is a local minimizer that satisfies MFCQ, the rank of $J(x^*)$ is $1$ and increases at most to $2$ around $x^*$. In \cite{minch}, the authors only show that WSOC does not need to hold for all Lagrange multipliers. In particular, it was shown that it does not hold at $\bar{\mu}:=(0,1,0)$.
Clearly, it does not disprove the Conjecture since there are other Lagrange multipliers that fulfill WSOC, as implied by Theorem \ref{genbac}. For instance, $\mu:=(1,0,0)$ satisfies WSOC.
Finally, we present our main result.
We prove the Conjecture under an additional technical assumption on the smoothness of the singular value decomposition (SVD) of the Jacobian matrix around $x^*$.
\begin{assumption}
\label{svd}
Let $q$ be the number of active inequality constraints at $x^*$ and $J(x)\in{\mathbb R}^{(m+q)\times n}$ be the Jacobian matrix for $x$ near $x^*$.
We assume that there exist differentiable functions around $x^*$ given by
$$x\mapsto U(x)\in{\mathbb R}^{(m+q)\times(m+q)}, \ \ x\mapsto\Sigma(x)\in{\mathbb R}^{(m+q)\times n}\mbox{ and }x\mapsto V(x)\in{\mathbb R}^{n \times n},$$
such that $J(x)=U(x)\Sigma(x)V(x)^{\mathtt{T}}$, where $\Sigma(x)$ is diagonal with diagonal elements $\sigma_1(x),\sigma_2(x),\dots,\sigma_k(x)$,
where $k:=\min\{m+q,n\}$ and $\sigma_i(x)=0$ when $i$ is greater than the rank of $J(x)$. We assume also that $U(x^*)$ and $V(x^*)$ are matrices with non-zero orthogonal columns.
\end{assumption}
Note that only at $x=x^*$ we assume that $U(x^*)$ and $V(x^*)$ are matrices with orthogonal columns. This implies that $U(x)$ and $V(x)$ are at least invertible matrices in a small enough neighborhood of $x^*$, but not necessarily with orthogonal columns.
Note that we do not require that the columns of the matrices $U(x^*)$ and $V(x^*)$ to be normalized,
as in the classical SVD decomposition. In our proof, it is also not necessary to adopt the convention of non-negativeness of $\sigma_i(x)$ or that they are ordered. Without risk of confusion, we will still call this weaker decomposition as the SVD. We introduce this extra freedom in the decomposition in order to allow more easily for the differentiability of the functions.
\begin{theorem}\label{theo} Assume that $x^*$ is a local minimizer of (\ref{problem})
that fulfills MFCQ. Let $r$ be the rank of $J(x^*)$, and assume that for every $x$ in
some neighborhood of $x^*$ the rank of $J(x)$ is at most $r+1$.
Suppose also that Assumption \ref{svd} holds. Then, there is a Lagrange multiplier ($\lambda,\mu)$ such that WSOC holds.
\end{theorem}
\begin{proof}
Let us consider the column functions
$U(x)=[u_1(x) \dots u_{m+q}(x)]$ and $V(x)=[v_1(x) \dots v_n(x)]$.
Clearly, $J(x)=\sum_{k=1}^{r+1}\sigma_k(x)u_k(x)v_k(x)^\mathtt{T}$.
To simplify the notation, let us assume that $m=0$ and $A(x^*)=\{1,\dots,p\}$,
that is, $q=p$, hence, it holds that
$$\nabla g_i(x)^\mathtt{T}=\sum_{k=1}^{r+1}\sigma_k(x)[u_k(x)]_iv_k(x)^\mathtt{T}, \ \ i=1,\dots,p,$$
where $[u]_i$ is the $i$-th coordinate of the vector $u$ of
appropriate dimension.
Since the functions are smooth we can compute derivatives and get
$$\nabla^2 g_i(x)=
\sum_{k=1}^{r+1} \sigma_k(x)[u_k(x)]_iJ_{v_k}(x)+
\sum_{k=1}^{r+1}([u_k(x)]_i\nabla\sigma_k(x)+\sigma_k(x)\nabla[u_k(x)]_i)v_k(x)^\mathtt{T}, \ \ i=1,\dots,p,$$
where $J_{v_k}(x)\in{\mathbb R}^{n\times n}$ is the Jacobian matrix of the function $v_k$ at $x$.
Now, let us fix a direction $d\in S(x^*)$ and a Lagrange multiplier
$\mu \in\Lambda(x^*)$ (we identify a Lagrange multiplier $(\lambda,\mu)$ with $\mu$ since we are assuming $m=0$).
Now, we proceed to evaluate $d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d$. We omit the dependency on $x^*$ for simplicity. Then,
\begin{eqnarray}
\begin{array}{rl}
d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d
=&d^\mathtt{T}\nabla^2 fd+d^\mathtt{T}\left[\sum_{i=1}^p\mu_i\sum_{k=1}^{r+1}([u_k]_i\nabla\sigma_k+\sigma_k\nabla[u_k]_i)v_k^\mathtt{T}+\sigma_k[u_k]_iJ_{v_k}\right]d \\
=&d^\mathtt{T}\nabla^2fd+
\sum_{k=1}^{r+1}[(d^\mathtt{T}\nabla\sigma_k)(\mu^\mathtt{T}u_k)+\sigma_kd^\mathtt{T}J_{u_k}^\mathtt{T}\mu]v_k^\mathtt{T}d+
\sum_{k=1}^{r+1}\sigma_k(\mu^\mathtt{T}u_k)d^\mathtt{T}J_{v_k}d.
\end{array}
\end{eqnarray}
From $S(x^*)=\text{Ker}(J(x^*))$,
and from the SVD, $J(x)=U(x)\Sigma(x)V(x)^\mathtt{T}$, we can conclude that
there are $s_{r+1},\dots,s_{n}\in{\mathbb R}$ such that $d=\sum_{j=r+1}^ns_jv_j$. Hence,
from the orthogonality of $\{v_1,\dots,v_p\}$,
we get $v_{k}^{\mathtt{T}}d=0$, $k<r+1$.
Furthermore, since $\sigma_{r+1}=0$ we obtain that
\begin{equation}
\label{eqn:hess}
d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d=
d^\mathtt{T}\nabla^2fd+
(d^\mathtt{T}\nabla\sigma_{r+1})(\mu^\mathtt{T}u_{r+1})(v_{r+1}^\mathtt{T}d)+
\sum_{k=1}^{r}\sigma_k(\mu^\mathtt{T}u_k)d^\mathtt{T}J_{v_k}d.
\end{equation}
For a fixed Lagrange multiplier $\bar{\mu} \in\Lambda(x^*)$
(note that MFCQ ensures non-emptiness of $\Lambda(x^*)$), we can write $\Lambda(x^*)=(\bar{\mu}+\text{Ker}(J(x^*)^\mathtt{T}))\cap{\mathbb R}^p_+$, hence, there are $t_{r+1},\dots,t_p\in{\mathbb R}$ such that $\mu=\bar{\mu}+\sum_{j=r+1}^{p} t_ju_j$ and we can
write \eqref{eqn:hess} as
\begin{equation}
\label{eqn:hesst}
d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d=
d^\mathtt{T}\nabla^2fd+(d^\mathtt{T}\nabla\sigma_{r+1})(\bar{\mu}^\mathtt{T}u_{r+1}+t_{r+1})(v_{r+1}^\mathtt{T}d)+
\sum_{k=1}^{r}\sigma_k(\bar{\mu}^\mathtt{T}u_k)d^\mathtt{T}J_{v_k}d.
\end{equation}
Observe that for a fixed $d\in S(x^*)$, the value of $d^\mathtt{T}\nabla^2_{xx} L(x^*,\mu)d$, for $\mu \in\Lambda(x^*)$, depends on a single parameter $t_{r+1}$.
Since MFCQ holds, condition (\ref{bonnans-shapiro1}) holds, and we may write it as
\begin{equation}
\label{aru2}
\max_{\mu \in \Lambda(x^{*})} d^\mathtt{T}\nabla^2_{xx} L(x^*,\mu)d \geq0, \ \ \forall d\in S(x^*).
\end{equation}
In virtue of the fact that the value of $d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d$ depends only on one parameter,
in order to apply Yuan's lemma, we will rewrite \eqref{aru2} as a maximization problem over a line segment.
For that purpose, we define the set
\begin{equation}
\label{eqn:m}
M:=\{(t_{r+1},t_{r+2},\dots,t_{p}) \in \mathbb{R}^{p-r} \text{ such that }
\bar{\mu}+\sum_{j=r+1}^{p} t_{j}u_{j} \in \Lambda(x^{*}) \},
\end{equation}
and the following optimization problems
\begin{equation}
a_{*}:= \text{ Inf } \{ t_{r+1}:(t_{r+1},t_{r+2},\dots,t_{p}) \in M \},
\end{equation}
and
\begin{equation}
b_{*}:= \text{ Sup } \{ t_{r+1}:(t_{r+1},t_{r+2},\dots,t_{p}) \in M \}.
\end{equation}
Both values $a_{*}$ and $b_{*}$ are finite and attained since $M$ is a compact set.
Furthermore, $M$ is a compact convex set. It is easy to see that $M$ is convex and closed since
$\Lambda(x^{*})$ is also convex and closed. To show that $M$ is bounded, suppose by contradiction, that there is a sequence
$(t_{r+1}^{k},\dots,t_{p}^{k}) \in M$ with $T_{k}:=\max\{|t^{k}_{r+1}|,\dots,|t_{p}^{k}|\} \rightarrow \infty$.
Since $\Lambda(x^{*})$ is bounded, there is a scalar $K$ such that
$\|\bar{\mu}+\sum_{j=r+1}^{p} t^{k}_{j}u_{j} \| \leq K$,
$\forall k \in \mathbb{N}$.
Dividing this expression by $T_{k}$ and taking an adequate subsequence,
we conclude that there are $\bar{t}_{r+1},\dots, \bar{t}_{p}$ not all zero, such that
$\sum_{j=r+1}^{p} \bar{t}_{j}u_{j}=0$, which is a contradiction with the linear independence of $\{u_{r+1},\dots,u_{p}\}$.
Thus, $M$ must be a compact convex set.
Finally, denote by
$\theta(t_{r+1},d):=d^\mathtt{T}\nabla^2fd+
(\bar{\mu}^\mathtt{T}u_{r+1}+t_{r+1})(d^\mathtt{T}\nabla\sigma_{r+1})(v_{r+1}^\mathtt{T}d)+
\sum_{k=1}^{r}\sigma_k(\bar{\mu}^\mathtt{T}u_k)d^\mathtt{T}J_{v_k}d$.
We see from \eqref{eqn:hesst},
that
$\theta(t_{r+1},d)$ coincides with
$d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d$ whenever $\mu=\bar{\mu}+\sum_{j=r+1}^{p}t_{j}u_{j}$.
Now, consider the optimization problem:
\begin{equation}
\label{eqn:theta}
\max \{ \theta(t_{r+1},d): t_{r+1} \in [a_{*},b_{*}]\}.
\end{equation}
From \eqref{aru2}, we get $\max \{ \theta(t_{r+1},d): t_{r+1} \in [a_{*},b_{*}]\}\geq0$ for all $d \in S(x^{*})$.
Observe that $\theta(t,d)$ is linear in $t$ and for each $t$ fixed, it defines
a quadratic form as function of $d$.
Then, since $\theta(t,d)$ is linear in $t$,
the maximum of \eqref{eqn:theta} is attained either at $t_{r+1}=a_{*}$ or $t_{r+1}=b_{*}$.
For simplicity, let us call the quadratic forms $\theta(a_{*},d)$ and
$\theta(b_{*},d)$ by $d^\mathtt{T}Pd$ and $d^\mathtt{T}Qd$, respectively.
Thus, we arrive at
\begin{equation}
\label{eqn:yuan2}
\max\{ d^\mathtt{T}Pd, d^\mathtt{T}Qd\}\geq0,\ \ \forall d\in S(x^*).
\end{equation}
Applying Yuan's Lemma (Lemma \ref{yuan}),
we get the existence of $\alpha\geq0$ and
$\beta\geq0$ with $\alpha+\beta=1$ such that
\begin{equation}
\label{eqn:yuan3}
d^\mathtt{T}(\alpha P+\beta Q)d\geq0, \ \ \forall d\in S(x^*).
\end{equation}
Additionally, due to the linearity of $\theta(t,d)$, we see that $\eta:=\alpha a_{*}+\beta b_{*}$
satisfies $\theta(\eta,d)=d^\mathtt{T}(\alpha P+\beta Q)d$.
Denote $\pi_{r+1}(t_{r+1},\dots,t_{p}):=t_{r+1}$ the projection
onto the first coordinate. From the continuity of $\pi_{r+1}$ and the compactness and convexity of $M$, we get $[a_{*}, b_{*}]=\pi_{r+1}(M)$. Since $\eta \in[a_{*}, b_{*}]$, we conclude that there are some scalars $\hat{t}_{r+1},\dots, \hat{t}_{p}$ with
$\hat{t}_{r+1}=\eta$ and $\hat{\mu}:=\bar{\mu}+\sum_{j=r+1}^{p} \hat{t}_{j}u_{j} \in \Lambda(x^*)$
such that $\theta(\eta,d)=d^\mathtt{T}(\alpha P+\beta Q)d=d^\mathtt{T}\nabla^2_{xx} L(x^*,\hat{\mu})d$.
From \eqref{eqn:yuan3}, we get
that $\hat{\mu}$ is a Lagrange multiplier for which WSOC holds at $x^*$.
\end{proof}
We refer the reader to the companion paper \cite{smoothSVD} for the proof that our Theorem \ref{theo} is a generalization of Theorems
\ref{bttheo} (originally from Baccari and Trad \cite{baccaritrad}) and \ref{genbac},
where the rank deficiency is at most one. It is also a generalization of Theorem \ref{ams2} (originally from Andreani et al. \cite{ams2}), where the rank is constant, as long as all non-zero singular values are distinct.
In order to see that Theorem \ref{theo} provides new examples where WSOC holds, let us consider the following:
\begin{example}
$$\begin{array}{ll}
\mbox{Minimize }&x_3,\\
\mbox{subject to }&\cos(x_1+x_2)-x_3-1\leq0,\\
&-\cos(x_1+x_2)-x_3+1\leq0,\\
&-2x_3\leq0,
\end{array}$$
at a local minimizer $x^*=(0,0,0)$.
The Jacobian matrix for $x$ around $x^*$ is given by
$$J(x)=\left(\begin{array}{ccc}-\sin(x_1+x_2)&-\sin(x_1+x_2)&-1\\\sin(x_1+x_2)&\sin(x_1+x_2)&-1\\0&0&-2\end{array}\right),$$ that admits the following smooth SVD decomposition:
\begin{eqnarray*}
U(x):=\left(\begin{array}{ccc}-\frac{1}{\sqrt{6}}&-\frac{1}{\sqrt{2}}&\frac{1}{\sqrt{3}}\\-\frac{1}{\sqrt{6}}&\frac{1}{\sqrt{2}}&\frac{1}{\sqrt{3}}\\-\frac{2}{\sqrt{6}}&0&-\frac{1}{\sqrt{3}}\end{array}\right), \Sigma(x):=\left(\begin{array}{ccc}\sqrt{6}&0&0\\0&2\sin(x_1+x_2)&0\\0&0&0\end{array}\right)\mbox{ and}\\
V(x)^\mathtt{T}:=\left(\begin{array}{ccc}0&0&1\\\frac{1}{\sqrt{2}}&\frac{1}{\sqrt{2}}&0\\\frac{1}{\sqrt{2}}&-\frac{1}{\sqrt{2}}&0\end{array}\right).
\end{eqnarray*}
Clearly, MFCQ holds and the rank is $1$ at $x^*$ and increases at most to $2$ around $x^*$.
The set $\Lambda(x^*)$ of Lagrange multipliers is defined by the relations $\mu_1+\mu_2+2\mu_3=1$ with $\mu_1,\mu_2,\mu_3\geq0$.
Theorem \ref{theo} guarantees the existence of a Lagrange multiplier that fulfills WSOC.
In fact, we can see that WSOC holds at $x^*$ whenever $\mu_2\geq\mu_1$.\end{example}
The next example shows, however, that our Theorem \ref{theo} in the way presented does not prove the complete conjecture, since Assumption \ref{svd} may fail.
\begin{example}
$$\begin{array}{ll}
\mbox{Minimize }&x_3,\\
\mbox{subject to }&g_1(x):=x_1x_2-x_3\leq0,\\
&g_2(x):=-x_1x_2-x_3\leq0,\\
&g_3(x):=-x_3\leq0,
\end{array}$$
at a local minimizer $x^*=(0,0,0)$.
The Jacobian matrix at $x$ near $x^*$ is given by $J(x)=\left(\begin{array}{ccc}x_2&x_1&-1\\-x_2&-x_1&-1\\0&0&-1\end{array}\right).$
Clearly, MFCQ holds and the rank of $J(x^*)$ is $1$ and increases at most to $2$ in a neighborhood. Also, WSOC holds. Let us prove that Assumption \ref{svd} does not hold. Assume that there are differentiable functions $U(x), \Sigma(x), V(x)$ such that $J(x)=U(x)\Sigma(x)V(x)^\mathtt{T}$ for all $x$ in a neighborhood of $x^*$ as in Assumption \ref{svd}. Let $U(x)=[u_1, u_2, u_3]$ and $V(x)=[v_1,v_2,v_3]$ be defined columnwise, where the dependency on $x$ was omitted. Also, let $\sigma_1, \sigma_2, \sigma_3$ be the diagonal elements of $\Sigma(x)$. Since the rank of $J(x)$ is at most $2$, $\sigma_3\equiv0$. At $x=x^*$, since the rank is $1$, we have $\sigma_1(x^*)\neq0$ and $\sigma_2(x^*)=0$. Then, $u_1(x^*)=\frac{\alpha}{\sigma_1(x^*)}(-1,-1,-1), v_1(x^*)=\frac{1}{\alpha}(0,0,1)$ for some $\alpha\neq0$ and $u_2(x^*)\perp u_1(x^*)$, $v_2(x^*)\perp v_1(x^*)$. Now, denoting by $[w]_i, i=1,2,3$ the components of the vector $w\in{\mathbb R}^3$, the first two columns of the identity $U(x)\Sigma(x)V(x)^\mathtt{T}=J(x)$ for all $x$ near $x^*$ gives:
$$\sigma_1[u_1]_i[v_1]_j+\sigma_2[u_2]_i[v_2]_j=\frac{\partial g_i(x)}{\partial x_j}, i=1,2,3, j=1,2.$$
Computing derivatives with respect to $x_1$ and $x_2$ of every entry, gives, for $i=1,2,3, j=1,2$ and $k=1,2$:
\begin{eqnarray*}
\frac{\partial\sigma_1}{\partial x_k}[u_1]_i[v_1]_j+\sigma_1\frac{\partial[u_1]_i}{\partial x_k}[v_1]_j+\sigma_1[u_1]_i\frac{\partial[v_1]_j}{\partial x_k}+\\
\frac{\partial\sigma_2}{\partial x_k}[u_2]_i[v_2]_j+\sigma_2\frac{\partial[u_2]_i}{\partial x_k}[v_2]_j+\sigma_2[u_2]_i\frac{\partial[v_2]_j}{\partial x_k}=
\frac{\partial^2 g_i(x)}{\partial x_k\partial x_j}.
\end{eqnarray*}
At $x=x^*$, since $\sigma_2(x^*)=0$ and $[v_1(x^*)]_j=0, j=1,2$ we have
\begin{eqnarray}
\label{maineq}
\sigma_1(x^*)[u_1(x^*)]_i\frac{\partial[v_1(x^*)]_j}{\partial x_k}+\frac{\partial\sigma_2(x^*)}{\partial x_k}[u_2(x^*)]_i[v_2(x^*)]_j=
\frac{\partial^2 g_i(x^*)}{\partial x_k\partial x_j}.
\end{eqnarray}
For fixed $j=1,2$ and $k=1,2$, we can multiply equation (\ref{maineq}) by $[u_1(x^*)]_i$ and add for $i=1,2,3$ to get:
\begin{eqnarray*}
\sigma_1(x^*)\|u_1(x^*)\|^2\frac{\partial[v_1(x^*)]_j}{\partial x_k}+\frac{\partial\sigma_2(x^*)}{\partial x_k}\langle u_1(x^*),u_2(x^*)\rangle[v_2(x^*)]_j=
\langle u_1(x^*),\left[\frac{\partial^2 g_i(x^*)}{\partial x_k\partial x_j}\right]_{i=1}^3\rangle.
\end{eqnarray*}
Since $\sigma_1(x^*)\neq0$, computing derivatives,
using the definition of $u_1(x^*)$ and the fact that $u_1(x^*)\perp u_2(x^*)$, we conclude that
$$\frac{\partial[v_1(x^*)]_j}{\partial x_k}=0, j=1,2, k=1,2.$$
Substituting back in \eqref{maineq} we have, for all $i=1,2,3, j=1,2, k=1,2$:
\begin{eqnarray*}
\frac{\partial\sigma_2(x^*)}{\partial x_k}[u_2(x^*)]_i[v_2(x^*)]_j=
\frac{\partial^2 g_i(x^*)}{\partial x_k\partial x_j}.
\end{eqnarray*}
At indices $(i,j,k)\in\{(1,1,2),(1,2,1)\}$, where the right-hand side is non-zero, we have $\frac{\partial\sigma_2(x^*)}{\partial x_1}\neq0, \frac{\partial\sigma_2(x^*)}{\partial x_2}\neq0, [v_2(x^*)]_1\neq0$ and $[v_2(x^*)]_2\neq0$. At indices $(i,j,k)\in\{(1,1,1),(2,1,1),(3,1,1)\}$, where the right-hand side is zero, we get $u_2(x^*)=0$, which is a contradiction.
\end{example}
To conclude this section we note that our proof suggests that when the rank is constant,
the Hessian of the Lagrangian does not depend on the Lagrange multiplier.
In fact, we can prove this without additional assumptions.
This explains why results under constant rank conditions hold {\it for all} Lagrange multipliers.
\begin{theorem}
\label{indep}
Suppose that $\Lambda(x^{*})\neq \emptyset$.
If the rank of $J(x)$ is constant around a point
$x^*$, then
the quadratic form $d^\mathtt{T}\nabla^2_{xx}L(x^*,\lambda,\mu)d$ for $d\in S(x^*)$
does not depend on $(\lambda,\mu)\in\Lambda(x^*)$.
\end{theorem}
\begin{proof}
By simplicity, assume $m=0$ and $A(x^*)=\{1,\dots,p\}$.
By Theorem \ref{ranktheo}, for each $d\in S(x^*)$,
there exists a smooth curve $\xi(t)$, $t\in(-\varepsilon,\varepsilon)$ with $g(\xi(t))=0$ for all $t$, with $\xi(0)=x^*$
and $\xi'(0)=d$.
Take $\tilde\mu\in \text{Ker}(J(x^*)^\mathtt{T})$ and let us define
the function $R(t):=\sum_{i=1}^p\tilde\mu_i g_i(\xi(t))$, which is constantly zero for small $t$.
Straightforward calculations show
that $R''(0)=d^\mathtt{T}\sum_{i=1}^p\tilde\mu_i\nabla^2 g_i(x^*)d+\xi''(0)^\mathtt{T}J(x^*)^\mathtt{T}\tilde\mu=0$.
Hence, $d^\mathtt{T}\sum_{i=1}^p\tilde\mu_i\nabla^2 g_i(x^*)d=0$.
But $\Lambda(x^*)=(\bar{\mu}+\text{Ker}(J(x^*)^\mathtt{T}))\cap{\mathbb R}^p_+$ for a fixed
Lagrange multiplier $\bar\mu \in\Lambda(x^*)$.
Hence $\mu \in\Lambda(x^*)$ if, and only if, $\mu=\bar\mu+\tilde\mu$, for some $\tilde\mu\in \text{Ker}(J(x^*)^\mathtt{T})$,
with $\bar \mu+ \tilde \mu \geq0$.
It follows that $d^\mathtt{T}\nabla^2_{xx}L(x^*,\mu)d=d^\mathtt{T}\nabla^2_{xx}L(x^*,\bar\mu)d$, as we wanted to show.
Observe that $x^{*}$ is not necessarily a local minimizer; we only require $\Lambda(x^{*})\neq \emptyset$.
\end{proof}
Despite our focus on conditions implying WSOC, the above analysis allows us to obtain conclusions about SSOC,
related with \cite[Theorem 5.1]{baccaritrad}. Recall
that the {\it generalized strict complementary slackness} (GSCS) condition holds at the feasible point $x^{*}$ if there exists, at most, one index $i_{*} \in A(x^{*})$ such that $\mu_{i_{*}}=0$ whenever
$(\lambda, \mu) \in \Lambda(x^{*})$.
\begin{theorem}\label{theo:main2} Assume that $x^*$ is a local minimizer of (\ref{problem})
that fulfills MFCQ. Let $r$ be the rank of $J(x^*)$, and assume that for every $x$ in
some neighborhood of $x^*$ the rank of $J(x)$ is at most $r+1$.
Suppose also that Assumption \ref{svd} and GSCS hold at $x^*$. Then, there is a Lagrange multiplier $(\lambda,\mu)$ such that SSOC holds.
\end{theorem}
\begin{proof}
From \cite[Theorem 5.1]{baccaritrad} or \cite[Definition 3.3 and Lemma 3.3]{abadie2},
it follows that GSCS implies that $C(x^*)$ is a first-order cone,
hence, we can still apply Yuan's Lemma and prove the result in the same lines of Theorem \ref{theo}.
\end{proof}
\section{Final remarks}
In order to analyse limit points of a sequence generated by a second-order algorithm,
one usually relies on WSOC, the stationarity concept based on the critical subspace,
the lineality space of the cone of critical directions.
Most conditions guaranteeing WSOC at local minimizers are based on a constant rank assumption on the Jacobian matrix.
In this paper we developed new tools to deal with the non-constant rank case, by partially solving
a conjecture formulated in \cite{ams2}.
Possible future lines of research include
investigating the full conjecture using generalized notions of derivative. We believe this can be done since under the rank assumption, the so-called ``crossing'' of singular values is controlled, at least when the non-zero ones are simple, which is the main source of non-continuity of singular vectors.
Our approach also opens the path to obtaining new second-order results without assuming MFCQ and/or to developing conditions that ensure SSOC, the second-order stationarity concept based on the true critical cone.
\end{document} |
\begin{document}
\title[Some No-Arbitrage Rules for Converging Asset Prices]{Some No-Arbitrage Rules for Converging Asset Prices under Short-Sales Constraints}
\author{Delia Coculescu}
\address{University of Zurich\\ Department of Banking and Finance, Plattenstrasse 32\\ Z\"{u}rich 8032, Switzerland.}
\email{[email protected]}
\author{Monique Jeanblanc}
\address{Laboratoire de Math\'ematiques et Mod\'elisation d'\'Evry (LaMME), UMR CNRS 8071, Univ Evry, Universit\'e Paris Saclay,
.} \email{[email protected]}
\begin{abstract} Under short sales prohibitions, no free lunch with vanishing risk (NFLVR-S) is known to be equivalent to the existence of an equivalent supermartingale measure for the price processes (Pulido \cite{Pulido14}). For two given price processes, we translate the property (NFLVR-S) in terms of so called structure conditions and we introduce the concept of fundamental supermartingale measure. When a certain condition necessary to the construction of the fundamental martingale measure is not fulfilled, we provide the corresponding arbitrage portfolios. The motivation of our study lies in understanding the particular case of converging prices, i.e., that are going to cross at a bounded random time.
\end{abstract}
\maketitle
\section{Introduction}
In arbitrage-free financial markets, the law of one price simply states that \textit{similar} financial assets, i.e., that have identical payoffs, should be sold at the same price in different locations. There are some particular assumptions about the financial markets that lead to this fundamental result, importantly investors need to be able to observe the prices in the different locations and to sell short the corresponding assets. Also, there should be no transaction costs. Indeed, under these assumptions, any investor is able to construct an arbitrage portfolio consisting in a short position in the (relatively) overpriced asset and a long position in the (relatively) underpriced asset, thus making an immediate profit. This represents the simplest arbitrage strategy one can encounter: not only is it a buy and hold strategy, but additionally, it is model independent, i.e., does not rely upon an underlying model for describing the prices dynamics in time.
Obviously, in case of short sales prohibitions, the above described arbitrage portfolios are impossible to construct, hence similar assets may have differing prices: the rule of one price does not apply. A question arises naturally: How may the differing prices behave as stochastic processes within the limits of no arbitrage with short sales constraints? The aim of this paper is precisely to shed light on this question. For this, we are going to introduce the notion of converging prices, that is, price processes that are expected to ``cross'', i.e., to reach almost surely the same value over some bounded horizon, which is the mathematical description of the similar assets.
We study the probabilistic properties of such processes when one imposes the no free lunch with vanishing risk condition under short sales constraints, abbreviated (NFLVR-S). This condition was introduced by Pulido in \cite{Pulido14}, as the counterpart -when investors are not allowed to short sell- of the no arbitrage paradigm (NFLVR) of Delbaen and Schachermayer (see \cite{DelbScha94} and \cite{DelbScha98}). For the reader's convenience, the definition of (NFLVR-S) is provided in Section \ref{sec::TheModel}.
Based on previous work by Jouini and Kallal \cite{JouiKall95}, Fritelli \cite{Fritelli97}, Pham and Touzi \cite{PhamTouz99}, Napp \cite{Napp03} and Karatzas and Kardaras \cite{KaraKard07}, the paper by Pulido \cite{Pulido14} establishes important properties of price processes under short sale prohibitions namely the equivalence between (NFLVR-S) and the existence of an equivalent supermartingale measure for the price processes.
In the current paper, we shall rather translate the condition (NFLVR-S) in terms of so-called "structure conditions" for two underlying stochastic processes.
In the framework of converging prices, the existence of imperfect and asymmetric information is crucial to justify the formation and persistence in time of the differing prices. This element is integrated in our analysis: we assume that each individual price is formed given some distinct information set (filtration) a priori unrelated with the information set that drives the price formation in a different location, except measurability of the final payoff in both situations. We then analyse the no arbitrage conditions from the perspective of an agent (called the insider) that has access to a global information set, i.e., that comprises the observation of the two differing prices. The insider can trade in both markets, but cannot sell short.
There are many examples of converging prices, the simplest being a future contract and its underlying asset, or the two portfolios arising from the call-put parity (i.e., one consisting of a call option and bonds, the second of a put option and underlying stock). In markets with short sales prohibitions, the call-put parity is not expected to hold in every point in time but we observe the identity of the payoffs at maturity. Other examples of convergence are represented by some portfolios that are commonly used in capital structure arbitrages or the pairs trading. Note however that in these cases the convergence is model-based; in capital structure arbitrages a particular ``structural'' model is assumed to explain the joint evolution of the prices for the different securities with common issuer, while in the pairs trading, the pairs are selected upon a statistical analysis. Nevertheless, assuming that the underlying models are ``correct'', the question remains the same: how to construct the strategies when selling short is not possible?
Finally, our framework applies well to the case of similar derivative contracts that are sold over the counter, and thus differing prices typically arise as a consequence of imperfect information between the different sellers and buyers.
The remainder of the paper is organised as follows: Section
\ref{sec::TheModel} introduces the probabilistic model for the two
converging prices and recalls the no arbitrage framework we adopt in this paper. Section \ref{Section3} establishes the ``structure conditions'' in
Theorem \ref{ThmGenRepr}. In Section \ref{sec::MainResult}, we
derive sufficient conditions for the existence of a
supermartingale measure as well as some necessary conditions. We introduce a probability measure that we call fundamental supermartingale measure and arbitrage portfolios are provided when the fundamental supermartingale
measure does not exist. Section \ref{sec::Examples}
analyses many examples of converging prices. Let us emphasise that
our main results, i.e., Theorem \ref{ThmGenRepr} and Theorem
\ref{MainThm}, are more general: the property of the two prices
to be converging is not used for deriving these results.
\section{A Stochastic Model with two Converging Asset Prices }\label{sec::TheModel}
In this paper, all probabilities and filtrations are defined on a probability space $(\Omega, \mathcal{F},\mathbb{P})$.
We consider two financial assets, possibly traded in different locations (exchanges). Their respective price processes denoted by $X:=(X_t)_{t\geq 0}$ and $Y:=(Y_t)_{t\geq 0}$ are modelled as positive stochastic processes on $(\Omega, \mathcal{F},\mathbb{P})$.
We denote by $\mathbb{F}^X:=(\mathcal{F}^X_t)_{t\geq 0}$ (resp. $\mathbb{F}^Y:=(\mathcal{F}^Y_t)_{t \geq 0}$) the right-continuous $\mathbb{P}$-augmented filtrations
of $X$ (resp. $Y$). For simplicity, we suppose that the spot interest rates are constant and equal to zero, that is, the price processes $X$ and $Y$ are already discounted.
We shall assume that the dynamics of the two prices reflect a (local) equilibrium, namely there exist equivalent martingale measures for each asset individually, when considered as stochastic processes in their own filtration:
\begin{itemize}
\item[(NA-X)] There exists $\mathbb{Q}^X\sim \mathbb{P}$ such that the price process $X$ is an $(\mathbb{F}^X,\mathbb{Q}^X)$-local martingale (in other words, $\mathbb{Q}^X$ is a martingale measure for $X$ in its own filtration).
\item[(NA-Y)] There exists $\mathbb{Q}^Y\sim \mathbb{P}$ such that the price process $Y$ is an $(\mathbb{F}^Y,\mathbb{Q}^Y)$-local martingale.
\end{itemize}
Note that this implies that $X$ and $Y$ are $\mathbb{P}$ semimartingales in their own filtrations.
We shall work with the right-continuous versions for $X$ and $Y$.
Notice that the assumptions above exclude price processes that are
predictable and of finite variation in their own filtration. This
pattern would represent a trivial case to examine, so we do not
lose much by excluding it. However, the pattern (i.e., price
processes that are predictable and of finite variation) can still
appear as we shall consider the price processes in a larger
filtration.
We assume that an investor (called hereafter the insider) is able
to observe the price dynamics in the two locations, so that
his information flow is given by $\mathbb{G}:=(\mathcal{G}_t)_{t\geq 0}$
with
\[
\mathcal{G}_t= \cap _{s>t}\mathcal{F}^X_s\vee \mathcal{F}^Y_s.
\]
Also, the investor has a bounded trading horizon, denoted $T$, which is a $\mathbb{G}$-stopping time.
Many examples that we are considering fit into the following framework:
\begin{Definition}\label{defConv}A couple of financial assets $(X,Y)$ are said to have converging prices if $\inf\{t\in\mathbb R_+ \;|\; X_t=Y_t\}$ is a bounded $\mathbb{G}$-stopping time.
\end{Definition}
When $X$ and $Y$ are converging prices, we shall consider that the insider's horizon is a given point of convergence of the two prices,
i.e., $T$ is such that
\[
\xi:=X_T=Y_T
\] and such that $T$ is a bounded $\mathbb{G}$-stopping time. One can take $T= \inf\{t\in\mathbb R_+\;|\; X_t=Y_t\}$, but such a restriction is not necessary. In some situations the $\mathbb{G}$-stopping time $T$ can be chosen as the maturity of the assets, when the cash flow $\xi$ is paid to the investors that have long positions either in the asset $X$ or $Y$. In this case $T$ should be an $\mathbb{F}^X$ and an $\mathbb{F}^Y$-stopping time (i.e., cash flows are always observable by holders of long positions in the corresponding assets). Another interesting situation is when $T$ is only observed by the insider, hence $T$ is neither an $\mathbb{F}^X$ nor an $\mathbb{F}^Y$-stopping time. Either of the two interpretations are possible here, i.e., we do not require $T$ to be more than a bounded $\mathbb{G}$-stopping time, but remaining fixed through the analysis.
Typically, two converging prices $X$ and $Y$ may follow different paths if the different investors, (namely the investors active in the market for $X$ versus the market for $Y$) have access to different information, in which case $\mathbb{F}^X$ and $\mathbb{F}^Y$ differ, and/or they have different risk attitudes. What we mean by different risk attitudes is the property that the restriction of $\mathbb{Q}^X$ to $\sigma(\xi)$ does not coincide with the restriction of $\mathbb{Q}^Y$ to $\sigma(\xi)$.
Our aim is to analyse the no arbitrage property (NFLVR-S) from the insider's perspective, i.e., when there are prohibitions for the insider to sell short the assets $X$ and $Y$. In other words, we consider that the investor's strategies involve the following positions: long or short in cash ($\pi^C$) and only long positions in $X$
and $Y$ ($\pi^X$ and $\pi^Y$), consequently the value of the investor's portfolio writes:
\be
V^\pi_t:=\pi^C_t+\pi^X _tX_t+\pi^Y_t Y_t,
\ee and, when self financing, we have $dV^\pi_t:= \pi^X _tdX_t+\pi^Y_t dY_t$.
As usual, we impose some admissibility conditions for strategies under (NFLVR-S) in this framework. We refer to Pulido \cite{Pulido14} for more details.
\begin{Definition}\label{DefTradingStrategy}
A trading strategy is a $\mathbb{G}$-predictable process $\pi = (\pi^C,\pi^X,\pi^Y)$. A trading strategy $\pi$ is called an admissible trading strategy under short sales prohibitions for $X$ and $Y$ if:
\begin{itemize}
\item[(i)] $\pi^X\in L(X)$ and $\pi^Y\in L(Y)$ (i.e., $\pi^X$ is integrable with respect to the semimartingale $X$, $\pi^Y$ is integrable with respect to the semimartingale $Y$).
\item[(ii)] The process $V^{\pi}$ is bounded from below.
\item[(iii)] $\pi^X\geq 0$ and $\pi^Y\geq 0$.
\end{itemize}
We denote by $\mathcal A$ the set of admissible trading strategies under short sales restrictions for $X$ and $Y$.
\end{Definition}
In the definition above, the price processes $X$ and $Y$ need to be $\mathbb{G}$-semimartingales; this question is examined in the next section.
We now define the following sets:
\[
\mathcal K:=\{V^\pi_T,\pi\in\mathcal A\}\quad \mathcal C:=\left(\mathcal K-L^0_+(\mathbb{P})\right)\cap L^\infty(\mathbb{P}).
\]where $L^0_+(\mathbb{P})$ is the space of equivalence classes of nonnegative finite random variables, and $L^\infty(\mathbb{P})$ is the space of $\mathbb{P}$-essentially bounded random variables.
No Free Lunch with Vanishing Risk under short sales prohibition (NFLVR-S) is defined as follows: (NFLVR-S) holds if $\bar{\mathcal C} \cap L^0_+(\mathbb{P}) =\{0\}$, where $\bar{\mathcal C}$ is the closure of $\mathcal C$ with respect to the $\|\cdot\|_\infty$ norm in $L^\infty(\mathbb{P})$.
\begin{Theorem}\cite{Pulido14}\label{NFLVR-S}
(NFLVR-S) holds if and only if there exists a probability measure $\widetilde \mathbb{P}$ such that $\widetilde \mathbb{P}\sim\mathbb{P}$ and the processes $X$ and $Y$ are $(\mathbb{G},\widetilde \mathbb{P})$-supermartingales. Such a probability measure is called a supermartingale measure.
\end{Theorem}
Because the condition of no arbitrages in the form of (NFLVR-S)
is equivalent to the existence of a supermartingale measure for
the couple $(X,Y)$ in the filtration $\mathbb{G}$, our aim is to shed light on the properties of
processes $X$ and $Y$ when considered as stochastic processes in
the larger filtration $\mathbb{G}$, under the requirement that there
exists a probability measure $\widetilde \mathbb{P}$ such that $\widetilde \mathbb{P}\sim\mathbb{P}$ and the processes $X$ and $Y$ are $(\mathbb{G},\widetilde \mathbb{P})$-supermartingales.
\section{Structure conditions under (NFLVR-S)}\label{Section3}
We aim to clarify the properties of processes $X$ and $Y$ that admit an equivalent supermartingale measure. Structure conditions for asset prices first appeared in the setting of no arbitrage without short selling constraints, i.e., derived from imposing the existence of a strict martingale density. We refer to F\"ollmer and Schweizer \cite{FoelSchw91}, Ansel and Stricker \cite{AnselStick92}, Schweizer \cite{Schw95} for more details.
We shall carry our analysis in the filtration $\mathbb{G}$ and the interval $[0,T]$, i.e., the insider's information set and the insider's investing horizon. Note however that the stochastic processes $X$ and $Y$ are defined on infinite time horizon and the conditions (NA-X) and (NA-Y) are also supposed to hold on an infinite time horizon.
To begin with, let us introduce some notations that are going to be used in the remainder of the paper:
\begin{notation}
\begin{itemize}
\item[(i)]We write $\langle
Z\rangle$ for the sharp bracket of a semimartingale $Z$ under the
measure $\mathbb{P}$ and in the filtration $\mathbb{G}$. Whenever the underlying
filtration we are considering is not $\mathbb{G}$ and/or the probability
is not $\mathbb{P}$ we shall use explicit notations: for instance
$\langle Z\rangle ^{(\mathbb{F},\mathbb{Q})}$ is the sharp bracket under a
measure $\mathbb{Q}$ and in a filtration $\mathbb{F}$ (implicitly, $Z$ needs to
be an $\mathbb{F}$-semimartingale).
\item[(ii)] The expectation operator under the probability $\mathbb{P}$ is written $\mathbb{E}$; whenever the probability measure is a different one, we shall use explicit notations, i.e., $\mathbb{E}^\mathbb{Q}$ is the expectation under the probability measure $\mathbb{Q}$.
\item[(iii)] $\mathcal
P(\mathbb{F})$ is the class of $\mathbb{F}$-predictable processes, where $\mathbb{F}$ is a
given filtration.
\item[(iv)] $\mathcal S(M)$ is the stable subset of $(\mathbb{G},\mathbb{P})$-local martingales generated by $M$, where $M$ is a $(\mathbb{G},\mathbb{P})$-locally square integrable martingale ; $\mathcal S(M)^ \bot$ is the set of $(\mathbb{G},\mathbb{P})$-locally square integrable martingales that are strongly orthogonal to $M$.
\item[(v)] $\mathcal E (Z)$ denotes the Dol\'eans-Dade exponential of a semimartingale $Z$.
\end{itemize}
\end{notation}
The following result is a more precise formulation of Theorem \ref{NFLVR-S} in the particular case of converging prices:
\bl
Suppose that $(X,Y)$ are converging prices. Then, the prices $(X,Y)$ satisfy (NFLVR-S) if and only if there exists a probability measure $\widetilde \mathbb{P}$ such that $\widetilde \mathbb{P}\sim\mathbb{P}$ and:
\begin{align*}
X &=\widetilde M +\widetilde Z^X \\
Y &=\widetilde M +\widetilde Z^Y,
\end{align*}where $\widetilde M_t:=\mathbb{E}^{\widetilde \mathbb{P}}[\xi |\mathcal{G}_t]$ and $\widetilde Z^X$ and $\widetilde Z^Y$ are two $(\mathbb{G},\widetilde \mathbb{P})$-potentials (i.e., are positive supermartingales satisfying $\widetilde Z^X_T=\widetilde Z^Y_T=0$).
\el
\noindent {\it Proof. $\, $} (NFLVR-S) holds if and only if a supermartingale measure
$\widetilde \mathbb{P}$ exists. But then $X$ and $Y$ are uniformly
integrable $\widetilde \mathbb{P}$-supermartingales and the expressions
follow from the Riesz decomposition and the terminal
condition $X_T=Y_T$. For more details, see \cite{Meyer}-VI-11 or, alternatively, \cite{cdellacherie} T12 p. 97.
$\Box$ \vskip 5 pt
Now, we investigate the structure of the two price processes under the reference probability
$\mathbb{P}$, which is arbitrarily chosen.
\bp\label{ThmSemi}
Assume the prices $(X,Y)$ satisfy (NFLVR-S). If $\mathbb{E}[X_T]<\infty$ and $\mathbb{E}[Y_T]<\infty$, then $(X,Y)$ are $(\mathbb{G},\mathbb{P})$-special semimartingales on the interval $[0,T]$.
\ep
\noindent {\it Proof. $\, $} If $(X,Y)$ satisfy (NFLVR-S), then there exists a
probability measure equivalent to $\mathbb{P}$, say $\widetilde \mathbb{P}$,
such that $(X,Y)$ are $(\mathbb{G},\widetilde \mathbb{P})$-supermartingales.
The set of semimartingales being stable under equivalent changes
of the probability measure (Girsanov-Meyer theorem), it follows
that $(X,Y)$ are $(\mathbb{G},\mathbb{P})$-semimartingales. We now show that $X$ is a special $(\mathbb{G},\mathbb{P})$-semimartingale; the reasoning for $Y$ is identical.
First we prove the property in the filtration $\mathbb{F}^X$ and then in the larger filtration $\mathbb{G}$.
We denote $Z_k:=\frac{d\mathbb{P}}{d\mathbb{Q}^X}|_{\mathcal{F}^X_k}$ (with $k$
such that $T\leq k$ $a.s.$) and $Z_t= \mathbb{E}(Z_k \vert \mathcal{F}^X_t)$. We have:
\[
\mathbb{E}^{\mathbb{Q}^X}\left[[X,Z]_T\right]\leq \mathbb{E}^{\mathbb{Q}^X}\left[X_T Z_T\right ]-X_0+\mathbb{E}^{\mathbb{Q}^X}\left[N^*_k\right ] =\mathbb{E}^{\mathbb{P}}[X_T]-X_0+ \mathbb{E}^{\mathbb{Q}^X}\left[N^*_k\right ],
\]
with $N^*_k:=\sup_{s\in[0,k]}|N_s|$ where $N=-\int Z_-dX-\int
X_-dZ$. The process $N$ is a $(\mathbb{F}^X,\mathbb{Q}^X)$-local martingale ($X$ and $Z$ being
$(\mathbb{F}^X,\mathbb{Q}^X)$-martingales). Therefore $N^*$ is locally
integrable under $\mathbb{Q}^X$ and so is $[X,Z]$, due to the inequality
above.
Hence the $(\mathbb{F}^X,\mathbb{Q}^X)$-predictable bracket for $X$ and $Z$, $\langle X,Z\rangle^{(\mathbb{F}^X,\mathbb{Q}^X)}$ exists. It follows by Girsanov's theorem that:
\[
X_t = m^\mathbb{P}_t +\int_0^t\frac{1}{Z_{s-}}d\langle X,Z\rangle^{(\mathbb{F}^X,\mathbb{Q}^X)}_s
\] where $m^\mathbb{P}$ is an $(\mathbb{F}^X,\mathbb{P})$-local martingale. Hence, the process $X$ is a special semimartingale in $\mathbb{F}^X$. We examine the situation in the larger filtration $\mathbb{G}$ and we remark that it is sufficient to show that $m^\mathbb{P}$ is a special $(\mathbb{G},\mathbb{P})$-semimartingale.
The process $m^{\mathbb{P}}$ is a $(\mathbb{G},\mathbb{P})$-semimartingale ($X$ being one). Moreover, $\sup_{s\leq t}|m^\mathbb{P}_s|$ is $(\mathbb{F}^X,\mathbb{P})$-locally integrable ($m^\mathbb{P}$ is a $(\mathbb{F}^X,\mathbb{P})$-local martingale, hence we use Theorem 34 p.130 in \cite{protter}) and therefore it is also $(\mathbb{G},\mathbb{P})$-locally integrable ($\mathbb{F}^X$-stopping times are $\mathbb{G}$-stopping times). This in turn implies that $m^\mathbb{P}$ is a $(\mathbb{G},\mathbb{P})$-special semimartingale (Theorem 33 p.130 in \cite{protter}) which proves the result.
$\Box$ \vskip 5 pt
\bethe\label{ThmGenRepr} Assume that $(X,Y)$ satisfy
(NFLVR-S). If $\mathbb{E}[X_T]<\infty$, then there exist $J^X$ and $w^X$ both being
in $ \mathcal P(\mathbb{G})$ and a $(\mathbb{G},\mathbb{P})$-local martingale $M^X$ with $M^X_0=0$, such that for any $t\leq T$:
\begin{align}\label{Xrepr}
X_t&=X_0+J^X_t+\int_0^t w^X_ud\langle M^X \rangle_u+M^X_t\,.
\end{align}
If $X$ is $\mathbb{F}^X$-predictable, the process $J^X$ is null.
In general, the process $J^X$ satisfies $J^X_0 =0$, is decreasing, and $dJ^X$ is singular with respect to $d\langle M^X\rangle$.
\eethe
\noindent {\it Proof. $\, $}
In view of Proposition \ref{ThmSemi}, there exists a $(\mathbb{G},\mathbb{P})$-local martingale $M^X$ and a finite variation, $\mathbb{G}$-predictable process $V^X$, such that:
\begin{align*}\label{Yrepr}
X_t&=X_0+V^X_t+M^X_t.
\end{align*}
We can write $V^X_t=\int_0^t w^X_ud\langle M^X\rangle_u+J^X_t$,
where $dJ^X$ is a signed measure that is singular with respect to
$d\langle M^X\rangle$ (i.e., the Lebesgue decomposition of
$dV^X$ with respect to $d\langle M^X\rangle$; see Proposition \ref{AnnexA3} in Appendix \ref{A}).
To show that $J^X$ is a decreasing process we use
Girsanov's theorem and Theorem \ref{diffmeasures} in Appendix \ref{A}. More precisely, let $\tilde \mathbb{P}$ be an equivalent $\mathbb{G}$-supermartingale measure for $X$. By Girsanov's theorem the decomposition of $X$ is given by: $X =X_0+(J^X +\tilde D^X )+\tilde M^X $, where $\tilde M^X$ is a $(\mathbb{G},\tilde\mathbb{P})$-martingale and:
\begin{itemize}
\item[(i)] $d\tilde D^X\ll d\langle M^X\rangle$. Hence $d\tilde D^X$ and $dJ^X$ are orthogonal.
\item[(ii)] the process $ J^X +\tilde D^X $ is decreasing.
\end{itemize}The two above points imply that both $J^X$ and $\tilde D^X$ are decreasing (Theorem \ref{diffmeasures} (b)), hence the statement.
Let us suppose that $X$ is $\mathbb{F}^X$-predictable, hence continuous
(because $X$ is also an $(\mathbb{F}^X,\mathbb{Q}^X)$-local martingale). Then, $[X]=
\langle M^X\rangle$ and the process $X$ has the same constancy
intervals as $\langle M^X\rangle$. It follows that $J^X\equiv
J^X_0$, i.e., is constantly null.
$\Box$ \vskip 5 pt
We emphasise that from Theorem \ref{ThmGenRepr}, (NFLVR-S) and $\mathbb{E}[Y_T]<\infty$ imply a decomposition for $Y$ (with obvious notations):
\begin{equation}\label{Yrepr}
Y_t=Y_0+J^Y_t+\int_0^t w^Y_ud\langle M^Y \rangle_u+M^Y_t.
\end{equation}
We consider below two examples of converging prices.
\begin{exam} Let $B^1$ and $B^2$ be two independent
$\mathbb{P}$-Brownian motions with respective natural filtrations $\mathbb{F}^1$
and $\mathbb{F}^2$; consider that $\theta^1$ is a $\mathbb{F}^1$ stopping
time and $\theta^2$ is a $\mathbb{F}^2$ stopping time (hence, they
are predictable), both considered to have absolutely continuous distribution functions denoted $C^1$ and $C^2$, and satisfying $C^1(T)<1,C^2(T)<1$.
The following payoff is scheduled at a fixed maturity date $T$:
\[
\xi=\ind_{\{\theta^1> T\}}+\ind_{\{\theta^2\leq T\}}.
\]
We consider the following distinct information sets:
\begin{align*}
\mathcal{G}^1_t:= &\mathcal{F}^2_t\vee\sigma(t\wedge \theta^1),\\
\mathcal{G}^2_t:= &\mathcal{F}^1_t\vee\sigma(t\wedge \theta^2).
\end{align*} and we assume that the corresponding prices are $X_t=\mathbb{P}(\xi|\mathcal{G}^1_t)$ and $Y_t=\mathbb{P}(\xi|\mathcal{G}^2_t)$.
We have the following $\mathbb{G}^1$ martingales, $t\leq T$:
\[
\mathbb{P}(\theta^1>T|\mathcal{G}^1_t)=\ind_{\{\theta^1>t\}}\frac{\mathbb{P}(\theta^1>T)}{\mathbb{P}(\theta^1>t)}=\ind_{\{\theta^1>t\}}\frac{1-C^1(T)}{1-C^1(t)}
\] (see Proposition 1 in \cite{ejy}), and:
\[
\mathbb{P}(\theta^2\leq T|\mathcal{G}^1_t)=\mathbb{P}(\theta^2\leq T|\mathcal{F}^2_t),
\] (as $\theta^2$ is independent from $\theta^1$) i.e., the last process is a Brownian martingale.
We deduce that the $\mathbb{G}^1$ adapted price for the claim $\xi$ decomposes as follows:
\begin{align*}
X_t=& X_0-\int_{0}^t \frac{1-C^1(T)}{1-C^1(s)}d\ind_{\{\theta^1\leq s\}} +\int_0^{t\wedge \theta^1}(1-C^1(T))d(1-C^1(s))^{-1} +M^X_t,
\end{align*}
where $M^X=\mathbb{P}(\theta^2\leq T|\mathcal{F}^2_\cdot)-\mathbb{P}(\theta^2\leq T)$.
Similar arguments lead to the following $\mathbb{G}^2$ adapted price:
\begin{align*}
Y_t=&Y_0+\int_{0}^t \frac{1-C^2(T)}{1-C^2(s)}d\ind_{\{\theta^2\leq s\}} -\int_0^{t\wedge \theta^2}(1-C^2(T))d(1-C^2(s))^{-1} +M^Y_t,
\end{align*}
with $M^Y=\mathbb{P}(\theta^1> T|\mathcal{F}^1_\cdot)-\mathbb{P}(\theta^1> T)$.
One can check that: $\mathbb{F}^X=\mathbb{G}^1$, $\mathbb{F}^Y=\mathbb{G}^2$, while the insider filtration is $\mathbb{G}=\mathbb{F}^1\vee\mathbb{F}^2$ (i.e., the natural filtration of $(B^1,B^2)$). It follows that $M^X$ and $M^Y$ are also $\mathbb{G}$ martingales. They are Brownian martingales, from the discussion above. Therefore $\langle M^X\rangle$ and $\langle M^Y\rangle$ are absolutely continuous with respect to the Lebesgue measure.
We deduce that $X$ decomposes as in (\ref{Xrepr}) and $Y$ as in (\ref{Yrepr}), with the processes
\begin{align*}
J^X_t&:=-\int_0^t \frac{1-C^1(T)}{1-C^1(s)}d\ind_{\{\theta^1\leq s\}} \\
J^Y_t&:=\int_0^t \frac{1-C^2(T)}{1-C^2(s)}d\ind_{\{\theta^2\leq s\}}
\end{align*} being $\mathbb{G}$-predictable. Because $J^Y$ is an increasing process, we conclude by Theorem \ref{ThmGenRepr} that the price process $Y$ does not respect (NFLVR-S) for the insider. Indeed, $\theta^2$ being a predictable $\mathbb{G}$ stopping time, the insider can buy the asset $Y$ at time $\theta^2_-$ and resell it at time $\theta^2$ thus making an arbitrage profit of one monetary unit.
\end{exam}
\begin{exam}
Let us consider the hitting time by a Brownian motion $B$ of a positive random variable $D$ independent from the Brownian motion:
\[
T^D=\inf\{t\geq 0\;|\; B_t\geq D\}.
\]
In the filtration $\mathbb{F}$ given by $\mathcal{F}_t:=\sigma(T^D\wedge s,s\leq t)$ we have that
$T^D$ is a totally inaccessible $\mathbb{F}$-stopping time with corresponding $\mathbb{F}$-intensity process:
\[
c(t)=\frac{\ind_{\{T^D>t \}}}{\mathbb{P}(T^D>t)}\int_0^\infty f_x(t)dF_D(x),
\]
where $F_D(x)$ is the distribution function of $D$ and $f_x(t)$ is the density function of the hitting time $T^x$. We denote $H_t:=\ind_{\{T^D\leq t\}}- \int_0^{t}c(s)ds$, which is an $\mathbb{F}$-martingale.
Let us assume that the price process $X$ is given by the positive local martingale $X=X_0\mathcal E(-H)$, that is, it satisfies:
\[
X_t =X_0-\int_0^t X_{s-} dH_s.
\]
One can notice that $\mathbb{F}^X=\mathbb{F}$. For simplicity we do not introduce the second asset $Y$ and we rather concentrate on the dynamics of $X$ in the larger filtration
$\mathbb{G}$ given by $\mathcal{G}_t:=\mathcal{F}^X_t\vee \sigma(B_s, s\leq t)$.
We denote $\Lambda^\mathbb{G}$ the $\mathbb{G}$-compensator of $T^D$, so that the process: $H^\mathbb{G}_t:=\ind_{\{T^D\leq t\}}-\Lambda^\mathbb{G}_t$ is a martingale. It can be shown (using \cite{ejy} and the fact that $\sigma(B_s, s\leq t)$ is immersed in $\mathbb{G}$), that $\Lambda^\mathbb{G}$ is absolutely continuous with respect to
the measure generated by the running supremum of the Brownian motion:
\[
\Lambda^\mathbb{G}_t=-\int_0^{t\wedge T^D} \frac{d\mathbb{P}(T^D>s|\mathcal{F}^B_s)}{\mathbb{P}(T^D>s|\mathcal{F}^B_s)}=\int_0^{t\wedge T^D} \frac{dF_D(S_s)}{1-F_D(S_s)}= -\ln \left(1-F_D(S_{t\wedge T^D})\right) ,
\]
where $\mathbb{F}^B$ is the Brownian filtration and $S$ is the running supremum of $B$.
The $\mathbb{G}$ decomposition of $X$ writes, using that $H=H^\mathbb{G}-\int c(s)ds+\Lambda ^\mathbb{G}$:
\[
X_t =X_0+\left(\int_0^tX_sc(s)ds-\int_0^t X_sd\Lambda^\mathbb{G}_s\right)- \int_0^t
X_{s-} dH^\mathbb{G}_s.
\]
Using Theorem \ref{ThmGenRepr} we identify $M^X_t=- \int_0^t
X_{s-} dH^\mathbb{G}_s$. We have shown above that $d\langle M^X\rangle$ is absolutely continuous with respect to $dS$. Therefore,
$J^X_t=\int_0^tc(s)ds$, as the Lebesgue measure is orthogonal with respect to the $dS$. Because $J^X$ is increasing, from Theorem \ref{ThmGenRepr} we conclude that
there are arbitrage opportunities, in the sense that
(NFLVR-S) fails. Here again, an arbitrage strategy is easy to
implement by the $\mathbb{G}$-informed investor: buy the asset $X$ at any
time before $T^D$ when the Brownian motion is strictly below its
running maximum and sell it any time before it reaches its maximum
level again. On these
intervals, the price process $X$ is strictly increasing; the arbitrage
strategy described performs a strictly positive profit proportional to the
holding period of the asset $X$.
\end{exam}
\section{A Result on the Existence of a Supermartingale Measure}\label{sec::MainResult}
In this section we investigate the existence of a specific
$\mathbb{G}$-supermartingale measure for two price processes $X$ and $Y$,
that we shall call fundamental supermartingale measure for $(X,Y)$. This object will play
an important role, as systematic arbitrage opportunities occur
when this supermartingale measure cannot be constructed.
It is convenient to take as underlying probability measure a specific $\mathbb{G}$-supermartingale measure for $X$, that for simplicity we still call $\mathbb{P}$, so that
in the filtered probability space $(\Omega, \mathcal{G},\mathbb{G},\mathbb{P})$, the two assets have the following representations:
\begin{align}\label{PX}
X&=X_0+J^X+M^X,\\\label{PY}
Y&=Y_0+V^Y+M^Y,
\end{align}
with $M^X$ and $M^Y$ being $(\mathbb{G},\mathbb{P})$-local martingales that are locally square integrable with $M^X_0=M^Y_0=0$ and such that the process $V^Y$ is a finite variation, $\mathbb{G}$-predictable process.
The process $J^X$ is considered to be decreasing and the measure $dJ^X$ is orthogonal to $d\langle M^X \rangle$.
Notice that the decomposition in (\ref{PX}) is not the same as
in (\ref{Xrepr}). In the previous section $\mathbb{P}$ was an arbitrary probability measure (equivalent to $\mathbb{Q}^X$ and $\mathbb{Q}^Y$). In this section $\mathbb{P}$ is a particular equivalent supermartingale measure for $X$, such that $X$ decomposes precisely as in (\ref{PX}). The existence of such a supermartingale measure for $X$ -that here is assumed- is a first step to the construction of the
fundamental supermartingale measure for the couple $(X,Y)$.
We decompose the martingale $M^Y$ as:
\[
M^Y =M^1 +M^2
\]
with $M^1\in \mathcal S(M^X)$ and $M^2\in \mathcal S(M^X)^\bot$ so that we can write $M^1$ as:
\be\label{M1}
M^1_t=\int_0^t h_u dM^X_u,
\ee
for some process $h\in\mathcal P(\mathbb{G})$, which is assumed to have right-continuous sample paths.
We shall need the following additional decompositions:
\begin{itemize}
\item The predictable, finite variation part of $Y$ stated in (\ref{PY}) decomposes as:
\be\label{V=A-a}
V^Y =A -a ,
\ee
where $A$ and $a$ are increasing processes which do not increase on the
same sets (that is $dA$ and $da$ are orthogonal measures)\footnote{In order to preserve the compatibility with the decomposition result in Theorem \ref{ThmGenRepr}, $dA$ is assumed absolutely continuous with respect to $d\langle M^Y\rangle$. This property will solely be used for constructing an arbitrage portfolio in Lemma \ref{Arbitrage}. }\label{foot}, and $a_0=A_0=0$.
\item Moreover, the process $A$ can always (and uniquely) be written as sum of
two other increasing processes:
\[
A =A^{1} +A^{2} ,
\] where $dA^{1} \ll h^+ d\langle M^X \rangle $ and $dA^{2} \bot h^+ d\langle M^X \rangle $. Therefore, there exists a process $a^1\geq 0$ such that
\[
A^1_t= \int_ 0^t a^1_u h^+_u d\langle M^X\rangle _u
\] (the nonnegativity of $a^1$ comes from the fact that $A^1$ is increasing) and
$\tilde a^{1} $ so that $\tilde a^1 =\tilde a^1 \ind_{ {\{h> 0 \}}}= \frac{a^1}{h} \ind_{ {\{h> 0 \}}}\geq 0$ and
\[
A^1_t=\int_0^t \tilde a^1_ud\langle M^1\rangle _u = \int_ 0^t \tilde a^1_u (h_u)^2 d\langle M^X\rangle _u.
\]
\end{itemize}
We now state our main result of this section:
\bethe\label{MainThm}
Assume that $\tilde a^{1}\Delta M^1<1$ holds almost surely. We consider the following
conditions:
\begin{itemize}
\item[(C1)] $dA^{2} \ll d\langle M^2 \rangle $. We denote $\tilde a^{2} $ the density of $dA^{2} $ with respect to $d\langle M^2\rangle $.
\item[(C2)] $\tilde a^{2}\Delta M^2<1$.
\item[(C3)] $\mathbb{E}\left [D^*_T\right ]=1$, where:
\be
D^*_t:= \mathcal E_t\left( -\int_0^\cdot \tilde a^1_sdM^1_s \right)
\mathcal E_t\left( -\int_0^\cdot \tilde a^{2}_sdM^2_s\right),\quad t\in[0,T].
\ee
\end{itemize}
If (C1)-(C3) are satisfied, the price processes $(X,Y)$ satisfy (NFLVR-S). Additionally, the probability measure $\mathbb{P}^*$ defined as:
\be\label{MSM}
\frac{d\mathbb{P}^*}{d\mathbb{P}}\Big|_{\mathcal{G}_t}: =D^*_t,\quad t\in[0,T].
\ee
is a supermartingale measure for $(X,Y)$ that we call the fundamental supermartingale measure for $(X,Y)$.
Conversely, if the price processes $(X,Y)$ satisfy (NFLVR-S), then (C1) and (C2) hold true, so that the process $D^*$ is a strictly positive local martingale.
\eethe
Before proving the theorem, let us give some simple examples in order to illustrate the many processes involved, in particular the different decompositions of the process $V^Y$. Note that we do not consider below that $X$ and $Y$ are converging prices; examples with converging prices are provided in Section \ref{sec::Examples}.
\begin{exam}
Suppose that $B^1$ and $B^2$ are two independent Brownian motions and
\begin{align*}
X_t&=X_0+B^1_{t\wedge \theta}\quad\text{ with }\theta:=\inf\{t\in[0,T], X_t=0\}, T\text{ fixed}\\
Y_t&=Y_0+\int_0^t F_sds+\int_0^tH_s dB^1_s+\int_0^tG_sdB^2_s, t\in[0,T],
\end{align*}
with $H$ and $G$ some predictable processes, that for simplicity we assume bounded. Let us identify the key processes introduced previously in this section.
We have $h_t=H_t$ and: $\langle M^1 \rangle _t=\int_0^{t\wedge \theta} (H_s)^2ds$; $\langle M^2 \rangle _t=\int_0^t \left[(H_s)^2\ind_{\{\theta\leq s\}} +(G_s)^2 \right]ds$. Moreover,
the process $A_t= \int_0^t (F_s)^+ ds $ decomposes $A=A^1+A^2$ with:
\begin{align*}
A^1_t&=\int_0^{t\wedge \theta} \ind_{\{H_s>0\}}(F_s) ^+ds=\int_0^t\tilde a^1_sd\langle M^1\rangle_s, & \text{where}\quad \tilde a^1_t &=\frac{ \ind_{\{H_t>0\}}(F_t)^+}{(H_t)^2}\\
A^2_t&=\int_0^t \ind_{\{H_s\leq 0\}\cup \{\theta\leq s\}}(F_s)^+ds.
\end{align*}The existence of the density process $\tilde a^2$ is not guaranteed. The absolute continuity condition (C1) in the Theorem \ref{MainThm} becomes:
The process $G$ is non null on the set:
\[
\{ (t,\omega) |\theta(\omega)>t, H_t(\omega)\leq 0,F_t(\omega)>0\} \cup \{ (t,\omega) |\theta(\omega)\leq t, H_t(\omega)= 0,F_t(\omega)>0\}.
\]
When this is the case, we have $A^2_t=\int_0^t\tilde a^2_sd\langle M^2\rangle_s$ with
\[
\tilde a^2_t= \ind_{\{\theta> t\}}\frac{\ind_{\{H_t\leq 0\}}(F_t)^+}{(G_t)^2}+\ind_{\{\theta \leq t\}}\frac{(F_t)^+}{(H_t)^2+(G_t)^2}
\]
and the following process
\[
D^*:=\mathcal E \left(-\int_0^{\theta\wedge \cdot} \ind_{\{H_s> 0\}}\frac{(F_s)^+}{H_s} dB^1_s \right)\mathcal E \left(-\int_0^\cdot \tilde a^2_s dM^2_s \right).
\] is the candidate for the density of the fundamental supermartingale measure.
The theorem then states that there exists a super-martingale measure if (the other conditions being fulfilled) $\mathbb{E}\left[D^*_T\right]=1$.
\end{exam}
\begin{exam}Another simple example is the one where $M^2\equiv 0$. In this case the theorem simply says that $A$ should not increase on the sets where $d\langle X,Y\rangle< 0$, otherwise (NFLVR-S) does not hold. See also Subsection \ref{Example1}.
\end{exam}
\begin{exam} If the process $ \langle X,Y\rangle$ is strictly increasing, then $A^2\equiv 0$ and only the condition (C3): $\mathbb{E}\left[\mathcal E_T\left( -\int_0^\cdot \tilde a^1_sdM^1_s \right)\right]=1$ needs to be checked. However, if this is not fulfilled, we cannot in general conclude that (NFLVR-S) fails, as (C3) is not a necessary condition.
\end{exam}
\noindent {\it Proof. $\, $} \textit{(Proof of the Theorem \ref{MainThm})}
"$\Leftarrow$" Condition (C1) ensures the existence of a process
$\tilde a^2$, such that:
\[
A^2_t=\int_0^t \tilde a^2_ud\langle M^2\rangle _u
\]and $\tilde a^2$ is nonnegative, due to the increasing property
of $A^2$. Condition (C3) implies that the process $D^*$ is a
martingale, while condition (C2) ensures that it is strictly
positive (indeed, if a semimartingale $H$ satisfies $\Delta H>-1$,
then the stochastic exponential process $\mathcal E (H)$ is
strictly positive. If we take $H_t:=-\int_0^t \tilde a^{2}_s
dM^2_s$, we get that $\Delta H=-\tilde a^{2}\Delta M^2$; the corresponding
condition for $\tilde a^{1}$ was already assumed to hold).
We define:
\[
\frac{d\mathbb{P}^*}{d\mathbb{P}}\Big|_{\mathcal{G}_T}:=D^*_T
\]
which is indeed an equivalent probability measure. It is easy to
check that it is a supermartingale measure: indeed, under $\mathbb{P}^*$,
$$dX_t=dJ_t^X- \tilde a^1_t h_t d \langle M^X\rangle _t+ dm^*_t =dJ^X_t-\tilde a^1_t (h_t)^+ d \langle M^X\rangle _t+ dm^*_t
$$ where $m^*$ is a
$\mathbb{P}^*$-martingale. The processes $J^X$ and $-\int \tilde a^1_s(h)^+_s d \langle M^X\rangle_s$ being decreasing, $X$ is a supermartingale under $\mathbb{P}^*$. Also:
$$dY_t= dV^Y_t+dM^Y_t= dA^1_t+dA^2_t-da_t +dM^* _t- \tilde a^1_t (h_t)^2 d \langle M^X\rangle _t- \tilde a^2_t d \langle M^2\rangle _t= dM^*_t -da_t$$
where $M^*$ is a $\mathbb{P}^*$-martingale.
"$\Rightarrow$" We assume that there exists an equivalent
supermartingale measure, that we denote $\widetilde \mathbb{P}$. Without loss of
generality, the density process has the representation
\be\label{measuretilde}
\frac{d\widetilde \mathbb{P}}{d\mathbb{P}}\Big |_{\mathcal{G}_t}=\mathcal E(-L)_t,
\ee where $L$ can be decomposed as:
\[
L_t=\int_0^t\ell^1_udM^1_u+\int_0^t\ell^2_udM^2_u+U_t.
\]with $U$ a local martingale orthogonal to both $M^1$ and $M^2$.
We use the notation $\{dA\neq 0\}$ for the support of the measure $dA (\omega)$.
The processes $X$ and $Y$ are $\widetilde \mathbb{P}$-supermartingales; therefore we need to have
simultaneously:
\begin{enumerate}
\item[(i)] $(\langle M^X,L\rangle_t, t\in [0,T])$ is an increasing process;
\item[(ii)] $\left (\int_0^t\ind_{\{dA\neq 0\}}d\langle
M^Y,L\rangle_u - A_t,t\in[0,T]\right )$ is a decreasing process.
\end{enumerate}
Condition (i) is obtained as follows. The process $X$ is a $\widetilde \mathbb{P}$-supermartingale if and only if $J^X -\langle M^X, L\rangle$ is a decreasing process. But $J^X$ is decreasing and $d J^X$ is orthogonal to $d \langle M^X\rangle$, therefore the condition (i) appears as necessary and sufficient for $X$ to be a $\widetilde \mathbb{P}$-supermartingale.
Also, some clarifications concerning the condition (ii) above. The process $Y$ is a $\widetilde \mathbb{P}$-supermartingale if and only if $ V^Y -\langle M^Y,L\rangle $ is a decreasing process. But $ V^Y -\langle M^Y,L\rangle $ is decreasing if and only if the two processes $(A_t-\int_0^t\ind_{\{dA\neq 0\}}d\langle M^Y,L\rangle_u,t\in[0,T])$ and $(-a_t-\int_0^t\ind_{\{da\neq 0\}}d\langle M^Y,L\rangle_u,t\in[0,T])$ are decreasing. However, the last condition is not going to be exploited here.
From condition (i) above, we obtain that necessarily the process $ h\ell^1 $ is nonnegative. In particular on the set $\{h<0\}$ the process $\ell^1$ has negative or null values only.
Let us now analyze condition (ii). For simplicity, we denote:
$\tilde \ell ^1_t:= \ell^1_t\ind_{\{dA\neq 0\}}$ and $\tilde \ell^2_t:=
\ell^2_t\ind_{\{dA\neq 0\}}$. From the above observation regarding $\ell^1$, the process $\tilde \ell^1$ satisfies:
\begin{equation}\label{tildel1}
\tilde \ell^1\ind_{\{h<0\}}\leq 0.
\end{equation}
We recall that the process $ A $
decomposes as $\int_0^t \tilde a^1_ud\langle M^1\rangle_u +A^2_t$,
with $\tilde a^1$ satisfying $\tilde a^1 =\tilde a^1 \ind_{\{h > 0\}}$ and hence:
\begin{align*}
&\left(\int_0^t\ind_{\{dA\neq 0\}}d\langle M^Y,L\rangle_u-A_t\right)=\\
&=\int_0^t (\tilde \ell^1_u-\tilde a^1_u)\ind_{\{h_u> 0\}}d\langle M^1\rangle_u+\int_0^t \tilde
\ell^2_ud\langle M^2\rangle_u-\left (A^2_t- \int_0^t \tilde \ell^1_u\ind_{\{h_u\leq 0\}}d\langle M^1\rangle_u \right).
\end{align*}
The process above should be increasing. Because both
processes $ A^2 $ and $ -\int_0^ \cdot \tilde
\ell ^1_u\ind_{\{h_u\leq 0\}}d\langle M^1\rangle_u = -\int_0^ \cdot \tilde
\ell ^1_u\ind_{\{h_u< 0\}}d\langle M^1\rangle_u $ are increasing (see (\ref{tildel1})) and they
do not increase (i.e., they stay constant) on the set $\{h_t>
0\}$, it follows that the process:
\begin{equation}\label{decr}
\int_0^\cdot \ind_{\{h_u\leq 0\}} \tilde \ell^2_ud\langle M^2\rangle_u-C
\end{equation}
needs to be increasing, where $C_t:=A^2_t-\int_0^t \tilde \ell^1_u\ind_{\{h_u\leq 0\}}d\langle M^1\rangle_u$ is increasing. It follows from Theorem \ref{diffmeasures}
in the Appendix A that $C$ is absolutely continuous with respect
to $d\langle M^2\rangle $. Because $C$ is the sum of two increasing processes, then each term should be absolutely continuous with respect to $d\langle M^2\rangle $, that is:
\begin{equation}\label{ell1bis}
\int_0^t \tilde \ell^1_u\ind _{\{h_u\leq0\}}d\langle M^1\rangle_u=\int_0^t
\tilde \ell^1_u\ind_{\{h_u\leq0\}}e_u d\langle M^2\rangle_u
\end{equation}
for some nonnegative process $(e_t)$, and
\[
A^2_t=\int_0^t\tilde a^2_ud\langle M^2\rangle _u
\]
for a nonnegative process $ \tilde a^2 = \tilde a^2 \ind_{\{h \leq0\}}$.
It follows that the condition (C1) in the theorem must hold. In particular, the local martingale $D^*$ exists.
It remains to show that (C2) holds as well, a property that triggers the strict positivity of the local martingale $D^*$. Below we show that (C2) is a consequence of the strict positivity of the local martingale $ \mathcal E(-L) $ in (\ref{measuretilde}). We notice first that, the process in (\ref{decr}) being increasing:
\[
(\tilde a^2_t- \tilde \ell^1_te_t-\tilde \ell^2_t)\ind_{\{h_t\leq0\}}\leq 0,
\]
and therefore:
\begin{equation}\label{ineq1}
0\leq \tilde a^2_t\leq ( \tilde \ell^1_te_t+\tilde \ell^2_t)\ind _{\{h_t\leq 0\}}\leq \tilde \ell^2_t\ind_{\{h_t\leq0\}}.
\end{equation}
To obtain the last inequality above, we use $\tilde \ell^1e\ind _{\{h\leq 0\}}\leq0$ ($e$ being a positive process). Indeed: $\ind_{\{h<0\}}\tilde \ell^1\leq 0$ as in (\ref{tildel1}), and on the set $\{h=0\}$ we have $\ind_{\{h=0\}}d\langle M^1\rangle =0$; therefore, using the equality (\ref{ell1bis}), the process $\ind_{\{h=0\}}\tilde \ell^1 e =0$.
As the process $ \mathcal E(-L) $ in (\ref{measuretilde}) is
strictly positive, and from the orthogonality of $M^1$
and $M^2$, it follows that we must have: $-\ell ^{1}\Delta M^1>-1$ and $-\ell ^{2}\Delta M^2>-1$. In particular, the last inequality holds on the set $\{\tilde a^2 > 0\}\cap\{h \leq 0\}$ (notice that on this set we have $\ell ^2>0$, which follows from (\ref{ineq1})). Then, the inequalities in (\ref{ineq1}) ensure that
\[
-\tilde a^{2}\Delta M^2>-1
\]
Indeed, (\ref{ineq1}) implies that $ -\tilde a^2 \geq -\tilde \ell^2 \ind_{\{h \leq 0\}}$, hence, if $\Delta M^2 >0$, one has $ -\tilde a^2 \Delta M^2 \geq -\tilde \ell^2 \ind_{\{h \leq0\}}\Delta M^2 \geq -\ind_{\{h \leq 0\}}\geq -1$. If $\Delta M^2 <0$, one has $-\tilde a^2 \Delta M^2 \geq 0>-1$.
Therefore the condition (C2) in the theorem holds as well.
$\Box$ \vskip 5 pt
Theorem \ref{MainThm} emphasizes the fact that the condition (C1) is necessary for (NFLVR-S) to hold. In the remaining of this section we reveal a systematic arbitrage portfolio when (C1) fails. For this, we identify the set where the condition fails (i.e., the arbitrage set):
\[
\mathcal A:=\{(\omega, t)\in\Omega\times [0,T(\omega)] \;|\; dA^2_t(\omega)>0 \text{ and } d\langle M^2\rangle_t(\omega)=0\};
\]in other words, in $\mathcal A$ the measure $dA^2$ is not absolutely continuous with respect to $d\langle M^2\rangle$. The condition (C1) can be rewritten as: $\mathbb{P}(\omega: \exists t, (\omega,t)\in \mathcal A)=0$.
We introduce the d\'ebut of $\mathcal A$:
\[
D_\mathcal A:=\inf\{t\geq 0\;|\; (t,\omega)\in \mathcal A\},
\] with the usual convention: $\inf\emptyset=\infty$.
The random time $D_\mathcal A$ is a predictable $\mathbb{G}$ stopping time. This can be proved as follows. The processes $A^2$ and $\langle M^2\rangle$ are $\mathbb{G}$-predictable,
hence the set $\mathcal A$ is $\mathbb{G}$-predictable. Furthermore, $A^2$ and $\langle M^2\rangle$ are right continuous, so that $[\![D_\mathcal A]\!]\subset \mathcal A$. We conclude using Proposition 2.40, p. 354 in \cite{Nikeghbali06}.
The exit time from $\mathcal A$:
\[
E_\mathcal A:=\inf\{t>D_\mathcal A\;|\; (t,\omega)\notin \mathcal A\}
\]is as well a predictable stopping time (it can be also written as the d\'ebut of the set $\{(\omega, t)\in\Omega\times [\![D_\mathcal A\wedge T,T]\!] \;|\; dA^2_t(\omega)=0 \text{ or } d\langle M^2\rangle_t(\omega)>0\}$).
To construct our arbitrage portfolio we use a trading strategy $\pi=(\pi^C,\pi^X,\pi^Y)$, where $\pi^X_t\geq 0$ represents the
quantity of asset $X$ in the portfolio at time $t$, $\pi^Y_t\geq
0$ the quantity of asset $Y$ and $\pi^C_t\in \mathbb R$ is the
amount invested in the risk-free asset (cash) at time $t$ to have a self financing strategy (see Definition \ref{DefTradingStrategy}).
We recall that the value of the portfolio at time $t\in[0,T]$ writes:
\be\label{Vpi}
V^\pi_t:=\pi^C_t+\pi^X _tX_t+\pi^Y_t Y_t.
\ee
Additionally, our arbitrage portfolio will satisfy the following conditions:
\begin{itemize}
\item[(a)] it is initiated at time $D_\mathcal A$ at no cost: $V^\pi_{D_\mathcal A}=0$.
\item[(b)] at some $\mathbb{G}$ stopping time $S\leq T$ the portfolio has positive value: $V^\pi_S\geq 0$ $a.s.$ with $\mathbb{P}(V^\pi_S>0)>0$. In our case $S$ is any stopping time less or equal to $E_\mathcal A$.
\item[(c)] the underlying trading strategy $\pi$ is admissible in the sense of the Definition \ref{DefTradingStrategy} (some of the admissibility conditions are already implied by the previous points).
\end{itemize}
Such a portfolio is indeed the following one: $\pi_{0}=(0,0,0)$ (that is, no initial investment), then the self financing strategy associated with
\begin{align}\label{piX}
&\pi^X_t=-h_{t}\ind_{\{t\in[\![D_\mathcal A, E_\mathcal A[\![\}} \\\label{piY}
&\pi^Y_t=\ind_{\{t\in[\![D_\mathcal A, E_\mathcal A[\![\}}.
\end{align} The lemma below shows that the portfolio value is increasing, in particular it is bounded from below, which ensures that the underlying trading strategies are admissible, that is, (c) is satisfied. It also proves that condition (b) holds (i.e., the portfolio is an arbitrage) as soon as we have a violation of (C1), that is: $\mathbb{P}(\omega: \exists t, (\omega,t)\in \mathcal A)>0$.
\bl\label{Arbitrage} The value of a self-financing portfolio $V^\pi$ with $\pi$ as in (\ref{piX})-(\ref{piY}) is an increasing process, and strictly increasing for $(\omega,t)\in\mathcal A$.
\el
\noindent {\it Proof. $\, $}
The portfolio value is constant outside the set $\mathcal A$, therefore we only need to investigate the behaviour of the prices processes $X$ and $Y$ inside the set $\mathcal A$.
The portfolio being self-financing, we have:
\begin{align*}
dV^\pi_t&=-h_tdX_t+dY_t=\left(-h_tdJ^X_t-h_tdM^X_t\right)+\left(dV^Y_t+h_tdM^X_t +dM^2_t\right)\\
&=-h_tdJ^X_t+dA^2_t+dM^2_t.
\end{align*}The last equality appears as a consequence of the fact that in $\mathcal A$ we have $dA^2>0$ so that $da=dA^1=0$ and $dV^Y=dA^2$.
We recall the following properties: $dA^2$ is absolutely continuous with respect to $d\langle M^Y\rangle$ (consequence of the fact that $dA$ is absolutely continuous with respect to $d\langle M^Y\rangle$, see footnote page 9); and inside $\mathcal A$ we have $dA^2$ is orthogonal to $d\langle M^2\rangle$. It follows that inside $\mathcal A$, $dA^2$ is absolutely continuous with respect to $d\langle M^1\rangle$ and hence also with respect to $\langle M^X\rangle$. On the other hand, $dJ^X$ is orthogonal to $ d\langle M^X\rangle$. It follows that in $\mathcal A$ the process $J^X$ is constant, that is: $dJ^X\equiv 0$. From this, we deduce that the dynamics of the portfolio's value can be rewritten:
\begin{align*}
dV^\pi_t&=dA^2_t+dM^2_t\text{ for }(\omega, t)\in\mathcal A.
\end{align*}
We now notice that inside $\mathcal A$ we have $d\langle M^2\rangle \equiv 0$, by definition of $\mathcal A$, which implies that $M^2$ is constant inside $\mathcal A$. This simplifies the dynamics of $V^\pi$:
\begin{align*}
dV^\pi_t&=dA^2_t \text{ for }(\omega, t)\in\mathcal A
\end{align*}that is, $V^\pi$ is strictly increasing for $(\omega, t)\in\mathcal A$.
$\Box$ \vskip 5 pt
\section{Some Examples of Converging Prices}\label{sec::Examples}
We keep the notation of Section \ref{sec::MainResult}.
\subsection{The martingale $M^2$ is null}\label{Example1}
In this case, we can derive the following quadratic co-variation rule:
\begin{Lemma}\label{ThmNARule}
We suppose that $X$ and $Y$ satisfy the hypotheses from the previous section with $M^2\equiv 0$. If (NFLVR-S) holds then the process:
\[
\int_0^t\ind_{\{d \langle X,Y\rangle \leq 0\}}dY_s
\]is a $(\mathbb{G},\mathbb{P})$-supermartingale, which is to say:
\[
\int_0^t\ind_{\{d \langle X,Y\rangle \leq 0\}}dV^Y_s
\]is a decreasing process.
\end{Lemma}
\noindent {\it Proof. $\, $} The result
follows as an application of the Theorem \ref{MainThm}.
$\Box$ \vskip 5 pt
As an example, let us suppose $T$ is constant, that $M^X$ is a continuous martingale with deterministic quadratic variation, $f$ a deterministic function and $F(t)=\int_0^t f(s)d\langle M^X\rangle_s$. Then, the following are converging prices:
\begin{align*}
X_t=& X_0+M^X_t\\
Y_t=& Y_0+ \int_0^t M^X_s f(s)d\langle M^X\rangle_s +
\int_0^th_sdM^X_s.
\end{align*}
with
\[
h_t= 1+F(T)-F(t)
\]
Consider $F(t)=1-e^{rt}$
for some $r>0$ (and implicitly $f(t)<0$), then the process $h_t=1+
e^{rt}-e^{rT}$ is negative in an interval of the form $[0,S]$ with
$S<T$, provided that $T$ is large enough. By Lemma \ref{ThmNARule}, there are arbitrage
opportunities if the martingale $M^X$ has negative excursions in the
interval $[0,S]$.
Let us now consider the case of a "survival claim":
$\xi=\ind_{\{\tau\ > T\}}$, i.e., that pays one monetary
unit if some event $\tau$ does not occur before some fixed
maturity $T$. Suppose that for all investors $\tau$ is a totally inaccessible stopping time; it admits a constant $(\mathbb{F}^X,\mathbb{Q}^X)$ intensity $\lambda^X$, resp. a constant $(\mathbb{F}^Y,\mathbb{Q}^Y)$ intensity $\lambda^Y$. In this case, $X$ (resp. $Y$) is increasing on the stochastic interval $[0,\tau\wedge T)$ and has a downward jump at $\tau$ if $\tau\leq T$. More precisely:
\begin{align*}
X_t&=\mathbb{Q}^X(\tau>T|\mathcal{F}^X_t)=\ind_{\{\tau>t\}}e^{-\lambda^X(T-t)} \\
Y_t&=\mathbb{Q}^Y(\tau>T|\mathcal{F}^Y_t)=\ind_{\{\tau>t\}}e^{-\lambda^Y(T-t)}.
\end{align*}
(NFLVR-S) holds in this model (for instance $\mathbb{Q}^m$ is a supermartingale measure with $m=\arg\max_{i\in\{X,Y\}}\lambda^i$). This is in line with Lemma \ref{ThmNARule}: $[X,Y]_t=\Delta X_\tau\Delta Y_\tau\ind_{\{\tau\leq
t\}}\geq 0$ and hence
$\langle X,Y\rangle \geq 0$.
Now, consider an alternative of the above example, where in the filtration $\mathbb{F}^X$, the stopping time $\tau$ is predictable, but it is totally inaccessible in $\mathbb{F}^Y$ with constant intensity, i.e., $Y$ is increasing on the stochastic interval $[0,\tau\wedge T)$ and has a downward jump at $\tau$ if $\tau\leq T$ as above. In the filtration $\mathbb{G}$ the stopping time $\tau$ is predictable (because it is predictable in $\mathbb{F}^X\subset \mathbb{G}$), therefore the price process $Y$ appears to be $\mathbb{G}$-predictable and of finite variation, in particular $\langle X,Y\rangle\equiv 0$.
Then, there are arbitrage opportunities: in the filtration $\mathbb{G}$ there is no change of measure to make it a supermartingale. An obvious arbitrage strategy consists in buying $Y$ and selling it just before $\tau$.
\subsection{Investors with similar risk attitudes in the two markets}\label{Ex_SimRisk}
Let us assume that $\mathbb{P}$ is a martingale measure for both prices $X$ and $Y$ in their own filtration (but not in the filtration $\mathbb{G}$), that is: $X_t=\mathbb{E}[\xi|\mathcal{F}^X_t]$ and $Y_t=\mathbb{E}[\xi|\mathcal{F}^Y_t]$.
We illustrate with an example of a defaultable asset: $\xi=\ind_{\{\tau>T\}}\mathcal E(B)_T$, the maturity $T$ being fixed. We assume that $B$ is a Brownian motion and $\tau$, the default time of the issuer, is an exponentially distributed random variable with parameter $\lambda$, which is independent from the Brownian motion $B$.
We assume that the following information sets are available for each of the two markets and the insider, respectively, for $t\in[0,T]$:
\begin{align*}
\mathcal{F}^X_t& =\sigma(B_T)\vee\sigma(\tau \wedge s,s\leq t)\\
\mathcal{F}^Y_t& =\sigma(B_s, s\leq t )\vee\sigma(\tau)\\
\mathcal{G}_t&= \sigma(B_s, s\leq t )\vee\sigma(B_T)\vee\sigma(\tau).
\end{align*}
We denote
$$N_t=\ind_{\{\tau\leq t\}}-\lambda (t\wedge \tau),$$ which is an
$\mathbb{F}^X$-martingale. Also,
we notice that the $\mathbb{F}^Y$ Brownian motion $B$ is a
semimartingale in the larger filtration $\mathbb{G}$, namely
$$B_t=-\int_0^{t\wedge T}\frac{B_T-B_u}{T-u}du +\beta_t$$ with $\beta$ being a $(\mathbb{G},\mathbb{P})$ Brownian motion.
Easy computations show that:
\[
X_t=\ind_{\{\tau>t\}}e^{-\lambda(T-t)}{\mathcal E}_T(B) ={\mathcal E}_T(B) e^{-\lambda
T}-\int_0^t X_{s-} dN_s,
\]
which is an $\mathbb{F}^{X}$-martingale. However, in the filtration $\mathbb{G}$ the process $X$ is predictable and of finite variation. As it is not decreasing, we conclude by Theorem \ref{ThmGenRepr} that $X$ does not fulfil (NFLVR-S). On the other hand, $Y$ is given by the following $\mathbb{F}^Y$-martingale:
\[
Y_t=\ind_{\{\tau>T\}}\mathcal E(B)_t=\ind_{\{\tau>T\}}+\int_0^t Y_u dB_u
\]
while in the larger filtration $\mathbb{G}$, the following decomposition holds for $Y$:
\[
Y_t=\ind_{\{\tau>T\}}-\int_0^tY_u\frac{B_T-B_u}{T-u}du+ \int_0^{t}Y_ud\beta_u,
\]
where the integral $K_t:=\int_0^tY_u\frac{B_T-B_u}{T-u}du$ is well defined. From \cite{jy:faux}, this condition is equivalent to $\int_0^T \frac{ Y_s }{\sqrt{T-s}}ds <\infty$ and, since $\mathbb{E}(Y_t)\leq 1$, one has $\mathbb{E}\left ( \int_0^T \frac{|Y_s|}{\sqrt{T-s}}ds\right)<\infty$.
This type of model is known for not satisfying (NFLVR). Imposing short sales constraints for the insider does not prevent the free lunches. The candidate density process for the fundamental supermartingale measure is:
\[
D^*_t=\mathcal E_t\left(\int_0^\cdot \frac{(B_T-B_u)^+}{T-u}d\beta_u\right),
\]
However, the fact that $\frac{(B_T-B_u)^+}{T-u}$ is not square integrable prevents it from being a valid change of measure. See, e.g., \cite{aj}, section 4.2.1.
\subsection{Different risk attitudes in the two markets}
We work directly in the filtration $\mathbb{G}$, generated by two independent $\mathbb{P}$-Brownian motions $B$ and $\beta$. We denote $W:=\rho B+\sqrt{1-\rho^2} \beta$ for some $\rho\in[-1,1]$. The two asset prices are supposed to be as follows:
\begin{align*}
X_t&=X_0+B_t\\
Y_t&=\mathbb{E}^{\mathbb{Q}^Y}[X_T|\mathcal{G}_t],
\end{align*}
with
\[
\frac{d\mathbb{Q}^Y}{d\mathbb{P}}\Big|_{\mathcal{G}_t}:=\mathcal E\left(-\int_0^\cdot W^{Y}_udB_u\right)_t,
\]
and with $W^Y$ satisfying $dW^Y_t=\rho W^Y_t dt + dW_t$. We consider $T=\inf\{t\geq 0, X_t=0\}\wedge \bar T$ with $\bar T$ non random (so that the price processes are positive).
Under the above assumptions, the processes $W^Y$, $B^{Y}_t=B_t+\int_0^tW^Y_udu$ and $\beta$ are $\mathbb{Q}^Y$-Brownian motions. It can be easily computed that $Y$ has the following $(\mathbb{G},\mathbb{P})$ decomposition: \[
Y_t=X_0+\int_0^t(1-\rho(T-u))W^Y_udu+\int_0^t(1-\rho(T-u))dB_u-\sqrt{1-\rho^2}\int_0^t(T-u)d\beta_u.
\] Here $\beta$ is a $(\mathbb{G},\mathbb{P})$-Brownian motion, independent from $B$.
We have for $t\leq T$:
\begin{align*}
M^1_t&=\int_0^t h_udB_u\text{ with }h_t=1-\rho(T-t)\\
M^2_t&=-\sqrt{1-\rho^2}\int_0^t(T-u)d\beta_u\\
A_t& =\int_0^t \left(\frac{W^Y_u}{h_u}\right)^+d\langle M^1\rangle _u.
\end{align*}
For simplicity we fix $\bar T=2$.
We can conclude using Theorem \ref{MainThm} that (NFLVR-S) holds true:
\begin{enumerate}
\item If $\rho\leq 0$, then $h>0$ and conditions (C1) and (C2) are trivially satisfied with $\tilde a^2\equiv 0$. Condition (C3) also holds true.
\item If $\rho>0$ then, $\{(\omega,t)|h_t\leq 0\}=[0,\max(0,2-1/\rho)]$. For $\rho\in(1/2,1]$, this interval is not empty and $dA^2_t>0$ whenever $W^Y_t<0$, and these negative excursions of $W^Y$ occur $a.s.$ on every bounded interval. However, there are no arbitrage opportunities in this case either: all conditions are fulfilled to construct the fundamental supermartingale measure $\mathbb{P}^*$.
\end{enumerate}
\subsection{Filtering models with vanishing noise}Another class of examples fitting in the framework of converging prices are filtering models where the noise in the observation process is vanishing at a fixed time $T$.
Let us consider a very simple filtering model. As in the previous example, $X$ is a $\mathbb{P}$-Brownian motion starting at $X_0$. Suppose that for $t<T$, the information flow $\mathbb{F}^Y$ is generated by a noisy observation of $X$, modeled by
\[
O_t=\int_0^tf(X_s)ds+W_t,
\]
$W$ being a Brownian motion independent from $X$. Furthermore, at time $T$ the value $\xi=X_T=Y_T$ can be observed fully. More precisely:
\begin{align*}
\mathcal{F}^Y_t&=\sigma(O_s,s\leq t),\text{ for } t<T\\
\mathcal{F}^Y_T&=\sigma(O_s,s\leq T)\vee\sigma(\xi).
\end{align*}
As usual, we denote $\mathcal{G}_t=\sigma(X_s,s\leq t)\vee\mathcal{F}^Y_t$. Now, we set:
\[
Y_t:=\mathbb{E}[\xi|\mathcal{F}^Y_t].
\]
We denote by $N_t=O_t-\int_0^t\widehat{f(X_s)}ds$ the innovation process, where as usual $\widehat{ f(X)}$ is the $\mathbb{F}^Y$-optional projection of the process $f(X)$. We obtain for some $\mathbb{F}^Y$-predictable process $\psi$ and for $t<T$:
\[
Y_t=\mathbb{E}[X_t|\mathcal{F}^Y_t]=X_0+\int_0^t\psi_udN_u.
\]
The process $\psi$ is given by: $\psi_t=\widehat{f(X_t)X_t}-\widehat{X_t}\widehat{f(X_t)}$ (see Theorem 3.35 in \cite{BainCrisan}). In the filtration $\mathbb{G}$, replacing $N_t=W_t+\int_0^t(f(X_s)-\widehat{f(X_s)})ds$ we obtain the following representation:
\begin{align*}
Y_t&= X_0+\left(\int_0^t\psi_u (f(X_u)-\widehat{f(X_u)})du+\ind_{\{t\geq T\}}(\xi-Y_{T-})\right)+\int_0^t\psi_udW_u\\
&=X_0+V^Y_t+M^2_t.
\end{align*}
Here we have an example with $M^1\equiv 0$. We can use Theorem
\ref{ThmGenRepr} to deduce that the dynamics of $Y$ is not
compatible with (NFLVR-S): we can write
\[
V^Y_t= J^Y_t+\left\{\int_0^t\psi_u (f(X_u)-\widehat{f(X_u)})du\right\}
\] where $J^Y_t=\ind_{\{t\geq T\}}(\xi-Y_{T-})$ is not a decreasing process.
\appendix
\renewcommand{\thesubsection}{\Alph{subsection}}
\section{Some recalls on measures and increasing processes}\label{A}
For the reader's convenience we gather here some elementary results that were used in the paper.
\begin{Theorem}\label{diffmeasures} Let $\mu^1$ and $\mu^2$ be two finite (possibly signed) measures.
\begin{itemize}
\item[(a)] Assume that both $\mu^1$ and $\mu^2$ are positive measures. Then, $(\mu^1-\mu^2)$ is a positive measure only if $\mu^2$ is absolutely continuous with respect to $\mu^1$.
\item[(b)] Assume that $\mu^1\bot \mu^2$ and furthermore $(\mu^1+\mu^2)$ is a positive measure. Then, both $\mu^1$ and $\mu^2$ are positive measures.
\end{itemize}
\end{Theorem}
\noindent {\it Proof. $\, $}
\begin{itemize}
\item[(a)]Suppose that $(\mu^1-\mu^2)$ is a measure on the $\sigma$-algebra $\mathcal{F}$. Then, for all $A\in\mathcal{F}$, $(\mu^1-\mu^2)(A)\geq 0.$ In particular, if $A$ is such that $\mu^1(A)=0$ then: $(\mu^1-\mu^2)(A)= \mu^1(A)-\mu^2(A)=-\mu^2(A) \geq 0$, which implies that $\mu^2(A)=0$ (since we also have $\mu^2(A)\geq 0$ for $\mu^2$ being a positive measure). In other words: $\mu^2 \ll\mu^1$.
\item[(b)] For all $A\in\mathcal{F}$, $(\mu^1+\mu^2)(A)\geq 0$; the orthogonality condition implies that $\mu^1(A)\in\{(\mu^1+\mu^2)(A),0\}$ and $\mu^2(A)\in\{(\mu^1+\mu^2)(A),0\}$ hence both are positive measures.
\end{itemize}
$\Box$ \vskip 5 pt
An increasing process can be seen as a random measure on $\mathbb{R}^+$, $dA_t (\omega)$, whose distribution function is $A_\bullet(\omega)$. Similarly, a process of finite variation can be seen as a
signed random measure, since it can be written as the difference of two increasing processes.
\begin{Proposition}[\cite{JacodShi} p.30]
Let $A$, $B$ be finite variation processes (resp. increasing processes) such that $dB\ll dA$. Then, there exists an optional (resp. nonnegative) process $H$ such that $B=\int HdA$ up to an evanescent set. If moreover $A$ and $B$ are predictable, one may choose $H$ to be predictable.
\end{Proposition}
\begin{Proposition}[\cite{DelbScha95}]\label{AnnexA3}
Let $A$, $B$ be c\`adl\`ag, predictable processes of finite variation, with $B$ being increasing. Then, there is a predictable process $\varphi$ and a predictable subset $N$ of $\mathbb R_+\times\Omega$ such that:
\[
A=\int\varphi d B+\int \ind_NdA
\]
and:
\[
\int_{\mathbb R_+}\ind_N(u)dB_u=0.
\]
\end{Proposition}
\end{document} |
\begin{document}
\def\ques{{\colr \underline{??????}\colb}} \def\nto#1{{\colC \footnote{\em \colC #1}}} \def\fractext#1#2{{#1}/{#2}} \def\fracsm#1#2{{\textstyle{\frac{#1}{#2}}}} \def\nnonumber{} \def\les{\lesssim} \def\RR{R} \def\JJ{J} \def\VV{V} \def\WW{W} \def\colr{{}} \def\colg{{}} \def\colb{{}} \def\colu{{}} \def\cole{{}} \def\colA{{}} \def\colB{{}} \def\colC{{}} \def\colD{{}} \def\colE{{}} \def\colF{{}} \ifnum\coloryes=1 \definecolor{coloraaaa}{rgb}{0.1,0.2,0.8} \definecolor{colorbbbb}{rgb}{0.1,0.7,0.1} \definecolor{colorcccc}{rgb}{0.8,0.3,0.9} \definecolor{colordddd}{rgb}{0.0,.5,0.0} \definecolor{coloreeee}{rgb}{0.8,0.3,0.9} \definecolor{colorffff}{rgb}{0.8,0.9,0.9} \definecolor{colorgggg}{rgb}{0.5,0.0,0.4} \definecolor{coloroooo}{rgb}{0.45,0.0,0} \def\colb{\color{black}} \def\colr{\color{red}} \def\cole{\color{coloroooo}} \def\colu{\color{blue}} \def\colg{\color{colordddd}} \def\colgray{\color{colorffff}} \def\colA{\color{coloraaaa}} \def\colB{\color{colorbbbb}} \def\colC{\color{colorcccc}} \def\colD{\color{colordddd}} \def\colE{\color{coloreeee}} \def\colF{\color{colorffff}} \def\colG{\color{colorgggg}} \fi \ifnum\isitdraft=1 \chardef\coloryes=1 \baselineskip=17.6pt \pagestyle{myheadings} \def\const{\mathop{\rm const}\nolimits} \def\diam{\mathop{\rm diam}\nolimits} \def\rref#1{{\ref{#1}{\rm \tiny \fbox{\tiny #1}}}} \def\theequation{\fbox{\bf \thesection.\arabic{equation}}} \def\startnewsection#1#2{
\colg \section{#1}\colb\label{#2} \setcounter{equation}{0} \pagestyle{fancy} \lhead{\colb Section~\ref{#2}, #1 } \cfoot{} \rfoot{\thepage\ of \pageref{LastPage}} \lfoot{\colb{\today,~\currenttime}~}} \chead{} \rhead{\thepage} \def\nnewpage{
} \newcounter{startcurrpage} \newcounter{currpage} \def\llll#1{{\rm\tiny\fbox{#1}}} \def\blackdot{{\color{red}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\bluedot{{\colC {\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\purpledot{{\colA{\rule[0mm]{4mm}{4mm}}\colb}} \def\pdot{\purpledot} \else \baselineskip=12.8pt \def\blackdot{{\color{red}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\purpledot{{\rule[-3mm]{8mm}{8mm}}} \def\pdot{} \fi \def\textand{\qquad \text{and}\qquad} \def\pp{p} \def\qq{{\tilde p}} \def\KK{K} \def\MM{M} \def\ema#1{{#1}} \def\emb#1{#1} \ifnum\isitdraft=1 \def\llabel#1{\nonumber} \else \def\llabel#1{\nonumber} \fi \def\tepsilon{\tilde\epsilon} \def\epsilonz{\epsilon_0} \def\restr{\bigm|} \def\fgsdfgwertsfsrsfgsdfgfsdfasdf{\int}\def\into{\int_{\Omega}} \def\intu{\int_{\Gamma_1}} \def\intl{\int_{\Gamma_0}} \def\tpar{\tilde\partial} \def\bpar{\,|\nabla_2|} \def\barpar{\bar\partial} \def\Omegae{\Omega_{\text e}} \def\Gammac{\Gamma_{\text c}} \def\Gammaf{\Gamma_{\text f}} \def\Omegaf{\Omega_{\text f}} \def\FF{F} \def\gdot{{\color{green}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\tdot{{\color{green}{\hskip-.0truecm\rule[-.5mm]{2mm}{4mm}\hskip.2truecm}}\hskip-.2truecm} \def\bdot{{\color{blue}{\hskip-.0truecm\rule[-.5mm]{2mm}{4mm}\hskip.2truecm}}\hskip-.2truecm} \def\cydot{{\color{cyan} {\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\rdot{{\color{red} {\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\nts#1{{\color{red}\hbox{\bf ~#1~}}} \def\ntsr#1{\vskip.0truecm{\color{red}\hbox{\bf ~#1~}}\vskip0truecm} \def\ntsf#1{\footnote{\hbox{\bf ~#1~}}}
\def\ntsf#1{\footnote{\color{red}\hbox{\bf ~#1~}}} \def\bigline#1{~\\\hskip2truecm~~~~{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}{#1}\\} \def\biglineb{\bigline{$\downarrow\,$ $\downarrow\,$}} \def\biglinem{\bigline{---}} \def\biglinee{\bigline{$\uparrow\,$ $\uparrow\,$}} \def\ceil#1{\lceil #1 \rceil} \def\gdot{{\color{green}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\bluedot{{\color{blue} {\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\rdot{{\color{red} {\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\dbar{\bar{\partial}} \newtheorem{Theorem}{Theorem}[section] \newtheorem{Corollary}[Theorem]{Corollary} \newtheorem{Proposition}[Theorem]{Proposition} \newtheorem{Lemma}[Theorem]{Lemma} \newtheorem{Remark}[Theorem]{Remark} \newtheorem{definition}{Definition}[section] \def\theequation{\thesection.\arabic{equation}} \def\cmi#1{{\color{red}IK: #1}} \def\cmj#1{{\color{red}IK: #1}} \def\cml{\rm \colr Linfeng:~} \def\TT{\mathbf{T}} \def\XX{\mathbf{X}} \def\scl{,} \def\sqrtg{\sqrt{g}} \def\DD{{\mathcal D}} \def\OO{\tilde\Omega} \def\EE{{\mathcal E}} \def\lot{{\rm l.o.t.}} \def\endproof{
$\Box$\\} \def\square{
$\Box$\\} \def\inon#1{\ \ \ \ \text{~~~~~~#1}} \def\comma{ {\rm ,\qquad{}} } \def\commaone{ {\rm ,\qquad{}} } \def\dist{\mathop{\rm dist}\nolimits} \def\ad{\mathop{\rm ad}\nolimits} \def\sgn{\mathop{\rm sgn\,}\nolimits} \def\Tr{\mathop{\rm Tr}\nolimits} \def\dive{\mathop{\rm div}\nolimits} \def\grad{\mathop{\rm grad}\nolimits} \def\curl{\mathop{\rm curl}\nolimits} \def\det{\mathop{\rm det}\nolimits} \def\supp{\mathop{\rm supp}\nolimits} \def\re{\mathop{\rm {\mathbb R}e}\nolimits} \def\wb{\bar{\omega}} \def\Wb{\bar{W}} \def\indeq{\quad{}} \def\indeqtimes{\indeq\indeq\indeq\indeq\times} \def\period{.} \def\semicolon{\,;} \def\bfx{\mathbf{x}} \newcommand{sec_setting} We consider the fluid-structure problem for a free boundary system involving the motion of an elastic body immersed in a compressible fluid. Let $\Omegaf (t)$ and $\Omegae (t)$ be the domains occupied by the fluid and the solid body at time $t$ in $\mathbb{R}^3$, whose common boundary is denoted by $\Gammac(t)$. The fluid is modeled by the compressible Navier-Stokes equations, which in Eulerian coordinates reads \begin{align} & \rho_t + \dive (\rho u) = 0 \inon{in~$(0,T)\times \Omegaf(t)$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH21} \\ & \rho u_t + \rho (u \cdot \nabla)u - \lambda \dive (\nabla u + (\nabla u)^T) - \mu \nabla \dive u + \nabla p = 0 \inon{in~$(0,T) \times \Omegaf(t)$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH22} \end{align} where $\rho = \rho (t,x) \in \mathbb{R}_+$ is the density, $u = u(t,x) \in \mathbb{R}^3$ is the velocity, $p = p(t,x) \in \mathbb{R}_+$ is the pressure, and $\lambda, \mu>0$ are physical constants. (We remark that the condition for $\lambda$ and $\mu$ can be relaxed to $\lambda>0$ and $3\lambda + 2\mu >0$.) 
The system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH21}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH22} is defined on $\Omegaf (t)$, which is set to $ \Omegaf =\Omegaf (0) $ at the initial time and evolves in time. The dynamics of the coupling between the compressible fluid and the elastic body are best described in the Lagrangian coordinates. Namely, we introduce the Lagrangian flow map $\eta(t,\cdot) \colon \Omegaf \to \Omegaf (t)$ and
rewrite the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH21}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH22} as \begin{align} & \RR_t - \RR a_{kj} \partial_k v_j = 0 \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260} \\ & \partial_t v_j - \lambda \RR a_{kl} \partial_k (a_{ml} \partial_m v_j + a_{mj} \partial_m v_l) - \mu \RR a_{kj} \partial_k(a_{mi} \partial_m v_i ) + \RR a_{kj} \partial_k (q (\RR^{-1} )) = 0 \inon{in~$(0,T)\times \Omegaf$}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH261} \end{align} for $j=1,2,3$, where $\RR (t,x)= \rho^{-1} (t, \eta(t,x))$ is the reciprocal of the Lagrangian density, $v(t,x) = u(t, \eta(t,x))$ is the Lagrangian velocity, $a(t,x)= (\nabla \eta (t,x))^{-1}$ is the inverse matrix of the flow map and $q$ is a given function of the density.
The system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH261} is expressed in terms of Lagrangian coordinates and posed in a fixed domain $\Omegaf$. \par On the other hand, the elastic body is modeled by the wave equation in Lagrangian coordinates, which is posed in a fixed domain $\Omegae$ as \begin{align} w_{tt} - \Delta w = 0 \inon{in~$(0,T) \times \Omegae$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} \end{align} where $(w, w_t)$ are the displacement and the structure velocity. The interaction boundary conditions are the velocity and stress matching conditions, which are formulated in Lagrangian coordinates over the fixed common boundary $\Gammac = \Gammac(0)$ as \begin{align} & v_j = \partial_t w_{j} \inon{on~$(0,T) \times \Gammac$}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH262} \\& \partial_k w_j \nu^k = \lambda \JJ a_{kl} (a_{ml} \partial_m v_j + a_{mj} \partial_m v_l ) \nu^k + \mu \JJ a_{kj} a_{mi} \partial_m v_i \nu^k - \JJ a_{kj} q \nu^k \inon{on~$(0,T) \times \Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH263} \end{align} for $j=1,2,3$, where $J(t,x) = \det (\nabla \eta(t,x))$ is the Jacobian and $\nu$ is the unit normal vector to~$\Gammac$, which is outward with respect to~$\Omegae$. 
In the present paper, we consider the reference configurations $\Omega = \Omegaf \cup \Omegae \cup \Gammac$, $\Omegaf$, and $\Omegae$ given by \begin{align} \begin{split} & \Omega = \{y=(y_1,y_2,y_3) \in \mathbb{R}^3 : (y_1, y_2) \in \mathbb{T}^2, 0<y_3< L_3\} , \\ & \Omegaf = \{y=(y_1,y_2,y_3) \in \mathbb{R}^3 : (y_1, y_2) \in \mathbb{T}^2, 0<y_3< L_1 ~\text{~or~}~ L_2<y_3< L_3 \}, \\& \Omegae = \{y=(y_1,y_2,y_3) \in \mathbb{R}^3 : (y_1, y_2) \in \mathbb{T}^2, L_1<y_3< L_2\} , \end{split} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH313} \end{align} where $0<L_1 <L_2 <L_3$ and $\mathbb{T}^2$ is the two-dimensional torus with the side $2\pi$. Thus, the common boundary is expressed as \begin{align} \Gammac = \{(y_1, y_2) \in \mathbb{R}^2 : (y_1, y_2, y_3) \in \Omega, y_3= L_1 ~\text{~or~}~ y_3= L_2\} , \llabel{rVaWW4nTZXVbRVoQ77hVLX6K2kqFWFmaZnsF9Chp8KxrscSGPiStVXBJ3xZcD5IP4Fu9LcdTR2VwbcLDlGK1ro3EEyqEAzw6sKeEg2sFfjzMtrZ9kbdxNw66cxftlzDGZhxQAWQKkSXjqmmrEpNuG6Pyloq8hHlSfMaLXm5RzEXW4Y1Bqib3UOhYw95h6f6o8kw6frZwg6fIyXPnae1TQJMt2TTfWWfjJrXilpYGrUlQ4uM7Dsp0rVg3gIEmQOzTFh9LAKO8csQu6mh25r8WqRIDZWgSYkWDulL8GptZW10GdSYFUXLzyQZhVZMn9amP9aEWzkau06dZghMym3RjfdePGln8s7xHYCIV9HwKa6vEjH5J8Ipr7NkCxWR84TWnqs0fsiPqGgsId1fs53AT71qRIczPX77Si23GirL9MQZ4FpigdruNYth1K4MZilvrRk6B4W5B8Id3Xq9nhxEN4P6ipZla2UQQx8mdag7rVD3zdDrhBvkLDJotKyV5IrmyJR5etxS1cvEsYxGzj2TrfSRmyZo4Lm5DmqNiZdacgGQ0KRwQKGXg9o8v8wmBfUutCOcKczzkx4UfhuAa8pYzWVq9Sp6CmAcZLMxceBXDwugsjWuiiGlvJDb08hBOVC1pni64TTqOpzezqZDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH360} \end{align} while the outer boundary is represented by \begin{align} \Gammaf = \{y \in \bar{\Omega} : y_3 = 0\} \cup \{y\in \bar{\Omega} : y_3 = L_3\} . 
\llabel{BJy5oKS8BhHsdnKkHgnZlUCm7j0IvYjQE7JN9fdEDddys3y1x52pbiGLca71jG3euliCeuzv2R40Q50JZUBuKdU3mMay0uoS7ulWDh7qG2FKw2TJXzBES2JkQ4UDy4aJ2IXs4RNH41spyTGNhhk0w5ZC8B3nUBp9p8eLKh8UO4fMqY6wlcAGMxCHtvlOxMqAJoQQU1e8a2aX9Y62rlIS6dejKY3KCUm257oClVeEe8p1zUJSvbmLdFy7ObQFNlJ6FRdFkEmqMN0FdNZJ08DYuq2pLXJNz4rOZkZX2IjTD1fVtz4BmFIPi0GKDR2WPhOzHzTLPlbAEOT9XW0gbTLb3XRQqGG8o4TPE6WRcuMqMXhs6xOfv8stjDiu8rtJtTKSKjlGkGwt8nFDxjA9fCmiuFqMWjeox5Akw3wSd81vK8c4C0OdjCHIseHUOhyqGx3KwOlDql1Y4NY4IvI7XDE4cFeXdFVbCFHaJsb4OC0huMj65J4favgGo7qY5XtLyizYDvHTRzd9xSRVg0Pl6Z89XzfLhGlHIYBx9OELo5loZx4wag4cnFaCEKfA0uzfwHMUVM9QyeARFe3Py6kQGGFxrPf6TZBQRla1a6AekerXgkblznSmmhYjcz3ioWYjzh33sxRJMkDosEAAhUDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH318} \end{align} To close the system, we impose the homogeneous Dirichlet boundary condition \begin{align} & v = 0 \inon{on~$(0,T)\times \Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH266} \end{align} on the outer boundary $\Gammaf$ and the periodic boundary conditions for $w$, $\rho$, and $u$ on the lateral boundary, i.e., \begin{align} w(t, \cdot), \rho(t, \eta(t, \cdot)), u(t, \eta(t, \cdot)) ~~\text{periodic in the $y_1$~and~$y_2$~directions} . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH267} \end{align} Note that the inverse matrix of the flow map $a$ satisfies the ODE system \begin{align} & \frac{\partial a}{\partial t} (t,x) = -a(t,x) \nabla v(t,x) a(t,x) \inon{in~$[0,T] \times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH204} \\ & a (0) = I_3 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH264} \end{align} while the Jacobian satisfies the ODE system \begin{align} & \frac{\partial \JJ}{\partial t} (t,x) = \JJ (t,x) a_{kj} (t,x) \partial_k v_j (t,x) \inon{in~$[0,T] \times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH210} \\& \JJ(0) = 1 \inon{in~$\Omegaf$} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH212} \end{align} The initial data of the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} is given as \begin{align} \begin{split} & (\RR, v, w, w_t)(0) = (\RR_0, v_0, w_0, w_1) \inon{in~$\Omegaf\times \Omegaf \times \Omegae\times \Omegae$} , \\& (\RR_0, v_0, w_0, w_1) ~\text{periodic in the $y_1$ and $y_2$ directions} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH265} \end{split} \end{align} where $w_0 = 0$. 
Denote \begin{align} H^{r,s} ((0,T) \times \Omegaf) = H^r ((0,T), L^2 (\Omegaf)) \cap L^2 ((0,T), H^s (\Omegaf)) , \llabel{OOzaQfKZ0cn5kqYPnW71vCT69aEC9LDEQ5SBK4JfVFLAoQpNdzZHAlJaLMnvRqH7pBBqOr7fvoaeBSA8TEbtxy3jwK3v244dlfwRLDcgX14vTpWd8zyYWjweQmFyD5y5lDNlZbAJaccldkxYn3VQYIVv6fwmHz19w3yD4YezRM9BduEL7D92wTHHcDogZxZWRWJxipvfz48ZVB7FZtgK0Y1woCohLAi70NOTa06u2sYGlmspVl2xy0XB37x43k5kaoZdeyEsDglRFXi96b6w9BdIdKogSUMNLLbCRzeQLUZmi9O2qvVzDhzv1r6spSljwNhG6s6iSdXhobhbp2usEdl95LPAtrBBibPCwShpFCCUayzxYS578rof3UwDPsCIpESHB1qFPSW5tt0I7ozjXun6cz4cQLBJ4MNmI6F08S2Il8C0JQYiUlI1YkKoiubVtfGuOegSllvb4HGn3bSZLlXefaeN6v1B6m3Ek3JSXUIjX8PdNKIUFNJvPHaVr4TeARPdXEV7BxM0A7w7jep8M4QahOihEVoPxbi1VuGetOtHbPtsO5r363Rez9nA5EJ55pcLlQQHg6X1JEWK8Cf9kZm14A5lirN7kKZrY0K10IteJd3kMGwopVnfYEG2orGfj0TTAXtecJKeTMDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH49} \end{align} with the corresponding norm \begin{align} \Vert f\Vert_{H^{r,s} ((0,T) \times \Omegaf)}^2 = \Vert f\Vert_{H^r ((0,T), L^2 (\Omegaf))}^2 + \Vert f \Vert_{L^2 ((0,T), H^s (\Omegaf))}^2. \llabel{0x1N9f0lRpQkPM373r0iA6EFs1F6f4mjOB5zu5GGTNclBmkb5jOOK4ynyMy04oz6m6AkzNnPJXhBnPHRuN5LyqSguz5NnW2lUYx3fX4huLieHL30wg93Xwcgj1I9dO9bEPCR0vc6A005QVFy1lyK7oVRVpbJzZnxYdcldXgQaDXY3gzx368ORJFK9UhXTe3xYbVHGoYqdHgVyf5kKQzmmK49xxiApjVkwgzJOdE4vghAv9bVIHewcVqcbSUcF1pHzolNjTl1BurcSamIPzkUS8wwSa7wVWR4DLVGf1RFr599HtyGqhDT0TDlooamgj9ampngaWenGXU2TzXLhIYOW5v2dArCGsLks53pWAuAyDQlF6spKydHT9Z1Xn2sU1g0DLlaoYuLPPB6YKoD1M0fiqHUl4AIajoiVQ6afVT6wvYMd0pCYBZp7RXHdxTb0sjJ0Beqpkc8bNOgZ0Tr0wqh1C2HnYQXM8nJ0PfuGJBe2vuqDukLVAJwv2tYcJOM1uKh7pcgoiiKt0b3eURecDVM7ivRMh1T6pAWlupjkEjULR3xNVAu5kEbnrVHE1OrJ2bxdUPyDvyVix6sCBpGDSxjBCn9PFiuxkFvw0QPofRjy2OFItVeDBtDzlc9xVyA0de9Y5h8c7dYCFkFlvDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH37} \end{align} We write $H^{r,s}_{\Gammac}$ for the analogous space for functions defined on $(0,T) \times \Gammac$. 
For simplicity of notation, we frequently write \begin{align} K^s = H^{s/2, s} = H^{s/2, s} ((0,T) \times \Omegaf). \llabel{WPDSuNVI6MZ72u9MBtK9BGLNsYplX2yb5UHgHADbW8XRzkvUJZShWQHGoKXyVArsHTQ1VbddK2MIxmTf6wET9cXFbuuVxCbSBBp0v2JMQ5Z8z3pMEGpTU6KCcYN2BlWdp2tmliPDHJQWjIRRgqi5lAPgiklc8ruHnvYFMAIrIh7Ths9tEhAAYgSswZZfws19P5weJvMimbsFHThCnSZHORmyt98w3U3zantzAyTwq0CjgDIEtkbh98V4uo52jjAZz1kLoC8oHGvZ5RuGwv3kK4WB50ToMtq7QWG9mtbSIlc87ruZfKwZPh31ZAOsq8ljVQJLTXCgyQn0vKESiSqBpawtHxcIJe4SiE1izzximkePY3s7SX5DASGXHqCr38VYP3HxvOIRZtMfqNoLFoU7vNdtxzwUkX32t94nFdqqTRQOvYqEbigjrSZkTN7XwtPFgNsO7M1mbDAbtVB3LGCpgE9hVFKYLcSGmF8637aZDiz4CuJbLnpE7yl85jg1MTPOLOGEPOeMru1v25XLJFzhwgElnuYmqrX1YKVKvgmMK7gI46h5kZBOoJtfC5gVvA1kNJr2o7om1XNpUwtCWXfFTSWDjsIwuxOJxLU1SxA5ObG3IOUdLqJcCArgzKM08DvX2mui13Tt71IwqoDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH47} \end{align} \par Our main result states the local-in-time existence of solution to the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} with the mixed boundary conditions \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH262}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH267} and the initial data \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH265}. \par \cole \begin{Theorem} \label{T01} Let $s\in (2, 2+ \epsilon_0]$ where $\epsilon_0 \in (0,1/2)$. 
Assume that $\RR_0 \in H^s (\Omegaf), \RR_0^{-1} \in H^s (\Omegaf)$, $v_0 \in H^s (\Omegaf)$, $w_1 \in H^{s-1/2} (\Omegae)$, and $w_0 = 0$, with the compatibility conditions \begin{align} & w_{1j} = v_{0j} \inon{on~$\Gammac$} , \llabel{FUI0EEf5SV2vxcySYIQGrqrBHIDTJv1OB1CzDIDdW4E4jJmv6KtxoBOs9ADWBq218BJJzRyUQi2GpweET8LaO4ho95g4vWQmoiqjSwMA9CvnGqxl1LrYuMjGboUpuvYQ2CdBlAB97ewjc5RJESFGsORedoM0bBk25VEKB8VA9ytAEOyofG8QIj27aI3jyRmzyETKxpgUq4BvbcD1b1gKByoE3azgelVNu8iZ1w1tqtwKx8CLN28ynjdojUWvNH9qyHaXZGhjUgmuLI87iY7Q9MQWaiFFSGzt84mSQq25ONltTgbl8YDQSAzXqpJEK7bGL1UJn0f59vPrwdtd6sDLjLoo18tQXf55upmTadJDsELpH2vqYuTAmYzDg951PKFP6pEizIJQd8NgnHTND6z6ExRXV0ouUjWTkAKABeAC9Rfjac43AjkXnHdgSy3v5cBets3VXqfpPBqiGf90awg4dW9UkvRiJy46GbH3UcJ86hWVaCMjedsUcqDSZ1DlP2mfBhzu5dvu1i6eW2YNLhM3fWOdzKS6Qov14wxYYd8saS38hIlcPtS4l9B7hFC3JXJGpstll7a7WNrVMwunmnmDc5duVpZxTCl8FI01jhn5Bl4JzaEV7CKMThLji1gyZuXcIv4033NqZLITGUDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH270} \\& v_{0j} = 0 \inon{on~$\Gammaf$} , \llabel{x3ClPCBKO3vRUimJql5blI9GrWyirWHoflH73ZTeZXkopeq8XL1RQ3aUj6Essnj20MA3AsrSVft3F9wzB1qDQVOnHCmmP3dWSbjstoj3oGjadvzqcMB6Y6kD9sZ0bdMjtUThULGTWU9Nmr3E4CNbzUOvThhqL1pxAxTezrHdVMgLYTTrSfxLUXCMrWAbE69K6XHi5re1fx4GDKkiB7f2DXzXez2k2YcYc4QjUyMYR1oDeYNWf74hByFdsWk4cUbCRDXaq4eDWd7qbOt7GOuoklgjJ00J9IlOJxntzFVBCFtpABpVLEE2y5Qcgb35DU4igj4dzzWsoNFwvqjbNFma0amFKivAappzMzrVqYfOulMHafaBk6JreOQBaTEsJBBtHXjn2EUCNleWpcvWJIggWXKsnB3wvmoWK49Nl492ogR6fvc8ffjJmsWJr0jzI9pCBsIUVofDkKHUb7vxpuQUXA6hMUryvxEpcTqlTkzz0qHbXpO8jFuh6nwzVPPzpA8961V78cO2Waw0yGnCHVqBVjTUHlkp6dGHOdvoEE8cw7QDL1o1qg5TXqoV720hhQTyFtpTJDg9E8Dnsp1QiX98ZVQN3sduZqcn9IXozWhFd16IB0K9JeBHvi364kQlFMMJOn0OUBrnvpYyjUDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH271} \\& \lambda (\partial_k v_{0j} + \partial_j v_{0k} )\nu^k + \mu \partial_i v_{0i} \nu^j - q(\RR_0^{-1}) \nu^j = 0 \inon{on~$\Gammac$} , 
\llabel{BOfsPzxl4zcMnJHdqOjSi6NMn8bR6kPeklTFdVlwDSrhT8Qr0sChNh88j8ZAvvWVD03wtETKKNUdr7WEK1jKSIHFKh2sr1RRVRa8JmBtkWI1ukuZTF2B4p8E7Y3p0DX20JM3XzQtZ3bMCvM4DEAwBFp8qYKpLSo1a5sdRPfTg5R67v1T4eCJ1qg14CTK7u7agjQ0AtZ1Nh6hkSys5CWonIOqgCL3u7feRBHzodSJp7JH8u6RwsYE0mcP4rLaWAtlyRwkHF3eiUyhIiA19ZBu8mywf42nuyX0eljCt3Lkd1eUQEZoOZrA2OqfoQ5CahrByKzFgDOseim0jYBmXcsLAyccCJBTZPEjyzPb5hZKWOxT6dytu82IahtpDm75YDktQvdNjWjIQH1BAceSZKVVP136vL8XhMm1OHKn2gUykFUwN8JMLBqmnvGuwGRoWUoNZY2PnmS5gQMcRYHxLyHuDo8bawaqMNYtonWu2YIOzeB6RwHuGcnfio47UPM5tOjszQBNq7mcofCNjou83emcY81svsI2YDS3SyloBNx5FBVBc96HZEOXUO3W1fIF5jtEMW6KW7D63tH0FCVTZupPlA9aIoN2sf1Bw31ggLFoDO0Mx18ooheEdKgZBCqdqpasaHFhxBrEaRgAuIDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH272} \\& \lambda \partial_k (\partial_k v_{0j} + \partial_j v_{0k}) + \mu \partial_j \partial_k v_{0k} - \partial_k (q (\RR^{-1}_0 )) = 0 \inon{on~$\Gammaf$} , \llabel{5dqmWWBMuHfv90ySPtGhFFdYJJLf3Apk5CkSzr0KbVdisQkuSAJEnDTYkjPAEMua0VCtCFfz9R6Vht8UacBe7opAnGa7AbLWjHcsnARGMbn7a9npaMflftM7jvb200TWxUC4lte929joZrAIuIao1ZqdroCL55LT4Q8kNyvsIzPx4i59lKTq2JBBsZbQCECtwarVBMTH1QR6v5srWhRrD4rwf8ik7KHEgeerFVTErONmlQ5LR8vXNZLB39UDzRHZbH9fTBhRwkA2n3pg4IgrHxdfEFuz6REtDqPdwN7HTVtcE18hW6yn4GnnCE3MEQ51iPsGZ2GLbtCSthuzvPFeE28MM23ugTCdj7z7AvTLa1AGLiJ5JwWCiDPyMqa8tAKQZ9cfP42kuUzV3h6GsGFoWm9hcfj51dGtWyZzC5DaVt2Wi5IIsgDB0cXLM1FtExERIZIZ0RtQUtWcUCmFmSjxvWpZcgldopk0D7aEouRkuIdOZdWFORuqbPY6HkWOVi7FuVMLWnxpSaNomkrC5uIZK9CjpJyUIeO6kgb7tr2SCYx5F11S6XqOImrs7vv0uvAgrb9hGPFnkRMj92HgczJ660kHbBBlQSIOY7FcX0cuyDlLjbU3F6vZkGbaKaMufjuxpn4Mi457MoLNW3DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH273} \end{align} for $j=1,2,3$. 
Then the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} with the boundary conditions \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH262}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH267} and the initial data \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH265} admits a unique solution \begin{align*} & v \in K^{s+1} ((0,T)\times \Omegaf) \\& \RR \in H^1 ((0,T), H^s (\Omegaf)) \\& w \in C([0,T], H^{s+1/4 -\epsilon_0} (\Omegae)) \\& w_t \in C([0,T], H^{s-3/4 -\epsilon_0}(\Omegae)) , \end{align*} for some $T >0$, where the corresponding norms are bounded by a function of the norms of the initial data. \end{Theorem} \colb \par \begin{Remark} \label{R01} {\rm We assume $v_0 \in H^s (\Omegaf)$ where $s\in (2, 2+\epsilon_0]$ with $\epsilon_0 \in (0, 1/2)$ since the elliptic regularity for $\Vert v\Vert_{L^2_t H^4_x}$ in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH67} requires that $\RR^{-1} \in L^\infty H^2 ((0,T)\times \Omegaf)$. From the density equation \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}, we deduce that the regularity for the initial velocity must be at least in $H^2 (\Omegaf)$, showing the optimality of the range $s\geq2$. It would be interesting to find whether the statement of the theorem holds for the borderline case $s=2$. \square } \end{Remark} \par The proof of the theorem is given in Section~\ref{sec05} below. For simplicity, we present the proof for the pressure law $q(\RR) = \RR$, noting that the general case follows completely analogously. 
\par \startnewsection{Space-time trace, interpolation, and hidden regularity inequalities}{sec03} In this section, we provide four auxiliary results needed in the fixed point arguments. The first lemma provides an estimate for the trace in a space-time norm and is an essential ingredient when constructing solutions to the nonlinear parabolic-wave system in Section~\ref{sec05} below. \par \cole \begin{Lemma} \label{L06} Let $r>1/2$ and $\theta\geq0$. If $u\in L^{2}((0,T)\scl H^{r}(\Omegaf)) \cap H^{2\theta r/(2r-1)} ((0,T), L^{2}(\Omegaf))$, then $ u \in H^{\theta} ((0,T), L^{2}(\Gammac)) $, and for all $\epsilon \in(0,1]$ we have the inequality \begin{align} \begin{split} \Vert u\Vert_{H^{\theta}((0,T)\scl L^{2}(\Gammac))} \leq \epsilon \Vert u \Vert_{H^{2\theta r/(2r-1)}((0,T)\scl L^{2}(\Omegaf))} + C_{\epsilon}\Vert u \Vert_{L^{2}((0,T)\scl H^{r}(\Omegaf))} , \end{split} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH65} \end{align} where $C_{\epsilon}>0$ is a constant depending on $\epsilon$. \end{Lemma} \colb \par \begin{proof}[Proof of Lemma~\ref{L06}] It is sufficient to prove the inequality for $u\in C_{0}^{\infty}({\mathbb R} \times {\mathbb R}^3)$ with the trace taken on the set \begin{equation} \Gamma=\bigl\{ (t,x_1, x_2, x_3)\in{\mathbb R} \times {\mathbb R}^3 : x_3=0
\bigr\} . \llabel{eImcj6OOSe59afAhglt9SBOiFcYQipj5uN19NKZ5Czc231wxGx1utgJB4ueMxx5lrs8gVbZs1NEfI02RbpkfEOZE4eseo9teNRUAinujfeJYaEhns0Y6XRUF1PCf5eEAL9DL6a2vmBAU5AuDDtyQN5YLLWwPWGjMt4hu4FIoLCZLxeBVY5lZDCD5YyBwOIJeHVQsKobYdqfCX1tomCbEj5m1pNx9pnLn5A3g7Uv777YUgBRlNrTyjshaqBZXeAFtjyFlWjfc57t2fabx5Ns4dclCMJcTlqkfquFDiSdDPeX6mYLQzJzUmH043MlgFedNmXQPjAoba07MYwBaC4CnjI4dwKCZPO9wx3en8AoqX7JjN8KlqjQ5cbMSdhRFstQ8Qr2ve2HT0uO5WjTAiiIWn1CWrU1BHBMvJ3ywmAdqNDLY8lbxXMx0DDvco3RL9Qz5eqywVYqENnO8MH0PYzeVNi3yb2msNYYWzG2DCPoG1VbBxe9oZGcTU3AZuEKbkp6rNeTX0DSMczd91nbSVDKEkVazIqNKUQapNBP5B32EyprwPFLvuPiwRPl1GTdQBZEAw3d90v8P5CPAnX4Yo2q7syr5BW8HcT7tMiohaBW9U4qrbumEQ6XzMKR2BREFXk3ZOMVMYSw9SF5ekqDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH322} \end{equation} the general case is settled by the partition of unity and straightening of the boundary. Since it should be clear from the context, we usually do not distinguish in notation between a function and its trace. Denoting by $\hat u$ the Fourier transform of $u$ with respect to $(t, x_1,x_2,x_3)$, we have \begin{equation} \Vert u \Vert^{2}_{H^{\theta}((0,T)\scl L^{2}(\Gamma))} \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^{\infty} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^{\infty} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^{\infty} (1+ \tau^{2})^{\theta} \left|\fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^\infty\hat{u}(\xi_{1},\xi_{2}, \xi_3, \tau)\,d\xi_3 \right|^{2} \, d \tau \, d \xi_{1} \, d \xi_{2} . 
\llabel{0myNKGnH0qivlRA18CbEzidOiuyZZ6kRooJkLQ0EwmzsKlld6KrKJmRxls12KG2bv8vLxfJwrIcU6Hxpq6pFy7OimmodXYtKt0VVH22OCAjfdeTBAPvPloKQzLEOQlqdpzxJ6JIzUjnTqYsQ4BDQPW6784xNUfsk0aM78qzMuL9MrAcuVVKY55nM7WqnB2RCpGZvHhWUNg93F2eRT8UumC62VH3ZdJXLMScca1mxoOO6oOLOVzfpOBOX5EvKuLz5sEW8a9yotqkcKbDJNUslpYMJpJjOWUy2U4YVKH6kVC1Vx1uvykOyDszo5bzd36qWH1kJ7JtkgV1JxqrFnqmcUyZJTp9oFIcFAk0ITA93SrLaxO9oUZ3jG6fBRL1iZ7ZE6zj8G3MHu86Ayjt3flYcmTkjiTSYvCFtJLqcJPtN7E3POqGOKe03K3WV0epWXDQC97YSbADZUNp81GFfCPbj3iqEt0ENXypLvfoIz6zoFoF9lkIunXjYyYL52UbRBjxkQUSU9mmXtzIHOCz1KH49ez6PzqWF223C0Iz3CsvuTR9sVtQCcM1eopDPy2lEEzLU0USJtJb9zgyGyfiQ4foCx26k4jLE0ula6aSIrZQHER5HVCEBL55WCtB2LCmveTDzVcp7URgI7QuFbFDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH226} \end{equation} Denote by \begin{equation} \gamma=\frac{2r-1}{2\theta} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH114} \end{equation} the quotient between the exponents $r$ and $2\theta r/(2r-1)$ in~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH65}. 
Then, with $\lambda>0$ to be determined below, we have \begin{align} \begin{split} & \Vert u \Vert^{2}_{H^{\theta}((0,T)\scl L^{2}(\Gamma))} \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\mathbb{R}^{3}} (1+ \tau^{2})^{\theta} \left| \fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^{\infty} \hat{u}(\xi_{1},\xi_{2}, \xi_{3}, \tau) \, d \xi_{3} \right|^{2} \, d \tau \, d \xi_{1} \, d \xi_{2} \\&\indeq \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\mathbb{R}^{3}} (1+ \tau^{2})^{\theta} \left( \fgsdfgwertsfsrsfgsdfgfsdfasdf^{\infty}_{-\infty} \frac{(1+(\xi^{2}_{1}+ \xi^{2}_{2})^{\gamma} + \epsilon^{-2} \xi^{2\gamma}_{3}+\tau^{2})^{\lambda/2}}{(1+(\xi^{2}_{1} + \xi^{2}_{2})^{\gamma} +\epsilon^{-2} \xi^{2\gamma}_{3}+\tau^{2})^{\lambda/2}} |\hat{u}| \, d \xi_{3} \right)^{2} \, d \tau \, d \xi_{1} \, d \xi_{2} \\&\indeq \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\mathbb{R}^{3}} (1+ \tau^{2})^{\theta} \left(\fgsdfgwertsfsrsfgsdfgfsdfasdf^{\infty}_{-\infty} \bigl(1+(\xi^{2}_{1}+ \xi^{2}_{2})^{\gamma}+ \epsilon^{-2} \xi^{2\gamma}_{3}+\tau^{2}\bigr)^{\lambda} |\hat{u}|^{2} \, d \xi_{3}\right) \\&\indeq\indeq\indeq\indeq \times \left( \fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^{\infty} \frac{d\xi_3} {(1+(\xi^{2}_{1}+ \xi^{2}_{2})^{\gamma}+ \epsilon^{-2} \xi^{2\gamma}_{3}+\tau^{2})^{\lambda}} \right)\, d \tau \, d \xi_{1} \, d \xi_{2} , \end{split} 
\llabel{w9VTxJwGrzsVWM9sMJeJNd2VGGFsiWuqC3YxXoJGKwIo71fgsGm0PYFBzX8eX7pf9GJb1oXUs1q06KPLsMucNytQbL0Z0Qqm1lSPj9MTetkL6KfsC6ZobYhc2quXy9GPmZYj1GoeifeJ3pRAfn6Ypy6jNs4Y5nSEpqN4mRmamAGfYHhSaBrLsDTHCSElUyRMh66XU7hNzpZVC5VnV7VjL7kvWKf7P5hj6t1vugkLGdNX8bgOXHWm6W4YEmxFG4WaNEbGKsv0p4OG0NrduTeZaxNXqV4BpmOdXIq9abPeDPbUZ4NXtohbYegCfxBNttEwcDYSD637jJ2ms6Ta1J2xZPtKnPwAXAtJARc8n5d93TZi7q6WonEDLwWSzeSueYFX8cMhmY6is15pXaOYBbVfSChaLkBRKs6UOqG4jDVabfbdtnyfiDBFI7uhB39FJ6mYrCUUTf2X38J43KyZg87igFR5Rz1t3jH9xlOg1h7P7Ww8wjMJqH3l5J5wU8eH0OogRCvL7fJJg1ugRfMXIGSuEEfbh3hdNY3x197jRqePcdusbfkuJhEpwMvNBZVzLuqxJ9b1BTfYkRJLjOo1aEPIXvZAjvXnefhKGsJGawqjtU7r6MPoydEH26203mGiJhFnTNCDBYlnPoKO6PDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH230} \end{align} where we used the Cauchy-Schwartz inequality in $\xi_3$. Note that, using a substitution, \begin{equation} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{-\infty}^{\infty} \frac{dx}{(A^2+\epsilon^{-2}x^{2\gamma})^{\lambda}} \les \epsilon^{1/\gamma} A^{1/\gamma-2\lambda} \comma A,\epsilon>0 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH315} \end{equation} provided $\lambda$ satisfies $ 2 \gamma \lambda >1 $, which is by \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH114} equivalent to \begin{equation} \lambda > \frac{\theta}{2r-1} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH392} \end{equation} Note that $2\gamma\lambda>1$ implies $1/\gamma-2\lambda<0$ for the exponent of $A$ in~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH315}. 
Now we use \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH315} for the integral in $\xi_3$ with $A=(1+(\xi^{2}_{1}+ \xi^{2}_{2})^{\gamma}+ \tau^{2})^{1/2}$, while noting that \begin{align} \begin{split} (1+\tau^2)^{\theta} A^{1/\gamma-2\lambda} & = \frac{ (1+\tau^2)^{\theta} }{ (1+(\xi_1^{2}+ \xi_2^2)^{\gamma}+ \tau^{2})^{\lambda-1/2\gamma} } \leq (1+\tau^2)^{\theta-\lambda+1/2\gamma} \\& \leq (1+(\xi^{2}_{1}+ \xi^{2}_{2})^{\gamma}+ \epsilon^{-2} \xi^{2\gamma}_{3}+\tau^{2})^{\theta-\lambda+1/2\gamma} , \end{split} \llabel{8Th sw ELzX U3X7 Ebd1Kd Z7 v 1rN 3Gi irR XG KWK0 99ov BM0FDJ Cv k opY NQ2 aN9 4Z 7k0U nUKa mE3OjU 8D F YFF okb SI2 J9 V9gV lM8A LWThDP nP u 3EL 7HP D2V Da ZTgg zcCC mbvc70 qq P cC9 mt6 0og cr TiA3 HEjw TK8ymK eu J Mc4 q6d Vz2 00 XnYU tLR9 GYjPXv FO V r6W 1zU K1W bP ToaW JJuK nxBLnd 0f t DEb Mmj 4lo HY yhZy MjM9 1zQS4p 7z 8 eKa 9h0 Jrb ac ekci rexG 0z4n3x z0 Q OWS vFj 3jL hW XUIU 21iI AwJtI3 Rb W a90 I7r zAI qI 3UEl UJG7 tLtUXz w4 K DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH316} \end{align} provided $ \lambda-\fractext{1}{2\gamma}\leq \theta$, i.e., \begin{equation} \lambda \leq \frac{2 r \theta}{2r-1} . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH118} \end{equation} Under the condition \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH118}, we thus obtain \begin{align} \begin{split} & \Vert u \Vert^{2}_{H^{\theta}((0,T)\scl L^{2}(\Gammac))} \les \epsilon^{1/\gamma} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\mathbb{R}\times{\mathbb R}^{3}} \bigl(1+(\xi^{2}_{1}+ \xi^{2}_{2})^{\gamma} + \epsilon^{-2} \xi^{2\gamma}_{3}+\tau^{2}\bigr)^{\theta+1/2\gamma} |\hat{u}|^{2} \, d \xi_{3} \, d \xi_{2}\, d \xi_{1} \, d \tau \\&\indeq\indeq \les \epsilon^{1/\gamma} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\mathbb{R}\times{\mathbb R}^{3}} \bigl(1+\epsilon^{-2}(\xi^{2}_{1}+ \xi^{2}_{2}+\xi_{3}^2)^{\gamma}+\tau^{2}\bigr)^{\theta+1/2\gamma} |\hat{u}|^{2} \, d \xi_{3} \, d \xi_{2}\, d \xi_{1} \, d \tau \\&\indeq\indeq \les \epsilon^{-2\theta} \Vert u \Vert^{2}_{L^{2}((0,T)\scl H^{\gamma\theta+1/2}(\Omegaf))} + \epsilon^{1/\gamma} \Vert u \Vert^{2}_{H^{\theta+1/2\gamma}((0,T)\scl L^{2}(\Omegaf))} , \end{split} \llabel{QNE TvX zqW au jEMe nYlN IzLGxg B3 A uJ8 6VS 6Rc PJ 8OXW w8im tcKZEz Ho p 84G 1gS As0 PC owMI 2fLK TdD60y nH g 7lk NFj JLq Oo Qvfk fZBN G3o1Dg Cn 9 hyU h5V SP5 z6 1qvQ wceU dVJJsB vX D G4E LHQ HIa PT bMTr sLsm tXGyOB 7p 2 Os4 3US bq5 ik 4Lin 769O TkUxmp I8 u GYn fBK bYI 9A QzCF w3h0 geJftZ ZK U 74r Yle ajm km ZJdi TGHO OaSt1N nl B 7Y7 h0y oWJ ry rVrT zHO8 2S7oub QA W x9d z2X YWB e5 Kf3A LsUF vqgtM2 O2 I dim rjZ 7RN 28 4KGY trVa WW4nDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH331} \end{align} for all $\epsilon\in(0,1]$. 
Using \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH114}, we get \begin{align} \begin{split} & \Vert u \Vert^{2}_{H^{\theta}((0,T)\scl L^{2}(\Gammac))} \les \epsilon^{-2\theta} \Vert u \Vert^{2}_{L^{2}((0,T)\scl H^{r}(\Omegaf))} + \epsilon^{2\theta/(2r-1)} \Vert u \Vert^{2}_{H^{2\theta r/(2r-1)}((0,T)\scl L^{2}(\Omegaf))} , \end{split} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH319} \end{align} for all $\epsilon\in(0,1]$. Finally, note that $\lambda=2r\theta/(2r-1)$ satisfies \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH392} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH118} under the condition $r>1/2$. \end{proof} \par Optimizing $\epsilon\in(0,1]$ in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH319} by using \begin{equation} \epsilon = \left( \frac{ \Vert u \Vert_{L^{2}((0,T)\scl H^{r}(\Omegaf))} }{ \Vert u \Vert_{L^{2}((0,T)\scl H^{r}(\Omegaf))} + \Vert u \Vert_{H^{2\theta r/(2r-1)}((0,T)\scl L^{2}(\Omegaf))} } \right)^{(2r-1)/2r \theta} , \llabel{TZ XV b RVo Q77 hVL X6 K2kq FWFm aZnsF9 Ch p 8Kx rsc SGP iS tVXB J3xZ cD5IP4 Fu 9 Lcd TR2 Vwb cL DlGK 1ro3 EEyqEA zw 6 sKe Eg2 sFf jz MtrZ 9kbd xNw66c xf t lzD GZh xQA WQ KkSX jqmm rEpNuG 6P y loq 8hH lSf Ma LXm5 RzEX W4Y1Bq ib 3 UOh Yw9 5h6 f6 o8kw 6frZ wg6fIy XP n ae1 TQJ Mt2 TT fWWf jJrX ilpYGr Ul Q 4uM 7Ds p0r Vg 3gIE mQOz TFh9LA KO 8 csQ u6m h25 r8 WqRI DZWg SYkWDu lL 8 Gpt ZW1 0Gd SY FUXL zyQZ hVZMn9 am P 9aE Wzk au0 6d ZghM yDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH388} \end{equation} we obtain a trace inequality \begin{align} \begin{split} \Vert u\Vert_{H^{\theta}((0,T)\scl L^{2}(\Gammac))} \les \Vert u \Vert_{L^{2}((0,T)\scl H^{r}(\Omegaf))}^{1/2r} \Vert u \Vert_{H^{2\theta r/(2r-1)}((0,T)\scl L^{2}(\Omegaf))}^{(2r-1)/2r} + \Vert u \Vert_{L^{2}((0,T)\scl H^{r}(\Omegaf))} , 
\end{split} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH137} \end{align} which is a more explicit version of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH65}. \par The second lemma provides a space-time interpolation inequality needed in several places in Sections~\ref{sec05} and \ref{sec06} below. \par \cole \begin{Lemma} \label{L12} Let $\alpha,\beta>0$. If $u\in H^{\alpha}((0,T)\scl L^2(\Omegaf)) \cap L^2((0,T), H^{\beta}(\Omegaf))$, then $ u \in H^{\theta} ((0,T), H^{\lambda}(\Omegaf)) $ and for all $\epsilon>0$, we have the inequality \begin{align} \begin{split} \Vert u\Vert_{H^{\theta} ((0,T), H^{\lambda}(\Omegaf))} \leq \epsilon \Vert u \Vert_{H^{\alpha}((0,T)\scl L^2(\Omegaf))} + C_{\epsilon}\Vert u \Vert_{L^2((0,T), H^{\beta}(\Omegaf))} , \end{split} \llabel{m3R jfdePG ln 8 s7x HYC IV9 Hw Ka6v EjH5 J8Ipr7 Nk C xWR 84T Wnq s0 fsiP qGgs Id1fs5 3A T 71q RIc zPX 77 Si23 GirL 9MQZ4F pi g dru NYt h1K 4M Zilv rRk6 B4W5B8 Id 3 Xq9 nhx EN4 P6 ipZl a2UQ Qx8mda g7 r VD3 zdD rhB vk LDJo tKyV 5IrmyJ R5 e txS 1cv EsY xG zj2T rfSR myZo4L m5 D mqN iZd acg GQ 0KRw QKGX g9o8v8 wm B fUu tCO cKc zz kx4U fhuA a8pYzW Vq 9 Sp6 CmA cZL Mx ceBX Dwug sjWuii Gl v JDb 08h BOV C1 pni6 4TTq Opzezq ZB J y5o KS8 BhH sDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH338} \end{align} for all $\theta\in(0,\alpha)$ and $\lambda\in(0,\beta)$ such that \begin{equation} \frac{\theta}{\alpha} + \frac{\lambda}{\beta} \leq 1 , \llabel{d nKkH gnZl UCm7j0 Iv Y jQE 7JN 9fd ED ddys 3y1x 52pbiG Lc a 71j G3e uli Ce uzv2 R40Q 50JZUB uK d U3m May 0uo S7 ulWD h7qG 2FKw2T JX z BES 2Jk Q4U Dy 4aJ2 IXs4 RNH41s py T GNh hk0 w5Z C8 B3nU Bp9p 8eLKh8 UO 4 fMq Y6w lcA GM xCHt vlOx MqAJoQ QU 1 e8a 2aX 9Y6 2r lIS6 dejK Y3KCUm 25 7 oCl VeE e8p 1z UJSv bmLd Fy7ObQ FN l J6F RdF kEm qM N0Fd NZJ0 8DYuq2 pL X JNz 4rO ZkZ X2 IjTD 1fVt z4BmFI Pi 0 GKD R2W PhO zH zTLP lbAE OT9XW0 gb T Lb3 
XDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH326} \end{equation} where $C_{\epsilon} >0$ is a constant depending on $\epsilon$. \end{Lemma} \colb \par \begin{proof}[Proof of Lemma~\ref{L12}] Using a partition of unity, straightening of the boundary, and a Sobolev extension, it is sufficient to prove the inequality in the case $\Omegaf={\mathbb R}^3$ and $u\in C_{0}^{\infty}({\mathbb R}\times{\mathbb R}^3)$. Then, using the Parseval identity and the definition of the Sobolev norms, we only need to prove \begin{align} \begin{split} (1+|\tau|^{2\theta})(1 + |\xi|^{2 \lambda}) \leq \epsilon^2 (1+|\tau|^{2\alpha}) + C_{\epsilon} (1+|\xi|^{2\beta}) , \end{split} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH328} \end{align} for $\tau \in {\mathbb R}$ and $\xi\in{\mathbb R}^{3}$. Finally, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH328} follows from Young's inequality. \end{proof} \par In the last part of this section, we address the regularity for the wave equation. We first recall the hidden regularity result for the wave equation \begin{align} & w_{tt} - \Delta{w}= 0 \inon{in~ $ (0,T)\times\Omegae $}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH14} \\& w = \psi \inon{on~$ (0,T)\times \Gammac $} , \\& w \text{~~periodic~in~the~$y_1$~and~$y_2$~directions}
, \end{align} and the initial data \begin{equation} (w,w_t)(0,\cdot)= (w_0,w_1) \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH358} \end{equation} (cf.~\cite{LLT}). \par \cole \begin{Lemma}[\cite{LLT}] \label{L03} Assume that $(w_{0},w_{1}) \in H^{\beta}( \Omegae)\times H^{\beta -1}( \Omegae)$, where $\beta \geq1$, and $$\psi \in C([0,T]\scl H^{\beta-1/2}(\Gammac)) \cap H^{\beta}((0,T)\times \Gammac).$$ Then there exists a solution $(w, w_{t}) \in C([0,T]\scl H^{\beta}( \Omegae) \times H^{\beta-1}( \Omegae))$ of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH14}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH358}, which satisfies the estimate \begin{align} \begin{split} &\Vert w \Vert_{C([0,T]\scl H^{\beta}( \Omegae))} + \Vert w_{t} \Vert_{C([0,T]\scl H^{\beta-1}( \Omegae))} + \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{H^{\beta-1}([0,T] \times \Gammac)} \\&\indeq \les \Vert w_{0} \Vert_{H^{\beta}( \Omegae)} + \Vert w_{1} \Vert_{H^{\beta-1}( \Omegae)} + \Vert \psi \Vert_{H^{\beta}([0,T] \times \Gammac)} , \end{split} \llabel{RQ qGG 8o 4TPE 6WRc uMqMXh s6 x Ofv 8st jDi u8 rtJt TKSK jlGkGw t8 n FDx jA9 fCm iu FqMW jeox 5Akw3w Sd 8 1vK 8c4 C0O dj CHIs eHUO hyqGx3 Kw O lDq l1Y 4NY 4I vI7X DE4c FeXdFV bC F HaJ sb4 OC0 hu Mj65 J4fa vgGo7q Y5 X tLy izY DvH TR zd9x SRVg 0Pl6Z8 9X z fLh GlH IYB x9 OELo 5loZ x4wag4 cn F aCE KfA 0uz fw HMUV M9Qy eARFe3 Py 6 kQG GFx rPf 6T ZBQR la1a 6Aeker Xg k blz nSm mhY jc z3io WYjz h33sxR JM k Dos EAA hUO Oz aQfK Z0cn 5kqYPn W7DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH339} \end{align} where the implicit constant depends on $\Omegae$. \end{Lemma} \colb \par In the final lemma of this section, we recall an essential trace regularity result for the wave equation from~\cite{RV}. 
\par \cole \begin{Lemma}[\cite{RV}] \label{L13} Assume that $(w_0, w_1) \in H^{\beta+2} (\Omegae) \times H^{\beta+1} (\Omegae)$, where $0<\beta <5/2$, and \begin{align*} \psi \in L^2((0,T), H^{\beta +2} (\Gammac)) \cap H^{\beta/2+1}((0,T), H^{\beta/2+1}(\Gammac)), \end{align*} with the compatibility condition $\partial_t \psi|_{t=0} = w_1$. Then there exists a solution $w$ of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH14}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH358} such that \begin{align} \begin{split} \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{L^2((0,T), H^{\beta+1} (\Gammac))} & \les \Vert w_0 \Vert_{H^{\beta+2} (\Omegae)} + \Vert w_1 \Vert_{H^{\beta+1} (\Omegae)} + \Vert \psi \Vert_{L^2((0,T), H^{\beta+2}(\Gammac))} \\&\indeq + \Vert \psi \Vert_{H^{\beta/2 +1}((0,T), H^{\beta/2 +1}(\Gammac))} , \llabel{ 1 vCT 69a EC9 LD EQ5S BK4J fVFLAo Qp N dzZ HAl JaL Mn vRqH 7pBB qOr7fv oa e BSA 8TE btx y3 jwK3 v244 dlfwRL Dc g X14 vTp Wd8 zy YWjw eQmF yD5y5l DN l ZbA Jac cld kx Yn3V QYIV v6fwmH z1 9 w3y D4Y ezR M9 BduE L7D9 2wTHHc Do g ZxZ WRW Jxi pv fz48 ZVB7 FZtgK0 Y1 w oCo hLA i70 NO Ta06 u2sY GlmspV l2 x y0X B37 x43 k5 kaoZ deyE sDglRF Xi 9 6b6 w9B dId Ko gSUM NLLb CRzeQL UZ m i9O 2qv VzD hz v1r6 spSl jwNhG6 s6 i SdX hob hbp 2u sEdl 95LP ADKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH03} \end{split} \end{align} where the implicit constant depends on $\Omegae$. 
\end{Lemma} \colb \par \startnewsection{The nonhomogeneous parabolic problem}{sec04} In this section, we consider the parabolic problem \begin{align} \partial_t u - \lambda \RR \dive (\nabla u + (\nabla u)^T) - \mu \RR \nabla \dive u & = f \inon{in~$(0,T) \times\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} \end{align} with the nonhomogeneous boundary conditions and the initial data \begin{align} & \lambda (\partial_k u_j + \partial_j u_k) \nu^k + \mu \partial_k u_k \nu^j = h_j \inon{on~$(0,T)\times\Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH44} \\ & u =0 \inon{on~$(0,T)\times \Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH45} \\& u~~\text{periodic in the $y_1$~and~$y_2$ directions}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH320} \\& u(0) = u_0 \inon{in~$\Omegaf$} ,
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46} \end{align} for $j=1,2,3$. To state the maximal regularity for \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46}, we consider the homogeneous version when \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH44}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46} are replaced by \begin{align} & \lambda (\partial_k u_j + \partial_j u_k) \nu^k + \mu \partial_k u_k \nu^j = 0 \inon{on~$(0,T)\times \Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH02} \\ & u = 0 \inon{on~$(0,T)\times \Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH17} \\& u~~\text{periodic in the $y_1$~and~$y_2$ directions}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH317} \\& u(0) = 0 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH18} \end{align} for $j=1,2,3$. \par \cole \begin{Lemma} \label{L01} Assume that \begin{align} (\RR, \RR^{-1}) \in L^\infty ([0,T], H^2(\Omegaf)) \times L^\infty ([0,T], H^2(\Omegaf)) \cap H^1 ([0,T], L^\infty (\Omegaf)) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH05} \end{align} for some $T>0$. 
Then the parabolic problem \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} with the boundary conditions and the initial data \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH02}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH18} admits a solution $u$ satisfying \begin{align} \begin{split} \Vert u \Vert_{K^2 ( (0, T)\times \Omegaf)} \les \Vert f\Vert_{K^0 ((0,T) \times \Omegaf)} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH50} \end{split} \end{align} and \begin{align} \begin{split} \Vert u \Vert_{K^4 ( (0, T)\times \Omegaf)} \les \Vert f\Vert_{K^2 ((0,T) \times \Omegaf)} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH59} \end{split} \end{align} where the implicit constants depend on the norms of $\RR$ and $\RR^{-1}$ in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH05}. \end{Lemma} \colb \par \begin{proof} Analogously to \cite[Theorem~3.2]{LM}, the parabolic problem \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} admits a solution $u\in K^2((0,T) \times\Omegaf)$ if $f \in K^0((0,T) \times \Omegaf)$ and $u \in K^4((0,T) \times \Omegaf)$ if $f \in K^2((0,T) \times \Omegaf)$. In the remainder of the proof we shall prove the regularity. Taking the $L^2$-inner product of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} with $u$, we arrive at \begin{align} \frac{1}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u|^2 - \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_j \partial_k (\partial_k u_j + \partial_j u_k) - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_j \partial_j \partial_k u_k = \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} f u . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH25} \end{align} For the second and third terms on the left side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH25}, we integrate by parts with respect to $\partial_k$ and $\partial_j$ to get \begin{align} \begin{split} -\lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_j \partial_k (\partial_k u_j + \partial_j u_k) & =
\lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Gammac} \RR u_j (\partial_k u_j + \partial_j u_k) \nu^k + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_j (\partial_k u_j + \partial_{j} u_k ) \\ &\indeq + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_j \partial_k \RR (\partial_k u_j + \partial_{j} u_k ) \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH28} \end{split} \end{align} and \begin{align} \begin{split} - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_j \partial_j \partial_k u_k & = \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Gammac} \RR u_j \partial_k u_k \nu^j + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_j \partial_k u_k + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_j \partial_j \RR \partial_k u_k . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH29} \end{split} \end{align} Inserting \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH28}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH29} into \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH25} and appealing to \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH02}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH17}, we get \begin{align} \begin{split} & \frac{1}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u |^2 + \lambda\fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_j (\partial_k u_j+\partial_j u_k) + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_j \partial_k u_k \\ &\indeq = \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} f u - \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_j \partial_k \RR (\partial_k u_j + \partial_{j} u_k ) - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_j \partial_j \RR \partial_k u_k \\ &\indeq \les \Vert f\Vert_{L^2}^2 + \Vert u \Vert_{L^2}^2 + \Vert u\Vert_{L^4} 
\Vert \nabla \RR \Vert_{L^4} \Vert \nabla u\Vert_{L^2} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH30} \end{split} \end{align} where the last inequality follows from H\"older's and Young's inequalities. Note that for any $v \in H^1(\Omegaf)$, using the Sobolev and Young's inequalities, we have \begin{align} \Vert v\Vert_{L^4} \les \Vert v\Vert_{H^1}^{3/4} \Vert v\Vert_{L^2}^{1/4} \les \epsilon \Vert v\Vert_{H^1} + C_{\epsilon} \Vert v\Vert_{L^2} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH330} \end{align} for any $\epsilon \in (0,1]$, where $C_\epsilon>0$ is a constant depending on $\epsilon$. We integrate \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH30} in time from $0$ to $t$ and use \begin{equation} (\partial_k u_j+\partial_j u_k) \partial_k u_j = \frac12 \sum_{j,k=1}^{3} (\partial_k u_j+\partial_j u_k)^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH06} \end{equation} obtaining \begin{align} \begin{split} & \Vert u(t) \Vert_{L^2}^2 + \sum_{j,k=1}^3 \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR ( \partial_k u_j + \partial_j u_k )^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR |\partial_k u_k|^2 \\ &\indeq \les \Vert f \Vert_{L^2_t L^2_x}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u \Vert_{L^2}^2 + \epsilon \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u\Vert_{H^1}^2 + C_{\epsilon} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u\Vert_{L^2} \Vert u\Vert_{H^1} \\&\indeq \les \Vert f \Vert_{L^2_t L^2_x}^2 + (\epsilon + C_\epsilon \bar{\epsilon} ) \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u \Vert_{H^1}^2 + C_{\epsilon,\bar{\epsilon}} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u\Vert_{L^2}^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH32} \end{split} \end{align} for any $\epsilon, \bar{\epsilon} \in (0,1]$, 
where we used \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH330} and Young's inequality. By Korn's inequality, we get \begin{align} \begin{split} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u\Vert_{H^1}^2 \les \sum_{j,k=1}^3 \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR (\partial_k u_j + \partial_j u_k)^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u\Vert_{L^2}^2 . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH33} \end{split} \end{align} From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH32}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH33} it follows that \begin{align} \begin{split} \Vert u(t) \Vert_{L^2}^2 + \Vert u\Vert_{L^2_t H^1_x}^2 & \les \Vert f \Vert_{L^2_t L^2_x}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u \Vert_{L^2}^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH35} \end{split} \end{align} by choosing $\epsilon, \bar{\epsilon}>0$ sufficiently small. By Gronwall's inequality, we obtain \begin{align} \begin{split} \Vert u(t)\Vert_{L^2}^2 & \les \Vert f\Vert_{L^2_t L^2_x}^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH34} \end{split} \end{align} and then, after using \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH34} in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH35}, we arrive at \begin{align} \begin{split} \Vert u\Vert_{L^2_t H^1_x}^2 & \les \Vert f\Vert_{L^2_t L^2_x}^2 . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH36} \end{split} \end{align} \par Next, we take the $L^2$-inner product of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} with $u_t$, obtaining \begin{align} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u_t|^2 - \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_k (\partial_k u_j + \partial_j u_k) - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_j \partial_k u_k = \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} f u_t . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH07} \end{align} Then, proceeding as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH28}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH29}, we get \begin{align} \begin{split} -\lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_k (\partial_k u_j + \partial_j u_k) & = \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Gammac} \RR u_{tj} (\partial_k u_j + \partial_j u_k) \nu^k + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_{tj} (\partial_k u_j + \partial_{j} u_k ) \\ &\indeq + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{tj} \partial_k \RR (\partial_k u_j + \partial_{j} u_k ) \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH08} \end{split} \end{align} and \begin{align} \begin{split} - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_j \partial_k u_k & = \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Gammac} \RR u_{tj} \partial_k u_k \nu^j + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_{tj} \partial_k u_k + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{tj} \partial_j \RR \partial_k u_k . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH09} \end{split} \end{align} Inserting \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH08}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH09} into \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH07} and appealing to \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH02}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH17}, we arrive at \begin{align} \begin{split} & \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u_t|^2 + \frac{\lambda}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_j (\partial_k u_j +\partial_{j} u_k ) + \frac{\mu}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_j \partial_k u_k \\ &\indeq = \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} f u_t + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{tj} \partial_k \RR (\partial_k u_j + \partial_{j} u_k ) + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{tj} \partial_j \RR \partial_k u_k
\\ &\indeq\indeq + \frac{\lambda}{2} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR_t \partial_k u_j (\partial_k u_j + \partial_j u_k) + \frac{\mu}{2} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR_t \partial_j u_j \partial_k u_k \\&\indeq \les C_\epsilon \Vert f\Vert_{L^2}^2 + \epsilon \Vert u_t \Vert_{L^2}^2 + \Vert \nabla \RR \Vert_{L^4} \Vert \nabla u \Vert_{L^4} \Vert u_t \Vert_{L^2} + \Vert \RR_t \Vert_{L^\infty} \Vert \nabla u\Vert_{L^2}^2 , \llabel{trBBi bP C wSh pFC CUa yz xYS5 78ro f3UwDP sC I pES HB1 qFP SW 5tt0 I7oz jXun6c z4 c QLB J4M NmI 6F 08S2 Il8C 0JQYiU lI 1 YkK oiu bVt fG uOeg Sllv b4HGn3 bS Z LlX efa eN6 v1 B6m3 Ek3J SXUIjX 8P d NKI UFN JvP Ha Vr4T eARP dXEV7B xM 0 A7w 7je p8M 4Q ahOi hEVo Pxbi1V uG e tOt HbP tsO 5r 363R ez9n A5EJ55 pc L lQQ Hg6 X1J EW K8Cf 9kZm 14A5li rN 7 kKZ rY0 K10 It eJd3 kMGw opVnfY EG 2 orG fj0 TTA Xt ecJK eTM0 x1N9f0 lR p QkP M37 3r0 iA 6EFDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH20} \end{split} \end{align} for any $\epsilon \in (0,1]$, where we used H\"older's and Young's inequalities. 
Integrating in time from $0$ to $t$ and using the Young, Sobolev, and Korn inequalities with \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH330}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH06}, we get \begin{align} \begin{split} & \Vert u_t \Vert_{L^2_t L^2_x }^2 + \Vert u(t) \Vert_{H^1}^2 \\&\indeq \les C_\epsilon \Vert f \Vert_{L^2_t L^2_x}^2 + \epsilon \Vert u_t\Vert_{L^2_t L^2_x}^2 + \Vert u(t) \Vert_{L^2}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t (\bar{\epsilon} \Vert u\Vert_{H^2} + C_{\bar{\epsilon}} \Vert u\Vert_{H^1} ) \Vert u_t\Vert_{L^2} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u\Vert_{H^1}^2 \\ &\indeq \les C_\epsilon \Vert f \Vert_{L^2_t L^2_x}^2 + (\epsilon + \bar{\epsilon} + \tilde{\epsilon} C_{\bar{\epsilon}}) \Vert u_t \Vert_{L^2_t L^2_x}^2 + \Vert u(t) \Vert_{L^2}^2 + \bar{\epsilon} \Vert u\Vert_{L^2_t H^2_x}^2 + C_{\bar{\epsilon}, \tilde{\epsilon}} \Vert u\Vert_{L^2_t H^1_x}^2 \\&\indeq\indeq + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u\Vert_{H^1}^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH24} \end{split} \end{align} for any $\epsilon, \bar{\epsilon}, \tilde{\epsilon} \in (0,1]$. 
For the space regularity, note that $u$ is the solution of the elliptic problem \begin{align} \begin{split} & -\lambda \dive(\nabla u + (\nabla u)^T) - \mu \nabla \dive u = -\frac{\partial_t u}{\RR} + \frac{f}{\RR} \inon{in~$(0,T)\times\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH67} \end{split} \end{align} with the boundary conditions \begin{align} & \lambda (\partial_k u_j + \partial_j u_k)\nu^k + \mu \partial_k u_k \nu^j = 0 \inon{on~$(0,T) \times \Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH68} \\ & u = 0 \inon{on~$(0,T) \times\Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH69} \end{align} for $j=1,2,3$. From the elliptic regularity for \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH67}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH69} it follows that \begin{align} \Vert u\Vert_{H^2} \les \left\Vert \frac{1}{\RR} u_t \right\Vert_{L^2} + \left\Vert \frac{1}{\RR} f\right\Vert_{L^2} \les \Vert u_t \Vert_{L^2} + \Vert f\Vert_{L^2} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH160} \end{align} from where \begin{align} \begin{split} \Vert u\Vert_{L^2_t H^2_x} & \les \Vert u_t \Vert_{L^2_t L^2_x} + \Vert f \Vert_{L^2_t L^2_x} . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH158} \end{split} \end{align} Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH34}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH36}, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH24}, and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH158}, we obtain \begin{align} \begin{split} \Vert u_t \Vert_{L^2_t L^2_x }^2 + \Vert u(t) \Vert_{H^1}^2 & \les \Vert f \Vert_{L^2_t L^2_x}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u \Vert_{H^1}^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH56} \end{split} \end{align} by taking $\epsilon, \bar{\epsilon}, \tilde{\epsilon}>0$ sufficiently small. Using Gronwall's inequality, we arrive at \begin{align} \begin{split} \Vert u(t)\Vert_{H^1}^2 & \leq C \Vert f\Vert_{L^2_t L^2_x}^2 \exp\left(C \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t(\tau) \Vert_{L^\infty} d\tau \right) \leq C \Vert f\Vert_{L^2_t L^2_x}^2 , \llabel{s 1F6f 4mjOB5 zu 5 GGT Ncl Bmk b5 jOOK 4yny My04oz 6m 6 Akz NnP JXh Bn PHRu N5Ly qSguz5 Nn W 2lU Yx3 fX4 hu LieH L30w g93Xwc gj 1 I9d O9b EPC R0 vc6A 005Q VFy1ly K7 o VRV pbJ zZn xY dcld XgQa DXY3gz x3 6 8OR JFK 9Uh XT e3xY bVHG oYqdHg Vy f 5kK Qzm mK4 9x xiAp jVkw gzJOdE 4v g hAv 9bV IHe wc Vqcb SUcF 1pHzol Nj T l1B urc Sam IP zkUS 8wwS a7wVWR 4D L VGf 1RF r59 9H tyGq hDT0 TDlooa mg j 9am png aWe nG XU2T zXLh IYOW5v 2d A rCG sLk s5DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH57} \end{split} \end{align} and thus \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH56} implies \begin{align} \begin{split} \Vert u\Vert_{H^1_t L^2_x}^2 \les \Vert f\Vert_{L^2_t L^2_x}^2 . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH39} \end{split} \end{align} From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH158} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH39} it follows that \begin{align*} \begin{split} \Vert u\Vert_{K^2} & \les \Vert u\Vert_{L^2_t H^2_x} + \Vert u\Vert_{H^1_t L^2_x} \les \Vert f\Vert_{L^2_t L^2_x} , \end{split} \end{align*} completing the proof of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH50}. \par Differentiating \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} in time and taking the $L^2$-inner product with $u_t$, we arrive at \begin{align} \begin{split} & \frac{1}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u_t |^2 - \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_k(\partial_k u_{tj} + \partial_j u_{tk}) - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_j \partial_k u_{tk} \\ &\indeq = \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} f_t u_t + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR_t u_{tj} \partial_k (\partial_k u_j + \partial_j u_k) + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR_t u_{tj} \partial_{j} \partial_k u_k . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH60} \end{split} \end{align} We proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH28}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH29} to obtain \begin{align} \begin{split} - \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_k(\partial_k u_{tj} + \partial_j u_{tk}) & = \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Gammac} \RR u_{tj} (\partial_k u_{tj} + \partial_j u_{tk}) \nu^k + \lambda\fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{tj} \partial_k \RR (\partial_k u_{tj} + \partial_j u_{tk}) \\&\indeq + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_{tj} (\partial_k u_{tj} + \partial_j u_{tk}) \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH61} \end{split} \end{align} and \begin{align} \begin{split} - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR u_{tj} \partial_j \partial_k u_{tk} & = \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Gammac} \RR u_{tj} \partial_k u_{tk} \nu^j + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{tj} \partial_j \RR \partial_k u_{tk} + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_{tj} \partial_k u_{tk} . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH62} \end{split} \end{align} Inserting \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH61}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH62} into \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH60}, we get \begin{align*} \begin{split} & \frac{1}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u_t|^2 + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_{tj} (\partial_k u_{tj} + \partial_j u_{tk}) + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_{tj} \partial_k u_{tk} \\ &\indeq \les \Vert f_t\Vert_{L^2}^2 + \Vert u_{t}\Vert_{L^2}^2 + \Vert \RR_t \Vert_{L^\infty} \Vert u_t \Vert_{L^2} \Vert u\Vert_{H^2} + \Vert u_t\Vert_{L^4} \Vert \nabla \RR\Vert_{L^4} \Vert u_t\Vert_{H^1} , \end{split} \end{align*} where we used Young's, H\"older's, and Sobolev inequalities. 
Integrating in time from $0$ to $t$ and using Young's and Korn's inequalities and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH330}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH06}, we obtain \begin{align} \begin{split} \Vert u_t(t) \Vert_{L^2}^2 + \Vert u_t \Vert_{L^2_t H^1_x}^2 & \les \Vert f\Vert_{H_t^1 L_x^2}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u \Vert_{H^2}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u_t \Vert_{L^2}^2 + (\epsilon + \bar{\epsilon} C_\epsilon ) \Vert u_t\Vert_{L^2_t H^1_x}^2 \\ &\indeq + C_{\epsilon, \bar{\epsilon}} \Vert u_t\Vert_{L^2_t L^2_x}^2 , \llabel{3 pW AuAy DQlF 6spKyd HT 9 Z1X n2s U1g 0D Llao YuLP PB6YKo D1 M 0fi qHU l4A Ia joiV Q6af VT6wvY Md 0 pCY BZp 7RX Hd xTb0 sjJ0 Beqpkc 8b N OgZ 0Tr 0wq h1 C2Hn YQXM 8nJ0Pf uG J Be2 vuq Duk LV AJwv 2tYc JOM1uK h7 p cgo iiK t0b 3e URec DVM7 ivRMh1 T6 p AWl upj kEj UL R3xN VAu5 kEbnrV HE 1 OrJ 2bx dUP yD vyVi x6sC BpGDSx jB C n9P Fiu xkF vw 0QPo fRjy 2OFItV eD B tDz lc9 xVy A0 de9Y 5h8c 7dYCFk Fl v WPD SuN VI6 MZ 72u9 MBtK 9BGLNs Yp l X2DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH103} \end{split} \end{align} for any $\epsilon, \bar{\epsilon} \in (0,1]$. 
From the elliptic regularity \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH160} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH39} it follows that \begin{align} \begin{split} \Vert u_t(t) \Vert_{L^2}^2 + \Vert u_t \Vert_{L^2_t H^1_x}^2 & \les \Vert f\Vert_{H_t^1 L_x^2}^2 + \Vert u_{t}\Vert_{L^2_t L^2_x}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u_t \Vert_{L^2}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert f \Vert_{L^2}^2 \\& \les \Vert f\Vert_{H_t^1 L_x^2}^2 + \Vert u_{t}\Vert_{L^2_t L^2_x}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty} \Vert u_t \Vert_{L^2}^2
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH162} \end{split} \end{align} by taking $\epsilon, \bar{\epsilon} >0$ sufficiently small, where in the last inequality we used $\Vert f\Vert_{L^\infty_t L^2_x} \les \Vert f\Vert_{H^1_t L^2_x}$. Appealing to Gronwall's inequality, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH162} implies \begin{align} \begin{split} \Vert u_t(t) \Vert_{L^2}^2 \les \Vert f\Vert_{K^2}^2 , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH104} \end{split} \end{align} and then, after using \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH104} in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH162}, we arrive at \begin{align} \begin{split} \Vert u_t\Vert_{L^2_t H^1_x}^2 \les \Vert f\Vert_{K^2}^2 . \llabel{y b5U HgH AD bW8X Rzkv UJZShW QH G oKX yVA rsH TQ 1Vbd dK2M IxmTf6 wE T 9cX Fbu uVx Cb SBBp 0v2J MQ5Z8z 3p M EGp TU6 KCc YN 2BlW dp2t mliPDH JQ W jIR Rgq i5l AP gikl c8ru HnvYFM AI r Ih7 Ths 9tE hA AYgS swZZ fws19P 5w e JvM imb sFH Th CnSZ HORm yt98w3 U3 z ant zAy Twq 0C jgDI Etkb h98V4u o5 2 jjA Zz1 kLo C8 oHGv Z5Ru Gwv3kK 4W B 50T oMt q7Q WG 9mtb SIlc 87ruZf Kw Z Ph3 1ZA Osq 8l jVQJ LTXC gyQn0v KE S iSq Bpa wtH xc IJe4 SiE1 izzximDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH331} \end{split} \end{align} \par Differentiating \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48} in time and taking the $L^2$-inner product with $u_{tt}$, we obtain \begin{align*} \begin{split} & \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} |u_{tt}|^2 + \frac{\lambda}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_k u_{tj} (\partial_k u_{tj} + \partial_j u_{tk}) + \frac{\mu}{2} \frac{d}{dt} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR \partial_j u_{tj} \partial_k u_{tk} \\ & = 
\fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} f_t u_{tt} - \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{ttj} \partial_k \RR (\partial_k u_{tj} + \partial_j u_{tk}) - \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{ttj} \partial_j \RR \partial_k u_{tk} \\&\indeq + \frac{\lambda}{2} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR_t \partial_k u_{tj} (\partial_k u_{tj} + \partial_j u_{tk}) + \frac{\mu}{2} \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} \RR_t \partial_j u_{tj} \partial_k u_{tk} + \lambda \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{ttj} \RR_t \partial_k (\partial_k u_j + \partial_j u_k) \\&\indeq + \mu \fgsdfgwertsfsrsfgsdfgfsdfasdf_{\Omegaf} u_{ttj} \RR_t \partial_{jk} u_k , \end{split} \end{align*} where we integrated by parts in spatial variables. We proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH60}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH162} to get \begin{align} \begin{split} \Vert u_{tt}\Vert_{L^2_t L^2_x}^2 + \Vert u_t (t)\Vert_{H^1}^2 & \les C_{\tilde{\epsilon}} \Vert f\Vert_{H^1_t L^2_x}^2 + \Vert u_t (t)\Vert_{L^2}^2 + \epsilon \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert u_t\Vert_{H^2} \Vert u_{tt} \Vert_{L^2} + C_{\tilde{\epsilon}} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty}^2 \Vert f \Vert_{L^2}^2 \\ &\indeq\indeq + (\bar{\epsilon} C_\epsilon + \tilde{\epsilon} + \tilde{\epsilon}) \Vert u_{tt} \Vert_{L^2_t L^2_x}^2 + C_{\epsilon, \bar{\epsilon}} \Vert u_t\Vert_{L^2_t H^1_x}^2 + C_{\tilde{\epsilon}} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t (1+\Vert \RR_t\Vert_{L^\infty}^2 ) \Vert u_t\Vert_{H^1}^2 , \llabel{ ke P Y3s 7SX 5DA SG XHqC r38V YP3Hxv OI R ZtM fqN oLF oU 7vNd txzw UkX32t 94 n Fdq qTR QOv Yq Ebig jrSZ kTN7Xw tP F gNs O7M 1mb DA btVB 3LGC pgE9hV FK Y LcS GmF 863 7a ZDiz 4CuJ bLnpE7 yl 8 5jg Many Thanks, POL OG EPOe Mru1 v25XLJ Fz h wgE lnu Ymq rX 1YKV Kvgm MK7gI4 6h 5 kZB OoJ tfC 5g VvA1 kNJr 2o7om1 XN p Uwt CWX fFT SW DjsI wuxO JxLU1S 
xA 5 ObG 3IO UdL qJ cCAr gzKM 08DvX2 mu i 13T t71 Iwq oF UI0E Ef5S V2vxcy SY I QGr qrB HID TJDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH63} \end{split} \end{align} for any $\epsilon, \bar{\epsilon}, \tilde{\epsilon} \in (0,1]$, where we used Young's, H\"older's, Sobolev, and Korn inequalities. Note that $u_t$ is the solution of the elliptic problem \begin{align*} \begin{split} & -\lambda \dive(\nabla u_t + (\nabla u_t)^T) - \mu \nabla \dive u_t = -\RR^{-1} u_{tt} + \RR^{-2} u_t \RR_t + \RR^{-1} f_t - \RR^{-2} \RR_t f \inon{in~$(0,T)\times\Omegaf$} , \end{split} \end{align*} with the boundary conditions \begin{align*} & \lambda (\partial_k u_{tj } + \partial_j u_{tk})\nu^k + \mu \partial_k u_{tk} \nu^j = 0 \inon{in~$(0,T) \times \Gammac$}, \\ & u_{tj} = 0 \inon{in~$(0,T) \times \Gammaf$}, \end{align*} for $j=1,2,3$. The elliptic regularity implies that \begin{align} \begin{split} \Vert u_t\Vert_{H^2} & \les \Vert u_{tt}\Vert_{L^2} + \Vert u_t \RR_t \Vert_{L^2} + \Vert f_t \Vert_{L^2} + \Vert \RR_t f \Vert_{L^2} \\& \les \Vert u_{tt}\Vert_{L^2} + \Vert u_t \Vert_{L^2} \Vert \RR_t \Vert_{L^\infty} + \Vert f_t \Vert_{L^2} + \Vert \RR_t \Vert_{L^\infty} \Vert f\Vert_{L^2} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH163}
\end{split} \end{align} where we used H\"older's inequality. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH104}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH163}, we obtain \begin{align} \begin{split} \Vert u_{tt}\Vert_{L^2_t L^2_x}^2 + \Vert u_t (t)\Vert_{H^1}^2 & \les \Vert f\Vert_{K^2}^2 + \Vert u_t (t)\Vert_{L^2}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t ( 1 + \Vert \RR_t\Vert_{L^\infty}^2 ) \Vert u_t\Vert_{H^1}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \RR_t \Vert_{L^\infty}^2 \Vert f \Vert_{L^2}^2 \\& \les \Vert f\Vert_{K^2}^2 + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t ( 1 + \Vert \RR_t\Vert_{L^\infty}^2 ) \Vert u_t\Vert_{H^1}^2 , \llabel{ v1OB 1CzD IDdW4E 4j J mv6 Ktx oBO s9 ADWB q218 BJJzRy UQ i 2Gp weE T8L aO 4ho9 5g4v WQmoiq jS w MA9 Cvn Gqx l1 LrYu MjGb oUpuvY Q2 C dBl AB9 7ew jc 5RJE SFGs ORedoM 0b B k25 VEK B8V A9 ytAE Oyof G8QIj2 7a I 3jy Rmz yET Kx pgUq 4Bvb cD1b1g KB y oE3 azg elV Nu 8iZ1 w1tq twKx8C LN 2 8yn jdo jUW vN H9qy HaXZ GhjUgm uL I 87i Y7Q 9MQ Wa iFFS Gzt8 4mSQq2 5O N ltT gbl 8YD QS AzXq pJEK 7bGL1U Jn 0 f59 vPr wdt d6 sDLj Loo1 8tQXf5 5u p mTa dJDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH182} \end{split} \end{align} by taking $\epsilon, \bar{\epsilon}, \tilde{\epsilon}>0$ sufficiently small, where we used $\Vert f\Vert_{L^\infty_t L^2_x} \les \Vert f\Vert_{H^1_t L^2_x}$. 
Appealing to Gronwall's inequality, we arrive at \begin{align} \begin{split} \Vert u_t (t)\Vert_{H^1}^2 & \les \Vert f\Vert_{K^2}^2 , \llabel{D sEL pH 2vqY uTAm YzDg95 1P K FP6 pEi zIJ Qd 8Ngn HTND 6z6ExR XV 0 ouU jWT kAK AB eAC9 Rfja c43Ajk Xn H dgS y3v 5cB et s3VX qfpP BqiGf9 0a w g4d W9U kvR iJ y46G bH3U cJ86hW Va C Mje dsU cqD SZ 1DlP 2mfB hzu5dv u1 i 6eW 2YN LhM 3f WOdz KS6Q ov14wx YY d 8sa S38 hIl cP tS4l 9B7h FC3JXJ Gp s tll 7a7 WNr VM wunm nmDc 5duVpZ xT C l8F I01 jhn 5B l4Jz aEV7 CKMThL ji 1 gyZ uXc Iv4 03 3NqZ LITG Ux3ClP CB K O3v RUi mJq l5 blI9 GrWy irWHof lH DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH64} \end{split} \end{align} whence \begin{align} \begin{split} \Vert u_{tt} \Vert_{L^2_t L^2_x}^2 \les \Vert f\Vert_{K^2}^2 . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH105} \end{split} \end{align} From the $H^4$ regularity of the elliptic problem \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH67}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH69} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH163} it follows that \begin{align} \begin{split} \Vert u\Vert_{H^4} & \les \Vert \RR^{-1} u_t \Vert_{H^2} + \Vert \RR^{-1} f \Vert_{H^2} \\& \les \Vert u_{tt} \Vert_{L^2} + \Vert \RR_t \Vert_{L^\infty} \Vert u_t\Vert_{L^2} + \Vert \RR_t \Vert_{L^\infty} \Vert f\Vert_{L^2} + \Vert f_t\Vert_{L^2} + \Vert f\Vert_{H^2} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH164} \end{split} \end{align} since $H^2$ is an algebra. 
We combine \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH104} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH105}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH164} to get \begin{align*} \begin{split} \Vert u\Vert_{K^4} & = \Vert u\Vert_{L^2_t H^4_x} + \Vert u\Vert_{H^2_t L^2_x} \\ & \les \Vert u_{tt}\Vert_{L^2_t L^2_x} + \Vert \RR_t \Vert_{L^2_t L^\infty_x} \Vert u_t\Vert_{L^\infty_t L^2_x} + \Vert \RR_t \Vert_{L^2_t L^\infty_x} \Vert f\Vert_{H^1_t L^2_x} + \Vert f\Vert_{K^2} \les \Vert f\Vert_{K^2} , \end{split} \end{align*} completing the proof of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH59}. \end{proof} \par The following lemma provides a maximal regularity for the parabolic system~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46}. \par \cole \begin{Lemma} \label{L02} Let $s\in (2, 2+\epsilon_0]$ where $\epsilon_0 \in (0,1/2)$. Assume the compatibility conditions \begin{align} & h_j(0) = \lambda (\partial_k u_{0j} + \partial_j u_{0k}) \nu^k + \mu \partial_k u_{0k} \nu^j \inon{on~$\Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH42} \\ & u_{0j} = 0 \inon{on~$\Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH43} \end{align} for $j=1,2,3$, and suppose that \begin{align} \begin{split} (\RR, \RR^{-1}, h, f) & \in L^\infty ([0,T], H^2(\Omegaf)) \times L^\infty ([0,T], H^2(\Omegaf)) \cap H^1 ([0,T], L^\infty (\Omegaf)) \\&\indeqtimes H^{s/2-1/4, s-1/2} ((0,T)\times \Gammac) \times K^{s-1} ((0,T)\times \Omegaf) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH332} \end{split} \end{align} for some $T>0$. 
Then the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46} admits a solution $u$ satisfying \begin{align} \begin{split} \Vert u\Vert_{K^{s+1} ( (0,T) \times \Omegaf )} & \les \Vert h\Vert_{H^{s/2-1/4, s-1/2}((0,T)\times \Gammac)} + \Vert u_0\Vert_{H^s} + \Vert f\Vert_{K^{s-1} ( (0,T) \times \Omegaf )} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH92} \end{split} \end{align} where the implicit constant depends on the norms of $\RR$ and $\RR^{-1}$ in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH332}. \end{Lemma} \colb \par \begin{proof} From \cite[Theorem~2.3]{LM} and the compatibility conditions \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH42}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH43} it follows that there exists $v \in K^{s+1}$ satisfying the boundary conditions and the initial data \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH44}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46} with \begin{align} \begin{split} \Vert v \Vert_{K^{s+1}} \les \Vert h\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert u_0\Vert_{H^s} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH83} \end{split} \end{align} since $s>1/2$. Now we consider the homogeneous parabolic problem \begin{align} \begin{split} \partial_t w
- \lambda \RR \dive(\nabla w + (\nabla w)^T) - \mu \RR \nabla \dive w & = F \inon{in~$(0,T)\times\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH268} \end{split} \end{align} with the homogeneous boundary conditions and the initial data \begin{align} & \lambda (\partial_k w_j + \partial_j w_k) \nu^k + \mu \partial_k w_k \nu^j = 0 \inon{on~$(0,T)\times\Gammac$} , \\ & w =0 \inon{on~$(0,T)\times\Gammaf$} , \\ & w~~\text{periodic in the $y_1$~and~$y_2$ directions}, \\ & w(0) = 0 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH04} \end{align} for $j=1,2,3$, where \begin{align} \begin{split} F= -f + \partial_t v - \lambda \RR \dive(\nabla v + (\nabla v)^T) - \mu \RR \nabla \dive v . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH85} \end{split} \end{align} By Lemma~\ref{L01}, there exists a solution $w$ to the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH268}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH85} satisfying \begin{align} \begin{split} \Vert w\Vert_{K^2} \les \Vert F\Vert_{K^0} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH81} \end{split} \end{align} and \begin{align} \Vert w\Vert_{K^4} & \les \Vert F\Vert_{K^2} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH82} \end{align} From \cite[Theorem~6.2]{LM} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH81}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH82} it follows that \begin{align} \begin{split} \Vert w\Vert_{K^{s+1}} & \les \Vert F\Vert_{K^{s-1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH84} \end{split} \end{align} since $s \neq \text{integer}+1/2$ and $s/2 \neq \text{integer}$. 
From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH85}, we get \begin{align} \begin{split} \Vert F\Vert_{K^{s-1}} \les \Vert f\Vert_{K^{s-1}} + \Vert v_t\Vert_{K^{s-1}} + \Vert R \nabla^2 v\Vert_{K^{s-1}} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH86} \end{split} \end{align} For the second term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH86}, we obtain \begin{align} \begin{split} \Vert v_t\Vert_{K^{s-1}} & \les \Vert v_t\Vert_{L^2_t H^{s-1}_x} + \Vert v_t\Vert_{H^{(s-1)/2}_t L^2_x} \les \Vert v\Vert_{K^{s+1}} , \llabel{7 3ZT eZX kop eq 8XL1 RQ3a Uj6Ess nj 2 0MA 3As rSV ft 3F9w zB1q DQVOnH Cm m P3d WSb jst oj 3oGj advz qcMB6Y 6k D 9sZ 0bd Mjt UT hULG TWU9 Nmr3E4 CN b zUO vTh hqL 1p xAxT ezrH dVMgLY TT r Sfx LUX CMr WA bE69 K6XH i5re1f x4 G DKk iB7 f2D Xz Xez2 k2Yc Yc4QjU yM Y R1o DeY NWf 74 hByF dsWk 4cUbCR DX a q4e DWd 7qb Ot 7GOu oklg jJ00J9 Il O Jxn tzF VBC Ft pABp VLEE 2y5Qcg b3 5 DU4 igj 4dz zW soNF wvqj bNFma0 am F Kiv Aap pzM zr VqYf OulM HaDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH300} \end{split} \end{align} where we used Lemma~\ref{L12}. 
Regarding the last term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH86}, we appeal to H\"older's and Sobolev inequalities, yielding \begin{align} \begin{split} \Vert \RR \nabla^2 v\Vert_{L^2_t H^{s-1}_x} & \les \Vert \RR \Vert_{L^\infty_t H^2_x} \Vert \nabla^2 v \Vert_{L^2_t H^{s-1}_x} \les \Vert v \Vert_{K^{s+1}} \llabel{faBk 6J r eOQ BaT EsJ BB tHXj n2EU CNleWp cv W JIg gWX Ksn B3 wvmo WK49 Nl492o gR 6 fvc 8ff jJm sW Jr0j zI9p CBsIUV of D kKH Ub7 vxp uQ UXA6 hMUr yvxEpc Tq l Tkz z0q HbX pO 8jFu h6nw zVPPzp A8 9 61V 78c O2W aw 0yGn CHVq BVjTUH lk p 6dG HOd voE E8 cw7Q DL1o 1qg5TX qo V 720 hhQ TyF tp TJDg 9E8D nsp1Qi X9 8 ZVQ N3s duZ qc n9IX ozWh Fd16IB 0K 9 JeB Hvi 364 kQ lFMM JOn0 OUBrnv pY y jUB Ofs Pzx l4 zcMn JHdq OjSi6N Mn 8 bR6 kPe klT Fd VlwDDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH88} \end{split} \end{align} and \begin{align} \begin{split} \Vert \RR \nabla^2 v\Vert_{H^{(s-1)/2}_t L^2_x} & \les \Vert \RR \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert \nabla^2 v\Vert_{L^4_t L^2_x} + \Vert \RR \Vert_{L^\infty_t H^2_x} \Vert \nabla^2 v\Vert_{H^{(s-1)/2}_t L^2_x} \\& \les \Vert \RR \Vert_{H^1_t L^\infty_x} \Vert \nabla^2 v\Vert_{H^{(s-1)/2}_t L^2_x} + \Vert v\Vert_{K^{s+1}} \les \Vert v\Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH89} \end{split} \end{align} since $3/2<s<5/2$. Note that from \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH268}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH85} we infer that the difference $u=v-w$ is a solution of the system~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH48}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH46}. 
From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH83} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH84}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH89} it follows that \begin{align} \begin{split} \Vert u\Vert_{K^{s+1}} & \les \Vert w\Vert_{K^{s+1}} + \Vert v\Vert_{K^{s+1}} \les \Vert h\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert u_0\Vert_{H^s} + \Vert f\Vert_{K^{s-1}} , \llabel{ SrhT 8Qr0sC hN h 88j 8ZA vvW VD 03wt ETKK NUdr7W EK 1 jKS IHF Kh2 sr 1RRV Ra8J mBtkWI 1u k uZT F2B 4p8 E7 Y3p0 DX20 JM3XzQ tZ 3 bMC vM4 DEA wB Fp8q YKpL So1a5s dR P fTg 5R6 7v1 T4 eCJ1 qg14 CTK7u7 ag j Q0A tZ1 Nh6 hk Sys5 CWon IOqgCL 3u 7 feR BHz odS Jp 7JH8 u6Rw sYE0mc P4 r LaW Atl yRw kH F3ei UyhI iA19ZB u8 m ywf 42n uyX 0e ljCt 3Lkd 1eUQEZ oO Z rA2 Oqf oQ5 Ca hrBy KzFg DOseim 0j Y BmX csL Ayc cC JBTZ PEjy zPb5hZ KW O xT6 dyt u82DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH90} \end{split} \end{align} concluding the proof of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH92}. 
\end{proof} \par \startnewsection{Solution to a parabolic-wave system}{sec05} In this section, we consider the coupled parabolic-wave system \begin{align} & \partial_t v - \lambda \RR \dive (\nabla v + (\nabla v)^T) -\mu \RR\nabla \dive v + \RR \nabla ( \RR^{-1} ) = f \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH71} \\ & \RR_t - \RR \dive v = 0 \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH93} \\ & \partial_{tt} w - \Delta w = 0 \inon{in~$(0,T)\times\Omegae$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH72} \end{align} with the boundary conditions \begin{align} & v = \partial_t w \inon{on~$(0,T)\times\Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH73} \\ & \lambda (\partial_k v_j + \partial_j v_k) \nu^k + \mu \partial_k v_k \nu^j = \partial_k w_j \nu^k + \RR^{-1} \nu^j + h_j \inon{on~$(0,T)\times \Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH75} \\& v, w~~\text{periodic in the $y_1$~and~$y_2$ directions}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH321}
\\& v = 0 \inon{on~$(0,T)\times\Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH76} \end{align} for $j =1,2,3$, and the initial data \begin{align} \begin{split} & (v, \RR, w, w_t)(0) = (v_0, \RR_0, w_0, w_1) \inon{in~$\Omegaf \times \Omegaf \times \Omegae \times \Omegae$} , \\ & (v_0, \RR_0, w_0, w_1) ~\text{periodic in the $y_1$~and~$y_2$ directions} , \\ & w_0 = 0. \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH77} \end{split} \end{align} \par To provide the maximal regularity for the system~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH71}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH77}, we state the following necessary a~priori density estimates. \cole \begin{Lemma} \label{L04} Let $s\in (2, 2 + \epsilon_0]$ where $\epsilon_0 \in (0,1/2)$. Consider the ODE system \begin{align} & \RR_t - \RR \dive v = 0 \inon{in~$(0,T) \times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH146} \\ & \RR(0) = \RR_0 \inon{on~$\Omegaf$} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH147} \end{align} Assume $ (\RR_0, \RR_0^{-1}) \in H^s (\Omegaf) \times H^s (\Omegaf) $ and $\Vert v\Vert_{K^{s+1}} \leq M$ for some constants $T>0$ and $M>1$. Then there exists a constant $T_0 \in (0,T)$ depending on $M$ such that we have \begin{enumerate}[label=(\roman*)] \item $\Vert \RR \Vert_{L^\infty_t L^\infty_x} \les 1$ and $\Vert \RR^{-1} \Vert_{L^\infty_t L^\infty_x} \les 1$ in $ [0, T_0]\times \Omegaf$. \item $ \Vert \RR \Vert_{L^\infty_t H^s_x} + \Vert \RR \Vert_{H^1_t L^\infty_x} + \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} + \Vert \RR^{-1} \Vert_{H^1_t L^\infty_x} \les 1$. \item $\Vert \RR\Vert_{H^1_t H^s_x} \les M$. \end{enumerate} We emphasize that the implicit constants in the above inequalities are independent of $M$. 
\end{Lemma} \colb \par \begin{proof}[Proof of Lemma~\ref{L04}] (i) The solution of the ODE system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH146}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH147} reads \begin{align} \begin{split} \RR(t,x) = \RR_0 (x) e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive v(\tau) d\tau} \inon{in~$[0, T]\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH148} \end{split} \end{align} from where it follows that \begin{align} \begin{split} \Vert \RR \Vert_{L^\infty_t L^\infty_x} \les \Vert \RR_0 \Vert_{H^s} e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \dive v(\tau) \Vert_{L^\infty_x} d\tau } \les e^{T^{1/2} M } \les 1 \llabel{ Ia htpD m75Y DktQvd Nj W jIQ H1B Ace SZ KVVP 136v L8XhMm 1O H Kn2 gUy kFU wN 8JML Bqmn vGuwGR oW U oNZ Y2P nmS 5g QMcR YHxL yHuDo8 ba w aqM NYt onW u2 YIOz eB6R wHuGcn fi o 47U PM5 tOj sz QBNq 7mco fCNjou 83 e mcY 81s vsI 2Y DS3S yloB Nx5FBV Bc 9 6HZ EOX UO3 W1 fIF5 jtEM W6KW7D 63 t H0F CVT Zup Pl A9aI oN2s f1Bw31 gg L FoD O0M x18 oo heEd KgZB Cqdqpa sa H Fhx BrE aRg Au I5dq mWWB MuHfv9 0y S PtG hFF dYJ JL f3Ap k5Ck Szr0Kb Vd i sQkDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH150} \end{split} \end{align} and \begin{align} \begin{split} \Vert \RR^{-1} \Vert_{L^\infty_t L^\infty_x} \les \Vert \RR_0^{-1} \Vert_{H^s} e^{ \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \dive v(\tau) \Vert_{L^\infty_x} d\tau } \les e^{T^{1/2} M} \les 1 , \llabel{ uSA JEn DT YkjP AEMu a0VCtC Ff z 9R6 Vht 8Ua cB e7op AnGa 7AbLWj Hc s nAR GMb n7a 9n paMf lftM 7jvb20 0T W xUC 4lt e92 9j oZrA IuIa o1Zqdr oC L 55L T4Q 8kN yv sIzP x4i5 9lKTq2 JB B sZb QCE Ctw ar VBMT H1QR 6v5srW hR r D4r wf8 ik7 KH Egee rFVT ErONml Q5 L R8v XNZ LB3 9U DzRH ZbH9 fTBhRw kA 2 n3p g4I grH xd fEFu z6RE tDqPdw N7 H TVt cE1 8hW 6y n4Gn nCE3 MEQ51i Ps G Z2G Lbt CSt hu zvPF eE28 MM23ug TC d j7z 7Av TLa 1A GLiJ 5JwW CiDPyM 
DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH151} \end{split} \end{align} by taking $T_0>0$ sufficiently small. \par (ii) From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH148} we have \begin{align} \begin{split} \Vert \RR \Vert_{L^\infty_t H^s_x} \les \Vert \RR_0 \Vert_{H^s} \Vert e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive v(\tau) d\tau} \Vert_{L^\infty_t H^s_x} \les 1 \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH187} \end{split} \end{align} and \begin{align} \begin{split} \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} \les \Vert \RR_0^{-1} \Vert_{H^s} \Vert e^{-\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive v(\tau) d\tau} \Vert_{L^\infty_t H^s_x} \les 1 , \llabel{qa 8 tAK QZ9 cfP 42 kuUz V3h6 GsGFoW m9 h cfj 51d GtW yZ zC5D aVt2 Wi5IIs gD B 0cX LM1 FtE xE RIZI Z0Rt QUtWcU Cm F mSj xvW pZc gl dopk 0D7a EouRku Id O ZdW FOR uqb PY 6HkW OVi7 FuVMLW nx p SaN omk rC5 uI ZK9C jpJy UIeO6k gb 7 tr2 SCY x5F 11 S6Xq OImr s7vv0u vA g rb9 hGP Fnk RM j92H gczJ 660kHb BB l QSI OY7 FcX 0c uyDl LjbU 3F6vZk Gb a KaM ufj uxp n4 Mi45 7MoL NW3eIm cj 6 OOS e59 afA hg lt9S BOiF cYQipj 5u N 19N KZ5 Czc 23 1wxG x1utDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH186} \end{split} \end{align} by taking $T_0>0$ sufficiently small. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH146}, using H\"older's and Sobolev inequalities, we obtain \begin{align*} \begin{split} \Vert \RR_t \Vert_{L^2_t L^\infty_x} \les \Vert \RR\Vert_{L^\infty_t L^\infty_x} \Vert \dive v\Vert_{L^2_t L^\infty_x} \les \epsilon \Vert v\Vert_{L^2_t H^{s+1}_x} + C_\epsilon \Vert v\Vert_{L^2_t L^2_x} , \end{split} \end{align*} for any $\epsilon \in (0,1]$, since $s\geq 2$. 
It then follows \begin{align} \begin{split} \Vert \RR_t \Vert_{L^2_t L^\infty_x} \les 1 + T^{1/2} Q(M) \Vert v\Vert_{L^\infty_t L^2_x} \les 1+ T^{1/2} Q(M) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH223} \end{split} \end{align} and \begin{align} \begin{split} \Vert (\RR^{-1} )_t \Vert_{L^2_t L^\infty_x} \les \Vert \RR_t \Vert_{L^2_t L^\infty_x} \les 1 + T^{1/2} Q(M) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH224} \end{split} \end{align} by taking $\epsilon=1/M$, since $\Vert v\Vert_{L^2_t L^2_x}\les T^{1/2} \Vert v\Vert_{L^\infty_t L^2_x} \les T^{1/2} \Vert v\Vert_{H^{(s+1)/2}_t L^2_x}$. In \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH223}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH224} and below, $Q$ denotes a generic nondecreasing function which may vary from inequality to inequality. Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH187}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH224}, we conclude the proof of (ii) by taking $T_0>0$ sufficiently small. \par
(iii) From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH146} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH187}, we have \begin{align*} \begin{split} \Vert \RR_t \Vert_{L^2_t H^s_x} \les \Vert \RR \Vert_{L^\infty_t H^s_x} \Vert \dive v\Vert_{L^2_t H^s_x} \les \Vert v\Vert_{L^2_t H^{s+1}_x} \les M , \end{split} \end{align*} where we used H\"older's inequality. \end{proof} \par The following theorem provides the local existence for the parabolic-wave system~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH71}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH77}. \cole \begin{Theorem} \label{T03} Let $s\in (2, 2+\epsilon_0]$ where $\epsilon_0 \in (0, 1/2)$. Assume the compatibility conditions \begin{align*} & w_{1j} = v_{0j} \inon{on~$\Gammac$} , \\& v_{0j} = 0 \inon{on~$\Gammaf$} , \\& \lambda (\partial_k v_{0j} + \partial_j v_{0k} )\nu^k + \mu \partial_i v_{0i} \nu^j - \RR_0^{-1} \nu^j - \partial_k w_{0j} \nu^k = 0 \inon{on~$\Gammac$} , \\& \lambda \partial_k (\partial_k v_{0j} + \partial_j v_{0k}) + \mu \partial_j \partial_k v_{0k} - \partial_k (\RR^{-1}_0 ) = 0 \inon{on~$\Gammaf$} . \end{align*} Suppose that \begin{align*} & (v_0, w_0, w_1, \RR_0^{-1}, \RR_0) \in H^s(\Omegaf) \times H^{s+1/2} (\Omegae) \times H^{s-1/2} (\Omegae) \times H^s(\Omegaf) \times H^s(\Omegaf) , \end{align*} and the nonhomogeneous terms satisfy \begin{align*} (f, h) \in K^{s-1} ((0,T) \times \Omegaf) \times H^{s-1/2, s/2-1/4} ((0,T) \times \Gammac ) , \end{align*} where $T>0$. 
Then the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH71}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH77} admits a unique solution \begin{align*} \begin{split} (v, \RR, w, w_t) & \in K^{s+1} ([0,T_0] \times \Omegaf) \times H^1 ([0, T_0], H^s(\Omegaf)) \\&\indeqtimes C([0,T_0], H^{s+1/4-\epsilon_0}(\Omegae)) \times C([0,T_0], H^{s-3/4-\epsilon_0}(\Omegae)) , \end{split} \end{align*} for some constant $T_0 \in (0, T)$, where the corresponding norms are bounded by a function of the initial data. \end{Theorem} \colb \par We define \begin{align} \begin{split} Z_{T} & = \{v \in K^{s+1} : v(0) = v_0 ~~\text{in}~~\Omegaf, v=0 ~~\text{on}~~ (0,T) \times\Gammaf, \\&\indeq\indeq\indeq v~~\text{periodic in the $y_1$~and~$y_2$ directions}, ~~\text{and}~~ \Vert v\Vert_{K^{s+1}} \leq M \} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH188} \end{split} \end{align} where $M>1$ and $T\in (0,1)$ are constants, both to be determined below. For any $v\in Z_T$, we first obtain the solution $\RR$ by using the explicit formula \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH148} with the initial data $\RR(0) = \RR_0$. Then we solve the wave equation \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH72} for $w$ with the boundary condition and the initial data \begin{align} & w(t) = w(0) + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t v(\tau) d\tau \inon{on~$(0,T)\times \Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH123} \\ & (w, w_t) (0) = (w_0, w_1) \inon{in~$\Omegae$} . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH122} \end{align} With $(\RR, w)$ constructed, we define a mapping \begin{align*} \Lambda \colon v (\in Z_T )\mapsto \bar{v} , \end{align*} where $\bar{v}$ is the solution of the nonhomogeneous parabolic problem \begin{align} \begin{split} & \partial_t \bar{v} - \lambda \RR \dive (\nabla \bar{v} + (\nabla \bar{v})^T ) - \mu \RR \nabla \dive \bar{v} = f - \RR \nabla \RR^{-1} \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH124} \end{split} \end{align} with the boundary conditions and the initial data \begin{align} & \lambda (\partial_k \bar{v}_j + \partial_j \bar{v}_k) \nu^k + \mu \partial_k \bar{v}_k \nu^j = \partial_k w_j \nu^k + \RR^{-1} \nu^j + h_j \inon{on~$(0,T)\times\Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH125} \\ & \bar{v} = 0 \inon{on~$(0,T)\times\Gammaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH126} \\& \bar{v}~~\text{periodic in the $y_1$~and~$y_2$ directions}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH364} \\& \bar{v}(0) = v_0 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH301} \end{align} for $j=1,2,3$. We shall prove that $\Lambda$ is a contraction mapping and use the Banach fixed-point theorem. \par \subsection{Uniform boundedness of the iterative sequence} In this section we show that the mapping $\Lambda$ is well-defined from $Z_T$ to $Z_T$, for some sufficiently large constant $M>1$ and sufficiently small constant $T_0 \in (0,1)$. We emphasize that the implicit constants below in this section are independent of $M$. 
From Lemmas~\ref{L02} and~\ref{L04} it follows that \begin{align} \begin{split} \Vert \bar{v} \Vert_{K^{s+1}} & \les \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert h\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert v_0\Vert_{H^s} + \Vert f\Vert_{K^{s-1}} + \Vert \RR^{-1} \nabla \RR \Vert_{K^{s-1}} \\&\indeq + \Vert \RR^{-1} \nu\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111} \end{split} \end{align} for a sufficiently small constant $T_0>0$ depending on $M$. \par For the first term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111}, we appeal to Lemma~\ref{L13} and obtain \begin{align} \begin{split} \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{L^2_t H^{s-1/2}(\Gammac)} & \les \Vert w_0\Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} + \Vert w \Vert_{L^2 H^{s+1/2} (\Gammac)} + \Vert w \Vert_{H^{s/2+1/4}_t H^{s/2+1/4} (\Gammac)} \\& \les \Vert w_0\Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} + \Vert w \Vert_{L^2_t H^{s+1/2} (\Gammac)} + \Vert v\Vert_{H^{s/2-3/4}_t H^{s/2+3/4}_x} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH115} \end{split} \end{align}
where we used the trace inequality. Regarding the third term on the far right side, using \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH123}, we get \begin{align} \begin{split} \Vert w \Vert_{L^2_t H^{s+1/2}(\Gammac)} \les T^{1/2} \Vert \left(\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t v ds\right)\Vert_{H^{s+1/2} (\Gammac)} \les T \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert v\Vert_{L^2_t H^{s+1/2} (\Gammac)} \les T M , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH350} \end{split} \end{align} while for the fourth term, we appeal to Lemma~\ref{L12}, obtaining \begin{align} \begin{split} \Vert v\Vert_{H^{s/2-3/4}_t H^{s/2+3/4}_x} & \les \epsilon_1 \Vert v\Vert_{H^{(s+1)/2}_t L^2_x} + C_{\epsilon_1} \Vert v\Vert_{L^2_t H^{(s+1)(2s+3)/10}_x} \\& \les \epsilon_1 \Vert v\Vert_{H^{(s+1)/2}_t L^2_x} + C_{\epsilon_1} \epsilon_2 \Vert v\Vert_{L^2_t H^{s+1}_x} + C_{\epsilon_1} T^{1/2} \Vert v\Vert_{H^{(s+1)/2}_t L^2_x} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH351} \end{split} \end{align} for any $\epsilon_1, \epsilon_2 \in (0,1]$, since $s<7/2$ and $\Vert v\Vert_{L^2_t L^2_x}\les T^{1/2} \Vert v\Vert_{H^{(s+1)/2}_t L^2_x}$. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH115}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH351} it follows that \begin{align} \begin{split} \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{L^2_t H^{s-1/2}(\Gammac)} \les \Vert w_0\Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} + (\epsilon_1 + C_{\epsilon_1} \epsilon_2 + C_{\epsilon_1}T^{1/2}) M . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH352} \end{split} \end{align} For the time component of the first term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111}, we use the trace inequality and arrive at \begin{align} \begin{split} \left\Vert \frac{\partial w}{\partial \nu}\right\Vert_{H^{s/2-1/4}_t L^2(\Gammac)} & \les \Vert \nabla v\Vert_{L^2_t L^2(\Gammac)} + \left\Vert \frac{\partial w}{\partial \nu}\right\Vert_{L^2_t L^2(\Gammac)} \les \Vert v \Vert_{L^2_t H^{s/2+3/4}} + \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{L^2_t H^{s-1/2} (\Gammac)} \\& \les \Vert w_0\Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} + (\epsilon_1 + C_{\epsilon_1} \epsilon_2 + C_{\epsilon_1}T^{1/2}) M , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH353} \end{split} \end{align} since $1/2<s<5/2$, where we used~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH351}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH352}. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH352}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH353}, we have \begin{align} \left\Vert \frac{\partial w}{\partial \nu}\right\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} \les \Vert w_0\Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} + (\epsilon_1 + C_{\epsilon_1} \epsilon_2 + C_{\epsilon_1}T^{1/2}) M . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH361} \end{align} \par On the other hand, using Lemma~\ref{L03}, we have the interior regularity estimate \begin{align} \begin{split} & \Vert w \Vert_{C([0,T], H^{s+1/4 -\epsilon_0} (\Omegae))} + \Vert w_t \Vert_{C([0,T], H^{s-3/4-\epsilon_0}(\Omegae))} \\&\indeq \les \Vert w_0\Vert_{H^{s+1/4-\epsilon_0}} + \Vert w_1\Vert_{H^{s-3/4 -\epsilon_0}} + \Vert w\Vert_{H_{\Gammac}^{s+1/4 -\epsilon_0, s+1/4 -\epsilon_0}} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH180} \end{split} \end{align} For the last term on the right side, we use Lemma~\ref{L06} to get \begin{align} \begin{split} \Vert w\Vert_{H_{\Gammac}^{s+1/4 -\epsilon_0, s+1/4-\epsilon_0}} & \les \Vert v \Vert_{H^{s-3/4-\epsilon_0}_t L^2(\Gammac)} + \Vert w \Vert_{L^2_t L^2(\Gammac)} + \Vert w\Vert_{L^2_t H^{s+1/4 - \epsilon_0}(\Gammac)} \\& \les \epsilon_1 \Vert v\Vert_{H^{(s+1)/2}_t L^2_x} + C_{\epsilon_1} \Vert v\Vert_{L^2_t H^{(s+1)/(5+4\epsilon_0-2s)}_x} + \Vert w \Vert_{L^2_t H^{s+1/2}(\Gammac)} \\& \les (\epsilon_1 + \epsilon_2 C_{\epsilon_1} + C_{\epsilon_1} T^{1/2} ) M , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH181} \end{split} \end{align} since $s-3/4-\epsilon_0 \leq s/2+1/4$ and $s\leq 2+2\epsilon_0$, where we used~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH350}. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH180}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH181}, it follows that \begin{align} \begin{split} & \Vert w \Vert_{C([0,T], H^{s+1/4 -\epsilon_0} (\Omegae))} + \Vert w_t \Vert_{C([0,T], H^{s-3/4-\epsilon_0}(\Omegae))} \\&\indeq \les \Vert w_0\Vert_{H^{s+1/4}} + \Vert w_1\Vert_{H^{s-3/4}} + (\epsilon_1 + \epsilon_2 C_{\epsilon_1} + C_{\epsilon_1} T^{1/2} ) M . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH116} \end{split} \end{align} \par For the space component of the norm of the fifth term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111}, using Lemma~\ref{L04}, we arrive at \begin{align} \begin{split} \Vert \RR^{-1} \nabla \RR\Vert_{L^2_t H^{s-1}_x} & \les T^{1/2} \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} \Vert \nabla \RR\Vert_{L^\infty_t H^{s-1}_x} \les T^{1/2} Q( M) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH214} \end{split} \end{align} where we appealed to H\"older's inequality. For the time component, we get \begin{align} \begin{split} \Vert \RR^{-1} \nabla \RR\Vert_{H^{(s-1)/2}_t L^2_x} \les \Vert \RR^{-1} \nabla \RR\Vert_{H^1_t L^2_x} \les \Vert (\RR^{-1} \nabla \RR)_t \Vert_{L^2_t L^2_x} + \Vert \RR^{-1} \nabla \RR\Vert_{L^2_t L^2_x} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH112} \end{split} \end{align} since $s<3$. Regarding the first term on the far right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH112}, using H\"older's and Sobolev inequalities, together with an application of Lemma~\ref{L04}, we obtain \begin{align} \begin{split} \Vert (\RR^{-1} \nabla \RR)_t \Vert_{L^2_t L^2_x} & \les \Vert \RR_t \nabla \RR\Vert_{L^2_t L^2_x} + \Vert \nabla \RR_t\Vert_{L^2_t L^2_x} \\& \les
\Vert \dive v \Vert_{L^2_t L^4_x} \Vert \nabla \RR_0 \Vert_{L^4} \Vert e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive v(\tau) d\tau}\Vert_{L^\infty_t L^\infty_x} \\&\indeq + \Vert \dive v \Vert_{L^2_t L^\infty_x} \Vert \RR_0 \Vert_{L^\infty} \Vert e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive v(\tau) d\tau}\Vert_{L^\infty_t L^\infty_x} \Vert \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive \nabla v(\tau) d\tau \Vert_{L^\infty_t L^2_x} \\&\indeq + \Vert \dive \nabla v\Vert_{L^2_t L^2_x} \\& \les (\epsilon + C_{\epsilon} T^{1/2})Q( M ) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH113} \end{split} \end{align} for any $\epsilon \in (0,1]$. \par For the space component of the last term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111}, we use the trace inequality to obtain \begin{align} \begin{split} \Vert \RR^{-1} \nu\Vert_{L^2_t H^{s-1/2}(\Gammac)} \les \Vert \RR^{-1} \Vert_{L^2_t H^s_x} \les T^{1/2} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH189} \end{split} \end{align} where the last inequality follows from Lemma~\ref{L04}, while for the time component, we get \begin{align} \begin{split} \Vert \RR^{-1} \nu\Vert_{H^{s/2-1/4}_t L^2(\Gammac)} & \les \Vert \RR^{-1} \Vert_{H^1_t H^1_x} \les \Vert \RR^{-1} \Vert_{L^2_t H^1_x} + \Vert \RR^{-2} \RR_t \Vert_{L^2_t H^1_x} \\& \les T^{1/2} \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} + \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} \Vert \dive v \Vert_{L^2_t H^1_x} \\& \les T^{1/2} + ( \epsilon + C_\epsilon T^{1/2} ) M , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH190} \end{split} \end{align} since $s/2-1/4 \leq 1$. 
\par Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111}, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH361}, and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH116}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH190}, we arrive at \begin{align} \begin{split} & \Vert \bar{v}\Vert_{K^{s+1}} + \Vert w\Vert_{C([0,T], H^{s+1/4 -\epsilon_0} (\Omegae))} + \Vert w_t \Vert_{C([0,T], H^{s-3/4-\epsilon_0}(\Omegae))} \\&\indeq \les \Vert h\Vert_{H^{s-1/2, s/2-1/4}_{\Gammac}} + \Vert v_0\Vert_{H^s} + \Vert f\Vert_{K^{s-1}} + \Vert w_0\Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} \\&\indeq\indeq + (\epsilon +\epsilon_1+\epsilon_2 C_{\epsilon_1} + C_{\epsilon, \epsilon_1, \epsilon_2} T^{1/2} ) Q(M) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH119} \end{split} \end{align} for any $\epsilon, \epsilon_1, \epsilon_2 \in (0,1]$. Taking $\epsilon$, $\epsilon_1$, $\epsilon_2$, and $T_0>0$ sufficiently small, it follows that \begin{align} \begin{split} \Vert \bar{v}\Vert_{K^{s+1}} \leq M , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH245} \end{split} \end{align} by allowing $M\geq 1$ sufficiently large. Thus, we have shown that the mapping $\Lambda \colon v\mapsto \bar{v}$ is well-defined from $Z_T$ to $Z_T$, for some $M\geq 1$ as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH245} and some sufficiently small $T_0>0$. 
\par \subsection{Contracting property} In this section we shall prove \begin{align} \begin{split} \Vert \Lambda(v_1) - \Lambda(v_2) \Vert_{K^{s+1}} \leq \frac{1}{2} \Vert v_1 - v_2 \Vert_{K^{s+1}} \comma v_1, v_2 \in Z_T , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH121} \end{split} \end{align} where $M\geq 1$ is fixed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH245} and $T_0 \in (0,1)$ is sufficiently small to be determined below. We emphasize that the implicit constants below are allowed to depend on $M$. Let $v_1, v_2 \in Z_T$ and $(\RR_1, \xi_1, \xi_{1t}, \bar{v}_1)$ and $(\RR_2, \xi_2, \xi_{2t}, \bar{v}_2)$ be the corresponding solutions of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH93}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH72}, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH123}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH122}, and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH124}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH364} with the same initial data $(\RR_0, w_0, w_1, v_0)$ and the same nonhomogeneous terms $(f,h)$. We denote $\tilde{\VV} = \bar{v}_1 - \bar{v}_2$, $\tilde{v}= v_1 - v_2$, $\tilde{\RR} = \RR_1-\RR_2$, and $\tilde{\xi} = \xi_1 - \xi_2$. The difference $\tilde{\VV}$ satisfies \begin{align*} \begin{split} \tilde{\VV}_t - \lambda \RR_1 \dive (\nabla \tilde{\VV} + (\nabla \tilde{\VV})^T) - \mu \RR_1 \nabla \dive \tilde{\VV} = g \inon{in~$(0,T)\times \Omegaf$} , \end{split} \end{align*} with the boundary conditions and the initial data \begin{align*} & \lambda (\partial_k \tilde{\VV}_j
+ \partial_j \tilde{\VV}_k) \nu^k + \mu \partial_k \tilde{\VV}_k \nu^j = \partial_k \tilde{\xi}_j \nu^k - \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu^j \inon{on~$(0,T)\times\Gammac$} , \\& \tilde{\VV} = 0 \inon{on~$(0,T)\times \Gammaf$} , \\& \tilde{\VV}~~\text{periodic in the $y_1$~and~$y_2$ directions}, \\& \tilde{\VV} (0) = 0 \inon{in~$\Omegaf$} , \end{align*} for $j= 1,2,3$, where \begin{align} \begin{split} g = - \RR_1 \nabla \RR_1^{-1} + \RR_2 \nabla \RR_2^{-1} + \lambda \tilde{\RR} \dive (\nabla \bar{v}_2 + (\nabla \bar{v}_2)^T) + \mu \tilde{\RR} \nabla \dive \bar{v}_2 . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH128} \end{split} \end{align} \par \begin{proof}[Proof of Theorem~\ref{T03}] We proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111} to obtain \begin{align} \begin{split} \Vert \tilde{\VV}\Vert_{K^{s+1}} & \les \left\Vert \frac{\partial \tilde{\xi}}{\partial \nu} \right\Vert_{H^{s/2-1/4, s-1/2 }_{\Gammac}} + \Vert \tilde{\RR} \RR_1^{-1} \RR_2^{-1} \nabla \RR_2 \Vert_{K^{s-1}} + \Vert \RR_1^{-1} \nabla \tilde{\RR} \Vert_{K^{s-1}} + \Vert \tilde{\RR} \nabla^2 \bar{v}_2 \Vert_{K^{s-1}} \\&\indeq + \Vert \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu\Vert_{H^{s/2-1/4, s-1/2 }_{\Gammac}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127} \end{split} \end{align} where the last inequality follows from~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH128}. The difference $\tilde{\xi}$ satisfies the wave equation \begin{align*} \tilde{\xi}_{tt} - \Delta \tilde{\xi} = 0 \inon{in~$(0,T) \times \Omegae$} , \end{align*} with the boundary condition and the initial data \begin{align*} & \tilde{\xi}(t) = \tilde{\xi}(0) + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \tilde{v} (\tau) d\tau \inon{on~$(0,T)\times \Gammac$} , \\ & (\tilde{\xi}, \tilde{\xi}_t) (0) = (0, 0) \inon{in~$\Omegae$} . 
\end{align*} For the first term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127}, we proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH115}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH361} to obtain \begin{align} \begin{split} \left\Vert \frac{\partial \tilde{\xi}}{\partial \nu}\right\Vert_{H^{s/2- 1/4, s-1/2}_{\Gammac}} \les (\epsilon_1 + \epsilon_2 C_{\epsilon_1} + C_{\epsilon_1} T^{1/2} ) \Vert \tilde{v}\Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH144} \end{split} \end{align} for any $\epsilon_1, \epsilon_2 \in (0,1]$. Since the difference $\tilde{\RR}$ satisfies the ODE system \begin{align} & \tilde{\RR}_t - \tilde{\RR} \dive v_2 = \RR_1 \dive \tilde{v} \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH174} \\ & \tilde{\RR}(0) = 0 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH136} \end{align} the solution is given by \begin{align} \begin{split} \tilde{\RR}(t,x) = e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \dive v_2 (\tau) d\tau} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t e^{-\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^\tau \dive v_2} \RR_1(\tau) \dive \tilde{v}(\tau) d\tau . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH173} \end{split} \end{align} \par For the second term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127}, we obtain
\begin{align} \begin{split} \Vert\tilde{\RR} \RR_1^{-1} \RR_2^{-1} \nabla \RR_2 \Vert_{L^2_t H^{s-1}_x} \les T^{1/2} \Vert \tilde{\RR} \Vert_{L^\infty_t H^s_x} \Vert \RR_1^{-1} \Vert_{L^\infty_t H^s_x} \Vert \RR_2^{-1} \Vert_{L^\infty_t H^s_x} \Vert \RR_2 \Vert_{L^\infty_t H^{s}_x} \les T^{1/2} \Vert \tilde{v}\Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH134} \end{split} \end{align} and \begin{align} \begin{split} & \Vert (\tilde{\RR} \RR_1^{-1} \RR_2^{-1} \nabla \RR_2)_t \Vert_{L^2_t L^2_x} \\&\indeq \les \Vert \tilde{\RR}_t \nabla \RR_2 \Vert_{L^2_t L^2_x} + \Vert \tilde{\RR} \RR_{1t} \nabla \RR_2 \Vert_{L^2_t L^2_x} + \Vert \tilde{\RR} \RR_{2t} \nabla \RR_2 \Vert_{L^2_t L^2_x} + \Vert \tilde{\RR} \nabla \RR_{2t} \Vert_{L^2_t L^2_x} \\&\indeq \les \Vert \tilde{R} \Vert_{L^\infty_t L^\infty_x} \Vert \dive v_2 \Vert_{L^2_t L^4_x} \Vert \nabla \RR_2 \Vert_{L^\infty_t L^4_x} + \Vert \RR_1 \Vert_{L^\infty_t L^\infty_x} \Vert \dive \tilde{v}\Vert_{L^2_t L^\infty_x} \Vert \nabla \RR_2 \Vert_{L^\infty_t L^2_x} \\&\indeq\indeq + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert \dive v_1 \Vert_{L^2_t L^4_x} \Vert \nabla \RR_2 \Vert_{L^\infty_t L^4_x} + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert \dive v_2 \Vert_{L^2_t L^4_x} \Vert \nabla \RR_2 \Vert_{L^\infty_t L^4_x} \\&\indeq\indeq + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \RR_{2t}\Vert_{L^2_t L^2_x} \\&\indeq \les (\epsilon + C_\epsilon T^{1/2}) \Vert \tilde{v} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH135} \end{split} \end{align} for any $\epsilon \in (0,1]$, where we used H\"older's inequality, Lemma~\ref{L04}, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH174}, and~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH173}. 
\par For the third term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127}, we get \begin{align} \begin{split} \Vert \RR_1 \nabla \tilde{\RR} \Vert_{L^2_t H^{s-1}_x} \les T^{1/2} \Vert \RR_1 \Vert_{L^\infty_t H^s_x} \Vert \tilde{\RR} \Vert_{L^\infty_t H^s_x} \les T^{1/2} \Vert \tilde{v}\Vert_{K^{s+1}} \llabel{ gJB4ue Mx x 5lr s8g VbZ s1 NEfI 02Rb pkfEOZ E4 e seo 9te NRU Ai nujf eJYa Ehns0Y 6X R UF1 PCf 5eE AL 9DL6 a2vm BAU5Au DD t yQN 5YL LWw PW GjMt 4hu4 FIoLCZ Lx e BVY 5lZ DCD 5Y yBwO IJeH VQsKob Yd q fCX 1to mCb Ej 5m1p Nx9p nLn5A3 g7 U v77 7YU gBR lN rTyj shaq BZXeAF tj y FlW jfc 57t 2f abx5 Ns4d clCMJc Tl q kfq uFD iSd DP eX6m YLQz JzUmH0 43 M lgF edN mXQ Pj Aoba 07MY wBaC4C nj I 4dw KCZ PO9 wx 3en8 AoqX 7JjN8K lq j Q5c bMS dhR Fs tDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH138} \end{split} \end{align} and \begin{align} \begin{split} \Vert (\RR_1 \nabla \tilde{\RR})_t \Vert_{L^2_t L^2_x} & \les \Vert \RR_{1t} \nabla \tilde{\RR} \Vert_{L^2_t L^2_x} + \Vert \RR_1 \nabla \tilde{\RR}_t \Vert_{L^2_t L^2_x} \\& \les \Vert \RR_{1t} \Vert_{L^2_t L^\infty_x} \Vert \nabla \tilde{\RR} \Vert_{L^\infty_t L^2_x} + \Vert \RR_{1} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \tilde{\RR}_t \Vert_{L^2_t L^2_x} \\& \les (\epsilon + C_\epsilon T^{1/2}) \Vert \tilde{v} \Vert_{K^{s+1}} , \llabel{Q8Q r2ve 2HT0uO 5W j TAi iIW n1C Wr U1BH BMvJ 3ywmAd qN D LY8 lbx XMx 0D Dvco 3RL9 Qz5eqy wV Y qEN nO8 MH0 PY zeVN i3yb 2msNYY Wz G 2DC PoG 1Vb Bx e9oZ GcTU 3AZuEK bk p 6rN eTX 0DS Mc zd91 nbSV DKEkVa zI q NKU Qap NBP 5B 32Ey prwP FLvuPi wR P l1G TdQ BZE Aw 3d90 v8P5 CPAnX4 Yo 2 q7s yr5 BW8 Hc T7tM ioha BW9U4q rb u mEQ 6Xz MKR 2B REFX k3ZO MVMYSw 9S F 5ek q0m yNK Gn H0qi vlRA 18CbEz id O iuy ZZ6 kRo oJ kLQ0 Ewmz sKlld6 Kr K JmR xls DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH139} \end{split} \end{align} where we appealed to Lemma~\ref{L04}, 
\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH174}, and~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH173}. \par Regarding the fourth term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127}, it follows that \begin{align} \begin{split} \Vert \tilde{\RR} \nabla^2 \bar{v}_2\Vert_{L^2_t H^{s-1}_x} \les \Vert \tilde{\RR}\Vert_{L^\infty_t H^s_x} \Vert \bar{v}_2 \Vert_{L^2_t H^{s+1}_x} \les T^{1/2} \Vert \tilde{v} \Vert_{K^{s+1}} \llabel{12K G2 bv8v LxfJ wrIcU6 Hx p q6p Fy7 Oim mo dXYt Kt0V VH22OC Aj f deT BAP vPl oK QzLE OQlq dpzxJ6 JI z Ujn TqY sQ4 BD QPW6 784x NUfsk0 aM 7 8qz MuL 9Mr Ac uVVK Y55n M7WqnB 2R C pGZ vHh WUN g9 3F2e RT8U umC62V H3 Z dJX LMS cca 1m xoOO 6oOL OVzfpO BO X 5Ev KuL z5s EW 8a9y otqk cKbDJN Us l pYM JpJ jOW Uy 2U4Y VKH6 kVC1Vx 1u v ykO yDs zo5 bz d36q WH1k J7Jtkg V1 J xqr Fnq mcU yZ JTp9 oFIc FAk0IT A9 3 SrL axO 9oU Z3 jG6f BRL1 iZ7ZE6 zj 8 DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH141} \end{split} \end{align} and \begin{align} \begin{split} \Vert \tilde{\RR} \nabla^2 \bar{v}_2 \Vert_{H^{(s-1)/2}_t L^2_x} & \les \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \bar{v}_2\Vert_{H^{(s-1)/2}_t L^2_x} + \Vert \tilde{\RR} \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert \nabla^2 \bar{v}_2 \Vert_{L^4_t L^2_x} \\& \les T^{1/2} \Vert \tilde{v} \Vert_{K^{s+1}} \Vert \nabla^2 \bar{v}_2\Vert_{H^{(s-1)/2}_t L^2} + \Vert \tilde{\RR} \Vert_{H^1_t L^\infty_x} \Vert \nabla^2 \bar{v}_2\Vert_{H^{(s-1)/2}_t L^2} \\& \les ( \epsilon + C_\epsilon T^{1/2} ) \Vert \tilde{v} \Vert_{K^{s+1}}
, \llabel{G3M Hu8 6Ay jt 3flY cmTk jiTSYv CF t JLq cJP tN7 E3 POqG OKe0 3K3WV0 ep W XDQ C97 YSb AD ZUNp 81GF fCPbj3 iq E t0E NXy pLv fo Iz6z oFoF 9lkIun Xj Y yYL 52U bRB jx kQUS U9mm XtzIHO Cz 1 KH4 9ez 6Pz qW F223 C0Iz 3CsvuT R9 s VtQ CcM 1eo pD Py2l EEzL U0USJt Jb 9 zgy Gyf iQ4 fo Cx26 k4jL E0ula6 aS I rZQ HER 5HV CE BL55 WCtB 2LCmve TD z Vcp 7UR gI7 Qu FbFw 9VTx JwGrzs VW M 9sM JeJ Nd2 VG GFsi WuqC 3YxXoJ GK w Io7 1fg sGm 0P YFBz X8eX 7pf9DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH142} \end{split} \end{align} since $s/2-1/4 \leq 1$ and $1/4\leq s/2-1/2$. \par For the last term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127}, using the trace inequality, we arrive at \begin{align} \begin{split} \Vert \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu\Vert_{L^2_t H^{s-1/2}(\Gammac)} \les \Vert \tilde{\RR} \Vert_{L^2_t H^{s}_x} \les T^{1/2} \Vert \tilde{v}\Vert_{K^{s+1}} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH192} \end{split} \end{align} and \begin{align} \begin{split} \Vert \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu\Vert_{H^{s/2-1/4}_t L^2(\Gammac)} \les \Vert \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \Vert_{H^1_t H^1_x} \les \Vert (\RR_1^{-1} \RR_2^{-1} \tilde{\RR})_t \Vert_{L^2_t H^1_x} + \Vert \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \Vert_{L^2_t H^1_x} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH191} \end{split} \end{align} since $s/2-1/4 \leq 1$. 
For the first term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH191}, we proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH135} to obtain \begin{align} \begin{split} \Vert (\RR_1^{-1} \RR_2^{-1} \tilde{\RR})_t \Vert_{L^2_t H^1_x} & \les \Vert \RR_{1t} \tilde{\RR} \Vert_{L^2_t H^1_x} + \Vert \RR_{2t} \tilde{\RR} \Vert_{L^2_t H^1_x} + \Vert \tilde{\RR}_t \Vert_{L^2_t H^1_x} \les ( \epsilon+ C_{\epsilon}T^{1/2} ) \Vert \tilde{v} \Vert_{K^{s+1}} . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH193} \end{split} \end{align} The second term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH191} is estimated analogously as in~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH192}. \par From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH127}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH144} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH134}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH193} it follows that \begin{align*} \begin{split} \Vert \tilde{\VV} \Vert_{K^{s+1}} \les (\epsilon + \epsilon_1 + \epsilon_2 C_{\epsilon_1} + C_{\epsilon, \epsilon_1, \epsilon_2} T^{1/2} ) \Vert \tilde{v}\Vert_{K^{s+1}} . \end{split} \end{align*} Taking $\epsilon$, $\epsilon_1$, $\epsilon_2$, and $T_0>0$ sufficiently small, we conclude the proof of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH121}. 
Thus, the mapping $\Lambda$ is contracting and from \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH119} and Lemma~\ref{L04} it follows that there exists a unique solution \begin{align*} \begin{split} (v, \RR, w, w_t) & \in K^{s+1} ((0,T_0) \times \Omegaf) \times H^1 ((0, T_0), H^s(\Omegaf)) \\&\indeqtimes C([0,T_0], H^{s+1/4-\epsilon_0}(\Omegae)) \times C([0,T_0], H^{s-3/4-\epsilon_0}(\Omegae)) , \end{split} \end{align*} for some $T_0>0$. \end{proof} \par \startnewsection{Solution to the Navier-Stokes-wave system}{sec06} In this section, we provide the local existence for the coupled Navier-Stokes-wave system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} with the boundary conditions \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH262}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH267} and the initial data~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH265}. Let $v\in Z_T$ where $Z_T$ is as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH188}, with constants $M>1$ and $T\in (0,1)$ both to be determined below. Let $ \eta (t,x) = x + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t v(\tau, x) d\tau $ be the corresponding Lagrangian flow map and $a (t,x)= (\nabla \eta(t,x))^{-1}$ be the inverse matrix of the flow map, while $\JJ(t,x) = \det (\nabla \eta(t,x))$ denotes the Jacobian. First, we solve \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260} for $\RR$ with the initial data $\RR(0)=\RR_0$, obtaining \begin{align*} \begin{split} \RR(t,x) = \RR_0(x) e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t a_{kj} (\tau) \partial_k v_j (\tau) d\tau} \inon{in~$[0,T]\times \Omegaf$} . 
\end{split} \end{align*} Then we solve the wave equation \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} for $w$ with the boundary condition \begin{align*} & w(t,x) = w(0) + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t v(\tau, x) d\tau \inon{on~$(0,T)\times \Gammac$} \end{align*} and the initial data \begin{align*} & (w, w_t)(0) = (w_0, w_1) \inon{in~$\Omegae$} . \end{align*} With $(\RR, w, \eta, \JJ, a)$ constructed, we define a mapping \begin{align*} \Pi\colon v\in Z_T \mapsto \bar{v} , \end{align*} where $\bar{v}$ is the solution of the nonhomogeneous parabolic problem \begin{align} & \partial_t \bar{v}_{j} - \lambda \RR \partial_k (\partial_j \bar{v}_k + \partial_k \bar{v}_j) - \mu \RR \partial_j \partial_k \bar{v}_k =
f_j \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH175} \\ & \lambda (\partial_k \bar{v}_j + \partial_j \bar{v}_k) \nu^k + \mu \partial_k \bar{v}_k \nu^j = \partial_k w_j \nu^k + h_j \inon{in~$(0,T) \times \Gammac$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH176} \\& \bar{v}~~\text{periodic in the $y_1$~and~$y_2$ directions}, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH400} \\& \bar{v}(0) = v_0 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH302} \end{align} for $j=1,2,3$. In \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH175}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH176}, we denote \begin{align} \begin{split} f_j & = \lambda \RR \partial_k (b_{mk} \partial_m \bar{v}_j + b_{mj} \partial_m \bar{v}_k) + \lambda \RR b_{kl} \partial_k (b_{ml} \partial_m \bar{v}_j + b_{mj} \partial_m \bar{v}_l) + \lambda \RR b_{kl} \partial_k (\partial_l \bar{v}_j + \partial_j \bar{v}_l) \\&\indeq + \mu \RR \partial_j (b_{mi} \partial_m \bar{v}_i) + \mu \RR b_{kj} \partial_k (b_{mi} \partial_m \bar{v}_i) + \mu \RR b_{kj} \partial_k \partial_i \bar{v}_i - \RR b_{kj} \partial_k \RR^{-1} - \RR \partial_j \RR^{-1} \\& =: I_1 + I_2 + I_3 + I_4 + I_5 + I_6 + I_7 + I_8 \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH177} \end{split} \end{align} and \begin{align} \begin{split} h_j & = \lambda (1-\JJ) (\partial_k \bar{v}_j + \partial_j \bar{v}_k) \nu^k + \mu (1-\JJ) \partial_k \bar{v}_k \nu^j - \lambda \JJ b_{kl} (b_{ml} \partial_m \bar{v}_j + b_{mj} \partial_m \bar{v}_l) \nu^k \\&\indeq + \JJ b_{kj} \RR^{-1} \nu^k + (\JJ-1) \RR^{-1} \nu^j - \lambda \JJ (b_{mk} \partial_m \bar{v}_j + b_{mj} \partial_m \bar{v}_k) \nu^k - \lambda \JJ b_{kl} (\partial_l \bar{v}_j + \partial_j 
\bar{v}_l )\nu^k \\&\indeq - \mu J b_{kj} b_{mi} \partial_m \bar{v}_i \nu^k - \mu \JJ b_{mi} \partial_m \bar{v}_i \nu^j - \mu \JJ b_{kj} \partial_i \bar{v}_i \nu^k + \RR^{-1} \nu^j \\& = : K_1 + K_2 + K_3 + K_4 + K_5 + K_6 + K_7 + K_8 + K_9 + K_{10} + K_{11} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH178} \end{split} \end{align} for $j=1,2,3$, where $b_{ml}= a_{ml}- \delta_{ml}$ for $m,l = 1,2,3$. \par Before we bound the terms in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH177}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH178} and construct a contraction mapping as in Section~\ref{sec05}, we provide some necessary a~priori estimates on the variable coefficients. \par \subsection{The Lagrangian flow map, Jacobian matrix, and density estimates} We start with the necessary Jacobian and the inverse matrix of the flow map estimates. \cole \begin{Lemma} \label{L08} Let $v\in K^{s+1}$ with $\eta (t,x)= x + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t v(\tau, x) d\tau$ the associated Lagrangian map. Denote $b = a - I_3$ and $J=\det (\nabla \eta)$ where $a= (\nabla \eta)^{-1}$. Suppose that $\Vert v\Vert_{K^{s+1}} \leq M$ for some constants $T>0$ and $M>1$. Then for any $\epsilon \in (0,1]$, there exists a constant $T_0 \in (0, T)$ depending on $M>1$ such that the following statements hold: \begin{enumerate}[label=(\roman*)] \item $\Vert b \Vert_{L^\infty_t H^s_x} + \Vert b\Vert_{H^1_t L^\infty_x} + \Vert b\Vert_{H^1_t H^1_x}\les \epsilon$, \item $\Vert b\Vert_{H^1_t H^s_x} \les M$, \item $\Vert 1-\JJ \Vert_{L^\infty_t H^s_x} \les \epsilon$, \item $\JJ(t,x) \les 1$ and $\JJ(t,x)^{-1} \les 1$, for any $(t,x) \in [0, T_0]\times \Omegaf$, \item $\Vert \JJ \Vert_{L^\infty_t H^s_x} + \Vert \JJ \Vert_{H^1_t L^\infty_x} + \Vert \JJ^{-1} \Vert_{L^\infty_t H^s_x} + \Vert \JJ^{-1} \Vert_{H^1_t L^\infty_x} \les 1$, and \item $\Vert \JJ \Vert_{H^1_t H^s_x} \les M$. 
\end{enumerate} We emphasize that the implicit constants in the above inequalities are independent of $M$. \end{Lemma} \colb \par \begin{proof}[Proof of Lemma~\ref{L08}] (i) From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH204} we have \begin{align} -b_t = b (\nabla v) b + b\nabla v + (\nabla v ) b + \nabla v . \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH211} \end{align} Note that $b(0) = 0$. From the fundamental theorem of calculus, it follows that \begin{align*} \begin{split} \Vert b(t) \Vert_{H^s} & \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert b \Vert_{H^s}^2 \Vert \nabla v \Vert_{H^s} d\tau + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert b \Vert_{H^s} \Vert \nabla v\Vert_{H^s} d\tau + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \nabla v\Vert_{H^s} d\tau \\& \les \epsilon_1 \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert v\Vert_{H^{s+1}}^2 d\tau + C_{\epsilon_1 } \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t (\Vert b\Vert_{H^{s}}^4 + \Vert b\Vert_{H^s}^2) d\tau + T^{1/2} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert v\Vert_{H^{s+1}}^2 d\tau , \end{split} \end{align*} for any $\epsilon_1 \in (0,1]$. Using Gronwall's inequality and taking $\epsilon_1, T_0>0$ sufficiently small, we arrive at \begin{align} \Vert b\Vert_{L^\infty_x H^s_x} \les \epsilon \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH280} . 
\end{align} From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH211}, we use H\"older's inequality to obtain \begin{align} \begin{split} \Vert b_t\Vert_{L^2_t L^\infty_x} & \les \Vert \nabla v\Vert_{L^2_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x}^2 + \Vert \nabla v\Vert_{L^2_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} + \Vert \nabla v\Vert_{L^2_t L^\infty_x} \\& \les (\epsilon_1 + C_{\epsilon_1} T^{1/2}) M \les \epsilon \llabel{GJ b1 o XUs 1q0 6KP Ls MucN ytQb L0Z0Qq m1 l SPj 9MT etk L6 KfsC 6Zob Yhc2qu Xy 9 GPm ZYj 1Go ei feJ3 pRAf n6Ypy6 jN s 4Y5 nSE pqN 4m Rmam AGfY HhSaBr Ls D THC SEl UyR Mh 66XU 7hNz pZVC5V nV 7 VjL 7kv WKf 7P 5hj6 t1vu gkLGdN X8 b gOX HWm 6W4 YE mxFG 4WaN EbGKsv 0p 4 OG0 Nrd uTe Za xNXq V4Bp mOdXIq 9a b PeD PbU Z4N Xt ohbY egCf xBNttE wc D YSD 637 jJ2 ms 6Ta1 J2xZ PtKnPw AX A tJA Rc8 n5d 93 TZi7 q6Wo nEDLwW Sz e Sue YFX 8cM hm Y6is 1DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH303} \end{split} \end{align} and \begin{align} \begin{split} \Vert b_t \Vert_{L^2_t H^1_x} & \les \Vert \nabla v\Vert_{L^2_t H^1_x} \Vert b \Vert_{L^\infty_t H^s_x}^2 + \Vert \nabla v\Vert_{L^2_t H^1_x} \Vert b \Vert_{L^\infty_t H^s_x} + \Vert \nabla v\Vert_{L^2_t H^1_x} \\& \les (\epsilon_1 + C_{\epsilon_1} T^{1/2}) M \les \epsilon , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH279} \end{split} \end{align} by taking $\epsilon_1$ and $T_0>0$ sufficiently small. Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH280}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH279}, we conclude the proof of (i). 
\par (ii) From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH211} it follows that \begin{align*} \begin{split} \Vert b_t\Vert_{L^2_t H^s_x} & \les \Vert \nabla v\Vert_{L^2_t H^s_x} \Vert b \Vert_{L^\infty_t H^s_x}^2 + \Vert \nabla v\Vert_{L^2_t H^s_x} \Vert b \Vert_{L^\infty_t H^s_x} + \Vert \nabla v\Vert_{L^2_t H^s_x} \les M , \end{split} \end{align*} completing the proof of (ii). \par (iii) Since the Jacobian matrix $\JJ$ satisfies the ODE system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH210}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH212}, the solution is given by the explicit formula as \begin{align} \begin{split} \JJ (t,x) =
e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t a_{kj} (\tau, x)\partial_k v_j (\tau, x) d\tau} . \llabel{5pX aOYBbV fS C haL kBR Ks6 UO qG4j DVab fbdtny fi D BFI 7uh B39 FJ 6mYr CUUT f2X38J 43 K yZg 87i gFR 5R z1t3 jH9x lOg1h7 P7 W w8w jMJ qH3 l5 J5wU 8eH0 OogRCv L7 f JJg 1ug RfM XI GSuE Efbh 3hdNY3 x1 9 7jR qeP cdu sb fkuJ hEpw MvNBZV zL u qxJ 9b1 BTf Yk RJLj Oo1a EPIXvZ Aj v Xne fhK GsJ Ga wqjt U7r6 MPoydE H2 6 203 mGi JhF nT NCDB YlnP oKO6Pu XU 3 uu9 mSg 41v ma kk0E WUpS UtGBtD e6 d Kdx ZNT FuT i1 fMcM hq7P Ovf0hg Hl 8 fqv I3R K39 fDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH207} \end{split} \end{align} Thus, using the nonlinear estimate of the Sobolev norm, we arrive at \begin{align*} \begin{split} \Vert 1- \JJ \Vert_{L^\infty_t H^s_x} \les T^{1/2} Q(M) \les \epsilon , \end{split} \end{align*} by taking $T_0>0$ sufficiently small. \par (iv), (v), and (vi) are analogous to the proof of Lemma~\ref{L04} using (i)--(iii). Thus we omit the details. \end{proof} \par The following lemma provides the necessary a~priori density estimates. \cole \begin{Lemma} \label{L09} Let $s\in (2, 2+\epsilon_0]$ where $\epsilon_0 \in (0,1/2)$. Consider the ODE system \begin{align*} & \RR_t - \RR a_{kj} \partial_k v_j = 0 \inon{in~$(0,T)\times \Omegaf$} , \\ & \RR(0) = \RR_0 \inon{on~$\Omegaf$} . \end{align*} Let $b_{kj}=a_{kj}-\delta_{kj}$ for $k,j =1,2,3$. Assume \begin{align*} (\RR_0, \RR_0^{-1}, b) \in H^s(\Omegaf) \times H^s(\Omegaf) \times L^\infty ([0,T], H^s (\Omegaf)) , \end{align*} and $\Vert v\Vert_{K^{s+1}} \leq M$, for some $T>0$ and $M>1$. 
Then there exists a constant $T_0 \in (0,T)$ depending on $M$ such that the following statements hold: \begin{enumerate}[label=(\roman*)] \item $\Vert \RR \Vert_{L^\infty_t L^\infty_x} \les 1$ and $\Vert \RR^{-1} \Vert_{L^\infty_t L^\infty_x} \les 1$ in $ [0, T_0]\times \Omegaf$, \item $\Vert \RR \Vert_{L^\infty_t H^s_x} + \Vert \RR \Vert_{H^1_t L^\infty_x} + \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} + \Vert \RR^{-1} \Vert_{H^1_t L^\infty_x} \les 1$, and \item $\Vert \RR\Vert_{H^1_t H^s_x} \les M$. \end{enumerate} We emphasize that the implicit constants in the above inequalities are independent of $M$. \end{Lemma} \colb The proof of Lemma~\ref{L09} is analogous to the proof of Lemma~\ref{L04}. The modifications needed are the estimates of $b_{kj}$ which are provided in Lemma~\ref{L08}. Thus we omit the details. \par \subsection{Uniform boundedness of the iterative sequence} In this section we shall prove that the mapping $\Pi$ is well-defined from $Z_T$ to $Z_T$, for some sufficiently large $M>1$ and sufficiently small $T \in (0,1)$. From Lemma~\ref{L02} and~\ref{L09} it follows that \begin{align} \begin{split} & \Vert \bar{v}\Vert_{K^{s+1}} \les \left\Vert \frac{\partial w}{\partial \nu} \right\Vert_{H^{s/2-1/4, s-1/2 }_{\Gammac}} + \Vert h\Vert_{H^{s-1/2, s/2-1/4}_{\Gammac}} + \Vert v_0\Vert_{H^s} + \Vert f\Vert_{K^{s-1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH201} \end{split} \end{align} for some constant $T \in (0,1)$ depending on $M$, where $f$ and $h$ are as in~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH177}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH178}. We emphasize that the implicit constants in this section are independent of $M$. 
\par For the first term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH201}, we proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH111}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH116} to obtain \begin{align} \begin{split} & \Vert \bar{v}\Vert_{K^{s+1}} + \Vert w\Vert_{C([0,T_0], H^{s+1/4 -\epsilon_0} (\Omegae))} + \Vert w_t \Vert_{C([0,T_0], H^{s-3/4-\epsilon_0}(\Omegae))} \\&\indeq \les \Vert h\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert v_0\Vert_{H^s} + \Vert f\Vert_{K^{s-1}} + \Vert w_0 \Vert_{H^{s+1/2}} \\&\indeq\indeq + \Vert w_1\Vert_{H^{s-1/2}} + (\epsilon_1 + \epsilon_2 C_{\epsilon_1} + C_{\epsilon_1} T^{1/2}) M , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH202} \end{split} \end{align} for any $\epsilon_1, \epsilon_2 \in (0,1]$. \par Next, we estimate the $K^{s-1}$ norm of the terms on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH177} for $j=1,2,3$. 
For the term $I_1$, we use H\"older's inequality to get
From Lemma~\ref{L12} and the Sobolev inequality it follows that \begin{align} \begin{split} \Vert \nabla \bar{v} \Vert_{L^4_t L^3_x} \les \Vert \bar{v} \Vert_{H^{1/4}_t H^{3/2}_x} \les \epsilon \Vert \bar{v} \Vert_{H^{(s-1)/2}_t L^2_x} + C_\epsilon \Vert \bar{v} \Vert_{L^2_t H^s_x} \les (\epsilon + C_\epsilon T^{1/2} ) \Vert \bar{v} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH227} \end{split} \end{align} for any $\epsilon \in (0,1]$. Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH228}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH227}, we arrive at \begin{align} \Vert I_1 \Vert_{K^{s+1}} \les M (\epsilon + C_\epsilon T^{1/2} ) \Vert \bar{v} \Vert_{K^{s+1}} . \llabel{n 9MaC Zgow 6e1iXj KC 5 lHO lpG pkK Xd Dxtz 0HxE fSMjXY L8 F vh7 dmJ kE8 QA KDo1 FqML HOZ2iL 9i I m3L Kva YiN K9 sb48 NxwY NR0nx2 t5 b WCk x2a 31k a8 fUIa RGzr 7oigRX 5s m 9PQ 7Sr 5St ZE Ymp8 VIWS hdzgDI 9v R F5J 81x 33n Ne fjBT VvGP vGsxQh Al G Fbe 1bQ i6J ap OJJa ceGq 1vvb8r F2 F 3M6 8eD lzG tX tVm5 y14v mwIXa2 OG Y hxU sXJ 0qg l5 ZGAt HPZd oDWrSb BS u NKi 6KW gr3 9s 9tc7 WM4A ws1PzI 5c C O7Z 8y9 lMT LA dwhz Mxz9 hjlWHj bJ 5 CqM jDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH240} \end{align} For the term $I_2$, we use H\"older's and Sobolev inequalities, obtaining \begin{align} \begin{split} \Vert I_2 \Vert_{L^2_t H^{s-1}_x} & \les \Vert \RR b \nabla b \nabla \bar{v} \Vert_{L^2_t H^{s-1}_x} + \Vert \RR b b \nabla^2 \bar{v} \Vert_{L^2_t H^{s-1}_x} \les \Vert \RR \Vert_{L^\infty_t H^s_x} \Vert b \Vert_{L^\infty_t H^s_x}^2 \Vert \bar{v} \Vert_{L^\infty_t H^{s+1}_x} \les \epsilon \Vert \bar{v} \Vert_{K^{s+1}} \llabel{ht y9l Mn 4rc7 6Amk KJimvH 9r O tbc tCK rsi B0 4cFV Dl1g cvfWh6 5n x y9Z S4W Pyo QB yr3v fBkj TZKtEZ 7r U fdM icd yCV qn D036 HJWM tYfL9f yX x O7m IcF E1O uL QsAQ NfWv 6kV8Im 7Q 6 GsX NCV 0YP oC jnWn 6L25 qUMTe7 1v a hnH 
DAo XAb Tc zhPc fjrj W5M5G0 nz N M5T nlJ WOP Lh M6U2 ZFxw pg4Nej P8 U Q09 JX9 n7S kE WixE Rwgy Fvttzp 4A s v5F Tnn MzL Vh FUn5 6tFY CxZ1Bz Q3 E TfD lCa d7V fo MwPm ngrD HPfZV0 aY k Ojr ZUw 799 et oYuB MIC4 ovEY8D OLDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH241} \end{split} \end{align} and \begin{align} \begin{split} & \Vert I_2 \Vert_{H^{(s-1)/2}_t L^2_x} \les \Vert \RR b \nabla b \nabla \bar{v} \Vert_{H^{(s-1)/2}_t L^2_x} + \Vert \RR b b \nabla^2 \bar{v} \Vert_{H^{(s-1)/2}_t L^2_x} \\&\indeq \les \Vert \RR \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} \Vert \nabla b \Vert_{L^\infty_t L^4_x} \Vert \nabla \bar{v} \Vert_{H^{(s-1)/2}_t L^4_x} + \Vert \RR \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} \Vert \nabla b \Vert_{L^\infty_t L^6_x} \Vert \nabla \bar{v} \Vert_{L^4_t L^3_x} \\&\indeq\indeq + \Vert \RR \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} \Vert \nabla b \Vert_{W^{(s-1)/2, 4}_t L^6_x} \Vert \nabla \bar{v} \Vert_{L^4_t L^3_x} + \Vert \RR \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert \nabla b \Vert_{L^\infty_t L^6_x} \Vert \nabla \bar{v} \Vert_{L^4_t L^3_x} \\&\indeq\indeq + \Vert \RR \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x}^2 \Vert \nabla^2 \bar{v} \Vert_{H^{(s-1)/2}_t L^2_x} + \Vert \RR \Vert_{W^{(s-1)/2,4}_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x}^2 \Vert \nabla^2 \bar{v} \Vert_{L^4_t L^2_x} \\&\indeq\indeq + \Vert \RR \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \bar{v} \Vert_{L^4_t L^2_x} \\&\indeq \les M (\epsilon + C_\epsilon T^{1/2}) \Vert \bar{v} \Vert_{K^{s+1}} , \llabel{ N URV Q5l ti1 iS NZAd wWr6 Q8oPFf ae 5 lAR 9gD RSi HO eJOW wxLv 20GoMt 2H z 7Yc aly PZx eR uFM0 7gaV 9UIz7S 43 k 5Tr ZiD Mt7 pE NCYi uHL7 gac7Gq yN 6 Z1u x56 YZh 2d yJVx 9MeU OMWBQf l0 E mIc 5Zr yfy 3i rahC y9Pi MJ7ofo Op d 
enn sLi xZx Jt CjC9 M71v O0fxiR 51 m FIB QRo 1oW Iq 3gDP stD2 ntfoX7 YU o S5k GuV IGM cf HZe3 7ZoG A1dDmk XO 2 KYR LpJ jII om M6Nu u8O0 jO5Nab Ub R nZn 15k hG9 4S 21V4 Ip45 7ooaiP u2 j hIz osW FDu O5 HdGr djvv tDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH216} \end{split} \end{align} where the last inequality follows from Lemmas~\ref{L08}--\ref{L09}, \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH227}, and the inequality \begin{align} \Vert \nabla^2 \bar{v} \Vert_{L^4_t L^2_x} \les \Vert \bar{v} \Vert_{H^{1/4}_t H^2_x} \les
\epsilon \Vert \bar{v} \Vert_{H^{(s-1)/2}_t L^2_x} + C_\epsilon \Vert \bar{v} \Vert_{L^2_t H^{s+1/2}_x} \les (\epsilon + C_\epsilon T^{1/2} ) \Vert \bar{v} \Vert_{K^{s+1}} . \llabel{TLBjo vL L iCo 6L5 Lwa Pm vD6Z pal6 9Ljn11 re T 2CP mvj rL3 xH mDYK uv5T npC1fM oU R RTo Loi lk0 FE ghak m5M9 cOIPdQ lG D LnX erC ykJ C1 0FHh vvnY aTGuqU rf T QPv wEq iHO vO hD6A nXuv GlzVAv pz d Ok3 6ym yUo Fb AcAA BItO es52Vq d0 Y c7U 2gB t0W fF VQZh rJHr lBLdCx 8I o dWp AlD S8C HB rNLz xWp6 ypjuwW mg X toy 1vP bra uH yMNb kUrZ D6Ee2f zI D tkZ Eti Lmg re 1woD juLB BSdasY Vc F Uhy ViC xB1 5y Ltql qoUh gL3bZN YV k orz wa3 650 qW hF2DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH51} \end{align} The terms $I_3$, $I_4$, $I_5$, and $I_6$ are estimated analogously as $I_1$ and $I_2$, and we get \begin{align} \Vert I_3 \Vert_{K^{s+1}} + \Vert I_4 \Vert_{K^{s+1}} + \Vert I_5 \Vert_{K^{s+1}} + \Vert I_6 \Vert_{K^{s+1}} \les M (\epsilon + C_\epsilon T^{1/2}) \Vert \bar{v} \Vert_{K^{s+1}} . 
\llabel{2 epiX cAjA4Z V4 b cXx uB3 NQN p0 GxW2 Vs1z jtqe2p LE B iS3 0E0 NKH gY N50v XaK6 pNpwdB X2 Y v7V 0Ud dTc Pi dRNN CLG4 7Fc3PL Bx K 3Be x1X zyX cj 0Z6a Jk0H KuQnwd Dh P Q1Q rwA 05v 9c 3pnz ttzt x2IirW CZ B oS5 xlO KCi D3 WFh4 dvCL QANAQJ Gg y vOD NTD FKj Mc 0RJP m4HU SQkLnT Q4 Y 6CC MvN jAR Zb lir7 RFsI NzHiJl cg f xSC Hts ZOG 1V uOzk 5G1C LtmRYI eD 3 5BB uxZ JdY LO CwS9 lokS NasDLj 5h 8 yni u7h u3c di zYh1 PdwE l3m8Xt yX Q RCA bwe aLDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH229} \end{align} For the term $I_7$, we obtain \begin{align} \begin{split} \Vert I_7\Vert_{L^2_t H^{s-1}_x} & \les \Vert \RR^{-1} b \nabla \RR\Vert_{L^2_t H^{s-1}_x} \les \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} \Vert b \Vert_{L^\infty_t H^s_x} \Vert \RR \Vert_{L^\infty_t H^{s}_x} \les \epsilon \llabel{i N8 qA9N 6DRE wy6gZe xs A 4fG EKH KQP PP KMbk sY1j M4h3Jj gS U One p1w RqN GA grL4 c18W v4kchD gR x 7Gj jIB zcK QV f7gA TrZx Oy6FF7 y9 3 iuu AQt 9TK Rx S5GO TFGx 4Xx1U3 R4 s 7U1 mpa bpD Hg kicx aCjk hnobr0 p4 c ody xTC kVj 8t W4iP 2OhT RF6kU2 k2 o oZJ Fsq Y4B FS NI3u W2fj OMFf7x Jv e ilb UVT ArC Tv qWLi vbRp g2wpAJ On l RUE PKh j9h dG M0Mi gcqQ wkyunB Jr T LDc Pgn OSC HO sSgQ sR35 MB7Bgk Pk 6 nJh 01P Cxd Ds w514 O648 VD8iJ5 4F W 6rDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH243} \end{split} \end{align} and \begin{align} \begin{split} \Vert I_7\Vert_{H^{(s-1)/2}_t L^2_x} & \les \Vert \RR^{-1} b \nabla \RR\Vert_{H^{(s-1)/2}_t L^2_x} \\& \les \Vert \RR^{-1} \Vert_{H^1_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \RR \Vert_{L^\infty_t L^2_x} + \Vert \RR^{-1} \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \RR \Vert_{H^1_t L^2_x} \\&\indeq + \Vert \RR^{-1} \Vert_{L^\infty_t L^\infty_x} \Vert b \Vert_{H^1_t L^\infty_x} \Vert \nabla \RR \Vert_{L^\infty_t L^2_x} \\& \les \epsilon M, \llabel{s 6Sy qGz MK fXop oe4e o52UNB 4Q 8 f8N Uz8 u2n GO AXHW gKtG AtGGJs bm z 2qj vSv GBu 5e 
4JgL Aqrm gMmS08 ZF s xQm 28M 3z4 Ho 1xxj j8Uk bMbm8M 0c L PL5 TS2 kIQ jZ Kb9Q Ux2U i5Aflw 1S L DGI uWU dCP jy wVVM 2ct8 cmgOBS 7d Q ViX R8F bta 1m tEFj TO0k owcK2d 6M Z iW8 PrK PI1 sX WJNB cREV Y4H5QQ GH b plP bwd Txp OI 5OQZ AKyi ix7Qey YI 9 1Ea 16r KXK L2 ifQX QPdP NL6EJi Hc K rBs 2qG tQb aq edOj Lixj GiNWr1 Pb Y SZe Sxx Fin aK 9Eki CHV2 a13f7GDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH244} \end{split} \end{align} where we appealed to Lemmas~\ref{L08}--\ref{L09}. We proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH214}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH113} to estimate the term $I_8$, obtaining \begin{align} \begin{split} \Vert I_8\Vert_{K^{s-1}} \les (\epsilon + C_\epsilon T^{1/2}) Q(M) . \llabel{ 3G 3 oDK K0i bKV y4 53E2 nFQS 8Hnqg0 E3 2 ADd dEV nmJ 7H Bc1t 2K2i hCzZuy 9k p sHn 8Ko uAR kv sHKP y8Yo dOOqBi hF 1 Z3C vUF hmj gB muZq 7ggW Lg5dQB 1k p Fxk k35 GFo dk 00YD 13qI qqbLwy QC c yZR wHA fp7 9o imtC c5CV 8cEuwU w7 k 8Q7 nCq WkM gY rtVR IySM tZUGCH XV 9 mr9 GHZ ol0 VE eIjQ vwgw 17pDhX JS F UcY bqU gnG V8 IFWb S1GX az0ZTt 81 w 7En IhF F72 v2 PkWO Xlkr w6IPu5 67 9 vcW 1f6 z99 lM 2LI1 Y6Na axfl18 gT 0 gDp tVl CN4 jf GSbC ro5DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH217} \end{split} \end{align} Using the estimates on $I_1$--$I_8$ in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH177}, it follows that \begin{align} \Vert f\Vert_{K^{s-1}} \les (\epsilon + C_\epsilon T^{1/2}) Q(M) \Vert \bar{v} \Vert_{K^{s+1}} + (\epsilon + C_\epsilon T^{1/2}) Q(M) . 
\llabel{D v78Cxa uk Y iUI WWy YDR w8 z7Kj Px7C hC7zJv b1 b 0rF d7n Mxk 09 1wHv y4u5 vLLsJ8 Nm A kWt xuf 4P5 Nw P23b 06sF NQ6xgD hu R GbK 7j2 O4g y4 p4BL top3 h2kfyI 9w O 4Aa EWb 36Y yH YiI1 S3CO J7aN1r 0s Q OrC AC4 vL7 yr CGkI RlNu GbOuuk 1a w LDK 2zl Ka4 0h yJnD V4iF xsqO00 1r q CeO AO2 es7 DR aCpU G54F 2i97xS Qr c bPZ 6K8 Kud n9 e6SY o396 Fr8LUx yX O jdF sMr l54 Eh T8vr xxF2 phKPbs zr l pMA ubE RMG QA aCBu 2Lqw Gasprf IZ O iKV Vbu Vae 6a DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH218} \end{align} \par Next we estimate the $H^{s-1/2, s/2-1/4}_{\Gammac}$ norm of the terms on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH178} for $j=1,2,3$. For the term $K_1$, we use H\"older's and the trace inequalities, to obtain \begin{align} \begin{split} \Vert K_1\Vert_{L^2_t H^{s-1/2} (\Gammac)} & \les \Vert (1-\JJ) \nabla \bar{v} \Vert_{L^2_t H^s_x} \les \Vert 1-\JJ \Vert_{L^\infty_t H^s_x} \Vert \bar{v} \Vert_{L^2_t H^{s+1}_x} \les \epsilon \Vert \bar{v} \Vert_{K^{s+1}} \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH220} \end{split} \end{align} and \begin{align} \begin{split} \Vert K_1\Vert_{H^{s/2-1/4}_t L^2 (\Gammac)} & \les \Vert (1-\JJ) \nabla \bar{v} \Vert_{H^{s/2}_t L^2_x} + \Vert (1-\JJ) \nabla \bar{v} \Vert_{L^2_t H^s_x} \\& \les \Vert \nabla \bar{v} \Vert_{H^{s/2}_t L^2_x} \Vert 1-\JJ \Vert_{L^\infty_t L^\infty_x} + \Vert \nabla \bar{v} \Vert_{L^\infty_t L^2_x} \Vert 1-\JJ \Vert_{H^{s/2}_t L^\infty_x} \\&\indeq + \Vert (1-\JJ) \nabla \bar{v} \Vert_{L^2_t H^s_x} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH231} \end{split} \end{align} where we used Lemmas~\ref{L06} and~\ref{L08}. 
From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH210} and Lemma~\ref{L08} it follows that \begin{align} \begin{split} \Vert \JJ_t \Vert_{H^{(s-2)/2}_t L^\infty_x} & \les \Vert \JJ b \nabla v \Vert_{H^{(s-2)/2}_t L^\infty_x} + \Vert \JJ \dive v \Vert_{H^{(s-2)/2}_t L^\infty_x} \\& \les \Vert \JJ \Vert_{W^{(s-2)/2, \infty}_t L^\infty_x} \Vert b\Vert_{L^\infty_t L^\infty_x} \Vert \nabla v \Vert_{L^2_t L^\infty_x} + \Vert \JJ \Vert_{L^\infty_t L^\infty_x} \Vert b\Vert_{W^{(s-2)/2, \infty}_t L^\infty_x} \Vert \nabla v \Vert_{L^2_t L^\infty_x} \\&\indeq + \Vert \JJ \Vert_{L^\infty_t L^\infty_x} \Vert b\Vert_{L^\infty_t L^\infty_x} \Vert \nabla v \Vert_{H^{(s-2)/2}_t L^\infty_x} + \Vert \JJ \Vert_{W^{(s-2)/2 ,\infty}_t L^\infty_x} \Vert \dive v\Vert_{L^2_t L^\infty_x} \\&\indeq + \Vert \JJ \Vert_{L^\infty_t L^\infty_x} \Vert \dive v\Vert_{H^{(s-2)/2}_t L^\infty_x} \\& \les (\epsilon + C_\epsilon T^{1/2} )Q(M) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH233} \end{split} \end{align} since \begin{align} \Vert \nabla v\Vert_{H^{(s-2)/2}_t L^\infty_x} \les \epsilon \Vert v\Vert_{H^{(s+1)/2}_t L^2_x} + C_\epsilon \Vert v \Vert_{L^2_t H^{s+3/4}_x} \les (\epsilon + C_\epsilon T^{1/2}) M . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH235} \end{align} Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH220}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH233}, we have \begin{align} \begin{split} \Vert K_1\Vert_{H^{s/2-1/4}_t L^2 (\Gammac)} & \les \Vert \nabla \bar{v} \Vert_{H^{s/2}_t L^2_x} \Vert 1-\JJ \Vert_{L^\infty_t L^\infty_x} + \Vert \nabla \bar{v} \Vert_{L^\infty_t L^2_x} \Vert 1-\JJ \Vert_{H^{s/2}_t L^\infty_x} + \Vert (1-\JJ) \nabla \bar{v} \Vert_{L^2_t H^s_x} \\& \les (\epsilon + C_\epsilon T^{1/2} )Q(M) \Vert \bar{v} \Vert_{K^{s+1}} , \llabel{bauf y9Kc Fk6cBl Z5 r KUj htW E1C nt 9Rmd whJR ySGVSO VT v 9FY 4uz yAH Sp 6yT9 s6R6 oOi3aq Zl L 7bI vWZ 18c Fa iwpt C1nd Fyp4oK xD f Qz2 813 6a8 zX wsGl Ysh9 Gp3Tal nr R UKt tBK eFr 45 43qU 2hh3 WbYw09 g2 W LIX zvQ zMk j5 f0xL seH9 dscinG wu P JLP 1gE N5W qY sSoW Peqj MimTyb Hj j cbn 0NO 5hz P9 W40r 2w77 TAoz70 N1 a u09 boc DSx Gc 3tvK LXaC 1dKgw9 H3 o 2kE oul In9 TS PyL2 HXO7 tSZse0 1Z 9 Hds lDq 0tm SO AVqt A1FQ zEMKSb ak z nw8 39wDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH234} \end{split} \end{align} where we used Lemma~\ref{L08} to bound the factor $\Vert 1-\JJ \Vert_{L^2_t L^\infty_x}$. The term $K_2$ is estimated analogously to $K_1$, and we arrive at \begin{align} \begin{split} \Vert K_2 \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} \les (\epsilon + C_\epsilon T^{1/2} )Q(M) \Vert \bar{v} \Vert_{K^{s+1}} . 
\end{split} \end{align} For the term $K_3$, we use the trace inequality to obtain \begin{align} \begin{split} \Vert K_3\Vert_{L^2_t H^{s-1/2} (\Gammac)} & \les \Vert \JJ b b \nabla \bar{v} \Vert_{L^2_t H^{s}_x} \les \Vert \JJ \Vert_{L^\infty_t H^s_x} \Vert b\Vert_{L^\infty_t H^s_x}^2 \Vert \bar{v} \Vert_{L^2_t H^{s+1}_x} \les \epsilon \Vert \bar{v} \Vert_{K^{s+1}} \llabel{ nH1 Dp CjGI k5X3 B6S6UI 7H I gAa f9E V33 Bk kuo3 FyEi 8Ty2AB PY z SWj Pj5 tYZ ET Yzg6 Ix5t ATPMdl Gk e 67X b7F ktE sz yFyc mVhG JZ29aP gz k Yj4 cEr HCd P7 XFHU O9zo y4AZai SR O pIn 0tp 7kZ zU VHQt m3ip 3xEd41 By 7 2ux IiY 8BC Lb OYGo LDwp juza6i Pa k Zdh aD3 xSX yj pdOw oqQq Jl6RFg lO t X67 nm7 s1l ZJ mGUr dIdX Q7jps7 rc d ACY ZMs BKA Nx tkqf Nhkt sbBf2O BN Z 5pf oqS Xtd 3c HFLN tLgR oHrnNl wR n ylZ NWV NfH vO B1nU Ayjt xTWW4o Cq PDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH238} \end{split} \end{align} and \begin{align} \begin{split} \Vert K_3\Vert_{H^{s/2-1/4}_t L^2 ( \Gammac)} & \les \Vert \JJ b b \nabla \bar{v} \Vert_{H^{s/2}_t L^2_x} + \Vert \JJ b b \nabla \bar{v} \Vert_{L^2_t H^s_x} \\& \les \Vert \JJ \Vert_{H^{s/2}_t L^\infty_x} \Vert b\Vert_{L^\infty_t L^\infty_x}^2 \Vert \nabla \bar{v} \Vert_{L^\infty_t L^2_x} + \Vert \JJ \Vert_{L^\infty_t L^\infty_x} \Vert b\Vert_{H^{s/2}_t L^\infty_x} \Vert b\Vert_{L^\infty_t L^\infty_x} \Vert \nabla \bar{v} \Vert_{L^\infty_t L^2_x} \\&\indeq + \Vert \JJ \Vert_{L^\infty_t L^\infty_x} \Vert b\Vert_{L^\infty_t L^\infty_x}^2 \Vert \nabla \bar{v} \Vert_{H^{s/2}_t L^2_x} + \Vert \JJ b b \nabla \bar{v} \Vert_{L^2_t H^s_x}
, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH237} \end{split} \end{align} where we appealed to Lemma~\ref{L08}. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH211} it follows that \begin{align} \begin{split} \Vert b_t \Vert_{H^{(s-2)/2}_t L^\infty_x} & \les \Vert b \nabla v b \Vert_{H^{(s-2)/2}_t L^\infty_x} + \Vert b \nabla v \Vert_{H^{(s-2)/2}_t L^\infty_x} + \Vert \nabla v \Vert_{H^{(s-2)/2}_t L^\infty_x} \\& \les \Vert b \Vert_{W^{(s-2)/2, \infty}_t L^\infty_x} \Vert \nabla v\Vert_{L^2_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} + \Vert \nabla v\Vert_{H^{(s-2)/2}_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x}^2 \\&\indeq + \Vert b \Vert_{W^{(s-2)/2, \infty}_t L^\infty_x} \Vert \nabla v\Vert_{L^2_t L^\infty_x} + \Vert \nabla v\Vert_{H^{(s-2)/2}_t L^\infty_x} \Vert b \Vert_{L^\infty_t L^\infty_x} + \Vert \nabla v \Vert_{H^{(s-2)/2}_t L^\infty_x} \\& \les ( \epsilon + C_\epsilon T^{1/2} ) Q(M) , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH236} \end{split} \end{align} where we used Lemma~\ref{L08} and~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH235}. 
Combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH233} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH237}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH236}, we arrive at \begin{align} \begin{split} \Vert K_3\Vert_{H^{s/2-1/4}_t L^2 ( \Gammac)} \les ( \epsilon + C_\epsilon T^{1/2} ) Q(M) \Vert \bar{v} \Vert_{K^{s+1}} , \llabel{ Rtu Vua nMk Lv qbxp Ni0x YnOkcd FB d rw1 Nu7 cKy bL jCF7 P4dx j0Sbz9 fa V CWk VFo s9t 2a QIPK ORuE jEMtbS Hs Y eG5 Z7u MWW Aw RnR8 FwFC zXVVxn FU f yKL Nk4 eOI ly n3Cl I5HP 8XP6S4 KF f Il6 2Vl bXg ca uth8 61pU WUx2aQ TW g rZw cAx 52T kq oZXV g0QG rBrrpe iw u WyJ td9 ooD 8t UzAd LSnI tarmhP AW B mnm nsb xLI qX 4RQS TyoF DIikpe IL h WZZ 8ic JGa 91 HxRb 97kn Whp9sA Vz P o85 60p RN2 PS MGMM FK5X W52OnW Iy o Yng xWn o86 8S Kbbu 1Iq1 SyPDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH52} \end{split} \end{align} where we used Lemma~\ref{L12}. 
Regarding the terms $K_4$ and $K_5$, we proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH189}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH190}, obtaining \begin{align} \begin{split} \Vert K_4\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} & \les \Vert \JJ b \RR^{-1} \Vert_{L^2_t H^s_x} + \Vert \JJ b \RR^{-1} \Vert_{H^1_t H^1_x} \\& \les T^{1/2} \Vert \JJ \Vert_{L^\infty_t H^s_x} \Vert b \Vert_{L^\infty_t H^s_x} \Vert \RR^{-1} \Vert_{L^\infty_t H^{s}_x} + \Vert \JJ_t \Vert_{L^2_t H^1_x} \Vert b \Vert_{L^\infty_t H^s_x} \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} \\&\indeq + \Vert \JJ \Vert_{L^\infty_t H^s_x} \Vert b_t \Vert_{L^2_t H^1_x} \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} + \Vert \JJ \Vert_{L^\infty_t H^s_x} \Vert b \Vert_{L^\infty_t H^s_x} \Vert \RR^{-2} \RR_t \Vert_{L^2_t H^1_x} \\& \les ( \epsilon + C_\epsilon T^{1/2} ) M \end{split} \llabel{kHJ VC v seV GWr hUd ew Xw6C SY1b e3hD9P Kh a 1y0 SRw yxi AG zdCM VMmi JaemmP 8x r bJX bKL DYE 1F pXUK ADtF 9ewhNe fd 2 XRu tTl 1HY JV p5cA hM1J fK7UIc pk d TbE ndM 6FW HA 72Pg LHzX lUo39o W9 0 BuD eJS lnV Rv z8VD V48t Id4Dtg FO O a47 LEH 8Qw nR GNBM 0RRU LluASz jx x wGI BHm Vyy Ld kGww 5eEg HFvsFU nz l 0vg OaQ DCV Ez 64r8 UvVH TtDykr Eu F aS3 5p5 yn6 QZ UcX3 mfET Exz1kv qE p OVV EFP IVp zQ lMOI Z2yT TxIUOm 0f W L1W oxC tlX Ws 9HU4 DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH53}\end{align} and \begin{align} \begin{split} \Vert K_5 \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} & \les \Vert (\JJ - 1) \RR^{-1} \Vert_{L^2_t H^s_x} + \Vert (\JJ - 1) \RR^{-1} \Vert_{H^1_t H^1_x} \\& \les T^{1/2} \Vert \JJ -1 \Vert_{L^\infty_t H^s_x} \Vert \RR^{-1} \Vert_{L^\infty_t H^{s}_x} + \Vert \JJ_t \Vert_{L^2_t H^1_x} \Vert \RR^{-1} \Vert_{L^\infty_t H^s_x} \\&\indeq + \Vert \JJ \Vert_{L^\infty_t H^s_x} \Vert \RR^{-2} \RR_t \Vert_{L^2_t H^1_x} \\& \les ( \epsilon + C_\epsilon T^{1/2} ) M , \end{split} \llabel{EF0I Z1WDv3 TP 4 2LN 7Tr SuR 8u 
Mv1t Lepv ZoeoKL xf 9 zMJ 6PU In1 S8 I4KY 13wJ TACh5X l8 O 5g0 ZGw Ddt u6 8wvr vnDC oqYjJ3 nF K WMA K8V OeG o4 DKxn EOyB wgmttc ES 8 dmT oAD 0YB Fl yGRB pBbo 8tQYBw bS X 2lc YnU 0fh At myR3 CKcU AQzzET Ng b ghH T64 KdO fL qFWu k07t DkzfQ1 dg B cw0 LSY lr7 9U 81QP qrdf H1tb8k Kn D l52 FhC j7T Xi P7GF C7HJ KfXgrP 4K O Og1 8BM 001 mJ PTpu bQr6 1JQu6o Gr 4 baj 60k zdX oD gAOX 2DBk LymrtN 6T 7 us2 Cp6 eZm DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH54} \end{align} where the we appealed to Lemmas~\ref{L08} and~\ref{L09}. The terms $K_7$, $K_8$, $K_9$, and $K_{10}$ are estimated analogously to $K_3$, and we have \begin{align} \begin{split} & \Vert K_7 \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert K_8 \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert K_9 \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert K_{10} \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} \\&\indeq\indeq \les ( \epsilon + C_\epsilon T^{1/2} ) Q(M) \Vert \bar{v} \Vert_{K^{s+1}} . \llabel{1a VJTY 8vYP OzMnsA qs 3 RL6 xHu mXN AB 5eXn ZRHa iECOaa MB w Ab1 5iF WGu cZ lU8J niDN KiPGWz q4 1 iBj 1kq bak ZF SvXq vSiR bLTriS y8 Q YOa mQU ZhO rG HYHW guPB zlAhua o5 9 RKU trF 5Kb js KseT PXhU qRgnNA LV t aw4 YJB tK9 fN 7bN9 IEwK LTYGtn Cc c 2nf Mcx 7Vo Bt 1IC5 teMH X4g3JK 4J s deo Dl1 Xgb m9 xWDg Z31P chRS1R 8W 1 hap 5Rh 6Jj yT NXSC Uscx K4275D 72 g pRW xcf AbZ Y7 Apto 5SpT zO1dPA Vy Z JiW Clu OjO tE wxUB 7cTt EDqcAb YG d ZQZ DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH239} \end{split} \end{align} For the term $K_{11}$, we proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH189}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH190} to obtain \begin{align} K_{11} \les (\epsilon + C_\epsilon T^{1/2}) Q(M) . 
\llabel{fsQ 1At Hy xnPL 5K7D 91u03s 8K 2 0ro fZ9 w7T jx yG7q bCAh ssUZQu PK 7 xUe K7F 4HK fr CEPJ rgWH DZQpvR kO 8 Xve aSB OXS ee XV5j kgzL UTmMbo ma J fxu 8gA rnd zS IB0Y QSXv cZW8vo CO o OHy rEu GnS 2f nGEj jaLz ZIocQe gw H fSF KjW 2Lb KS nIcG 9Wnq Zya6qA YM S h2M mEA sw1 8n sJFY Anbr xZT45Z wB s BvK 9gS Ugy Bk 3dHq dvYU LhWgGK aM f Fk7 8mP 20m eV aQp2 NWIb 6hVBSe SV w nEq bq6 ucn X8 JLkI RJbJ EbwEYw nv L BgM 94G plc lu 2s3U m15E YAjs1G LDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH55} \end{align} \par Collecting the above estimates, we arrive at \begin{align*} \begin{split} \Vert \bar{v}\Vert_{K^{s+1}} & \les \Vert v_0\Vert_{H^s} + \Vert w_0 \Vert_{H^{s+1/2}} + \Vert w_1\Vert_{H^{s-1/2}} + (\epsilon + C_{\epsilon} T^{1/2}) Q(M) \\&\indeq\indeq + (\epsilon + C_{\epsilon} T^{1/2}) Q(M) \Vert \bar{v} \Vert_{K^{s+1}} . \end{split} \end{align*} Taking $M>1$ sufficiently large and $\epsilon, T>0$ sufficiently small we have \begin{align} \Vert \bar{v} \Vert_{K^{s+1}} \leq M. \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH250} \end{align} Thus, the mapping $\Pi \colon v\mapsto \bar{v}$ is well-defined from $Z_T$ to $Z_T$, for some $M>1$ as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH250} and some sufficiently small $T>0$. \par \subsection{Contracting property} In this section we shall prove \begin{align} \Vert \Pi (v_1) - \Pi (v_2) \Vert_{K^{s+1}} \leq \frac{1}{2} \Vert v_1 - v_2 \Vert_{K^{s+1}} \comma v_1, v_2 \in Z_T, \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH298} \end{align} where $M>1$ is fixed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH250} and $T\in (0,1)$ is sufficiently small to be determined below. Note that the implicit constants below are allowed to depend on $M$. 
\par Let $v_1, v_2 \in Z_T$ and $(\eta_1, \JJ_1, a_1)$ and $(\eta_2, \JJ_2, a_2)$ be the corresponding Lagrangian flow map, Jacobian, and the inverse matrix of the flow map. First we solve the for $(\RR_1, \RR_2)$ from \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260} with the same initial data $\RR_0$. Then we solve for $(\xi_1, \xi_{1t})$ and $(\xi_2, \xi_{2t})$ from \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH23} with the boundary conditions \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH262}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH263} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH267} and the same initial data $(w_0, w_1)$. To obtain the next iterate $(\bar{v}_1, \bar{v}_2)$, we solve \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH261} with the boundary conditions \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH262} and \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH266} and with the same initial data $v_0$. Denote $b_1 = a_1 - I_3$, $b_2 = a_2 - I_3$, $\tilde{b} = b_1 - b_2$, $\tilde{V} = \bar{v}_1 - \bar{v}_2$, $\tilde{v} = v_1 - v_2$, $\tilde{\RR} = \RR_1 - \RR_2$, $\tilde{\xi} = \xi_1 - \xi_2$, $\tilde{\eta}= \eta_1 -\eta_2$, and $\tilde{\JJ} = \JJ_1 - \JJ_2$. 
The difference $\tilde{\VV}$ satisfies \begin{align*} & \partial_t \tilde{\VV}_j - \lambda \tilde{\RR} \partial_k (\partial_j \tilde{\VV}_k + \partial_k \tilde{\VV}_j) - \mu \tilde{\RR} \partial_j \partial_k \tilde{\VV}_k = \tilde{f}_j \inon{in~$(0,T)\times \Omegaf$} , \llabel{n h zG8 vmh ghs Qc EDE1 KnaH wtuxOg UD L BE5 9FL xIp vu KfJE UTQS EaZ6hu BC a KXr lni r1X mL KH3h VPrq ixmTkR zh 0 OGp Obo N6K LC E0Ga Udta nZ9Lvt 1K Z eN5 GQc LQL L0 P9GX uakH m6kqk7 qm X UVH 2bU Hga v0 Wp6Q 8JyI TzlpqW 0Y k 1fX 8gj Gci bR arme Si8l w03Win NX w 1gv vcD eDP Sa bsVw Zu4h aO1V2D qw k JoR Shj MBg ry glA9 3DBd S0mYAc El 5 aEd pII DT5 mb SVuX o8Nl Y24WCA 6d f CVF 6Al a6i Ns 7GCh OvFA hbxw9Q 71 Z RC8 yRi 1zZ dM rpt7 3dou DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH242} \\ & \lambda (\partial_k \tilde{\VV}_j + \partial_j \tilde{\VV}_k) \nu^k + \mu \partial_k \tilde{\VV}_k \nu^j = \partial_k \tilde{\xi}_j \nu^k + \tilde{h}_j \inon{in~$(0,T) \times \Gammac$} , \llabel{ogkAkG GE 4 87V ii4 Ofw Je sXUR dzVL HU0zms 8W 2 Ztz iY5 mw9 aB ZIwk 5WNm vNM2Hd jn e wMR 8qp 2Vv up cV4P cjOG eu35u5 cQ X NTy kfT ZXA JH UnSs 4zxf Hwf10r it J Yox Rto 5OM FP hakR gzDY Pm02mG 18 v mfV 11N n87 zS X59D E0cN 99uEUz 2r T h1F P8x jrm q2 Z7ut pdRJ 2DdYkj y9 J Yko c38 Kdu Z9 vydO wkO0 djhXSx Sv H wJo XE7 9f8 qh iBr8 KYTx OfcYYF sM y j0H vK3 ayU wt 4nA5 H76b wUqyJQ od O u8U Gjb t6v lc xYZt 6AUx wpYr18 uO v 62v jnw FrC rf Z4DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH246} \\& \tilde{\VV} ~\text{periodic in the $y_1$ and $y_2$ directions} , \llabel{nl vJuh 2SpVLO vp O lZn PTG 07V Re ixBm XBxO BzpFW5 iB I O7R Vmo GnJ u8 Axol YAxl JUrYKV Kk p aIk VCu PiD O8 IHPU ndze LPTILB P5 B qYy DLZ DZa db jcJA T644 Vp6byb 1g 4 dE7 Ydz keO YL hCRe Ommx F9zsu0 rp 8 Ajz d2v Heo 7L 5zVn L8IQ WnYATK KV 1 f14 s2J geC b3 v9UJ djNN VBINix 1q 5 oyr SBM 2Xt gr v8RQ MaXk a4AN9i Ni n zfH xGp A57 uA E4jM fg6S 6eNGKv JL 3 tyH 3qw dPr x2 jFXW 2Wih pSSxDr aA 7 PXg jK6 GGl Og 
5PkR d2n5 3eEx4N yG h d8Z RkO NDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH401} \\& \tilde{\VV}(0) = 0 \inon{in~$\Omegaf$} , \llabel{MQ qL q4sE RG0C ssQkdZ Ua O vWr pla BOW rS wSG1 SM8I z9qkpd v0 C RMs GcZ LAz 4G k70e O7k6 df4uYn R6 T 5Du KOT say 0D awWQ vn2U OOPNqQ T7 H 4Hf iKY Jcl Rq M2g9 lcQZ cvCNBP 2B b tjv VYj ojr rh 78tW R886 ANdxeA SV P hK3 uPr QRs 6O SW1B wWM0 yNG9iB RI 7 opG CXk hZp Eo 2JNt kyYO pCY9HL 3o 7 Zu0 J9F Tz6 tZ GLn8 HAes o9umpy uc s 4l3 CA6 DCQ 0m 0llF Pbc8 z5Ad2l GN w SgA XeN HTN pw dS6e 3ila 2tlbXN 7c 1 itX aDZ Fak df Jkz7 TzaO 4kbVhn YH f TDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH304} \end{align*} where \begin{align} \begin{split} \tilde{f}_j & = \lambda \tilde{\RR} \partial_k (\partial_j \bar{v}_{2k} + \partial_k \bar{v}_{2j}) + \mu \tilde{\RR} \partial_j \partial_k \bar{v}_{2k}
+ \lambda \tilde{\RR} \partial_k (b_{1mk} \partial_m \bar{v}_{1j} + b_{1mj} \partial_m \bar{v}_{1k}) \\&\indeq + \lambda \RR_2 \partial_k (b_{1mk} \partial_m \tilde{\VV}_j + b_{1mj} \partial_m \tilde{\VV}_k) + \lambda \RR_2 \partial_k (\tilde{b}_{mk} \partial_m \bar{v}_{2j} + \tilde{b}_{mj} \partial_m \bar{v}_{2k}) \\&\indeq + \lambda \tilde{\RR} b_{1kl} \partial_k (b_{1ml} \partial_m \bar{v}_{1j} + b_{1mj} \partial_m \bar{v}_{1l}) + \lambda \RR_2 \tilde{b}_{kl} \partial_k (b_{1ml} \partial_m \bar{v}_{1j} + b_{1mj} \partial_m \bar{v}_{1l} ) \\&\indeq + \lambda \RR_2 b_{2kl} \partial_k (\tilde{b}_{ml} \partial_m \bar{v}_{1j} + \tilde{b}_{mj} \partial_m \bar{v}_{1l} ) + \lambda \RR_2 b_{2kl} \partial_k ( b_{2ml} \partial_m \tilde{\VV}_j + b_{2mj} \partial_m \tilde{\VV}_l ) \\&\indeq + \lambda \tilde{\RR} b_{1kl} \partial_k (\partial_l \bar{v}_{1j} + \partial_j \bar{v}_{1l} ) + \lambda \RR_2 \tilde{b}_{kl} \partial_k (\partial_l \bar{v}_{1j} + \partial_j \bar{v}_{1l}) + \lambda \RR_2 b_{2kl} \partial_k (\partial_l \tilde{\VV}_j + \partial_j \tilde{\VV}_l ) \\&\indeq + \mu \RR \partial_j (b_{1mi} \partial_m \bar{v}_{1i}) + \mu \RR_2 \partial_j (\tilde{b}_{mi} \partial_m \bar{v}_{1i}) + \mu \RR_2 \partial_j (b_{2mi} \partial_m \tilde{\VV}) \\&\indeq + \mu \tilde{\RR} b_{1kj} \partial_k (b_{1mi} \partial_m \bar{v}_{1i}) + \mu \RR_2 \tilde{b}_{kj} \partial_k (b_{1mi} \partial_m \bar{v}_{1i}) + \mu \RR_2 b_{2kj} \partial_k ( \tilde{b}_{mi} \partial_m \bar{v}_{1i}) \\&\indeq + \mu \RR_2 b_{2kj} \partial_k (b_{2mi} \partial_m \tilde{\VV}_{i}) + \mu \tilde{\RR} b_{1kj} \partial_k \partial_i \bar{v}_{1i} + \mu \RR_2 \tilde{b}_{kj} \partial_k \partial_i \bar{v}_{1i} + \mu \RR_2 b_{2kj} \partial_k \partial_i \tilde{\VV}_{i} \\&\indeq - \RR_1^{-1} \RR_2^{-1} \tilde{\RR} b_{1kj} \partial_k \RR_1 + \RR_2^{-1} \tilde{b}_{kj} \partial_k \RR_1 + \RR_2^{-1} b_{2kj} \partial_k \tilde{\RR} - \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \partial_j \RR_1 \\&\indeq + \RR_2^{-1} \partial_j \tilde{\RR} 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH248} \end{split} \end{align} and \begin{align} \begin{split} \tilde{h}_j & = - \lambda \tilde{\JJ} (\partial_k \bar{v}_{1j} + \partial_j \bar{v}_{1k} ) \nu^k + \lambda (1- \JJ_2) (\partial_k \tilde{\VV}_j + \partial_j \tilde{\VV}_k) \nu^k - \mu \tilde{\JJ} \partial_k \bar{v}_{1k} \nu^j + \mu (1- \JJ_2) \partial_k \tilde{\VV}_k \nu^j \\&\indeq + \lambda \tilde{\JJ} b_{1kl} (b_{1ml} \partial_m \bar{v}_{1j} + b_{1mj} \partial_m \bar{v}_{1l} ) \nu^k + \lambda \JJ_2 \tilde{b}_{kl} (b_{1ml} \partial_m \bar{v}_{1j} + b_{1mj} \partial_m \bar{v}_{1l} ) \nu^k \\&\indeq + \lambda \JJ_2 b_{2kl} (\tilde{b}_{ml} \partial_m \bar{v}_{1j} + \tilde{b}_{mj} \partial_m \bar{v}_{1l} ) \nu^k + \lambda \JJ_2 b_{2kl} (b_{2ml} \partial_m \tilde{\VV}_{j} + b_{2mj} \partial_m \tilde{\VV}_{l} ) \nu^k \\&\indeq + \tilde{\JJ} b_{1kj} \RR_1^{-1} \nu^k + \JJ_2 \tilde{b}_{kj} \RR_1^{-1} \nu^k - \JJ_2 b_{2kj} \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu^k - \tilde{\JJ} \RR_1^{-1} \nu^j - (\JJ_2-1) \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu^j \\&\indeq - \lambda \tilde{\JJ} (b_{1mk} \partial_m \bar{v}_{1j} + b_{1mj} \partial_m \bar{v}_{1k}) \nu^k - \lambda \JJ_2 (\tilde{b}_{mk} \partial_m \bar{v}_{1j} + \tilde{b}_{mj} \partial_m \bar{v}_{1k}) \nu^k \\&\indeq - \lambda \JJ_2 (b_{2mk} \partial_m \tilde{\VV}_{j} + b_{2mj} \partial_m \tilde{\VV}_{1k}) \nu^k - \lambda \tilde{\JJ} b_{1kl} (\partial_l \bar{v}_{1j} + \partial_j \bar{v}_{1l} )\nu^k - \lambda \JJ_2 \tilde{b}_{kl} (\partial_l \bar{v}_{1j} + \partial_j \bar{v}_{1l} )\nu^k \\& \indeq - \lambda \JJ_2 b_{2kl} (\partial_l \tilde{\VV}_{j} + \partial_j \tilde{\VV}_{l} )\nu^k - \mu \tilde{\JJ} b_{1kj} b_{1mi} \partial_m \bar{v}_{1i} \nu^k - \mu \JJ_2 \tilde{b}_{kj} b_{1mi} \partial_m \bar{v}_{1i} \nu^k \\&\indeq - \mu \JJ_2 b_{2kj} \tilde{b}_{mi} \partial_m \bar{v}_{1i} \nu^k - \mu \JJ_2 b_{2kj} b_{2mi} \partial_m \tilde{\VV}_{i} \nu^k - \mu \tilde{\JJ} b_{1mi} \partial_m 
\bar{v}_{1i} \nu^j - \mu \JJ_2 \tilde{b}_{mi} \partial_m \bar{v}_{1i} \nu^j \\& \indeq - \mu \JJ_2 b_{2mi} \partial_m \tilde{\VV}_{i} \nu^j - \mu \tilde{\JJ} b_{1kj} \partial_i \bar{v}_{1i} \nu^k - \mu \JJ_2 \tilde{b}_{kj} \partial_i \bar{v}_{1i} \nu^k - \mu \JJ_2 b_{2kj} \partial_i \tilde{\VV}_{i} \nu^k - \RR_1^{-1} \RR_2^{-1} \tilde{\RR} \nu^j , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH249} \end{split}
\end{align} for $j=1,2,3$. \par Before we bound the terms on the right sides of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH248} and~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH249}, we provide necessary a~priori estimates for the differences of the density, Jacobian, inverse matrix of the flow map. \par \cole \begin{Lemma} \label{L10} Let $v_1, v_2 \in Z_T$. Suppose $\Vert v_1\Vert_{K^{s+1}} \leq M$ and $\Vert v_2\Vert_{K^{s+1}} \leq M$ for some $T>0$, where $M>1$ is fixed as in~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH250}. Then for every $\epsilon \in (0,1]$, there exists a sufficiently small $T_0 \in (0,1)$ depending on $M$ such that the following statements hold: \begin{enumerate}[label=(\roman*)] \item $\Vert \tilde{b} \Vert_{L^\infty_t H^s_x} + \Vert b\Vert_{H^1_t L^\infty_x} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}}$, \item $\Vert \tilde{\RR} \Vert_{L^\infty_t H^s_x} + \Vert \tilde{\RR} \Vert_{H^1_t L^\infty_x} \les \epsilon \Vert_{K^{s+1}}$, \item $\Vert \tilde{\JJ} \Vert_{L^\infty_t H^s_x} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}}$, and \item $\Vert \tilde{\RR} \Vert_{H^1_t H^s_x} + \Vert \tilde{b} \Vert_{H^1_t H^s_x} + \Vert \tilde{\JJ} \Vert_{H^1_t H^s_x} \les \Vert \tilde{v} \Vert_{K^{s+1}}$, \end{enumerate} \end{Lemma} \colb \par \begin{proof}[Proof of Lemma~\ref{L10}] (i) The difference $\tilde{b}$ satisfies the ODE \begin{align} \begin{split} -\tilde{b}_t & = \tilde{b} ( \nabla v_1 ) b_1 + b_2 ( \nabla \tilde{v} ) b_1 + b_2 ( \nabla v_2 ) \tilde{b} + (\nabla \tilde{v} ) b_1 + (\nabla v_2 ) \tilde{b} + \tilde{b} ( \nabla v_1 ) \\&\indeq + b_2 ( \nabla \tilde{v} ) + \nabla \tilde{v} \inon{in~$(0,T)\times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH251} \end{split} \end{align} with the initial data $b(0) = 0$. 
From the fundamental theorem of calculus it follows that \begin{align*} \begin{split} \Vert \tilde{b} (t) \Vert_{H^s} & \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \tilde{b} \Vert_{H^s} \Vert \nabla v_1 \Vert_{H^s} \Vert b_1 \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert b_2 \Vert_{H^s} \Vert \nabla \tilde{v} \Vert_{H^s} \Vert b_1 \Vert_{H^s} \\&\indeq + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert b_2 \Vert_{H^s} \Vert \nabla v_2 \Vert_{H^s} \Vert \tilde{b} \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \nabla \tilde{v} \Vert_{H^s} \Vert b_1 \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \nabla \tilde{v} \Vert_{H^s} \\&\indeq + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \nabla v_2 \Vert_{H^s} \Vert \tilde{b} \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \tilde{b} \Vert_{H^s} \Vert \nabla v_1 \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert b_2 \Vert_{H^s} \Vert \nabla \tilde{v} \Vert_{H^s} \\& \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \nabla \tilde{v} \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \tilde{b} \Vert_{H^s} (\Vert \nabla v_1\Vert_{H^s} + \Vert \nabla v_1\Vert_{H^s}) , \end{split} \end{align*} where the last inequality follows from Lemma~\ref{L08}. Using Gronwall's inequality, we arrive at \begin{align} \begin{split} \Vert \tilde{b} \Vert_{L^\infty_t H^s_x} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} . 
\label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH255} \end{split} \end{align} From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH251} and H\"older's inequality, we have \begin{align} \begin{split} \Vert \tilde{b}_t \Vert_{L^2_t L^\infty_x} & \les \Vert \tilde{b} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla v_1 \Vert_{L^2_t L^\infty_x} \Vert b_1\Vert_{L^\infty_t L^\infty_x} + \Vert b_2 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \tilde{v} \Vert_{L^2_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \\&\indeq + \Vert b_2 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla v_2 \Vert_{L^2_t L^\infty_x} \Vert \tilde{b} \Vert_{L^\infty_t L^\infty_x} + \Vert \nabla \tilde{v} \Vert_{L^2_t L^\infty_x} \Vert b_1\Vert_{L^\infty_t L^\infty_x} \\&\indeq + \Vert \nabla v_2 \Vert_{L^2_t L^\infty_x} \Vert \tilde{b} \Vert_{L^\infty_t L^\infty_x} + \Vert \tilde{b} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla v_1 \Vert_{L^2_t L^\infty_x} + \Vert b_2 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \tilde{v} \Vert_{L^2_t L^\infty_x} \\&\indeq + \Vert \nabla \tilde{v} \Vert_{L^2_t L^\infty_x} \\& \les (\epsilon+ C_\epsilon T^{1/2} ) \Vert \tilde{v} \Vert_{K^{s+1}} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH295} \end{split} \end{align} by taking $T_0>0$ sufficiently small. 
\par (ii) Since the difference $\tilde{R}$ satisfies the ODE \begin{align} & \tilde{\RR}_t - \tilde{\RR} (\dive v_2 + b_{1kj} \partial_k v_{1j} ) = \RR_1 \dive \tilde{v} + \RR_2 \tilde{b}_{kj} \partial_k v_{1j} + \RR_2 b_{2kj} \partial_k \tilde{v}_j \inon{in~$(0,T) \times \Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH256} \\ & \tilde{\RR} (0) = 0 \inon{in~$\Omegaf$} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH257} \end{align} thus the solution is given as \begin{align} \begin{split} \tilde{\RR}(t,x) & = e^{\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t (\dive v_2 + b_{1kj} \partial_k v_{1j} ) d\tau} \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t e^{-\fgsdfgwertsfsrsfgsdfgfsdfasdf_0^\tau (\dive v_2 + b_{1kj} \partial_k v_{1j} )} \\& \indeqtimes (\RR_1 \dive \tilde{v} + \RR_2 \tilde{b}_{kj} \partial_k v_{1j} + \RR_2 b_{2kj} \partial_k \tilde{v}_j ) d\tau \inon{in~$[0,T] \times \Omegaf$} . \llabel{da 9C3 WCb tw MXHW xoCC c4Ws2C UH B sNL FEf jS4 SG I4I4 hqHh 2nCaQ4 nM p nzY oYE 5fD sX hCHJ zTQO cbKmvE pl W Und VUo rrq iJ zRqT dIWS QBL96D FU d 64k 5gv Qh0 dj rGlw 795x V6KzhT l5 Y FtC rpy bHH 86 h3qn Lyzy ycGoqm Cb f h9h prB CQp Fe CxhU Z2oJ F3aKgQ H8 R yIm F9t Eks gP FMMJ TAIy z3ohWj Hx M R86 KJO NKT c3 uyRN nSKH lhb11Q 9C w rf8 iiX qyY L4 zh9s 8NTE ve539G zL g vhD N7F eXo 5k AWAT 6Vrw htDQwy tu H Oa5 UIO Exb Mp V2AH puuC HWItfDKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH252} \end{split} \end{align} It follows that \begin{align} \begin{split} \Vert \tilde{R} \Vert_{L^\infty_t H^s_x} \les \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \nabla \tilde{v} \Vert_{H^s} + \fgsdfgwertsfsrsfgsdfgfsdfasdf_0^t \Vert \tilde{b} \nabla v_{1} \Vert_{H^s} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH402} \end{split} \end{align} where we 
used~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH255}. Using \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH256}, the H\"older's, and Sobolev inequalities, we obtain \begin{align} \begin{split} \Vert \tilde{\RR}_t \Vert_{L^2_t L^\infty_x} & \les \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla v_2 \Vert_{L^2_t L^\infty_x} + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla v_1 \Vert_{L^2_t L^\infty_x} \\&\indeq + \Vert \RR_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \tilde{v} \Vert_{L^2_t L^\infty_x} + \Vert \RR_2 \Vert_{L^\infty_t L^\infty_x} \Vert \tilde{b} \Vert_{L^\infty_t L^\infty_x} \Vert \nabla v_1 \Vert_{L^2_t L^\infty_x} \\&\indeq + \Vert \RR_2 \Vert_{L^\infty_t L^\infty_x} \Vert b_2 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla \tilde{v} \Vert_{L^2_t L^\infty_x} \\& \les (\epsilon + C_\epsilon T^{1/2}) \Vert \tilde{v} \Vert_{K^{s+1}} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH290} \end{split} \end{align} by taking $T>0$ sufficiently small. Thus we conclude the proof of (ii) by combining \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH402}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH290}.
\par (iii) The difference $\tilde{\JJ}$ satisfies the same ODE system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH256}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH257} and from (ii) it follows that \begin{align*} \Vert \tilde{\JJ} \Vert_{L^\infty_t H^s_x} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} . \end{align*} \par (iv) The proof is analogous to (i)--(iii) using \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH295} and~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH290}. Thus we omit the details. \end{proof} \par \begin{proof}[Proof of Theorem~\ref{T01}] From Lemma~\ref{L02} and~\ref{L09} it follows that \begin{align} \begin{split} \Vert \tilde{\VV}\Vert_{K^{s+1}} & \les \left\Vert \frac{\partial \tilde{\xi}}{\partial \nu} \right\Vert_{H^{s-1/2, s/2-1/4}_{\Gammac}} + \Vert \tilde{h} \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} + \Vert \tilde{f} \Vert_{K^{s-1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH293} \end{split} \end{align} where $\tilde{f}$ and $\tilde{h}$ are as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH248}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH249}, for $j=1,2,3$. 
\par We proceed as in \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH115}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH361} to bound the first term on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH293}, obtaining \begin{align} \left\Vert \frac{\partial \tilde{\xi}}{\partial \nu} \right\Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} \les (\epsilon_1 + \epsilon_2 C_{\epsilon_1} + C_{\epsilon_1} T^{1/2}) \Vert \tilde{v} \Vert_{K^{s+1}} , \llabel{r3 rs6F23 6o b LtD vN9 KqA pO uold 3sec xqgSQN ZN f w5t BGX Pdv W0 k6G4 Byh9 V3IicO nR 2 obf x3j rwt 37 u82f wxwj SmOQq0 pq 4 qfv rN4 kFW hP HRmy lxBx 1zCUhs DN Y INv Ldt VDG 35 kTMT 0ChP EdjSG4 rW N 6v5 IIM TVB 5y cWuY OoU6 Sevyec OT f ZJv BjS ZZk M6 8vq4 NOpj X0oQ7r vM v myK ftb ioR l5 c4ID 72iF H0VbQz hj H U5Z 9EV MX8 1P GJss Wedm hBXKDA iq w UJV Gj2 rIS 92 AntB n1QP R3tTJr Z1 e lVo iKU stz A8 fCCg Mwfw 4jKbDb er B Rt6 T8O Zyn NODKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH297} \end{align} for any $\epsilon_1, \epsilon_2 \in (0,1]$. \par Next we estimate the $K^{s-1}$ norm of terms on the right side of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH248} for $j=1,2,3$. 
The term $\tilde{\RR} b_{1kj} \partial_k (b_{1mi} \partial_m \bar{v}_{1i})$ is bounded as \begin{align*} \begin{split} \Vert \tilde{\RR} b_{1kj} \partial_k (b_{1mi} \partial_m \bar{v}_{1i}) \Vert_{L^2_t H^{s-1}_x} & \les \Vert \tilde{\RR} \Vert_{L^\infty_t H^s_x} \Vert b_1 \Vert_{L^\infty_t H^s_x}^2 \Vert \bar{v}_1 \Vert_{L^2_t H^{s+1}_x} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} \end{split} \end{align*} and \begin{align*} \begin{split} & \Vert \tilde{\RR} b_{1kj} \partial_k (b_{1mi} \partial_m \bar{v}_{1i}) \Vert_{H^{(s-1)/2}_t L^2_x} \\ & \les \Vert \tilde{\RR} \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x}^2 \Vert \nabla^2 \bar{v}_1 \Vert_{L^4_t L^2_x} + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \bar{v}_1 \Vert_{L^4_t L^4_x} \\&\indeq + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \bar{v}_1 \Vert_{H^{(s-1)/2}_t L^2_x} + \Vert \tilde{\RR} \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla b_1 \Vert_{L^\infty_t L^6_x} \Vert \nabla^2 \bar{v}_1 \Vert_{L^4_t L^3_x} \\&\indeq + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \bar{v}_1 \Vert_{H^{(s-1)/2}_t L^2_x} + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla b_1 \Vert_{W^{(s-1)/2, 4}_t L^6_x} \Vert \nabla^2 \bar{v}_1 \Vert_{L^4_t L^3_x} \\&\indeq + \Vert \tilde{\RR} \Vert_{L^\infty_t L^\infty_x} \Vert b_1 \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert \nabla b_1 \Vert_{L^\infty_t L^6_x} \Vert \nabla^2 \bar{v}_1 \Vert_{L^4_t L^3_x} \\& \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} , \end{split} \end{align*} where we used Lemma~\ref{L10}. 
The term $\mu \RR_2 b_{2kj} \partial_k \partial_i \tilde{\VV}_{i}$ is estimated as \begin{align*} \begin{split} \Vert \mu \RR_2 b_{2kj} \partial_k \partial_i \tilde{\VV}_{i} \Vert_{L^2_t H^{s-1}_x} & \les \Vert \RR_2 \Vert_{L^\infty_t H^s_x} \Vert b_2 \Vert_{L^\infty_t H^s_x} \Vert \nabla^2 \tilde{\VV} \Vert_{L^2_t H^{s-1}_x} \les \epsilon \Vert \tilde{\VV} \Vert_{K^{s+1}} \end{split} \end{align*} and \begin{align*} \begin{split} \Vert \mu \RR_2 b_{2kj} \partial_k \partial_i \tilde{\VV}_{i} \Vert_{H^{(s-1)/2}_t L^2_x} & \les \Vert \RR_2 \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert b_2 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \tilde{\VV} \Vert_{L^4_t L^2_x} \\&\indeq + \Vert \RR_2 \Vert_{L^\infty_t L^\infty_x} \Vert b_2 \Vert_{W^{(s-1)/2, 4}_t L^\infty_x} \Vert \nabla^2 \tilde{\VV} \Vert_{L^4_t L^2_x} \\&\indeq + \Vert \RR_2 \Vert_{L^\infty_t L^\infty_x} \Vert b_2 \Vert_{L^\infty_t L^\infty_x} \Vert \nabla^2 \tilde{\VV} \Vert_{H^{(s-1)/2}_t L^2_x} \\& \les \epsilon \Vert \tilde{\VV} \Vert_{K^{s+1}} , \end{split} \end{align*} where we used Lemma~\ref{L08}--\ref{L09}. Other terms on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH248} are treated analogously as in Theorem~\ref{T03} using Lemma~\ref{L08}--\ref{L10}, and we arrive at \begin{align} \Vert \tilde{f} \Vert_{K^{s-1}} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} + \epsilon \Vert \tilde{\VV} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH291} \end{align} by taking $T>0$ sufficiently small. \par Next we estimate the $H^{s/2-1/4, s-1/2}_{\Gammac}$ norm of the terms on the right side of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH249} for $j=1,2,3$. 
The term $\lambda (1- \JJ_2) \partial_k \tilde{\VV}_j \nu^k$ is estimated as \begin{align*} \begin{split} & \Vert \lambda (1- \JJ_2) (\partial_k \tilde{\VV}_j + \partial_j \tilde{\VV}_k) \nu^k \Vert_{L^2_t H^{s-1/2} (\Gammac)} \\&\indeq \les \Vert (1- \JJ_2) (\partial_k \tilde{\VV}_j + \partial_j \tilde{\VV}_k) \nu^k \Vert_{L^2_t H^{s}_x} \les \Vert 1- \JJ_2 \Vert_{L^\infty_t H^s_x} \Vert \tilde{\VV} \Vert_{L^2_t H^{s+1}_x} \les \epsilon \Vert \tilde{\VV} \Vert_{K^{s+1}} \end{split} \end{align*} and \begin{align*} \begin{split} & \Vert \lambda (1- \JJ_2) (\partial_k \tilde{\VV}_j + \partial_j \tilde{\VV}_k) \nu^k \Vert_{H^{s/2-1/4}_t L^2 (\Gammac)} \\&\indeq \les \Vert (1- \JJ_2) (\partial_k \tilde{\VV}_j + \partial_j \tilde{\VV}_k) \Vert_{H^{s/2-1/4}_t H^{1/2}_x} \\&\indeq \les \Vert 1- \JJ_2 \Vert_{H^1_t H^s_x} \Vert \nabla \tilde{\VV} \Vert_{L^\infty_t H^{1/2}_x} + \Vert 1- \JJ_2 \Vert_{L^\infty_t H^s_x} \Vert \nabla \tilde{\VV} \Vert_{H^{s/2-1/4}_t H^{1/2}_x} \\&\indeq \les (\epsilon + C_\epsilon T^{1/2}) \Vert \tilde{\VV} \Vert_{K^{s+1}} , \end{split} \end{align*} where we used the trace inequality. 
The term $\mu \JJ_2 b_{2kj} \tilde{b}_{mi} \partial_m \bar{v}_{1i} \nu^k$ is estimated as \begin{align*} \begin{split} & \Vert \mu \JJ_2 b_{2kj} \tilde{b}_{mi} \partial_m \bar{v}_{1i} \nu^k \Vert_{L^2_t H^{s-1/2} (\Gammac)} \\&\indeq \les \Vert \JJ_2 b_{2kj} \tilde{b}_{mi} \partial_m \bar{v}_{1i} \Vert_{L^2_t H^s_x} \les \Vert \JJ_2 \Vert_{L^\infty_t H^s_x} \Vert b_2 \Vert_{L^\infty_t H^s_x} \Vert \tilde{b} \Vert_{L^\infty_t H^s_x} \Vert \nabla \bar{v}_1 \Vert_{L^2_t H^s_x} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} \end{split} \end{align*} and \begin{align*} \begin{split} & \Vert \mu \JJ_2 b_{2kj} \tilde{b}_{mi} \partial_m \bar{v}_{1i} \nu^k \Vert_{H^{s/2-1/4}_t L^2 (\Gammac)} \\&\indeq \les \Vert \JJ_2 b_{2kj} \tilde{b}_{mi} \partial_m \bar{v}_{1i} \nu^k \Vert_{H^{s/2-1/4}_t H^{1/2}_x} \\ &\indeq \les \Vert \JJ_2 \Vert_{H^1_t H^s_x} \Vert b_2 \Vert_{L^\infty_t H^s_x} \Vert \tilde{b} \Vert_{L^\infty_t H^s_x} \Vert \nabla \bar{v} \Vert_{L^\infty_t H^{1/2}_x} + \Vert \JJ_2 \Vert_{L^\infty_t H^s_x} \Vert b_2 \Vert_{H^1_t H^s_x} \Vert \tilde{b} \Vert_{L^\infty_t H^s_x} \Vert \nabla \bar{v} \Vert_{L^\infty_t H^{1/2}_x} \\&\indeq\indeq + \Vert \JJ_2 \Vert_{L^\infty_t H^s_x} \Vert b_2 \Vert_{L^\infty_t H^s_x} \Vert \tilde{b} \Vert_{H^1_t H^s_x} \Vert \nabla \bar{v} \Vert_{L^\infty_t H^{1/2}_x} + \Vert \JJ_2 \Vert_{L^\infty_t H^s_x} \Vert b_2 \Vert_{L^\infty_t H^s_x} \Vert \tilde{b} \Vert_{L^\infty_t H^s_x} \Vert \nabla \bar{v} \Vert_{H^{s/2-1/4}_t H^{1/2}_x} \\&\indeq \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} . 
\end{split} \end{align*} Other terms on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH249} are treated analogously as in Theorem~\ref{T03} using Lemma~\ref{L08}--\ref{L10}, and we arrive at \begin{align} \Vert \tilde{h} \Vert_{H^{s/2-1/4, s-1/2}_{\Gammac}} \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} + \epsilon \Vert \tilde{\VV} \Vert_{K^{s+1}} , \label{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH296} \end{align} by taking $T>0$ sufficiently small.
\par Since the terms involving $\Vert \tilde{\VV} \Vert_{K^{s+1}}$ on the right side of \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH291}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH296} are absorbed to the left side \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH293} by taking $\epsilon>0$ sufficiently small, we obtain from \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH293}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH296} that \begin{align*} \begin{split} \Vert \tilde{\VV}\Vert_{K^{s+1}} & \les \epsilon \Vert \tilde{v} \Vert_{K^{s+1}} , \end{split} \end{align*} completing the proof of~\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH298} by taking $\epsilon>0$ sufficiently small. From \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH202} and Lemma~\ref{L09} it follows that the system \eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH260}--\eqref{DKHGJBLHVKDJHVLIUEIORYJDKLLKDJHFLDGHLISODJLKJKGJKSFDJHBXCKUYWEJHSFGLSDHLISUDH267} admits a unique solution \begin{align*} \begin{split} (v, \RR, w, w_t) & \in K^{s+1} ((0,T_0)\times \Omegaf) \times H^1 ((0,T_0), H^s (\Omegaf)) \\& \indeqtimes C([0,T_0], H^{s+1/4 -\epsilon_0} (\Omegae)) \times C([0,T_0], H^{s-3/4 -\epsilon_0}(\Omegae)) , \end{split} \end{align*} with the corresponding norms bounded by a function of the initial data. \end{proof} \par \colb \section*{Acknowledgments} IK was supported in part by the NSF grant DMS-1907992, while LL was supported in part by the NSF grants DMS-2009458 and DMS-1907992. The work was undertaken while the authors were members of the MSRI program ``Mathematical problems in fluid dynamics'' during the Spring~2021 semester (NSF~DMS-1928930). \par \small
\end{document} |
\begin{document}
\begin{abstract}
We show that MA$_{\kappa}$ implies that each collection of ${P}_{\mathfrak c}$-points of size at most $\kappa$ which has a $P_{\mathfrak c}$-point as an $RK$ upper bound also has a ${P}_{\mathfrak c}$-point as an $RK$ lower bound.
\end{abstract}
\maketitle
\section{Introduction}
The Rudin-Keisler ($RK$) ordering of ultrafilters has received considerable attention since its introduction in the 1960s. For example, one can take a look at \cite{rudin,Rudin:66,mer,blass,kn,RS,RK}, or \cite{RV}.
Recall the definition of the Rudin-Keisler ordering.
\begin{definition}
Let $\mathcal U$ and $\mathcal V$ be ultrafilters on $\omega$. We say that $\mathcal U\le_{RK}\mathcal V$ if there is a function $f$ in $\omega^{\omega}$ such that $A\in \mathcal U$ if and only if $f^{-1}(A)\in\mathcal V$ for every $A\subseteq \omega$.
\end{definition}
When $\mathcal U$ and $\mathcal V$ are ultrafilters on $\omega$ and $\mathcal U\le_{RK}\mathcal V$, we say that $\mathcal U$ is \emph{Rudin-Keisler (RK) reducible} to $\mathcal V$, or that $\mathcal U$ is \emph{Rudin-Keisler (RK) below} $\mathcal V$.
If $\mathcal U\le_{RK}\mathcal V$ and $\mathcal V\le_{RK}\mathcal U$ both hold, then we say that $\mathcal U$ and $\mathcal V$ are \emph{Rudin-Keisler equivalent}, and write $\mathcal U\equiv_{RK}\mathcal V$.
Very early in the investigation of this ordering of ultrafilters, it was noticed that the class of P-points is particularly interesting.
Recall that an ultrafilter $\mathcal U$ on $\omega$ is called a \emph{P-point} if for any $\set{a_n:n<\omega}\subseteq\mathcal U$ there is an $a\in\mathcal U$ such that $a\subseteq^* a_n$ for every $n<\omega$, i.e. the set $a\setminus a_n$ is finite for every $n<\omega$.
P-points were first constructed by Rudin in \cite{rudin}, under the assumption of the Continuum Hypothesis.
The class of P-points forms a downwards closed initial segment of the class of all ultrafilters.
In other words, if $\mathcal U$ is a P-point and $\mathcal V$ is any ultrafilter on $\omega$ with $\mathcal V \; {\leq}_{RK} \; \mathcal U$, then $\mathcal V$ is also a P-point.
Hence understanding the order-theoretic structure of the class of P-points can provide information about the order-theoretic structure of the class of all ultrafilters on $\omega$.
One of the first systematic explorations of the order-theoretic properties of the class of all ultrafilters, and particularly of the class of P-points, under ${\leq}_{RK}$ was made by Blass in \cite{blassphd} and \cite{blass}, where he proved many results about this ordering under the assumption of Martin's Axiom (MA).
Let us note here that it is not possible to construct P-points in ZFC only, as was proved by Shelah (see \cite{proper}).
Thus some set-theoretic assumption is needed to ensure the existence of P-points.
The most commonly used assumption when studying the order-theoretic properties of the class of P-points is MA.
Under MA every ultrafilter has character $\mathfrak c$.
Therefore, the ${P}_{\mathfrak c}$-points are the most natural class of P-points to focus on under MA.
Again, the ${P}_{\mathfrak c}$-points form a downwards closed subclass of the P-points.
\begin{definition} \label{def:pc}
An ultrafilter $\mathcal U$ on $\omega$ is called a $P_{\mathfrak c}$\emph{-point} if for every $\alpha<\mathfrak c$ and any $\set{a_i:i<\alpha}\subseteq \mathcal U$ there is an $a\in\mathcal U$ such that $a\subseteq^* a_i$ for every $i<\alpha$.
\end{definition}
In Theorem 5 from \cite{blass}, Blass proved in ZFC that if $\set{\mathcal U_n:n<\omega}$ is a countable collection of P-points and if there is a P-point $\mathcal V$ such that $\mathcal U_n\le_{RK}\mathcal V$ for every $n<\omega$, then there is a P-point $\mathcal U$ such that $\mathcal U\le_{RK}\mathcal U_n$ for every $n<\omega$.
In other words, if a countable family of P-points has an upper bound, then it also has a lower bound.
The main result of this paper generalizes Blass' theorem to families of ${P}_{\mathfrak c}$-points of size less than $\mathfrak c$ under MA.
More precisely, if MA holds and a family of ${P}_{\mathfrak c}$-points of size less than $\mathfrak c$ has an $RK$ upper bound which is a ${P}_{\mathfrak c}$-point, then the family also has an RK lower bound.
Blass proved his result via some facts from \cite{blassmodeltheory} about non-standard models of complete arithmetic.
In order to state these results, we introduce a few notions from \cite{blassmodeltheory}.
The language $L$ will consist of symbols for all relations and all functions on $\omega$.
Let $N$ be the standard model for this language, its domain is $\omega$ and each relation or function denotes itself.
Let $M$ be an elementary extension of $N$, and let ${}^*R$ be the relation in $M$ denoted by $R$, and let ${}^*f$ be the function in $M$ denoted by $f$.
Note that if $a\in M$, then the set $\set{{}^*f(a):f:\omega\to\omega}$ is the domain of an elementary submodel of $M$.
A submodel like this, i.e.\@ one generated by a single element, will be called \emph{principal}.
It is not difficult to prove that a principal submodel generated by $a$ is isomorphic to the ultrapower of the standard model by the ultrafilter ${\mathcal U}_{a} = \{X \subseteq \omega: a \in {}^{\ast}{X}\}$.
If $A,B\subseteq M$, we say that they are \emph{cofinal with each other} iff $(\forall a\in A)(\exists b\in B)\ a\ {}^*\!\!\le b$ and $(\forall b\in B)(\exists a\in A)\ b\ {}^*\!\!\le a$.
Finally, we can state Blass' theorem.
\begin{theorem}[Blass, Theorem 3 in \cite{blassmodeltheory}]\label{t:blassmodel}
Let $M_i$ ($i<\omega$) be countably many pairwise cofinal submodels of $M$.
Assume that at least one of the $M_i$ is principal.
Then $\bigcap_{i<\omega}M_i$ is cofinal with each $M_i$, in fact it contains a principal submodel cofinal with each $M_i$.
\end{theorem}
After proving this theorem, Blass states that it is not known to him whether Theorem \ref{t:blassmodel} can be extended to larger collections of submodels.
The proof of our main result clarifies this, namely in Theorem \ref{theorem3} below we prove that under MA it is possible to extend it to collections of models of size less than $\mathfrak c$ provided that there is a principal model that is isomorphic to an ultrapower induced by a ${P}_{\mathfrak c}$-point.
Then we proceed and use this result to prove Theorem \ref{maintheorem} where we extend Theorem 5 from \cite{blass} to collections of fewer than $\mathfrak c$ many ${P}_{\mathfrak c}$-points.
Recall that MA$_{\alpha}$ is the statement that for every partial order $P$ which satisfies the countable chain condition and for every collection $\mathcal{D} = \{{D}_{i}: i < \alpha\}$ of dense subsets of $P$, there is a filter $G\subseteq P$ such that $G\cap {D}_{i}\neq\emptyset$ for every $i < \alpha$.
\section{The lower bound}
In this section we prove the results of the paper.
We begin with a purely combinatorial lemma about functions.
\begin{definition}\label{closedness}
Let $\alpha$ be an ordinal, let $\mathcal F=\set{f_i:i<\alpha}\subseteq \omega^{\omega}$ be a family of functions, and let $A$ be a subset of $\alpha$.
We say that a set $F\subseteq \omega$ is ($A,\mathcal F$)\emph{-closed} if $f_i^{-1}(f_i''F)\subseteq F$ for each $i\in A$.
\end{definition}
\begin{remark}
Notice that if $F$ is ($A,\mathcal F$)-closed, then $f_i^{-1}(f_i''F)=F$ for each $i\in A$.
\end{remark}
\begin{lemma}\label{finiteunion}
Let $\alpha$ be an ordinal, let $\mathcal F=\set{f_i:i<\alpha}\subseteq \omega^{\omega}$ be a family of functions, and let $A$ be a subset of $\alpha$.
Suppose that $m<\omega$, and that $F_k$ is an ($A,\mathcal F$)-closed subset of $\omega$, for each $k<m$.
Then the set $F=\bigcup_{k<m}F_k$ is ($A,\mathcal F$)-closed.
\end{lemma}
\begin{proof}
To prove that $F$ is ($A,\mathcal F$)-closed take any $i\in A$, and $n\in f_i^{-1}(f_i''F)$.
This means that there is some $n'\in F$ such that $f_i(n)=f_i(n')$.
Let $k<m$ be such that $n'\in F_k$.
Then $n\in f_i^{-1}(f_i''F_k)$.
Since $F_k$ is ($A,\mathcal F$)-closed, $n\in f_i^{-1}(f_i''F_k)\subseteq F_k$.
Thus $n\in F_k\subseteq F$.
\end{proof}
\begin{lemma}\label{ensuringcondition}
Let $\alpha<\mathfrak c$ be an ordinal.
Let $\mathcal F=\set{f_i:i<\alpha}\subseteq \omega^{\omega}$ be a family of finite-to-one functions.
Suppose that for each $i,j<\alpha$ with $i<j$, there is $l<\omega$ such that $f_j(n)=f_j(m)$ whenever $f_{i}(n)=f_{i}(m)$ and $n,m\ge l$.
Then for each finite $A\subseteq \alpha$, and each $n<\omega$, there is a finite $(A,\mathcal F)$-closed set $F$ such that $n\in F$.
\end{lemma}
\begin{proof}
First, if $A$ is empty, then we can take $F=\set{n}$.
So fix a non-empty finite $A\subseteq\alpha$, and $n<\omega$.
For each $i,j\in A$ such that $i<j$, by the assumption of the lemma, take $l_{ij}<\omega$ such that for each $n,m\ge l_{ij}$, if $f_i(n)=f_i(m)$, then $f_j(n)=f_j(m)$.
Since $A$ is a finite set, there is $l=\max\set{l_{ij}:i,j\in A,\ i<j}$.
So $l$ has the property that for every $i,j\in A$ with $i<j$, if $f_i(n)=f_i(m)$ and $n,m\ge l$, then $f_j(n)=f_j(m)$.
Let $i_0=\max(A)$.
Clearly, $f_i''l$ is finite for each $i\in A$, and since each $f_i$ is finite-to-one the set ${f}^{-1}_{i}(f_i''l)$ is finite for every $i\in A$.
Since the set $A$ is also finite, there is $l'<\omega$ such that $\bigcup_{i\in A}f_i^{-1}(f_i''l)\subseteq l'$.
Again, since $f_{i_0}$ is finite-to-one there is $l''<\omega$ such that $f^{-1}_{i_0}(f_{i_0}''l')\subseteq l''$.
Note that by the definition of numbers $l'$ and $l''$, we have $l''\ge l'\ge l$.
\begin{claim}\label{closedset}
For all $k<\omega$, if $k\ge l''$, then the set $f_{i_0}^{-1}(f_{i_0}''\set{k})$ is ($A,\mathcal F$)-closed.
\end{claim}
\begin{proof}
Fix $k\ge l''$ and let $X=f_{i_0}^{-1}(f_{i_0}''\set{k})$.
First observe that $X\cap l'=\emptyset$.
To see this suppose that there is $m\in X\cap l'$.
Since $m\in X$, $f_{i_0}(m)=f_{i_0}(k)$.
Together with $m\in l'$, this implies that $k\in f_{i_0}^{-1}(f_{i_0}''\set{m})\subseteq f_{i_0}^{-1}(f_{i_0}''l')\subseteq l''$.
Thus $k<l''$ contradicting the choice of $k$.
Secondly, observe that if $m<l$ and $k'\in X$, then $f_i(m)\neq f_i(k')$ for each $i\in A$.
To see this, fix $m<l$ and $k'\in X$, and suppose that for some $i\in A$, $f_i(m)=f_i(k')$.
This means that $k'\in f_i^{-1}(f_i''\set{m})\subseteq f_i^{-1}(f_i''l)\subseteq l'$ contradicting the fact that $X\cap l'=\emptyset$.
Now we will prove that $X$ is ($A,\mathcal F$)-closed.
Take any $i\in A$ and any $m\in f_i^{-1}(f_i''X)$.
We should prove that $m\in X$.
Since $m\in f_i^{-1}(f_i''X)$, $f_i(m)\in f_i''X$ so there is some $k'\in X$ such that $f_i(m)=f_i(k')$.
By the second observation, $m\ge l$.
By the first observation $k'\ge l'\ge l$.
By the assumption of the lemma, since $m,k'\ge l$, and $f_i(m)=f_i(k')$, it must be that $f_{i_0}(m)=f_{i_0}(k')$.
Since $k'\in X=f_{i_0}^{-1}(f_{i_0}''\set{k})$, it must be that $f_{i_0}(k)=f_{i_0}(k')=f_{i_0}(m)$.
This means that $m\in f_{i_0}^{-1}(f_{i_0}''\set{k})=X$ as required.
Thus $f_{i_0}^{-1}(f_{i_0}''\set{k})$ is $(A,\mathcal F)$-closed.
\end{proof}
Now we inductively build a tree $T\subseteq \omega^{<\omega}$ we will be using in the rest of the proof.
Fix a function $\Phi:\omega\to\omega^{<\omega}$ so that $\Phi^{-1}(\sigma)$ is infinite for each $\sigma\in \omega^{<\omega}$.
For each $m<\omega$ let $u_m=\Phi(m)(|\Phi(m)|-1)$, i.e. $u_m$ is the last element of the sequence $\Phi(m)$.
Let $T_0=\set{\emptyset,\seq{n}}$ (recall that $n$ is given in the statement of the lemma).
Suppose that $m\ge 1$, and that $T_m$ is given.
If $\Phi(m)$ is a leaf node of $T_m$, then let
\[\textstyle
Z_m=\left(\bigcup_{i\in A}f_i^{-1}(f_i''\set{u_m})\right)\setminus\left(\bigcup_{\eta\in T_m}\operatorname{range}(\eta)\right),
\]
and $T_{m+1}=T_m\cup\set{\Phi(m)^{\frown}\seq{k}:k\in Z_m}$.
If $\Phi(m)$ is not a leaf node of $T_m$, then $T_{m+1}=T_m$.
Finally, let $T=\bigcup_{m<\omega}T_m$ and $F=\bigcup_{\eta\in T}\operatorname{range}(\eta)$.
\begin{claim}\label{subclaimtree}
If $\sigma$ is a non-empty element of the tree $T$, then there is $m_0\ge 1$ such that $\sigma$ is a leaf node of $T_{m_0}$, that $\sigma=\Phi(m_0)$ and that $$\textstyle\bigcup_{i\in A}f_i^{-1}(f_i''\set{u_{m_0}})\subseteq \bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta).$$
\end{claim}
\begin{proof}
Fix a non-empty $\sigma$ in $T$.
Let $m_1=\min\set{k<\omega:\sigma\in T_k}$.
Since $|\sigma|>0$, $\sigma$ is a leaf node of $T_{m_1}$.
Consider the set $W=\set{m\ge m_1:\Phi(m)=\sigma}$.
Since the set $\set{m<\omega:\Phi(m)=\sigma}$ is infinite, $W$ is a non-empty subset of the positive integers, so it has a minimum.
Let $m_0=\min W$.
Note that if $m_0=m_1$, then $\sigma$ is a leaf node of $T_{m_0}$.
If $m_0>m_1$, by the construction of the tree $T$, since $\Phi(k)\neq \sigma$ whenever $m_1\le k<m_0$, it must be that $\sigma$ is a leaf node of every $T_k$ for $m_1<k\le m_0$.
Thus $\sigma$ is a leaf node of $T_{m_0}$ and $\Phi(m_0)=\sigma$.
Again by the construction of the tree $T$, we have $T_{m_0+1}=T_{m_0}\cup \set{\sigma^{\frown}\seq{k}:k\in Z_{m_0}}$.
This means that $$\textstyle\bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta)=Z_{m_0}\cup \bigcup_{\eta\in T_{m_0}}\operatorname{range}(\eta).$$
Finally, the definition of $Z_{m_0}$ implies that
\[\textstyle
\bigcup_{i\in A}f_i^{-1}(f_i''\set{u_{m_0}})\subseteq Z_{m_0}\cup\bigcup_{\eta\in T_{m_0}}\operatorname{range}(\eta)=\bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta),
\]
as required.
\end{proof}
\begin{claim}\label{closedtree}
The set $F$ is ($A,\mathcal F$)-closed, and contains $n$ as an element.
\end{claim}
\begin{proof}
Since $\seq{n}\in T_0$, $n\in F$.
To see that $F$ is ($A,\mathcal F$)-closed, take any $j\in A$, and any $w\in f_j^{-1}(f_j''F)$.
We have to show that $w\in F$.
Since $w\in f_j^{-1}(f_j''F)$, there is $m\in F$ such that $f_j(w)=f_j(m)$.
Since $m\in F=\bigcup_{\eta\in T}\operatorname{range}(\eta)$, there is $\sigma$ in $T$ such that $\sigma(k)=m$ for some $k<\omega$.
Consider $\sigma\upharpoonright(k+1)$.
Since $\sigma\upharpoonright(k+1)\in T$, by Claim \ref{subclaimtree} there is $m_0\ge 1$ such that $\Phi(m_0)=\sigma\upharpoonright(k+1)$, that $\sigma\upharpoonright(k+1)$ is a leaf node of $T_{m_0}$ and that (note that $u_{m_0}=\sigma(k)=m$)
\[\textstyle
\bigcup_{i\in A}f_i^{-1}(f_i''\set{m})\subseteq \bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta)\subseteq \bigcup_{\eta\in T}\operatorname{range}(\eta)=F.
\]
So $w\in f_j^{-1}(f_j''\set{m})\subseteq F$ as required.
\end{proof}
\begin{claim}\label{finitetree}
The tree $T$ is finite.
\end{claim}
\begin{proof}
First we prove that each level of $T$ is finite.
For $k<\omega$ let $T_{(k)}$ be the $k$-th level of $T$, i.e. $T_{(k)}=\set{\sigma\in T: |\sigma|=k}$.
Clearly $T_{(0)}$ and $T_{(1)}$ are finite.
So suppose that $T_{(k)}$ is finite.
Let $T_{(k)}=\set{\sigma_0,\sigma_1,\dots,\sigma_t}$ be an enumeration of that level.
For $s\le t$ let $m_s$ be such that $\Phi(m_s)=\sigma_s$ and that $\sigma_s$ is a leaf node of $T_{m_s}$.
Note that by the construction of the tree $T$ all nodes at the level $T_{(k+1)}$ are of the form $\sigma_s^{\frown}\seq{r}$ where $s\le t$ and $r\in Z_{m_s}$.
Since the set $A$ is finite and all functions $f_i$ (for $i\in A$) are finite-to-one, $Z_{m_s}$ is finite for every $s\le t$.
Thus there are only finitely many nodes of the form $\sigma_s^{\frown}\seq{r}$ where $s\le t$ and $r\in Z_{m_s}$, hence the level $T_{(k+1)}$ must also be finite.
This proves by induction that each level of $T$ is finite.
Suppose now that $T$ is infinite.
By K\"{o}nig's lemma, since each level of $T$ is finite, $T$ has an infinite branch $b$.
By definition of the sets $Z_m$ ($m<\omega$), each node of $T$ is a 1-1 function, so $b$ is also an injection from $\omega$ into $\omega$.
In particular, the range of $b$ is infinite.
Let $k=\min\set{m<\omega:b(m)\ge l''}$, and let $\sigma=b\upharpoonright(k+1)$.
Clearly, $\sigma\in T$.
By Claim \ref{subclaimtree}, there is $m_0<\omega$ such that $\sigma$ is a leaf node of $T_{m_0}$, that $\Phi(m_0)=\sigma$, and that $\bigcup_{i\in A}f_i^{-1}(f_i''\set{\sigma(k)})\subseteq \bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta)$.
Since $\sigma(k)=b(k)\ge l''$, Claim \ref{closedset} implies that the set $Y=f_{i_0}^{-1}(f_{i_0}''\set{\sigma(k)})$ is $(A,\mathcal F)$-closed.
By the construction $T_{m_0+1}=T_{m_0}\cup\set{\sigma^{\frown}\seq{m}:m\in Z_{m_0}}$.
Since $b$ is an infinite branch, there is $m'\in Z_{m_0}$ such that $b(k+1)=m'$.
Now $m'\in Z_{m_0}\subseteq \bigcup_{i\in A}f_i^{-1}(f_i''\set{\sigma(k)})$, the fact that $\sigma(k)\in Y$, and the fact that $Y$ is ($A,\mathcal F$)-closed, together imply that
$$\textstyle m'\in \bigcup_{i\in A}f_i^{-1}(f_i''\set{\sigma(k)})\subseteq \bigcup_{i\in A}f_i^{-1}(f_i''Y)\subseteq Y.$$
Consider the node $\tau=\sigma^{\frown}\seq{m'}=b\upharpoonright(k+2)$.
Since $b$ is an infinite branch, it must be that $\tau^{\frown}\seq{b(k+2)}\in T$.
By Claim \ref{subclaimtree}, there is $m_1$ such that $\tau$ is a leaf node of $T_{m_1}$ and that $\Phi(m_1)=\tau$.
Clearly, $m_1>m_0$ and $\tau^{\frown}\seq{b(k+2)}\in T_{m_1+1}$.
Recall that we have already shown that $\bigcup_{i\in A}f_i^{-1}(f_i''\set{\sigma(k)})\subseteq \bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta)$.
Thus $Y\subseteq\bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta)$.
This, together with the fact that $\tau(k+1)=m'\in Y$, that $Y$ is $(A,\mathcal F)$-closed, and $m_1>m_0$ jointly imply that
\[\textstyle
\bigcup_{i\in A}f_i^{-1}(f_i''\set{\tau(k+1)})\subseteq Y\subseteq\bigcup_{\eta\in T_{m_0+1}}\operatorname{range}(\eta)\subseteq \bigcup_{\eta\in T_{m_1}}\operatorname{range}(\eta).
\]
This means that
\[\textstyle
b(k+2)\in Z_{m_1}=\bigcup_{i\in A}f_i^{-1}(f_i''\set{\tau(k+1)})\setminus \bigcup_{\eta\in T_{m_1}}\operatorname{range}(\eta)=\emptyset,
\]
which is clearly impossible.
Thus, $T$ is not infinite.
\end{proof}
The following lemma is the main application of Martin's Axiom.
Again, it does not directly deal with ultrafilters, but with collections of functions.
\begin{lemma}[MA$_{\alpha}$]\label{mainlemma}
Let $\mathcal F=\set{f_i:i<\alpha}\subseteq \omega^{\omega}$ be a family of finite-to-one functions.
Suppose that for each non-empty finite set $A\subseteq\alpha$, and each $n<\omega$, there is a finite ($A,\mathcal F$)-closed set $F$ containing $n$ as an element.
Then there is a finite-to-one function $h\in\omega^{\omega}$, and a collection $\set{e_i:i<\alpha}\subseteq \omega^{\omega}$ such that for each $i<\alpha$, there is $l<\omega$ such that $h(n)=e_i(f_i(n))$ whenever $n\ge l$.
\end{lemma}
\begin{proof}
We will apply MA$_{\alpha}$, so we first define the poset we will be using. Let $\mathcal P$ be the set of all $p=\seq{g_p,h_p}$ such that
\begin{enumerate}[label=(\Roman*)]
\item\label{uslov1} $h_p:N_p\to \omega$ where $N_p$ is a finite subset of $\omega$,
\item\label{uslov2} $g_p=\seq{g^i_p:i\in A_p}$ where $A_p\in [\alpha]^{<\aleph_0}$, and $g^i_p:f_i''N_p\to \omega$ for each $i\in A_p$,
\item\label{uslov3} $N_p$ is ($A_p,\mathcal F$)-closed.
\end{enumerate}
Define the ordering relation $\le$ on $\mathcal P$ as follows: $q\le p$ iff
\begin{enumerate}[resume,label=(\Roman*)]
\item\label{ext1} $N_p\subseteq N_q$,
\item\label{ext2} $A_p\subseteq A_q$,
\item\label{ext3} $h_q\upharpoonright N_p=h_p$,
\item\label{ext4} $g_q^i\upharpoonright f_i''N_p=g_p^i$ for each $i\in A_p$,
\item\label{ext5} $h_q(n)>h_q(m)$ whenever $m\in N_p$ and $n\in N_q\setminus N_p$,
\item\label{ext6} $h_q(n)=g_q^i(f_i(n))$ for each $n\in N_q\setminus N_p$ and $i\in A_p$.
\end{enumerate}
It is clear that $\seq{\mathcal P,\le}$ is a partially ordered set.
\begin{claim}\label{addingn}
Let $p\in \mathcal P$, $n_0<\omega$, and suppose that $A\subseteq \alpha$ is finite such that $A_p\subseteq A$.
Then there is $q\le p$ such that $n_0\subseteq N_q$ and that $N_q$ is ($A,\mathcal F$)-closed.
\end{claim}
\begin{proof}
Applying the assumption of the lemma to the finite set $A$, and each $k\in n_0\cup N_p$, we obtain sets $F_k$ ($k\in n_0\cup N_p$) such that $k\in F_k$ and $f_i^{-1}(f_i''F_k)\subseteq F_k$ for each $k\in n_0\cup N_p$ and $i\in A$.
Let $N_q=\bigcup_{k\in n_0\cup N_p}F_k$, let $A_q=A_p$, and let $t=\max\set{h_p(k)+1:k\in N_p}$.
Finally, define
\[
h_q(n)=\left\{\begin{array}{l} h_p(n),\ \mbox{if}\ n\in N_p\\
t,\ \mbox{if}\ n\in N_q\setminus N_p\end{array}\right.\ \mbox{and}\ g^i_q(k)=\left\{\begin{array}{l} g^i_p(k),\ \mbox{if}\ k\in f_i''N_p\\
t,\ \mbox{if}\ k\in f_i''N_q\setminus f_i''N_p\end{array}\right.\ \mbox{for}\ i\in A_q.
\]
Let $g_q$ denote the sequence $\seq{g^i_q:i\in A_q}$.
Clearly $n_0\subseteq N_q$.
By Lemma \ref{finiteunion}, $N_q$ is ($A,\mathcal F$)-closed.
We still have to show that $q=\seq{g_q,h_q}\in \mathcal P$ and $q\le p$.
Since $h_q$ is defined on $N_q$ and $N_q$ is finite, since $A_q=A_p$ and $g_q^i$ is defined on $f_i''N_q$ for $i\in A_p$, and since $N_q$ is $(A_q,\mathcal F)$-closed, conditions \ref{uslov1}-\ref{uslov3} are satisfied by $q$.
Thus $q\in \mathcal P$.
Next we show $q\le p$.
Conditions \ref{ext1}-\ref{ext4} are obviously satisfied by the definition of $q$.
Since $h_q(n)=t>h_p(k)=h_q(k)$ for each $n\in N_q\setminus N_p$ and $k\in N_p$, condition \ref{ext5} is also satisfied.
So we still have to check \ref{ext6}.
Take any $i\in A_p$ and $n\in N_q\setminus N_p$.
By the definition of $h_q$, we have $h_q(n)=t$.
Once we prove that $f_i(n)\in f_i''N_q\setminus f_i''N_p$, we will be done because in that case the definition of $g^i_q$ implies that $g^i_q(f_i(n))=t$ as required.
So suppose the contrary, that $f_i(n)\in f_i''N_p$.
Since $p$ is a condition and $A_q=A_p$, it must be that $n\in f_i^{-1}(f_i''N_p)\subseteq N_p$.
But this contradicts the choice of $n$.
Thus condition \ref{ext6} is also satisfied and $q\le p$.
\end{proof}
\begin{claim}\label{addingj}
Let $p\in\mathcal P$, and $j_0<\alpha$. Then there is $q\le p$ such that $j_0\in A_q$.
\end{claim}
\begin{proof}
Let $A_q=A_p\cup\set{j_0}$.
Applying Claim \ref{addingn} to $A_q$ and $n=0$, we obtain a condition $p'\le p$ such that $N_{p'}$ is $(A_q,\mathcal F)$-closed.
Let $N_q=N_{p'}$, $h_{q}=h_{p'}$, and $g^i_q=g^i_{p'}$ for $i\in A_p$.
Define $g^{j_0}_q(k)=0$ for each $k\in f_{j_0}''N_{p'}$, and let $g_q$ denote the sequence $\seq{g^i_q:i\in A_q}$.
Since $j_0\in A_q$, to finish the proof of the claim it is enough to show that $q=\seq{g_q,h_q}\in \mathcal P$, and that $q\le p'$.
Conditions \ref{uslov1}-\ref{uslov3} are clear from the definition of $q$ because $p'\in\mathcal P$ and $N_q=N_{p'}$, $h_q=h_{p'}$, $g_q^{j_0}:f_{j_0}''N_q\to\omega$, and $g_q^i=g_{p'}^i$ for $i\in A_q\setminus\set{j_0}$.
Conditions \ref{ext1}-\ref{ext4} are clear by the definition of $h_q$ and $g_q$.
Conditions \ref{ext5} and \ref{ext6} are vacuously true because $N_{p'}=N_q$.
Thus the claim is proved.
\end{proof}
\begin{claim}\label{merging}
If $p,q\in \mathcal P$ are such that $h_p=h_q$ and $g^i_p=g^i_q$ for $i\in A_p\cap A_q$, then $p$ and $q$ are compatible in $\mathcal P$.
\end{claim}
\begin{proof}
We proceed to define $r\le p,q$.
Let $N=N_p=N_q$.
Let $$t=\max\set{h_p(n)+1:n\in N}.$$
Applying the assumption of the lemma to $A=A_p\cup A_q$, and each $k\in N$, we obtain ($A,\mathcal F$)-closed sets $F_k$ ($k\in N$).
By Lemma \ref{finiteunion}, the set $N_r=\bigcup_{k\in N}F_k$ is ($A,\mathcal F$)-closed.
Let $A_r=A$, and define
\[
h_r(n)=\left\{\begin{array}{l} h_p(n),\mbox{ if}\ n\in N\\[1mm]
t,\mbox{ if}\ n\in N_r\setminus N\end{array}\right.\hskip-1.5mm\mbox{and }g^i_r(k)=\left\{\begin{array}{l} g^i_p(k),\mbox{ if}\ i\in A_p\mbox{ and }k\in f_i''N\\[1mm]
g^i_q(k),\mbox{ if}\ i\in A_q\mbox{ and }k\in f_i''N\\[1mm]
t,\ \mbox{ if}\ k\in f_i''N_r\setminus f_i''N\end{array}\right.\hskip-1.5mm,
\]
for $i\in A_r$. Let $g_r$ denote the sequence $\seq{g^i_r:i\in A_r}$.
As we have already mentioned, $r=\seq{h_r,g_r}$ satisfies \ref{uslov3}, and it is clear that \ref{uslov1} and \ref{uslov2} are also true for $r$.
To see that $r\le p$ and $r\le q$ note that conditions \ref{ext1}-\ref{ext5} are clearly satisfied.
We will check that $r$ and $p$ satisfy \ref{ext6} also.
Take any $n\in N_r\setminus N$ and $i\in A_p$.
By the definition of $h_r$, $h_r(n)=t$.
By the definition of $g_r^i$, if $f_i(n)\in f_i''N_r\setminus f_i''N$, then $g^i_r(f_i(n))=t=h_r(n)$.
So suppose this is not the case, i.e. that $f_i(n)\in f_i''N$.
This would mean that $n\in f_i^{-1}(f_i''N)$, which is impossible because $n\notin N$ and $N$ is $(A,\mathcal F)$-closed because $p\in \mathcal P$.
Thus we proved $r\le p$.
In the same way it can be shown that $r\le q$.
\end{proof}
\begin{claim}\label{ccc}
The poset $\mathcal P$ satisfies the countable chain condition.
\end{claim}
\begin{proof}
Let $\seq{p_{\xi}:\xi<\omega_1}$ be an uncountable set of conditions in $\mathcal P$.
Since $h_{p_{\xi}}\in [\omega\times\omega]^{<\omega}$ for each $\xi<\omega_1$, there is an uncountable set $\Gamma\subseteq\omega_1$, and $h\in [\omega\times\omega]^{<\omega}$ such that $h_{p_{\xi}}=h$ for each $\xi\in \Gamma$.
Consider the set $\seq{A_{p_{\xi}}:\xi\in\Gamma}$.
By the $\Delta$-system lemma, there is an uncountable set $\Delta\subseteq\Gamma$, and a finite set $A\subseteq\alpha$ such that $A_{p_{\xi}}\cap A_{p_{\eta}}=A$ for each $\xi,\eta\in\Delta$.
Since $\Delta$ is uncountable and $g_{p_{\xi}}^i\in [\omega\times\omega]^{<\omega}$ for each $i\in A$ and $\xi\in\Delta$, there is an uncountable set $\Theta\subseteq\Delta$ and $g_i$ for each $i\in A$, such that $g^i_{p_{\xi}}=g_i$ for each $\xi\in\Theta$ and $i\in A$.
Let $\xi$ and $\eta$ in $\Theta$ be arbitrary.
By Claim \ref{merging}, $p_{\xi}$ and $p_{\eta}$ are compatible in $\mathcal P$.
\end{proof}
Consider sets $D_{j}=\set{p\in \mathcal P: j\in A_p}$ for $j\in\alpha$, and $D_{m}=\set{p\in \mathcal P:m\in N_p}$ for $m\in\omega$.
By Claim \ref{addingj} and Claim \ref{addingn}, these sets are dense in $\mathcal P$.
By MA$_{\alpha}$ there is a filter $\mathcal G\subseteq \mathcal P$ intersecting all these sets.
Clearly, $h=\bigcup_{p\in \mathcal G}h_p$ and $e_i=\bigcup_{p\in\mathcal G}g_p^i$, for each $i\in \alpha$, are functions from $\omega$ into $\omega$.
We will prove that these functions satisfy the conclusion of the lemma.
First we will prove that $h$ is finite-to-one.
Take any $m\in \omega$ and let $k=h(m)$.
By the definition of $h$, there is $p\in \mathcal G$ such that $h_p(m)=k$.
Suppose that $h^{-1}(\set{k})\not\subseteq N_p$.
This means that there is an integer $m_0\notin N_p$ such that $h(m_0)=k$.
Let $q\in \mathcal G$ be such that $h_q(m_0)=k$.
Now for a common extension $r\in\mathcal G$ of both $p$ and $q$, it must be that $h_r(m_0)=h_p(m)$, contradicting the fact that $r\le p$, in particular condition \ref{ext5} is violated in this case.
We still have to show that for each $i\in \alpha$, there is $l\in\omega$ such that $h(n)=e_i(f_i(n))$ whenever $n\ge l$.
So take $i\in\alpha$.
By Claim \ref{addingj}, there is $p\in\mathcal G$ such that $i\in A_p$.
Let $l=\max(N_p)+1$.
We will prove that $l$ is as required.
Take any $n\ge l$.
By Claim \ref{addingn}, there is $q\in\mathcal G$ such that $n\in N_q$.
Let $r\in \mathcal G$ be a common extension of $p$ and $q$.
Since $n\notin N_p$ and $r\le p$, it must be that $h_r(n)=g_r^i(f_i(n))$, according to condition \ref{ext6}.
Hence $h(n)=e_i(f_i(n))$, as required.
\end{proof}
Before we move to the next lemma let us recall that if $c$ is any element of the model $M$, then $\mathcal U=\set{X\subseteq \omega: c\in {}^*X}$ is an ultrafilter on $\omega$.
\begin{lemma}\label{ensuringensuring}
Let $\alpha<\mathfrak c$ be an ordinal.
Let $\seq{M_i:i<\alpha}$ be a $\subseteq$-decreasing sequence of principal submodels of $M$, i.e. each $M_i$ is generated by a single element $a_i$ and $M_j\subseteq M_i$ whenever $i<j<\alpha$.
Let each $M_i$ ($i<\alpha$) be cofinal with $M_0$.
Suppose that $\mathcal U_0=\set{X\subseteq \omega: a_0\in \prescript{*}{}X}$ is a $P_{\mathfrak c}$-point.
Then there is a family $\set{f_i:i<\alpha}\subseteq \omega^{\omega}$ of finite-to-one functions such that $\prescript{*}{}f_i(a_0)=a_i$ for $i<\alpha$, and that for $i,j<\alpha$ with $i<j$, there is $l<\omega$ such that $f_j(n)=f_j(m)$ whenever $f_i(n)=f_i(m)$ and $m,n\ge l$.
\end{lemma}
\begin{proof}
Let $i<j<\alpha$. Since $M_j\subseteq M_i$, and $M_i$ is generated by $a_i$, there is a function $\varphi_{ij}:\omega\to\omega$ such that $\prescript{*}{}\varphi_{ij}(a_i)=a_j$.
Since $M_j$ is cofinal with $M_i$, by Lemma in \cite[page 104]{blassmodeltheory}, if $i<j<\alpha$, then there is a set $Y_{ij}\subseteq\omega$ such that $a_i\in \prescript{*}{}Y_{ij}$ and that $\varphi_{ij}\upharpoonright Y_{ij}$ is finite-to-one.
For $i<\alpha$ let $g_i:\omega\to\omega$ be defined as follows: if $n<\omega$, then for $n\notin Y_{0i}$ let $g_i(n)=n$, while for $n\in Y_{0i}$ let $g_i(n)=\varphi_{0i}(n)$.
Note that $\prescript{*}{}g_i(a_0)=a_i$, and that $g_i$ is finite-to-one.
The latter fact follows since $g_i$ is one-to-one on $\omega\setminus Y_{0i}$ and on $Y_{0i}$ it is equal to $\varphi_{0i}$, which is finite-to-one on $Y_{0i}$.
Now by the second part of Lemma on page 104 in \cite{blassmodeltheory}, for $i<j<\alpha$ there is a finite-to-one function $\pi_{ij}:\omega\to\omega$ such that $\prescript{*}{}\varphi_{ij}(a_i)=\prescript{*}{}\pi_{ij}(a_i)$.
Note that this means that $\prescript{*}{}g_j(a_0)=\prescript{*}{}\pi_{ij}(\prescript{*}{}g_i(a_0))$ for $i<j<\alpha$, i.e. the set $X_{ij}=\set{n\in\omega:g_j(n)=\pi_{ij}(g_i(n))}$ is in $\mathcal U_0$.
Since $\alpha<\mathfrak c$ and $\mathcal U_0$ is a $P_{\mathfrak c}$-point, there is a set $X\subseteq \omega$ such that $X\in\mathcal U_0$ and that the set $X\setminus X_{ij}$ is finite whenever $i<j<\alpha$.
Consider the sets $W_i=g_i''X$ for $i<\alpha$.
For each $i<\alpha$, let $W_i=W_i^0\cup W_i^1$ where $W_i^0\cap W_i^1=\emptyset$ and both $W_i^0$ and $W_i^1$ are infinite.
Fix $i<\alpha$ for a moment.
We know that
\[\textstyle
X=\left(X\cap \bigcup_{n\in W_i^0}g_i^{-1}(\set{n})\right)\cup \left(X\cap \bigcup_{n\in W_i^1}g_i^{-1}(\set{n})\right).
\]
Since $X\in\mathcal U_0$ and $\mathcal U_0$ is an ultrafilter, we have that either $X\cap \bigcup_{n\in W_i^0}g_i^{-1}(\set{n})\in\mathcal U_0$ or $X\cap \bigcup_{n\in W_i^1}g_i^{-1}(\set{n})\in\mathcal U_0$.
Suppose that $Y=X\cap \bigcup_{n\in W_i^0}g_i^{-1}(\set{n})\in \mathcal U_0$ (the other case would be handled similarly).
Note that by the definition of $\mathcal U_0$ we know that $a_0\in {}^*Y$.
Define $f_i:\omega\to\omega$ as follows: for $n\in Y$ let $f_i(n)=g_i(n)$, while for $n\notin Y$ let $f_i(n)=W_i^1(n)$.
Now that functions $f_i$ are defined, unfix $i$.
We will prove that $\mathcal F=\set{f_i:i<\alpha}$ has all the properties from the conclusion of the lemma.
Since each $g_i$ ($i<\alpha$) is finite-to-one, it is clear that $f_i$ is also finite-to-one.
Again, this is because $g_i$ is finite-to-one on $\omega$, and outside of $Y$ the function $f_i$ is defined so that it is one-to-one.
Since $\prescript{*}{}g_i(a_0)=a_i$ and $a_0\in {}^*Y$, it must be that $\prescript{*}{}f_i(a_0)=a_i$ for each $i<\alpha$.
Now we prove the last property.
Suppose that $i<j<\alpha$.
Since the set $X\setminus X_{ij}$ is finite and $Y\subseteq X$, there is $l<\omega$ so that $Y\setminus l\subseteq X_{ij}$.
Take $m,n\ge l$, and suppose that $f_i(n)=f_i(m)$.
There are three cases.
First, $n,m\notin Y$.
In this case, $f_i(n)=f_i(m)$ implies that $W_i^1(n)=W_i^1(m)$, i.e. $n=m$.
Hence $f_j(n)=f_j(m)$.
Second, $m\in Y$ and $n\notin Y$.
Since $m\in Y$, $g_i(m)=f_i(m)=f_i(n)$ so $f_i(n)\in W_i^0$.
On the other hand, since $n\notin Y$, $f_i(n)=W_i^1(n)$.
Thus we have $f_i(n)\in W_i^0\cap W_i^1$ which is in contradiction with the fact that $W_i^0\cap W_i^1=\emptyset$.
So this case is not possible.
Third, $m,n\in Y$.
In this case $f_i(n)=f_i(m)$ implies that $g_i(n)=g_i(m)$.
Since $m,n\in Y\setminus l\subseteq X_{ij}$ it must be that $f_j(n)=g_j(n)=\pi_{ij}(g_i(n))=\pi_{ij}(g_i(m))=g_j(m)=f_j(m)$ as required.
Thus the lemma is proved.
\end{proof}
\betaegin{lemma}[MA$_{\alpha}$]\lambdaabel{ensuringtheorem3}
Let $\seq{M_i:i<\alpha}$ be a $\subseteq$-decreasing sequence of principal, and pairwise cofinal submodels of $M$.
Suppose that $\mathcal U_0=\set{X\subseteq \omega: a_0\in \prescript{*}{}X}$ is a $P_{\mathfrak c}$-point, where $a_0$ generates $M_0$.
Then there is an element $c\in\bigcap_{i<\alpha}M_i$ which generates a principal model cofinal with all $M_i$ ($i<\alpha$).
\end{lemma}
\begin{proof}
Let $a_i$ for $i<\alpha$ be an element generating $M_i$.
By Lemma \ref{ensuringensuring} there is a family $\mathcal F=\set{f_i:i<\alpha}\subseteq \omega^{\omega}$ of finite-to-one functions such that $\prescript{*}{}f_i(a_0)=a_i$ for $i<\alpha$, and that for $i,j<\alpha$ with $i<j$, there is $l<\omega$ such that $f_j(n)=f_j(m)$ whenever $f_i(n)=f_i(m)$ and $m,n\ge l$.
By Lemma \ref{ensuringcondition}, for each finite $A\subseteq \alpha$, and each $n<\omega$, there is a finite ($A,\mathcal F$)-closed set containing $n$ as an element.
Now using MA$_{\alpha}$, Lemma \ref{mainlemma} implies that there is a finite-to-one function $h\in\omega^{\omega}$, and a collection $\set{e_i:i<\alpha}\subseteq\omega^{\omega}$ such that for each $i<\alpha$ there is $l<\omega$ such that $h(n)=e_i(f_i(n))$ whenever $n\ge l$.
Let $c=\prescript{*}{}h(a_0)$, and let $M_{\alpha}$ be a model generated by $c$.
By Lemma in \cite[pp. 104]{blassmodeltheory}, $M_{\alpha}$ is cofinal with $M_0$.
Thus $M_{\alpha}$ is a principal model cofinal with all $M_i$ ($i<\alpha$).
To finish the proof we still have to show that $c\in \bigcap_{i<\alpha}M_i$.
Fix $i<\alpha$.
Let $l<\omega$ be such that $h(n)=e_i(f_i(n))$ for $n\ge l$.
If $a_0<l$, then $M_j=M$ for each $j<\alpha$ so the conclusion is trivially satisfied. So $a_0\prescript{*}{}\ge l$.
Since the sentence $(\forall n) [n\ge l\Rightarrow h(n)=e_i(f_i(n))]$ is true in $M$, it is also true in $M_0$.
Thus $c=\prescript{*}{}h(a_0)=\prescript{*}{}e_i(\prescript{*}{}f_i(a_0))=\prescript{*}{}e_i(a_i)\in M_i$ as required.
\end{proof}
\begin{theorem}[MA$_{\alpha}$]\label{theorem3}
Let $M_i$ ($i<\alpha$) be a collection of pairwise cofinal submodels of $M$.
Suppose that $M_{0}$ is principal, and that $\mathcal U_0=\set{X\subseteq \omega: a_0\in \prescript{*}{}X}$ is a $P_{\mathfrak c}$-point, where $a_0$ generates $M_0$.
Then $\bigcap_{i<\alpha}M_i$ contains a principal submodel cofinal with each $M_i$.
\end{theorem}
\begin{proof}
We define models $M'_i$ for $i\le\alpha$ as follows. $M'_0=M_0$. If $M'_i$ is defined, then $M'_{i+1}$ is a principal submodel of $M'_i\cap M_{i+1}$ cofinal with $M'_i$ and $M_{i+1}$. This model exists by Corollary in \cite[pp. 105]{blassmodeltheory}. If $i\le \alpha$ is limit, then the model $M'_i$ is a principal model cofinal with all $M'_j$ ($j<i$). This model exists by Lemma \ref{ensuringtheorem3}. Now the model $M'_{\alpha}$ is as required in the conclusion of the lemma.
\end{proof}
\begin{theorem}[MA$_{\alpha}$]\label{maintheorem}
Suppose that $\set{\mathcal U_i:i<\alpha}$ is a collection of P-points.
Suppose moreover that $\mathcal U_0$ is a P$_{\mathfrak c}$-point such that $\mathcal U_i\le_{RK}\mathcal U_0$ for each $i<\alpha$.
Then there is a P-point $\mathcal U$ such that $\mathcal U\le_{RK} \mathcal U_i$ for each $i$.
\end{theorem}
\begin{proof}
By Theorem 3 of \cite{blass}, $\omega^{\omega}/\mathcal U_i$ is isomorphic to an elementary submodel $M_i$ of $\omega^{\omega}/\mathcal U_0$.
Since all $\mathcal U_i$ ($i<\alpha$) are non-principal, each model $M_i$ ($i<\alpha$) is non-standard.
By Corollary in \cite[pp. 150]{blass}, each $M_i$ ($i<\alpha$) is cofinal with $M_0$.
This implies that all the models $M_i$ ($i<\alpha$) are pairwise cofinal with each other.
By Theorem \ref{theorem3} there is a principal model $M'$ which is a subset of each $M_i$ ($i<\alpha$) and is cofinal with $M_0$.
Since $M'$ is principal, there is an element $a$ generating $M'$.
Let $\mathcal U=\set{X\subseteq \omega:a\in \prescript{*}{}X}$.
Then $\omega^{\omega}/\mathcal U\cong M'$.
Since $M'$ is cofinal with $M_0$, $M'$ is not the standard model.
Thus $\mathcal U$ is non-principal.
Now $M'\prec M_i$ ($i<\alpha$) implies that $\mathcal U\le_{RK}\mathcal U_i$ (again using Theorem 3 of \cite{blass}).
Since $\mathcal U$ is Rudin-Keisler below a $P$-point, $\mathcal U$ is also a $P$-point.
\end{proof}
\begin{corollary}[MA] \label{cor:lower}
If a collection of fewer than $\mathfrak c$ many ${P}_{\mathfrak c}$-points has an upper bound which is a ${P}_{\mathfrak c}$-point, then it has a lower bound.
\end{corollary}
\begin{corollary}[MA] \label{cor:closed}
The class of ${P}_{\mathfrak c}$-points is downwards $< \mathfrak c$-closed under ${\leq}_{RK}$.
In other words, if $\alpha < \mathfrak c$ and $\langle {\mathcal U}_{i}: i < \alpha \rangle$ is a sequence of ${P}_{\mathfrak c}$-points such that $\forall i < j < \alpha\left[{\mathcal U}_{j} \: {\leq}_{RK} \: {\mathcal U}_{i}\right]$, then there is a ${P}_{\mathfrak c}$-point $\mathcal U$ such that $\forall i < \alpha\left[ \mathcal U \: {\leq}_{RK} \: {\mathcal U}_{i} \right]$.
\end{corollary}
\begin{thebibliography}{10}
\bibitem{blassmodeltheory}
Andreas Blass.
\newblock The intersection of nonstandard models of arithmetic.
\newblock {\em J. Symbolic Logic}, 37:103--106, 1972.
\bibitem{blass}
Andreas Blass.
\newblock The {R}udin-{K}eisler ordering of {$P$}-points.
\newblock {\em Trans. Amer. Math. Soc.}, 179:145--166, 1973.
\bibitem{blassphd}
Andreas~Raphael Blass.
\newblock {\em O{RDERINGS} {OF} {ULTRAFILTERS}}.
\newblock ProQuest LLC, Ann Arbor, MI, 1970.
\newblock Thesis (Ph.D.)--Harvard University.
\bibitem{kn}
K.~Kunen.
\newblock Ultrafilters and independent sets.
\newblock {\em Trans. Amer. Math. Soc.}, 172:299--306, 1972.
\bibitem{RK}
Borisa Kuzeljevic and Dilip Raghavan.
\newblock A long chain of {P}-points.
\newblock {\em J. Math. Log.}, 18(1):1850004, 38, 2018.
\bibitem{RS}
Dilip Raghavan and Saharon Shelah.
\newblock On embedding certain partial orders into the {P}-points under
{R}udin-{K}eisler and {T}ukey reducibility.
\newblock {\em Trans. Amer. Math. Soc.}, 369(6):4433--4455, 2017.
\bibitem{RV}
Dilip Raghavan and Jonathan~L. Verner.
\newblock Chains of {P}-points.
\newblock {\em Canad. Math. Bull.}, 62(4):856--868, 2019.
\bibitem{Rudin:66}
M.~E. Rudin.
\newblock Types of ultrafilters.
\newblock In {\em Topology {S}eminar ({W}isconsin, 1965)}, pages 147--151. Ann.
of Math. Studies, No. 60, Princeton Univ. Press, Princeton, N.J., 1966.
\bibitem{mer}
Mary~Ellen Rudin.
\newblock Partial orders on the types in {$\beta N$}.
\newblock {\em Trans. Amer. Math. Soc.}, 155:353--362, 1971.
\bibitem{rudin}
W.~Rudin.
\newblock Homogeneity problems in the theory of \v {C}ech compactifications.
\newblock {\em Duke Math. J.}, 23:409--419, 1956.
\bibitem{proper}
Saharon Shelah.
\newblock {\em Proper and improper forcing}.
\newblock Perspectives in Mathematical Logic. Springer-Verlag, Berlin, second
edition, 1998.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Optimal teleportation with a noisy source}
\author{B. G. Taketani}
\affiliation{Instituto de F\'{\i}sica, Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brasil}
\affiliation{Physikalisches Institut der Albert--Ludwigs--Universit\"at, Freiburg, Deutschland}
\author{F. de Melo}
\affiliation{Instituut voor Theoretische Fysica, Katholieke Universiteit Leuven, Leuven, Belgi\"e}
\affiliation{Physikalisches Institut der Albert--Ludwigs--Universit\"at, Freiburg, Deutschland}
\author{R. L. de Matos Filho}
\affiliation{Instituto de F\'{\i}sica, Universidade Federal do Rio de Janeiro, Rio de Janeiro, Brasil}
\begin{abstract}
We establish the optimal quantum teleportation protocol for the realistic scenario when both input state and quantum channel are afflicted by noise. In taking these effects into account higher fidelities are achieved. The optimality of the proposed protocol prevails even when restricted to a reduced set of generically available operations.
\end{abstract}
\pacs{42.50.Xa, 42.50.Dv, 03.65.Ud}
\maketitle
\emph{Introduction.} Information Theory's main concern is optimal data transmission over noisy channels. In his 1948 seminal article ``A Mathematical Theory of Communication"~\cite{SHANNON:1948fk}, C.~E.~Shannon set the foundation stone of this theory. He showed that below a certain transmission rate threshold, which depends on the amount of noise on the channel, there exists a data codification that enables transmission with asymptotically negligible error. Since only classical information was considered, the input could be assumed perfectly prepared with all the disturbances lumped into the transmission process.
The newly born theory of Quantum Information follows the same steps of its predecessor. A noisy channel theorem, in the same spirit as the one by Shannon, was proved for the quantum case by Holevo~\cite{Holevo:1998vn}, and Schumacher and Westmoreland~\cite{Schumacher:1997kx}. Information is now encoded into quantum bits (or qubits), and its transmission is via quantum channels. The quantum realm, however, presents a myriad of new possibilities, with the teleportation protocol being the most astonishing example. In the protocol devised by Bennett and co-authors~\cite{BENNETT:1993fk}, an unknown state is perfectly transmitted between two parties (usually dubbed Alice and Bob) with the aid of classical communication and a shared maximally entangled (ME) state -- the latter plays the role of a quantum channel, with no classical counterpart. See Fig.~\ref{fig:diagrams} for a brief review of the teleportation protocol. As in the classical case, idealized scenarios are quickly substituted by more realistic ones, and teleportation over noisy quantum channels has been an extensively investigated topic~\cite{Horodecki:1999cr,Albeverio:2002ys,Verstraete:2003zr,Bennett:1996uq,Modlawska:2008nx}. The action of the noise is represented by a (completely positive) map, that generically maps the shared initially pure maximally entangled state into a mixed state with less entanglement. The teleportation is no longer perfect. Since the input state is unknown, Alice and Bob optimize their actions such as to maximize the average protocol quality (fidelity) over the set of input states. One point, however, has been hitherto neglected: quantum information is unavoidably disturbed by the environment, even before its transmission through the channel. The proper averaging is thus not over the uniform distribution of pure input states, but over the initial input distribution induced by the environment.
As observed in \cite{Henderson:2000ve}, {\it a priori} information about the distribution of states to be teleported can be used to achieve higher fidelities. Here we address this issue, and present the optimal teleportation protocol including the effect of a noisy source. After that, we discuss the experimentally motivated scenario where Alice and Bob can implement only a small subset of all possible physical operations. The gain of the proposed protocol in respect to previous proposals is then numerically accessed.
\begin{figure}
\caption{{\bf Teleportation protocol: from ideal to real.}}
\label{fig:diagrams}
\end{figure}
\emph{Realistic protocol.}
Let $\proj{\rm{\psi}}\otimes\chi$ be the total initial state. $\proj{\rm{\psi}} \in \mathcal{H}_{in}$ is an unknown input state to be teleported, and $\chi\,\in\,\mathcal{H}_{A}\otimes\mathcal{H}_{B}$ is the noisy quantum channel shared by Alice and Bob. For simplicity, we assume that $\dim \mathcal{H}_A= \dim \mathcal{H}_{\rm{in}}=\dim \mathcal{H}_B=n$. However, as the initial state cannot be perfectly created, or is the product of previous processing, the actual state to be teleported is given as the result of a completely positive map $\proj{\rm{\psi}} \mapsto \Lambda\left[\proj{\rm{\psi}}\right]$. The actual state at hand is thus $\Lambda \left[\proj{\rm{\psi}}\right]\otimes\chi$.
As in the standard teleportation protocol (STP), see Fig.~\ref{fig:diagrams}, Alice and Bob apply coordinated operations on their systems aiming for the highest teleportation fidelity. In general, these local, classically correlated (LOCC) actions can be described by:
\begin{equation}
\Lambda \left[\proj{\rm{\psi}}\right]\otimes\chi \mapsto \sum_{\alpha}\big(\textsf{A}_{\alpha}\otimes \textsf{B}_{\alpha} \big) (\Lambda \left[\proj{\rm{\psi}}\right]\otimes\chi) \big(\textsf{A}_{\alpha}^{\dagger}\otimes \textsf{B}_{\alpha}^{\dagger} \big);\nonumber
\end{equation}
where $A_\alpha$ denotes Alice's operation on $\mathcal{H}_{\rm{in}}\otimes\mathcal{H}_A$, and $B_\alpha$ represents Bob's reaction on $\mathcal{H}_B$~\cite{nota}. The common index $\alpha$ indicates the coordinated action, and is exchanged between Alice and Bob via a classical channel. In order to conserve probabilities, this operation must satisfy $\sum_{\alpha}\textsf{A}_{\alpha}^{\dagger}\textsf{A}_{\alpha}\otimes \textsf{B}_{\alpha}^{\dagger}\textsf{B}_{\alpha}=\openone\otimes\openone$. On average, Bob is left with the output state $\varrho_{\rm{out}}$ given by
\begin{equation}
\sum_{\alpha}\mathrm{Tr}_{\rm{in,A}}\left[\big(\textsf{A}_{\alpha}\otimes \textsf{B}_{\alpha} \big) (\Lambda \left[\proj{\rm{\psi}}\right]\otimes\chi) \big(\textsf{A}_{\alpha}^{\dagger}\otimes \textsf{B}_{\alpha}^{\dagger} \big)\right].
\label{eq:bob_out}
\end{equation}
This expression can be simplified noting that, by virtue of the Jamio\l kowsi~\cite{Jamiolkowski:1972qf} isomorphism, the noisy channel $\chi$ can be written as $\openone\otimes\bm\Gamma\left[\diad\phi\phi\right]$, with $\bm\Gamma[\bullet]:= \sum \Gamma_i \bullet \Gamma_i^\dagger$ a completely positive map, and $\ket{\phi}=\sum_{i=0}^{n-1} \ket{ii}/\sqrt{n}$ a maximally entangled state. It then follows that
\begin{align}
\varrho_{\rm{out}}=\sum_{\alpha,i}\textsf{B}_{\alpha}\,\Gamma_{i}
\Big\{\mathrm{Tr}_{\rm{in},A}\Big[&\Big(\textsf{A}_{\alpha}\otimes\openone\Big)(\bm\Lambda\left[\proj{\psi}\right]\otimes\proj{\phi}) \nonumber\\
&\times\Big(\textsf{A}_{\alpha}^{\dagger}\otimes\openone\Big)\Big]\Big\}\Gamma_{i}^{\dagger}\,\textsf{B}_{\alpha}^{\dagger} .\nonumber
\label{sqt}
\end{align}
Expanding $\textsf{A}_{\alpha}$ in a maximally entangled basis $\textsf{A}_{\alpha}=\sum_{rs}a_{rs}^{\alpha}\diad{\phi_{\textsf{U}_r}}{\phi_{\textsf{U}_s}}$, where $\ket{\phi_{\textsf{U}_\alpha}}=\textsf{U}_{\alpha}^{\dagger}\otimes\openone\ket{\phi}$, and defining the operators $\mathds{A}^{r}_{\alpha}=\nicefrac{1}{n}\sum_{p}\bra{\phi_{\textsf{U}_r}}\textsf{A}_{\alpha}\ket{\phi_{\textsf{U}_p}}\textsf{U}_{p}$, we can write the output state as:
\begin{equation}
\varrho_{\rm{out}}=\sum_{\alpha,r}\textsf{B}_{\alpha}\,\bm\Gamma\big[\mathds{A}^{r}_{\alpha}
\;\bm\Lambda\left[\diad{\psi}{\psi}\right]\,\mathds{A}^{r \dagger}_{\alpha}\big]\,\textsf{B}_{\alpha}^{\dagger}. \nonumber
\end{equation}
The most general teleportation protocol can thus be recast as the map $\bm\Phi: \mathcal{H}_{\rm{in}} \mapsto \mathcal{H}_B : \proj{\psi} \mapsto \varrho_{\rm{out}}=\sum_k \Phi_k \proj{\psi} \Phi_k^\dagger$ with $\Phi_{k=\{\alpha,r,i,j\}} := \textsf{B}_\alpha \Gamma_i \mathds{A}^{r}_{\alpha} \Lambda_j$, where we used the decomposition for the map $\bm\Lambda[\bullet]=\sum_j \Lambda_j \bullet \Lambda_j^{\dagger}$. We can thus define an effective teleportation map as acting on the original pure state, and not on the mixed state which Alice actually manipulates.
The still undefined operations $\{\mathds{A}^{r}_{\alpha}\}$ and $\{\textsf{B}_\alpha\}$ are to be fixed by optimizing the protocol for all possible input states. Even though the unknown system being teleported is in a mixed state, the primary goal of the protocol is to teleport the original pure state. Therefore, the figure of merit to be optimized is the average fidelity of the output state with the pure input state, i.e., $\overline{f}=\overline{\bra{\psi}\varrho_{\rm{out}}\ket{\psi}}$.
The evaluation of $\overline{f}$ is obtained by following the general framework developed in~\cite{Horodecki:1999cr}. The maximal average fidelity for the optimal protocol is then given by:
\begin{align}
\overline{f}_{\max}=\frac{n}{n+1}\mathcal{F}_{\max}\left(\chi,\bm\Lambda\right)+\frac{1}{n+1}\,\,\,,
\label{avefid}
\end{align}
where $\mathcal{F}_{\max}$, defined as
\begin{align}
\max_{\bm\Omega}\bra{\phi} (\openone\otimes\bm\Lambda)\circ\bm\Omega\left[\chi\right]\ket\phi ,
\label{Fmax}
\end{align}
is the maximal singlet fraction attainable by the combined action on $\chi$ of the trace-preserving operation $\bm\Omega$ and the decoherence map $\openone\otimes\bm\Lambda$. The maximization is taken over operations $\bm\Omega[\bullet] \mathrel{\mathop:}= \sum_{\alpha,r}\big(\mathds{A}^{rT}_{\alpha}\otimes\textsf{B}_{\alpha}\big)\bullet \big(\mathds{A}^{rT}_{\alpha}\otimes\textsf{B}_{\alpha}\big)^{\dagger}$, which refer to the LOCCs of Eq.~\eqref{eq:bob_out}. The transposition operation $T$ is taken on the computational basis.
This concludes the protocol, which constitutes our main result.
\emph{Discussion.} To highlight the importance of acknowledging the presence of noise in the input distribution of states, we compare the protocol introduced above with the two main protocols for handling noisy teleportation.
\noindent \emph{i) Optimal teleportation vs. Distillation+STP.} In Ref.~\cite{Horodecki:1999cr} the authors realized that, in the noiseless input case, the optimal teleportation protocol is equivalent (in the sense of average fidelity) to an optimal distillation of the resource state followed by a STP. Explicitly, when $\bm\Lambda=\openone$ we have that $\mathcal{F}_{\max}$ is
\begin{equation}
\max_{\bm\Omega}\bra{\phi} \bm\Omega\left[\chi\right]\ket\phi = \bra{\phi} \bm\Omega_\text{STP}\left[\varrho_*\right]\ket\phi; \nonumber
\end{equation}
where $\varrho_*= \bm\Omega_* [\chi]$, with $\bm\Omega_*$ the optimal distillation, and $\bm\Omega_\text{STP} [\bullet] = 1/n^2 \sum_\alpha \textsf{U}_\alpha^T\otimes\textsf{U}_\alpha^\dagger \bullet (\textsf{U}_\alpha^T\otimes\textsf{U}_\alpha^\dagger)^\dagger$ representing the standard teleportation protocol. This is easily realized by noting that the singlet fraction is invariant under the STP, i.e., $\bra{\phi} \bm\Omega_\text{STP}\left[\mathcal{O}\right]\ket\phi = \bra{\phi} \mathcal{O}\ket\phi $ for any $\mathcal{O}$. Therefore, Eq.~\eqref{avefid} tell us that performing the STP with the optimal distilled state $\varrho_*$ yields the same average fidelity as performing the optimal teleportation protocol $\bm\Omega_*$ with the original resource $\chi$. This equivalence was then used by Verstraete and Verschelde (VV)~\cite{Verstraete:2003zr}, to design an optimal teleportation protocol via the best distillation procedure.
This correspondence, however, breaks down for noisy input states. Although mathematically it still remains true that $\max_{\bm\Omega}\bra{\phi} (\openone\otimes \bm\Lambda)\circ\bm\Omega\left[\chi\right]\ket\phi = \bra{\phi} \bm\Omega_\text{STP}\left[\varrho^\prime_*\right]\ket\phi$, where now $\varrho^\prime_*= {\bm\Omega}^\prime_* [\chi]$ with ${\bm\Omega}^\prime_*$ the optimal operation in~\eqref{Fmax}, physically the equivalence would assume that the effect of the noise $\bm\Lambda$ in the input states can -- as in the classical paradigm -- be absorbed in the channel (resource state). For a sensible correspondence still to prevail in the noisy input scenario, we must require that $\bra{\phi} \bm\Omega_\text{STP} \circ (\openone\otimes\bm\Lambda)[{\varrho}^\prime_*]\ket\phi = \bra{\phi} (\openone\otimes\bm\Lambda)\circ\bm\Omega_\text{STP}[{\varrho}^\prime_*]\ket\phi$. This is not true in general.
In this way, in a realistic scenario, the protocol proposed by VV is no longer the optimal one and must be replaced by the protocol here introduced. Operationally the reason for the latter to be at least as good as the first is clear: the realistic protocol allows Alice to perform general, collective operations on both input and (half) resource state, which is obviously superior than acting only on the resource state as in the VV protocol, or even separately on input and resource states.
\noindent \emph{ii) Unitaries+projective~measurements.}
The realistic protocol, \eqref{avefid} and \eqref{Fmax}, supposes the ability to perform the most general operations on Alice and Bob's parties. This may be impracticable. The most general LOCC operation may, for instance, require an infinite amount of classical communication exchange. The optimization of the protocol is thus only defined given an specific experimental realization and the accessible operations at the moment. A trade-off between protocol quality and experiment complexity should be always observed. Arguably the simplest protocol is the one where Alice performs projective measurements on a maximally entangled basis $\{\proj{\phi_{\textsf{U}_{\alpha}}}\}$, and Bob applies unitary transformations $\{\textsf{T}_{\alpha}\}$ depending on the measurements outcome. Within these operations we can exactly pinpoint the advantage of taking into account the noise on the source, for~\eqref{Fmax} reduces to
\begin{equation}
\mathcal{F}_{\max}= \frac{1}{n^{2}}\!\max_{\{\textsf{U}_\alpha,\textsf{T}_\alpha\}}\sum_{\alpha , k , l}|\bra{\phi} \Lambda_k^T \otimes \textsf{T}_\alpha\Gamma_l\textsf{U}_\alpha\ket{\phi}|^2;
\label{febf}
\end{equation}
with the optimization taken over all unitary basis $\{\textsf{U}_\alpha\}$, and all sets of unitary matrices $\{\textsf{T}_\alpha\}$.
Here again, it is clear that the case where the noise in the source is not taken into account is far from general. In fact, by setting $\Lambda_k \propto \openone$ each term in~\eqref{febf} is equal to $\sum_{l}|\bra{\phi} \openone \otimes \textsf{V}_\alpha \Gamma_l\ket{\phi}|^2$, with $V_\alpha = \textsf{U}_\alpha\textsf{T}_\alpha$. As the $\textsf{T}_\alpha$'s are not subjected to any constraint, each of these terms can be optimized independently over $\textsf{V}_{\alpha}$'s. The optimal fidelity is thus obtained for any choice of measurement basis. This simplified case was obtained in Ref.~\cite{Albeverio:2002ys}. When $\Lambda_k \not\propto \openone$, the optimization is much more challenging as each term in the sum is ``coupled" to the others via the unitary basis constraint.
Another interesting scenario is recovered when the map $\bm\Gamma$ (and/or $\bm\Lambda$) is covariant, i.e., ${\bm\Gamma}[\textsf{U}_\alpha\bullet\textsf{U}_\alpha^{\dagger}]=\textsf{W}_\alpha{\bm\Gamma}[\bullet]\textsf{W}_\alpha^{\dagger}$, with $\textsf{W}_\alpha$ unitary. In this case, each term in \eqref{febf} is proportional to $\sum_{k,l}|\bra{\phi} \Lambda_k^T \otimes \textsf{T}_\alpha\textsf{W}_\alpha\Gamma_l\ket{\phi}|^2$, and can be independently optimized, as $\textsf{T}_\alpha \textsf{W}_\alpha$ is another unitary without constraints. Furthermore, the noise in the source can now be absorbed into the noise in the channel, $\bra{\phi} \Lambda_k^T \otimes \textsf{T}_\alpha\textsf{W}_\alpha\Gamma_l\ket{\phi} = \bra{\phi} \openone \otimes \textsf{T}_\alpha\textsf{W}_\alpha\Gamma_l \Lambda_k \ket{\phi}$, resembling the classical communication paradigm.
Further insight is also possible for weak interactions with the environments. Under this assumption, one expects that the initial state is only slightly perturbed. Thus $\bm\Lambda^{T}\otimes \bm\Gamma[\proj{\phi_{\textsf{U}_{\alpha}}}]\approx (1-\epsilon) \proj{\phi_{\textsf{U}_{\alpha}}} + \epsilon \varrho_{\textsf{U}_\alpha}$ is a good approximation, with $\epsilon \ll 1$, and $\varrho_{\textsf{U}_\alpha}$ a state which depends on the initial state and channels. Equation~\eqref{febf} then becomes:
\begin{align}
\mathcal{F}_{\max}=\frac{1}{n^{2}}\!\max_{\{\textsf{U}_\alpha,\textsf{T}_\alpha\}}\sum_{\alpha} (1-\epsilon) |\bra{\phi} \openone \otimes\textsf{T}_\alpha \textsf{U}_\alpha \ket{\phi}|^2 \nonumber\\
+ \epsilon \big<\phi_{\textsf{T}_{\alpha}^{T}}\big| \varrho_{\textsf{U}_\alpha^*} \big|\phi_{\textsf{T}_{\alpha}^{T}}\big>.
\label{convexChannel}
\end{align}
As $\epsilon\ll 1$, the best strategy is to maximize the first term in Eq.~\eqref{convexChannel}, leading to $\textsf{T}_\alpha = \textsf{U}_\alpha^\dagger$. This prescription is the same as for the STP (see caption of Fig.\ref{fig:diagrams}) -- as expected from the limiting case of no noise. One difference should however be pointed out: the choice of Alice's measurement basis (and hence of Bob's operations) is no longer inconsequential. The noise action might break the equivalence among the bases, defining a preferred direction. In Eq.~\eqref{convexChannel} this is easily seen by the possibility of maximizing the second term with an appropriate choice of $\{U_\alpha\}$.
In fact, the latter is also true for some relevant noise scenarios, for which the optimization in \eqref{febf} can be explicitly carried out. For example, it is easy to show that when ${\bm\Lambda}$ and/or ${\bm\Gamma}$ represent computational errors (bit-flip, phase-flip, or bit-phase-flip) or the interaction with a zero temperature reservoir~\cite{nielsen}, the optimal protocol will have $\textsf{T}_\alpha = \textsf{U}_\alpha^\dagger$, and the maximum fidelity can be obtained, for instance, setting $\{\textsf{U}_{\alpha}\}=\{\openone,\sigma_{x},\sigma_{y},\sigma_{z}\}$, corresponding to a STP. Not all the choices of $\{\textsf{U}_\alpha\}$, however, lead to the best fidelity.
\emph{Numerics.} It is clear from the discussion above that the protocol here introduced is qualitatively better than any other teleportation protocol to date. Now we set out to quantify the gain in taking into account the noise in the input state distribution. Below we numerically optimize Eq.\eqref{febf}, corresponding to the protocol restricted to unitaries and projective measurements (\emph{ii}), specialized to a system of qubits ($\dim \mathcal{H}_i=2$, for $i=A, B, \rm{in}$). To emphasize the importance of considering the effects of noise on the input state, we compare our realistic protocol with the one proposed by Albeverio, Fei and Yang in Ref.~\cite{Albeverio:2002ys} (hereafter denoted by AFY protocol). The latter was intended for noisy quantum channels and pure input states. Since the AFY does not require optimization over the measurement basis, we randomly choose different maximally entangled basis and apply the protocol to realistic situations where $\bm\Lambda\neq\openone$. The optimization is performed with the genetic algorithm routine GENMin~\cite{Tsoulos:2008fk}.
\begin{figure}
\caption{{\bf Random channels scenario.}}
\label{fig:bins}
\end{figure}
We first address the scenario where both channel and input states are subjected to different, randomly generated, noisy processes~\cite{notaX}. By considering channels with a given strength, the typical relative gain of the realistic protocol can be determined by optimizing Eq.\eqref{febf} for many different random channel configurations. We gauge the strength of $\bm\Gamma$ by the amount of entanglement loss of the quantum resource when compared to the perfect channel: $\gamma(\bm\Gamma)=1-\mbox{Neg}(\Gamma[\proj{\phi}])$, with $\mbox{Neg}$ an entanglement measure~\cite{WernerVidal}. Likewise, for the noise $\bm\Lambda$ on the input states, we use the fidelity loss $\lambda(\bm\Lambda)=1-\overline{\bra\psi\bm\Lambda\left[\proj\psi\right]\ket\psi}$, averaged over the set of pure input states. For the numerical investigations, we generated channels with strength parameters within intervals of length $0.01$. See Fig.\ref{fig:bins} for the results.
It is clear from these results that, independent of the amount of entanglement in the resource state, the stronger $\bm\Lambda$ is the greater is the advantage of taking it into account. Furthermore, out of a sample of 38620 random noise configurations tested, in only $\sim$4.6\% of the instances our realistic protocol could be classically simulated ($\overline{f}_{\max}<2/3$). For the AFY protocol $\sim$25\% of the cases gave an average fidelity below the classical threshold of 2/3. As expected the weaker the noise on the resource state is (smaller $\gamma$'s), the smaller is the difference between the AFY protocols, as the influence of carefully choosing the measurement basis is reduced. Additionally, having more quantum correlations at its disposal, the realistic protocol can achieve bigger relative gains (shown in Fig.\ref{fig:bins}d for three values of $\lambda$).
Second, we compared the protocols when all the qubits are under the influence of identical bit-flip maps --- ${\bf \mathcal{E}}_{\rm{BP}}[\bullet] = (1-p) \bullet + p \,\sigma_x \bullet \sigma_x$, with $0\le p \le 1/2$. In this scenario, the realistic protocol gives fidelity above the classical threshold for all range of $p$. The relative gain of the realistic protocol against the average AFY increases monotonically with the noise strength, with a gain of 5.8\% at $p=0.25$, where the average AFY fidelity reaches the classical boundary. As mentioned previously, for this case the STP is already the best protocol. This was observed in our numerical experiment with all three protocols, STP, AFY$_{\max}$ (with an optimal choice for Alice's basis), and our realistic protocol, yielding the same maximum average fidelity. In addition, we generated close to 10000 numerical experiments with ${\bm\Lambda}$ and ${\bm\Gamma}$ representing computational errors, finite-temperature reservoirs or compositions of these~\cite{nielsen}. These showed that as long as the STP outperforms any classical strategy, it reaches the optimal fidelity of Eq.\eqref{febf}, suggesting that, within the restricted set of operations considered, the STP is a robust protocol against the aforementioned decoherence processes.
\emph{Conclusions.} Teleportation spots yet another trait of quantum communications: quantum information is disturbed by the environment even before its transmission, and this disturbance cannot in general be accommodated as a faulty communication channel. Recognizing this is not only of conceptual importance, but has also practical implications. The teleportation protocol here proposed appeals to this mindset shift in order to obtain sizable gains in communication quality.
\begin{acknowledgments}
We would like to acknowledge Fernando Brand\~ao, Rafael Chaves, Mark Fannes and Jeroen Wouters for fruitful discussions. B.G.T. also thanks CAPES/DAAD PROBRAL program, the Quantum Optics and Statistics group at Freiburg University and the Institute for Theoretical Physics at K. U. Leuven. F.d-M. was supported by Alexander von Humboldt Foundation, and Belgian Interuniversity
Attraction Poles Programme P6/02. B.G.T. and R.M.F. were supported by the Brazilian agencies CNPq, CAPES, FAPERJ and INCT-IQ.
\end{acknowledgments}
\end{document} |
\begin{document}
\title[Restricted diagonalization of finite spectrum normal operators]{Restricted diagonalization of \\ finite spectrum normal operators \\ and a theorem of Arveson}
\author{Jireh Loreaux}
\email{\href{mailto:[email protected]}{[email protected]}}
\address{Southern Illinois University Edwardsville \\
Department of Mathematics and Statistics \\
Edwardsville, IL, 62026-1653 \\
USA}
\keywords{Essential codimension, diagonals of projections, diagonals of normal operators}
\subjclass{Primary 47B15, 47A53; Secondary 46C05.}
\begin{abstract}
Kadison characterized the diagonals of projections and observed the presence of an integer.
Arveson later recognized this integer as a Fredholm index obstruction applicable to any normal operator with finite spectrum coincident with its essential spectrum whose elements are the vertices of a convex polygon.
Recently, in joint work with Kaftal, the author linked the Kadison integer to essential codimension of projections.
This paper provides an analogous link between Arveson's obstruction and essential codimension as well as a new approach to Arveson's theorem which also allows for generalization to any finite spectrum normal operator.
In fact, we prove that Arveson's theorem is a corollary of a trace invariance property of arbitrary normal operators.
An essential ingredient is a formulation of Arveson's theorem in terms of diagonalization by a unitary which is a Hilbert--Schmidt perturbation of the identity.
\end{abstract}
\maketitle
\section{Introduction}
A \emph{diagonal} of a bounded linear operator $T \in B(\ensuremath{\mathcal{H}})$ is a sequence of inner products $\big(\langle Te_n,e_n \rangle\big)$ where $\{e_n\}_{n=1}^{\infty}$ is an orthonormal basis for the Hilbert space $\ensuremath{\mathcal{H}}$.
In other words, a diagonal of $T$ is the diagonal of some matrix representation of $T$ with respect to an orthonormal basis.
In his seminal papers on the Pythagorean Theorem \cite{Kad-2002-PNASU,Kad-2002-PNASUa}, Kadison proved the following characterization of diagonals of projections.
\begin{theorem}[Kadison]
\label{thm:kadison-carpenter-pythagorean}
A sequence $(d_n)$ is the diagonal of a projection $P$ if and only if it takes values in the unit interval and the quantities
\begin{equation*}
a := \sum_{d_n < \nicefrac{1}{2}} d_n \quad\text{and}\quad b := \sum_{d_n \ge \nicefrac{1}{2}} (1-d_n)
\end{equation*}
satisfy one of the mutually exclusive conditions
\begin{enumerate}
\item $a+b = \infty$;
\item\label{item:a+b-finite} $a+b < \infty$ and $a-b \in \mathbb{Z}$.
\end{enumerate}
\end{theorem}
The existence of the integer $a-b$ is not at all obvious and Kadison himself referred to it as ``curious.''
Since Kadison's initial paper, both Arveson \cite[Theorem~3]{Arv-2007-PNASU} and Argerami \cite[Theorem~4.6]{Arg-2015-IEOT} have provided new proofs that $a-b \in \mathbb{Z}$.
Recently, the author and Kaftal further clarified this integer in \cite{KL-2017-IEOT} as the essential codimension between the projection $P$ and a natural diagonal projection associated to $a,b$.
Essential codimension was developed by Brown, Douglas and Fillmore in \cite[Remark~4.9]{BDF-1973-PoaCoOT} (see also \autoref{def:essential-codimension} below) for pairs of projections whose difference is compact.
Arveson also recognized the Kadison integer as the index of a Fredholm operator in \cite{Arv-2007-PNASU}, and referred to it as an ``index obstruction'' to an arbitrary sequence with values in the unit interval being a diagonal of a projection.
Arveson was able to extend this index obstruction to any normal operator with finite spectrum coincident with its essential spectrum whose elements are the vertices of a convex polygon.
In order to state his main theorem, Arveson associated several objects to a finite set $X \subseteq \mathbb{C}$.
\begin{definition}
\label{def:lim-x}
For a finite set $X \subseteq \mathbb{C}$, the sequences which accumulate summably at $X$ are
\begin{equation*}
\Lim (X) := \left\{ (d_n) \in \ell^{\infty} \,\middle\vert\, \sum_{n=1}^{\infty} \dist(d_n,X) < \infty \right\}.
\end{equation*}
\end{definition}
\begin{definition}
\label{def:k_x}
For a set $X = \{\lambda_1,\ldots,\lambda_m\} \subseteq \mathbb{C}$, let $K_X$ denote the $\mathbb{Z}$-module of linear combinations over $\mathbb{Z}$ of elements of $X$ whose coefficients sum to zero.
This can also be expressed as the free $\mathbb{Z}$-module generated by $\lambda_1-\lambda_2, \ldots, \lambda_1-\lambda_m$.
\end{definition}
\begin{definition}
\label{def:s-quotient}
For a finite set $X \subseteq \mathbb{C}$ there is a natural map $s : \Lim(X) \to \mathbb{C}/K_X$.
For $(d_n) \in \Lim(X)$, since $X$ is finite there are $x_n \in X$ for which $\abs{d_n - x_n} = \dist(d_n,X)$, and therefore the series $\sum_{n=1}^{\infty} (d_n - x_n)$ is absolutely summable.
Arveson proved in \cite[Proposition~1]{Arv-2007-PNASU} that the coset of this sum in $\mathbb{C}/K_X$ is independent of the choices of $x_n \in X$, so the map
\begin{equation*}
s(d) := \sum_{n=1}^{\infty} (d_n - x_n) + K_X \in \mathbb{C}/K_X
\end{equation*}
is well-defined.
The element $s(d)$ is called the \emph{renormalized sum} of $d$.
\end{definition}
We reproduce Arveson's theorem \cite[Theorem~4]{Arv-2007-PNASU} verbatim for reference.
Here, $\mathcal{N}(X)$ denotes the set of normal operators with finite spectrum $X$ coincident with their essential spectrum.
\begin{theorem}[Arveson]
\label{thm:arveson-pythagorean}
Let $X = \{\lambda_1,\ldots,\lambda_m\}$ be the set of vertices of a convex polygon $P \subseteq \mathbb{C}$ and let $d = (d_1,d_2,\ldots)$ be a sequence of complex numbers satisfying $d_n \in P$ for $n \ge 1$, together with the summability condition
\begin{equation}
\label{eq:summability-condition}
\sum_{n=1}^{\infty} \abs{f(d_n)} < \infty,
\end{equation}
where $f(z) = (z-\lambda_1)(z-\lambda_2)\cdots(z-\lambda_m)$.
Then $d \in \Lim(X)$; and if $d$ is the diagonal of an operator $N \in \mathcal{N}(X)$, then $s(d) = 0$.
\end{theorem}
The summability condition \eqref{eq:summability-condition} is equivalent to $(d_n) \in \Lim(X)$ via a routine analysis argument (see \cite[Proposition~2]{Arv-2007-PNASU}).
Moreover, using the notation of \autoref{def:s-quotient}, $s(d) = 0$ is equivalent by definition to the existence of integers $c_1,\ldots,c_m$ (which depend on the choices $x_n \in X$) whose sum is zero for which
\begin{equation}
\label{eq:c_k-integers}
\sum_{n=1}^{\infty} (d_n - x_n) = \sum_{k=1}^m c_k \lambda_k \in K_X.
\end{equation}
When $X = \{0,1\} = \spec(N)$, $N$ is a projection, and the condition $(d_n) \in \Lim(\{0,1\})$ is equivalent to $a+b < \infty$, where $a,b$ are defined as in \autoref{thm:kadison-carpenter-pythagorean}.
Moreover, $K_{\{0,1\}} = \mathbb{Z}$, so that Arveson's theorem is a generalization of the forward implication \autoref{thm:kadison-carpenter-pythagorean}\ref{item:a+b-finite} in the situation where $P$ is an infinite and co-infinite projection.
Our focus is to provide a new approach to Arveson's theorem that, by linking it to the notion of diagonalization by unitaries which are Hilbert--Schmidt perturbations of the identity, permits us both to identify the integers $c_k$ of \eqref{eq:c_k-integers} implicit in the theorem in terms of essential codimension and to eliminate some of the hypotheses in the theorem.
Our intent is to bring a fresh perspective on two key parts of Arveson's theorem: the quantity $\sum_{n=1}^{\infty} (d_n-x_n)$ and the condition $(d_n) \in \Lim \big(\spec(N)\big)$.
We identify the sum $\sum_{n=1}^{\infty} (d_n-x_n)$ as $\trace\big(E(N-N')\big)$ for some diagonal operator $N'$ with $\spec(N') \subseteq \spec(N)$ (\autoref{prop:renormalized-sum-trace}).
Here $E : B(\mathcal{H}) \to \mathcal{A}$ denotes the canonical trace-preserving conditional expectation onto the atomic masa associated to an orthonormal basis;
that is, $E$ is the operation of ``taking the main diagonal.''
Then we prove that if $N$ is normal and $U$ is a unitary which is a Hilbert--Schmidt perturbation of the identity, then $E(N-UNU^{*})$ is trace-class and $\trace\big(E(N-UNU^{*})\big) = 0$ (\autoref{thm:expectation-trace-zero}).
Next, we establish that the condition $(d_n) \in \Lim \big(\spec(N)\big)$ is equivalent to the diagonalizability of $N$ by a unitary which is a Hilbert--Schmidt perturbation of the identity (\autoref{thm:diagonalizable-by-I-plus-HS}).
The proof relies on essential codimension and a geometric lemma (\autoref{lem:convexity-coefficient-corner}) which is similar to \cite[Lemma~1]{Arv-2007-PNASU}.
This culminates in a generalization of Arveson's theorem (\autoref{thm:arveson-reformulated}) proved using techniques involving essential codimension, which allows for the identification of the integers $c_k$ in terms of the essential codimensions of pairs of spectral projections of $N$ and a diagonal operator $N'$.
Finally, we show how our results may be used to derive Arveson's \autoref{thm:arveson-pythagorean}.
\section{Essential codimension}
\label{sec:essential-codimension}
A fundamental tool we use throughout is the notion of essential codimension due to Brown, Douglas and Fillmore \cite[Remark~4.9]{BDF-1973-PoaCoOT}.
It associates an integer to a pair of projections $P,Q$ whose difference is compact by means of the Fredholm operator $QP: P\ensuremath{\mathcal{H}} \to Q\ensuremath{\mathcal{H}}$.
\begin{definition}
\label{def:essential-codimension}
Given a pair of projections $P,Q$ whose difference is compact, the \emph{essential codimension} of $P$ in $Q$, denoted $[P:Q]$, is the integer defined by
\begin{equation*}
[P:Q] :=
\begin{cases}
\trace P-\trace Q & \text{if}\ \trace P,\trace Q < \infty, \\[0.5em]
\ind(V^{*}W) & \parbox[c][2em]{0.5\textwidth}{if $\trace(P) = \trace(Q) = \infty$, where \\
$W^{*}W = V^{*}V = I, WW^{*} = P, VV^{*} = Q$.} \\[0.4em]
\end{cases}
\end{equation*}
Equivalently, essential codimension may be defined as
\begin{equation*}
[P:Q] := \ind(QP), \quad\text{where}\ QP : P\ensuremath{\mathcal{H}} \to Q\ensuremath{\mathcal{H}}.
\end{equation*}
\end{definition}
Several simple properties of essential codimension which we use are collated here for reference.
Proofs can be found in, for example, \cite[Proposition~2.2]{BL-2012-CJM}.
Each property can be derived from standard facts about Fredholm index.
\begin{proposition}
\label{prop:essential-codimension}
Let $P_1,P_2$ and $Q_1,Q_2$ each be mutually orthogonal pairs of projections with the property that $P_j-Q_j$ is compact for $j=1,2$.
Suppose also that $R_1$ is a projection for which $Q_1-R_1$ is compact.
Then
\begin{enumerate}
\item\label{item:negative-reverse} $[P_1:Q_1] = -[Q_1:P_1]$
\item\label{item:orthogonal-sum} $[P_1:Q_1] + [P_2:Q_2] = [P_1+P_2:Q_1+Q_2]$
\item\label{item:concatenation} $[P_1:R_1] = [P_1:Q_1] + [Q_1:R_1]$
\end{enumerate}
\end{proposition}
The original result of Brown, Douglas and Fillmore \cite[Remark~4.9]{BDF-1973-PoaCoOT} characterizes when projections can be conjugated by a unitary which is a compact perturbation of the identity.
More specifically, they proved that there is a unitary $U = I+K$ with $K$ compact which conjugates $P,Q$ if and only if $P-Q$ is compact and their essential codimension is zero.
The next proposition comes from \cite[Proposition~2.7(ii)]{KL-2017-IEOT} and extends the Brown--Douglas--Fillmore result verbatim to an arbitrary proper operator ideal $\mathcal{J}$, where $\mathcal{J}$ is two-sided but not necessarily norm-closed.
Herein, $\mathcal{J}$ will always denote a proper operator ideal.
\begin{proposition}
\label{prop:restricted-conjugation-of-projections}
If $P,Q$ are projections and $\mathcal{J}$ is a proper operator ideal, then $Q = UPU^{*}$ for some unitary $U = I+K$ with $K \in \mathcal{J}$ if and only if $P-Q \in \mathcal{J}$ and $[P:Q] = 0$.
\end{proposition}
The following proposition is a reformulation of \cite[Proposition~2.8]{KL-2017-IEOT} for the case when the ideal is the Hilbert--Schmidt class $\mathcal{C}_2$.
This proposition relates the Kadison integer to essential codimension in the following manner.
If $P$ is a projection with diagonal $(d_n)$ and $a,b$ are as in \autoref{thm:kadison-carpenter-pythagorean} with $a+b < \infty$, then, by choosing $Q$ to be the projection onto $\spans \{ e_n \mid d_n \ge \nicefrac{1}{2} \}$, \autoref{prop:kadison-integer-essential-codimension} guarantees $P-Q$ is Hilbert--Schmidt (a fact which was known to Arveson) and that $a-b = [P:Q]$.
\begin{proposition}
\label{prop:kadison-integer-essential-codimension}
Suppose $P,Q$ are projections. Then $P-Q$ is Hilbert--Schmidt if and only if in some (equivalently, every) orthonormal basis $\{e_n\}_{n=1}^{\infty}$ which diagonalizes $Q$, the diagonal $(d_n)$ of $P$ satisfies $a+b < \infty$, where
\begin{equation*}
a := \sum_{e_n \in Q^{\perp}\ensuremath{\mathcal{H}}} d_n = \trace(Q^{\perp}PQ^{\perp}) \quad\text{and}\quad b := \sum_{e_n \in Q\ensuremath{\mathcal{H}}} (1-d_n) = \trace(Q-QPQ).
\end{equation*}
Whenever $P-Q$ is Hilbert--Schmidt, $a-b = [P:Q]$.
\end{proposition}
\section{Restricted diagonalization}
\label{sec:restricted-diagonalization}
It is elementary that finite spectrum normal operators are diagonalizable.
However, one may ask about the possibility of diagonalization relative to a fixed orthonormal basis (or atomic masa) by a unitary of the form $U = I+K$ where $K$ lies in a given proper operator ideal $\mathcal{J}$.
For this we use the term \emph{restricted diagonalization}.
This concept has been studied by others in the aforementioned paper of Brown--Douglas--Fillmore \cite{BDF-1973-PoaCoOT}, as well as by Belti\c{t}a--Patnaik--Weiss \cite{BPW-2016-IUMJ}, and Hinkkanen \cite{Hin-1985-MMJ}.
To our knowledge, the term restricted diagonalization was introduced by Belti\c{t}a--Patnaik--Weiss.
\subsection{Conditions for restricted diagonalization}
The next result is a corollary of \Cref{prop:restricted-conjugation-of-projections,prop:kadison-integer-essential-codimension}. It describes the conditions under which a projection experiences restricted diagonalization.
In the special case of the Hilbert--Schmidt ideal, this corollary shows that it suffices to examine the diagonal of the projection.
\begin{corollary}
\label{cor:restricted-diagonalization-of-projections}
For a projection $P$ and a proper operator ideal $\mathcal{J}$, the following are equivalent:
\begin{enumerate}
\item\label{item:diagonalization} $P$ is diagonalizable by a unitary $U=I+K$ with $K \in \mathcal{J}$;
\item\label{item:diagonal-projection} there exists a diagonal projection $Q$ for which $P-Q \in \mathcal{J}$.
\end{enumerate}
If $\mathcal{J} = \mathcal{C}_2$, then these are also equivalent to:
\begin{enumerate}[resume]
\item\label{item:diagonal-sequence} the diagonal $(d_n)$ of $P$ lies in $\Lim(\{0,1\})$.
\end{enumerate}
\end{corollary}
\begin{proof}
\ref{item:diagonalization} $\Rightarrow$ \ref{item:diagonal-projection}.
Suppose that $P$ is diagonalizable by a unitary $U=I+K$ with $K \in \mathcal{J}$.
Then setting $Q := UPU^{*}$, we have $P-Q = -KP-PK^{*}-KPK^{*} \in \mathcal{J}$.
\ref{item:diagonal-projection} $\Rightarrow$ \ref{item:diagonalization}.
Suppose $Q$ is a diagonal projection for which $P-Q \in \mathcal{J}$.
By replacing $Q$ with a diagonal projection $Q'$ that is a finite perturbation of $Q$, we can assume that $[P:Q] = 0$.
Indeed, notice that if $[P:Q] < 0$, then $\trace Q \ge -[P:Q]$, so there is a diagonal subprojection $Q'$ of $Q$ with $\trace(Q-Q') = -[P:Q]$.
Similarly, if $[P:Q] > 0$, then $\trace Q^{\perp} \ge [P:Q]$, so there is a diagonal subprojection $R$ of $Q^{\perp}$ with $\trace R = [P:Q]$, and in this case we set $Q' = Q+R$.
In either case, the construction guarantees $[P:Q] = -[Q:Q']$, and hence by \autoref{prop:essential-codimension}\ref{item:concatenation}, $[P:Q'] = [P:Q] + [Q:Q'] = 0$.
Therefore by \autoref{prop:restricted-conjugation-of-projections}, $P$ and $Q'$ are conjugated by a unitary $U=I+K$ with $K \in \mathcal{J}$, and hence $P$ is diagonalized by $U$.
\ref{item:diagonal-projection} $\Rightarrow$ \ref{item:diagonal-sequence}.
If $P-Q \in \mathcal{C}_2$, then by \autoref{prop:kadison-integer-essential-codimension}, for $a,b$ defined as in that proposition, $a+b < \infty$.
Equivalently, $(d_n) \in \Lim(\{0,1\})$.
\ref{item:diagonal-sequence} $\Rightarrow$ \ref{item:diagonal-projection}.
If the diagonal $(d_n)$ of $P$ lies in $\Lim(\{0,1\})$, then there are some choices $x_n \in \{0,1\}$ for which $(d_n - x_n) \in \ell^1$.
Let $Q$ be the diagonal projection onto $\spans\{ e_n \mid x_n =1 \}$.
Then for $a,b$ as defined in \autoref{prop:kadison-integer-essential-codimension}, $a+b < \infty$, and so $P-Q \in \mathcal{C}_2$ by that result.
\end{proof}
We will generalize \autoref{cor:restricted-diagonalization-of-projections} to finite spectrum normal operators.
The equivalence \ref{item:diagonalization}~$\Leftrightarrow$~\ref{item:diagonal-projection} is generalized by \autoref{thm:finite-spectrum-normal-restricted-diagonalizability}, and \ref{item:diagonalization}~$\Leftrightarrow$~\ref{item:diagonal-sequence} by \autoref{thm:diagonalizable-by-I-plus-HS}.
\autoref{prop:restricted-conjugation-of-projections} can be bootstrapped by induction to characterize when a pair of finite collections of mutually orthogonal projections can be \emph{simultaneously} conjugated by a unitary $U = I + K$ with $K \in \mathcal{J}$.
\begin{lemma}
\label{lem:restricted-conjugation-of-sets-of-projections}
Suppose $\{P_k\}_{k=1}^m, \{Q_k\}_{k=1}^m$ are each finite sets of mutually orthogonal projections, and $\mathcal{J}$ is a proper operator ideal.
Then there is some unitary $U = I+K$ with $K \in \mathcal{J}$ for which $Q_k = UP_kU^{*}$ for $1 \le k \le m$ if and only if $P_k - Q_k \in \mathcal{J}$ and $[P_k:Q_k] = 0$ for all $1 \le k \le m$.
\end{lemma}
\begin{proof}
One direction is straightforward.
Namely, if there exists a unitary $U = I+K$ with $K \in \mathcal{J}$ for which $Q_k = UP_k U^{*}$ for all $1 \le k \le m$, then by \autoref{prop:restricted-conjugation-of-projections} $P_k - Q_k \in \mathcal{J}$ and $[P_k:Q_k] = 0$.
For the other direction, we use induction on $m$, and the base case $m=1$ follows from \autoref{prop:restricted-conjugation-of-projections}.
Let $m \in \mathbb{N}$ and suppose that if $\{P_k\}_{k=1}^m, \{Q_k\}_{k=1}^m$ are each sets of mutually orthogonal projections and satisfy $P_k - Q_k \in \mathcal{J}$ and $[P_k:Q_k] = 0$, then there is a single unitary $U=I+K$ with $K \in \mathcal{J}$ which conjugates $P_k$ into $Q_k$, i.e., $Q_k = UP_kU^{*}$.
Now suppose we have two sets of $m+1$ mutually orthogonal projections satisfying these conditions.
By \autoref{prop:restricted-conjugation-of-projections} there is a unitary $V = I+K$ with $K \in \mathcal{J}$ for which $Q_{m+1} = VP_{m+1}V^{*}$.
Moreover, for $1 \le k \le m$, $P'_k := VP_k V^{*}$ satisfies $P_k - P'_k \in \mathcal{J}$ and $[P_k:P'_k] = 0$.
Therefore $P'_k - Q_k \in \mathcal{J}$ and $[P'_k:Q_k] = 0$ by \autoref{prop:essential-codimension}\ref{item:concatenation}.
Applying the inductive hypothesis to the collections $\{P'_k\}_{k=1}^m, \{Q_k\}_{k=1}^m$ on the Hilbert space $Q_{m+1}^{\perp} \ensuremath{\mathcal{H}}$ yields a unitary $W = Q_{m+1}^{\perp} + K'$ acting on $Q_{m+1}^{\perp} \ensuremath{\mathcal{H}}$ with $K' \in \mathcal{J}$, and which conjugates $P'_k$ into $Q_k$ for $1 \le k \le m$.
Extending this to the unitary $Q_{m+1} \oplus W$ acting on $\ensuremath{\mathcal{H}}$ and setting $U = (Q_{m+1} \oplus W)V$, we find that $U$ is of the desired form and $UP_k U^{*} = Q_k$ for $1 \le k \le m+1$.
\end{proof}
The following lemma weakens the sufficient condition of \autoref{lem:restricted-conjugation-of-sets-of-projections} so long as we are allowed to perturb the diagonal projections.
\begin{lemma}
\label{lem:sum-zero-each-zero}
Suppose that $\{P_k\}_{k=1}^m, \{Q_k\}_{k=1}^m$ are each collections of mutually orthogonal projections for which $P_k - Q_k \in \mathcal{J}$ and $\sum_{k=1}^m [P_k:Q_k] = 0$.
Then for every atomic masa $\mathcal{A}$ containing $\{Q_k\}_{k=1}^m$, there exist mutually orthogonal projections $\{Q'_k\}_{k=1}^m \subseteq \mathcal{A}$ for which $P_k - Q'_k \in \mathcal{J}$ and $[P_k:Q'_k] = 0$.
\end{lemma}
\begin{proof}
Suppose $\{Q_k\}_{k=1}^m$ lies in an atomic masa.
Note that such a masa always exists since this is a finite collection of mutually orthogonal (hence commuting) projections.
The argument is by induction on $m$.
When $m=1$, the claim is trivial.
Now suppose $m > 1$.
Either $[P_k : Q_k] = 0$ for all $k$ already, or there are two indices $1 \le i,j \le m$ with $[P_i : Q_i] < 0 < [P_j:Q_j]$.
Notice that $\trace Q_i \ge -[P_i : Q_i]$.
Let $Q$ be a diagonal subprojection of $Q_i$ with $\trace Q = \min \{ -[P_i:Q_i], [P_j:Q_j] \}$.
Then we replace $Q_i$ with $Q_i - Q$ and $Q_j$ with $Q_j + Q$.
By construction, either $\big[P_i:(Q_i - Q)\big] = 0$ or $\big[P_j:(Q_j+Q)\big] = 0$.
So now we have $m-1$ pairs of projections for which the sum of the essential codimensions is zero.
By induction we can actually force them all to be zero while maintaining the condition that the $Q'_k$ projections are diagonal.
\end{proof}
\begin{theorem}
\label{thm:finite-spectrum-normal-restricted-diagonalizability}
Suppose $\mathcal{J}$ is a proper operator ideal.
A finite spectrum normal operator is diagonalizable by a unitary $U=I+K$ with $K \in \mathcal{J}$ if and only if each spectral projection differs from a diagonal projection by an element of $\mathcal{J}$.
\end{theorem}
\begin{proof}
Let $N = \sum_{k=1}^m \lambda_k P_k$ be a finite spectrum normal operator with spectral projections $P_k$ associated to the eigenvalues $\lambda_k$.
One direction is trivial, namely, if $N$ is diagonalizable by a unitary $U=I+K$ with $K \in \mathcal{J}$, then the projections $Q_k := UP_kU^{*}$ are diagonal and $P_k - Q_k \in \mathcal{J}$.
For the other direction, suppose that for each $P_k$ there is a diagonal projection $Q_k$ for which $P_k - Q_k \in \mathcal{J}$.
The operators $Q_jQ_k$ are projections because $Q_j,Q_k$ are commuting projections. Then since $P_jP_k = \delta_{jk}P_j$, for $j\not=k$ we obtain
\begin{equation}
\label{eq:Q_j*Q_k-in-J}
\begin{split}
Q_jQ_k &= \big(P_j+(Q_j-P_j)\big)\big(P_k+(Q_k-P_k)\big) \\
&= (Q_j-P_j)P_k + P_j(Q_k-P_k) + (Q_j-P_j)(Q_k-P_k) \in \mathcal{J}.
\end{split}
\end{equation}
Therefore $Q_jQ_k$ are finite projections when $j \not= k$.
Now let $Q'_1 := Q_1$ and inductively define $Q'_j = Q_j-Q_j(Q'_1+\cdots+Q'_{j-1})$ for $1 < j < m$ and finally $Q'_m = I-(Q'_1+\cdots+Q'_{m-1})$.
It is clear that for $1 \le j < m$, $Q'_j$ is in the algebra generated by $\{Q_1,\ldots,Q_j\}$ and is therefore diagonal.
Moreover, for $1 \le j < m$, by \eqref{eq:Q_j*Q_k-in-J} and induction $Q'_j - Q_j$ is finite rank, and hence $P_j - Q'_j \in \mathcal{J}$.
Thus, $Q'_m$ is a $\mathcal{J}$-perturbation of $I-(P_1+\cdots+P_{m-1}) = P_m$, and hence $P_m - Q'_m \in \mathcal{J}$ as well.
By \autoref{prop:essential-codimension}\ref{item:orthogonal-sum},
\begin{equation*}
\sum_{k=1}^m [P_k:Q'_k] = \left[ \sum_{k=1}^m P_k: \sum_{k=1}^m Q'_k \right] = [I:I] = 0.
\end{equation*}
So, by \autoref{lem:sum-zero-each-zero}, we may assume by passing to a possibly different collection of diagonal $Q'_k$ that, in fact, $[P_k:Q'_k] = 0$ for $1 \le k \le m$.
Finally, by \autoref{lem:restricted-conjugation-of-sets-of-projections} there is a unitary $U=I+K$ with $K \in \mathcal{J}$ for which $Q'_k = UP_k U^{*}$ for each $1 \le k \le m$.
Therefore, $UNU^{*} = \sum_{k=1}^m \lambda_k Q'_k$, which is a diagonal operator.
\end{proof}
\subsection{Consequences of restricted diagonalization}
This subsection is motivated by the following observation about the condition $(d_n) \in \Lim\big(\spec(N)\big)$ in Arveson's theorem.
\begin{proposition}
\label{prop:renormalized-sum-trace}
Let $N$ be a normal operator with finite spectrum and let $(d_n)$ be the diagonal of $N$.
Then $(d_n) \in \Lim\big(\spec(N)\big)$ if and only if there exists a diagonal operator $N' = \diag(x_n)$ such that $\spec(N') \subseteq \spec(N)$, and $E(N-N')$ is trace-class, in which case
\begin{equation}
\label{eq:renormalized-sum-trace}
\trace\big(E(N-N')\big) = \sum_{n=1}^{\infty} (d_n - x_n).
\end{equation}
\end{proposition}
\begin{proof}
($\Rightarrow$)
Suppose $(d_n) \in \Lim\big(\spec(N)\big)$.
Then there is a sequence $(x_n)$ with $x_n \in \spec(N)$ such that $(d_n - x_n)$ is absolutely summable, and we may take $N' := \diag(x_n)$.
Therefore, since $(d_n - x_n)$ is absolutely summable,
\begin{equation*}
\trace \abs{E(N-N')} = \sum_{n=1}^{\infty} \abs{d_n-x_n} < \infty,
\end{equation*}
and hence $E(N-N')$ is trace-class.
($\Leftarrow$)
Suppose $N'$ is a diagonal operator with $\spec(N') \subseteq \spec(N)$ and $E(N-N')$ trace-class, and let $(x_n)$ denote the diagonal of $N'$.
Then $x_n \in \spec(N') \subseteq \spec(N)$ and since $E(N-N')$ is trace-class,
\begin{equation*}
\sum_{n=1}^{\infty} \abs{d_n-x_n} = \trace \abs{E(N-N')} < \infty.
\end{equation*}
Therefore $(d_n-x_n)$ is absolutely summable and hence $(d_n) \in \Lim\big(\spec(N)\big)$.
Notice that whenever either of the equivalent conditions is satisfied, we have the equality
\begin{equation*}
\trace\big(E(N-N')\big) = \sum_{n=1}^{\infty} (d_n - x_n). \qedhere
\end{equation*}
\end{proof}
The remainder of the section is devoted to analyzing the expression $E(N-N')$ when $N'$ is a restricted diagonalization of a normal operator $N$ (not necessarily with finite spectrum), i.e., when $N' = UNU^{*}$ where $U = I + K$ is unitary and $K \in \mathcal{J}$.
As in \cite{DFWW-2004-AM}, the \emph{arithmetic mean closure} $\mathcal{J}^{-}$ of an operator ideal $\mathcal{J}$ is the set of operators $T$ whose singular values are weakly majorized by the singular values of an operator $A \in \mathcal{J}$; that is, if $s(T)$ denotes the singular value sequence of a compact operator $T$,
\begin{equation*}
\mathcal{J}^- := \left\{ T \in B(\ensuremath{\mathcal{H}}) \,\middle\vert\, \exists B \in \mathcal{J}, \forall n\in \mathbb{N},\ \sum_{j=1}^n s_j(T) \le \sum_{j=1}^n s_j(B) \right\}.
\end{equation*}
An ideal $\mathcal{J}$ is said to be \emph{arithmetic mean closed} if $\mathcal{J} = \mathcal{J}^-$.
Common examples of arithmetic mean closed ideals are the Schatten ideals $\mathcal{C}_p$ of which the trace-class ideal $\mathcal{C}_1$ and Hilbert--Schmidt ideal $\mathcal{C}_2$ are special cases.
In \cite{KW-2011-IUMJ}, Kaftal and Weiss investigated the relationship between an ideal $\mathcal{J}$ and the elements of its image $E(\mathcal{J})$ under a trace-preserving conditional expectation onto an atomic masa $\mathcal{A}$, and they established the following characterization \cite[Corollary~4.4]{KW-2011-IUMJ}.
\begin{corollary}
\label{cor:diagonal-invariance}
For every operator ideal $\mathcal{J}$, $E(\mathcal{J}) = \mathcal{J}^- \cap \mathcal{A}$.
\end{corollary}
Our next result says if an operator $N$ can be diagonalized by a unitary $U=I+K$ with $K \in \mathcal{J}$ then the diagonals of $N$ and its diagonalization differ by an element of the arithmetic mean closure of $\mathcal{J}^2$.
\begin{proposition}
\label{prop:diagonalization-by-I-plus-K-necessity}
Let $N$ be a diagonal operator, $\mathcal{J}$ an operator ideal, and $U = I+K$ a unitary with $K \in \mathcal{J}$.
Then $E(UNU^{*}-N) \in (\mathcal{J}^2)^-$.
\end{proposition}
\begin{proof}
Irrespective of the condition $K \in \mathcal{J}$, note that $U = I+K$ is unitary if and only if $K$ is normal and $K+K^{*} = -K^{*}K$ because
\begin{align*}
UU^{*} &= I + K + K^{*} + KK^{*} \\
U^{*}U &= I + K + K^{*} + K^{*}K.
\end{align*}
Then
\begin{align*}
E(UNU^{*}-N) &= E(KN+NK^{*}+KNK^{*}) \\
&= E(KN)+E(NK^{*}) + E(KNK^{*}) \\
&= E(K)N+NE(K^{*}) + E(KNK^{*}) \\
&= E(K+K^{*})N + E(KNK^{*}) \in (\mathcal{J}^2)^-,
\end{align*}
by \autoref{cor:diagonal-invariance}.
\end{proof}
When $\mathcal{J} = \mathcal{C}_2$, which is the primary concern in this paper, we can say more.
\begin{theorem}
\label{thm:expectation-trace-zero}
Suppose $N$ is a normal operator.
There is an atomic masa such that for every unitary $U = I + K$ with $K$ Hilbert--Schmidt, $E(UNU^{*}-N)$ is trace-class and has trace zero.
Moreover, if $N$ is diagonalizable, any atomic masa containing $N$ suffices.
\end{theorem}
\begin{proof}
Suppose first that $N$ is diagonalizable and consider an atomic masa in which $N$ lies.
Let $U = I+K$ be unitary with $K$ Hilbert--Schmidt.
By \autoref{prop:diagonalization-by-I-plus-K-necessity} with $\mathcal{J} = \mathcal{C}_2$ and its proof, each term of $E(UNU^{*}-N) = E(K+K^{*})N + E(KNK^{*})$ is trace-class because $K+K^{*} = -K^{*}K$ and $KNK^{*}$ are trace-class, and because the trace-class is arithmetic mean closed (in fact, it is the \emph{smallest} arithmetic mean closed ideal).
Then, because the conditional expectation is trace-preserving, we find
\begin{align*}
\trace\big(E(KNK^{*})\big) &= \trace(KNK^{*}) = \trace(K^{*}KN) \\
&= -\trace((K+K^{*})N) = -\trace(E(K+K^{*})N),
\end{align*}
and therefore $\trace\big(E(UNU^{*}-N)\big) = 0$.
Now suppose $N$ is an arbitrary normal operator.
By Voiculescu's extension \cite{Voi-1979-JOT} of the Weyl--von Neumann--Berg theorem we can write $N = D+J$ where $D$ is diagonalizable and $J$ is Hilbert--Schmidt.
Then $UJU^{*}-J = KJ+JK^{*}+KJK^{*}$ and each term is trace-class.
Moreover,
\begin{align*}
\trace(KJK^{*}) &= \trace(K^{*}KJ) = -\trace((K+K^{*})J) \\
&= -\trace(KJ)-\trace(K^{*}J) = -\trace(KJ)-\trace(JK^{*}),
\end{align*}
and hence $\trace(UJU^{*}-J) = 0$.
Therefore, if $E$ is a conditional expectation onto an atomic masa containing $D$, then $E(UNU^{*}-N) = E(UDU^{*}-D) + E(UJU^{*}-J)$ has trace zero.
\end{proof}
The previous theorem establishes a kind of \emph{trace invariance} property for arbitrary normal operators.
To see why we use this terminology, consider that a trace-class operator $A$ has a trace which is invariant under unitary conjugation.
That is, for any unitary $U$, $\trace A = \trace (UAU^{*})$.
Rearranging, we can write this as $\trace(UAU^{*}-A) = 0$, and since the canonical expectation is trace-invariant, we can rewrite this as $\trace\big(E(UAU^{*}-A)\big) = 0$.
Under more restrictive hypotheses, \autoref{thm:expectation-trace-zero} ensures the same condition for normal operators instead of trace-class operators.
\begin{remark}
The reader may have noticed that the normality in the previous theorem was only used in order to write the operator as a Hilbert--Schmidt perturbation of a diagonal operator.
Therefore, the above theorem remains valid under this substitution of the hypothesis, and a slightly more general result is obtained.
\end{remark}
\begin{example}
One may wonder if in \autoref{prop:diagonalization-by-I-plus-K-necessity} and \autoref{thm:expectation-trace-zero} we may take any trace-preserving conditional expectation instead of the special ones chosen.
The answer is negative in general as this example shows.
Consider commuting positive operators $C,S$ in $B(\ensuremath{\mathcal{H}})$ with zero kernel satisfying $C^2 + S^2 = I$.
Then consider the operators $P,U \in M_2\big(B(\ensuremath{\mathcal{H}})\big) \cong B(\ensuremath{\mathcal{H}} \oplus \ensuremath{\mathcal{H}})$
\begin{equation*}
P :=
\frac{1}{2}
\begin{pmatrix}
I & I \\
I & I \\
\end{pmatrix}
\qquad
U :=
\begin{pmatrix}
C & S \\
-S & C \\
\end{pmatrix},
\end{equation*}
which are a projection and a unitary, respectively.
Thus
\begin{equation*}
UPU^{*} =
\frac{1}{2}
\begin{pmatrix}
I+2CS & C^2 - S^2 \\
C^2 - S^2 & I-2CS \\
\end{pmatrix}.
\end{equation*}
Now, choose $S = \diag(\sin(\theta_n))$ and $C = \diag(\cos(\theta_n))$ with $(\theta_n) \in \ell^2 \setminus \ell^1$.
Then $S \in \mathcal{C}_2, C-I \in \mathcal{C}_1$ and hence $U-(I \oplus I) \in \mathcal{C}_2$.
Moreover, $2CS = \diag(\sin(2\theta_n))$ which is Hilbert--Schmidt but not trace-class.
Thus, if $E$ is the expectation onto an atomic masa containing $C,S$, and $\tilde{E} := E \oplus E$, then
$\tilde{E}(UPU^{*}-P) = \frac{1}{2} (2CS \oplus -2CS) = CS \oplus (-CS) \in \mathcal{C}_2 \setminus \mathcal{C}_1$.
\end{example}
\section{Arveson's Theorem Revisited}
\label{sec:arveson}
In this section we apply the results concerning restricted diagonalization to prove a few key facts which will yield a reformulation and extension of Arveson's theorem (\autoref{thm:arveson-reformulated}).
Our first result in this direction is \autoref{thm:diagonalizable-by-I-plus-HS} which characterizes the condition $(d_n) \in \Lim\big(\spec(N)\big)$ in terms of restricted diagonalization.
In order to prove \autoref{thm:diagonalizable-by-I-plus-HS}, we use a straightforward geometric lemma which serves a similar purpose as \cite[Lemma~1]{Arv-2007-PNASU}.
\begin{lemma}
\label{lem:convexity-coefficient-corner}
Suppose $\lambda_1,\ldots,\lambda_m \in \mathbb{C}$ are distinct and $x = \sum_{j=1}^m c_j \lambda_j$ is a convex combination, and $L$ is a line separating $\lambda_k$ from the remaining $\lambda_j$.
If $x$ lies on a line parallel to $L$ separating $\lambda_k$ from $L$, then
\begin{equation*}
\sum_{\substack{j=1 \\ j\not=k}}^m c_j \le \frac{\abs{x - \lambda_k}}{\dist(\lambda_k, L)}.
\end{equation*}
\end{lemma}
\begin{proof}
Relabel the $\lambda_j$ if necessary so that $k=1$.
By applying a rotation, translation and scaling (which preserve proportional distances), we may suppose that $\lambda_1 = 1$ and $L = -a + i\mathbb{R}$ for some $a \ge 0$ so that the real part $\Re(x) = 0$.
Note that $-a \ge \max_{j \ge 2} \{ \Re(\lambda_j)\}$.
Since $0 \in [-a,1]$ we may write
\begin{equation*}
t (-a) + (1-t) 1 = 0, \quad\text{for}\quad t = \frac{1}{1 + a}.
\end{equation*}
Now
\begin{equation*}
0 = \Re(x) = \sum_{j=1}^m c_j \Re(\lambda_j) \le \Bigg( \sum_{j=2}^m c_j \Bigg) \max_{j \ge 2} \{\Re(\lambda_j)\} + c_1 \lambda_1 \le \Bigg( \sum_{j=2}^m c_j \Bigg) (-a) + c_1 1.
\end{equation*}
Since we have two convex combinations of $-a, 1$ and the latter is closer to $1$ than the former, the convexity coefficients satisfy
\begin{equation*}
\sum_{j = 2}^m c_j \le t = \frac{1}{1+a} = \frac{\dist(\Re(x),\lambda_1)}{\dist(\lambda_1,L)} \le \frac{\abs{x-\lambda_1}}{\dist(\lambda_1,L)}. \qedhere
\end{equation*}
\end{proof}
\begin{theorem}
\label{thm:diagonalizable-by-I-plus-HS}
Let $N$ be a normal operator with finite spectrum and diagonal $(d_n)$, and let $X$ be the vertices of the convex hull of its essential spectrum.
Then $(d_n) \in \Lim(X)$ if and only if $\spec_{\mathrm{ess}}(N) = X$ and $N$ is diagonalizable by a unitary which is a Hilbert--Schmidt perturbation of the identity.
\end{theorem}
\begin{proof}
We first reduce to the case when $\spec(N) = \spec_{\mathrm{ess}}(N)$.
Since $N$ is a normal operator with finite spectrum, by the spectral theorem there is a finite rank perturbation $N'$ of $N$ for which $N'$ is normal and $\spec(N') = \spec_{\mathrm{ess}}(N') = \spec_{\mathrm{ess}}(N)$.
In particular, if $P_{\lambda}$ are the spectral projections of $N$ onto $\{\lambda\}$, and $\lambda' \in \spec_{\mathrm{ess}}(N)$ is a distinguished element, then we can choose
\begin{equation*}
N' := \lambda' P + \sum_{\lambda \in \spec_{\mathrm{ess}}(N)} \lambda P_{\lambda}, \quad\text{where}\quad P = \sum_{\lambda \notin \spec_{\mathrm{ess}}(N)} P_{\lambda}.
\end{equation*}
Since $N'-N$ is finite rank, the diagonals of $N'$ and $N$ differ by an absolutely summable sequence, so $(d_n) \in \Lim(X)$ if and only if the diagonal of $N'$ is in $\Lim(X)$.
Moreover, the spectral projections of $N$ and $N'$ differ from one another by finite projections.
Therefore, the spectral projections of $N$ each differ from a diagonal projection by a Hilbert--Schmidt operator if and only if the same holds true for $N'$.
By \autoref{thm:finite-spectrum-normal-restricted-diagonalizability}, $N$ is diagonalizable by a unitary which is a Hilbert--Schmidt perturbation of the identity if and only if $N'$ is as well.
Therefore, by the above reduction, it suffices to prove the theorem with the added assumption that $\spec(N) = \spec_{\mathrm{ess}}(N)$.
(Proof of $\Rightarrow$)
Enumerate the elements of $\spec_{\mathrm{ess}}(N) = \spec(N)$ as $\lambda_1,\ldots,\lambda_m$.
Let $P_j$ denote the spectral projection corresponding to the eigenvalue $\lambda_j$, so that $N = \sum_{j=1}^m \lambda_j P_j$.
Let $\{e_n\}_{n=1}^{\infty}$ denote the orthonormal basis corresponding to the diagonal $(d_n)$.
Suppose $(d_n) \in \Lim(X)$, and so there exist $x_n \in X$ for which $(d_n - x_n) \in \ell^1$.
Let $\Lambda_k := \{ n \in \mathbb{N} \mid x_n = \lambda_k\}$ be the index set where the sequence $(x_n)$ takes the value $\lambda_k \in X$.
The projections $P_j$ sum to the identity, so for each $n \in \mathbb{N}$, $\sum_{j=1}^m \angles{P_j e_n,e_n} = 1$ and therefore
\begin{equation*}
d_n = \sangles{Ne_n, e_n} = \sum_{j=1}^m \sangles{P_j e_n, e_n} \lambda_j
\end{equation*}
is a convex combination of the spectrum.
For $\lambda_k \in X$, let $L_k$ be a line separating $\lambda_k$ from the remaining elements of $\spec_{\mathrm{ess}}(N)$.
Such a line $L_k$ exists because $\lambda_k$ is an extreme point of the convex hull of $\spec_{\mathrm{ess}}(N)$, and this is a finite set.
Since $(d_n) \in \Lim (X)$ we know that $(d_n - \lambda_k)_{n \in \Lambda_k}$ is absolutely summable for every $k$.
Therefore, for all but finitely many indices $n \in \Lambda_k$, the diagonal entry $d_n$ lies on a line parallel to $L_k$ separating $\lambda_k$ from $L_k$ and hence also $\spec_{\mathrm{ess}}(N) \setminus \{ \lambda_k \}$.
By \autoref{lem:convexity-coefficient-corner}, for these indices $n \in \Lambda_k$,
\begin{equation}
\label{eq:P_j-in-ell^1}
\sum_{\substack{j=1 \\ j\not=k}}^m \sangles{P_j e_n, e_n} \le \frac{\abs{d_n - \lambda_k}}{\dist(\lambda_k,L_k)}.
\end{equation}
Since this inequality holds for all but finitely many $n \in \Lambda_k$, and $\dist(\lambda_k,L_k)$ is independent of $n \in \Lambda_k$, and $(d_n - \lambda_k)_{n \in \Lambda_k}$ is absolutely summable, \eqref{eq:P_j-in-ell^1} proves $\big(\langle P_j e_n, e_n \rangle\big)_{n \in \Lambda_k}$ lies in $\Lim (\{0\}) = \ell^1$ when $j \not= k$.
If $\lambda_j \in \spec_{\mathrm{ess}}(N) \setminus X$, by letting $\lambda_k$ run through $X$, we find $\big(\langle P_j e_n, e_n \rangle\big)_{n \in \mathbb{N}}$ is absolutely summable since $\bigcup_{\lambda_k \in X} \Lambda_k = \mathbb{N}$.
This implies $P_j$ is trace-class and hence a finite projection, contradicting the fact that $\lambda_j \in \spec_{\mathrm{ess}}(N)$.
Therefore $X = \spec_{\mathrm{ess}}(N)$.
Now consider $\lambda_j \in X = \spec_{\mathrm{ess}}(N)$.
In analogy with the previous paragraph, using the fact that $\big(\langle P_j e_n, e_n \rangle\big)_{n \in \Lambda_k} \in \ell^1$ when $j \not= k$ and letting $\lambda_k$ run through $X \setminus \{\lambda_j\}$, we find $\big(\langle P_j e_n, e_n \rangle\big)_{n \notin \Lambda_j} \in \ell^1$.
Finally, for $n \in \Lambda_j$,
\begin{equation*}
1 - \sangles{P_j e_n, e_n} = \sum_{\substack{k=1 \\ k\not=j}}^m \sangles{P_k e_n, e_n},
\end{equation*}
and hence $\big(1- \sangles{P_j e_n, e_n}\big)_{n \in \Lambda_j}$ is a finite sum of absolutely summable sequences, and is therefore absolutely summable.
Thus $\big(\sangles{P_j e_n, e_n}\big)_{n \in \Lambda_j} \in \Lim(\{1\})$, so $\big(\sangles{P_j e_n,e_n}\big) \in \Lim(\{0,1\})$.
Therefore, by \autoref{cor:restricted-diagonalization-of-projections}, $P_j$ differs from a diagonal projection by a Hilbert--Schmidt operator.
Since this is true of all the spectral projections of $N$, we may apply \autoref{thm:finite-spectrum-normal-restricted-diagonalizability} to conclude that $N$ is diagonalizable by a unitary which is a Hilbert--Schmidt perturbation of the identity.
(Proof of $\Leftarrow$) This implication is a direct corollary of \autoref{thm:expectation-trace-zero}.
To see this, suppose $\spec_{\mathrm{ess}}(N) = X$ and $N$ is diagonalizable by a unitary $U$ which is a Hilbert--Schmidt perturbation of the identity.
Thus $UNU^{*} = \diag(x_n)$ for some sequence $x_n \in \spec_{\mathrm{ess}}(N) = X$.
Then by \autoref{thm:expectation-trace-zero}, $E(N-UNU^{*})$ is trace-class.
That is, $\trace \big(E(N-UNU^{*})\big) = \sum_{n=1}^{\infty} (d_n-x_n)$ is an absolutely summable series, so $(d_n) \in \Lim(X)$.
\end{proof}
We now establish our generalized operator-theoretic reformulation of Arveson's \autoref{thm:arveson-pythagorean} by means of \autoref{thm:expectation-trace-zero}.
After the proof we will explain how to derive \autoref{thm:arveson-pythagorean} from \autoref{thm:arveson-reformulated}.
\begin{theorem}
\label{thm:arveson-reformulated}
Let $N$ be a normal operator with finite spectrum.
If $N$ is diagonalizable by a unitary which is a Hilbert--Schmidt perturbation of the identity,
then there is a diagonal operator $N'$ with $\spec(N') \subseteq \spec(N)$ for which $E(N-N')$ is trace-class, and for any such $N'$, $\trace\big(E(N-N')\big) \in K_{\spec(N)}$.
In particular,
\begin{equation}
\label{eq:trace-in-K-spec-N}
\trace\big(E(N-N')\big) = \sum_{\lambda \in \spec(N)} [P_{\lambda}:Q_{\lambda}] \lambda,
\end{equation}
where $P_{\lambda},Q_{\lambda}$ are the spectral projections onto $\{\lambda\}$ of $N,N'$ respectively.
Moreover, $P_{\lambda}-Q_{\lambda}$ is Hilbert--Schmidt for each $\lambda \in \spec(N)$.
\end{theorem}
\begin{proof}
Suppose $N$ is a normal operator with finite spectrum which is diagonalizable by a unitary $U$ that is a Hilbert--Schmidt perturbation of the identity.
Then by \autoref{thm:expectation-trace-zero}, $E(UNU^{*}-N)$ is trace-class with trace zero.
Moreover, $\spec(UNU^{*}) = \spec(N)$, thereby proving that an $N'$ as in the statement exists.
Now, let $N'$ be any diagonal operator with $\spec(N') \subseteq \spec(N)$ for which $E(N-N')$ is trace-class.
Since $N'$ and $UNU^{*}$ are diagonal, we find
\begin{equation}
\label{eq:diagonal-split}
UNU^{*}-N' = E(UNU^{*}-N') = E(UNU^{*}-N)+E(N-N')
\end{equation}
is trace-class, diagonal, and has finite spectrum contained in the set of differences $\spec(N)-\spec(N)$.
Together, these conditions imply this operator is finite rank.
Moreover, the (diagonal) spectral projections of $UNU^{*}, N'$, which we denote $R_{\lambda},Q_{\lambda}$, respectively for $\lambda \in \spec(N)$, each differ by a finite rank operator.
Here we allow for the case $Q_{\lambda} = 0$ when $\lambda \in \spec(N) \setminus \spec(N')$.
This guarantees
\begin{equation*}
[R_{\lambda}:Q_{\lambda}] = \trace(R_{\lambda}-Q_{\lambda}),
\end{equation*}
using, for example, \autoref{prop:kadison-integer-essential-codimension}; however, this formula for essential codimension holds whenever the difference of the projections is trace-class and is widely known (see for instance \cite[Theorem~4.1]{ASS-1994-JFA}, \cite[Theorem~3]{AS-1994-LAA}, or \cite[Corollary~3.3]{CP-2004-KT}).
Therefore,
\begin{equation}
\label{eq:trace-ess-codim-formula}
\trace(UNU^{*}-N') = \trace \left( \sum_{\lambda \in \spec(N)} (\lambda R_{\lambda} - \lambda Q_{\lambda}) \right) = \sum_{\lambda \in \spec(N)} [R_{\lambda} : Q_{\lambda}] \lambda.
\end{equation}
Moreover, we can replace $R_{\lambda}$ with $P_{\lambda}$ in the right-most side of the above display.
Indeed, since $U$ conjugates $P_{\lambda}, R_{\lambda}$, $[P_{\lambda} : R_{\lambda}] = 0$ by \autoref{prop:restricted-conjugation-of-projections}, and furthermore $[P_{\lambda} : Q_{\lambda}] = [P_{\lambda} : R_{\lambda}] + [R_{\lambda} : Q_{\lambda}]$ by \autoref{prop:essential-codimension}(iii).
Finally, since $\trace\big(E(UNU^{*}-N)\big) = 0$, using \eqref{eq:diagonal-split} and \eqref{eq:trace-ess-codim-formula} we find that
\begin{equation*}
\trace\big(E(N-N')\big) = \trace(UNU^{*}-N') = \sum_{\lambda \in \spec(N)} [P_{\lambda}:Q_{\lambda}] \lambda. \qedhere
\end{equation*}
\end{proof}
We now illustrate how our results may be used to provide a new proof of Arveson's theorem.
\begin{proof}[Proof of \autoref{thm:arveson-pythagorean}]
Let $X = \{\lambda_1,\ldots,\lambda_m\}$ and $d = (d_1,d_2,\ldots)$ be as in \autoref{thm:arveson-pythagorean}.
That is, $X$ is the set of vertices of a convex polygon in $\mathbb{C}$, and $d$ satisfies
\begin{equation*}
\sum_{n=1}^{\infty} \abs{f(d_n)} < \infty,
\end{equation*}
where $f(z) = (z-\lambda_1)(z-\lambda_2)\cdots(z-\lambda_m)$.
As we remarked after \autoref{thm:arveson-pythagorean}, this summability condition is equivalent to $d \in \Lim (X)$ by \cite[Proposition~2]{Arv-2007-PNASU}.
Now suppose $d$ is the diagonal of an operator $N \in \mathcal{N}(X)$ (i.e., $N$ is normal with $\spec(N) = \spec_{\mathrm{ess}}(N) = X$).
Then by \autoref{thm:diagonalizable-by-I-plus-HS}, $N$ is diagonalizable by a unitary $U = I+K$ with $K$ Hilbert--Schmidt.
Therefore, we may apply \autoref{thm:arveson-reformulated} to conclude that $\trace\big(E(N-N')\big) \in K_{\spec(N)} = K_X$ for some diagonal operator $N'$ with $\spec(N') \subseteq \spec(N)$ and $E(N-N')$ is trace-class.
Finally, equation \eqref{eq:renormalized-sum-trace} of \autoref{prop:renormalized-sum-trace} establishes
\begin{equation*}
\sum_{n=1}^{\infty} (d_n - x_n) = \trace\big(E(N-N')\big) \in K_X
\end{equation*}
where $(x_n)$ is the diagonal of $N'$, so $x_n \in \spec(N') = \spec(N) = X$.
Hence $s(d) = 0$.
\end{proof}
\begin{remark}
\label{rem:generalize-bownik-jasper}
In \cite{BJ-2015-TAMS}, Bownik and Jasper completely characterized the diagonals of selfadjoint operators with finite spectrum.
A few of the results we have presented herein are generalizations of \cite[Theorem~4.1]{BJ-2015-TAMS}, which consists of some necessary conditions for a sequence to be the diagonal of a finite spectrum selfadjoint operator.
In particular, the statement $(d_n) \in \Lim(X)$ implies $X = \spec_{\mathrm{ess}}(N)$ of our \autoref{thm:diagonalizable-by-I-plus-HS} is an extension to finite spectrum normal operators of their corresponding result \cite[Theorem~4.1(ii)]{BJ-2015-TAMS} for selfadjoint operators.
Similarly, our formula \eqref{eq:trace-in-K-spec-N} of \autoref{thm:arveson-reformulated} generalizes \cite[Theorem~4.1(iii)]{BJ-2015-TAMS}.
\end{remark}
We conclude with another perspective on the trace $\trace\big(E(N-N')\big)$.
Our next corollary shows that when the $\mathbb{Z}$-module $K_{\spec(N)}$ has full rank (i.e., $\rank K_{\spec(N)}$ is one less than the number of elements in the spectrum), this trace is zero if and only if $N'$ is a diagonalization of $N$ by a unitary $U = I + K$ with $K$ Hilbert--Schmidt.
\begin{corollary}
Suppose $N$ is a normal operator with $\spec(N) = \{\lambda_1,\ldots,\lambda_m\}$ such that $\lambda_1-\lambda_2,\ldots,\lambda_1-\lambda_m$ are linearly independent in the $\mathbb{Z}$-module $K_{\spec(N)}$.
Suppose further that $N$ is diagonalizable by a unitary which is a Hilbert--Schmidt perturbation of the identity.
If $N'$ is a diagonal operator such that $E(N-N')$ is trace-class and $\trace\big(E(N-N')\big) = 0$, then there is a unitary $U = I+K$ with $K$ Hilbert--Schmidt such that $UNU^{*} = N'$.
\end{corollary}
\begin{proof}
By \autoref{thm:arveson-reformulated}, the differences $P_k - Q_k$ are Hilbert--Schmidt and
\begin{equation*}
0 = \trace\big(E(N-N')\big) = \sum_{k=1}^m [P_k:Q_k]\lambda_k.
\end{equation*}
Since $\sum_{k=1}^m [P_k:Q_k] = 0$, we have $[P_1:Q_1] = - \sum_{k=2}^m [P_k:Q_k]$ and so we may rearrange the equality above to
\begin{equation*}
0 = \sum_{k=2}^m [P_k:Q_k](\lambda_1 - \lambda_k).
\end{equation*}
Since $\lambda_1-\lambda_2,\ldots,\lambda_1-\lambda_m$ are linearly independent in $K_{\spec(N)}$, we conclude that the coefficients $[P_k:Q_k] = 0$ for $2 \le k \le m$.
In turn, this implies $[P_1:Q_1] = 0$.
Therefore, by \autoref{lem:restricted-conjugation-of-sets-of-projections}, there is a unitary $U = I+K$ with $K$ Hilbert--Schmidt conjugating each $P_k$ to $Q_k$.
Thus $UNU^{*} = N'$.
\end{proof}
\emergencystretch=3em
\printbibliography
\end{document} |
\begin{document}
\title{\huge Boosting the Actor with Dual Critic}
\begin{abstract}
This paper proposes a new actor-critic-style algorithm called Dual Actor-Critic or Dual-AC. It is derived in a principled way from the Lagrangian dual form of the Bellman optimality equation, which can be viewed as a two-player game between the actor and a critic-like function, which is named the dual critic. Compared to its actor-critic relatives, Dual-AC has the desired property that the actor and dual critic are updated \emph{cooperatively} to optimize the same objective function, providing a more transparent way for learning the critic that is directly related to the objective function of the actor. We then provide a concrete algorithm that can effectively solve the minimax optimization problem, using techniques of multi-step bootstrapping, path regularization, and stochastic dual ascent algorithm. We demonstrate that the proposed algorithm achieves the state-of-the-art performances across several benchmarks.
\end{abstract}
\section{Introduction}\label{sec:intro}
Reinforcement learning~(RL) algorithms aim to learn a policy that maximizes the long-term return by sequentially interacting with an unknown environment. Value-function-based algorithms first approximate the optimal value function, which can then be used to derive a good policy. These methods~\citep{Sutton88,Watkins89} often take advantage of the Bellman equation and use bootstrapping to make learning more sample efficient than Monte Carlo estimation~\citep{SutBar98}. However, the relation between the quality of the learned value function and the quality of the derived policy is fairly weak~\citep{Bertsekas96Neuro}. Policy-search-based algorithms such as REINFORCE~\citep{Williams92} and others~\citep{Kakade02,SchLevAbbJoretal15}, on the other hand, assume a fixed space of parameterized policies and search for the optimal policy parameter based on unbiased Monte Carlo estimates. The parameters are often updated incrementally along stochastic directions that on average are guaranteed to increase the policy quality. Unfortunately, they often have a greater variance that results in a higher sample complexity.
Actor-critic methods combine the benefits of these two classes, and have proved successful in a number of challenging problems such as robotics~\citep{DeiNeuPet13}, meta-learning~\citep{BelPhaLeNoretal16}, and games~\citep{Mnih16Asynchronous}. An actor-critic algorithm has two components: the actor (policy) and the critic (value function). As in policy-search methods, the actor is updated towards the direction of policy improvement. However, the update directions are computed with the help of the critic, which can be more efficiently learned as in value-function-based methods~\citep{SutMcASinMan00,Konda03Actor,PetVijSch05,Bhatnagar09Natural,SchMorLevJoretal15}. Although the use of a critic may introduce bias in learning the actor, it reduces variance and thus the sample complexity as well, compared to pure policy-search algorithms.
While the use of a critic is important for the efficiency of actor-critic algorithms, it is not entirely clear how the critic should be optimized to facilitate improvement of the actor. For some parametric family of policies, it is known that a certain compatibility condition ensures the actor parameter update is an unbiased estimate of the true policy gradient~\citep{SutMcASinMan00}. In practice, temporal-difference methods are perhaps the most popular choice to learn the critic, especially when nonlinear function approximation is used (e.g., \citet{SchMorLevJoretal15}).
In this paper, we propose a new actor-critic-style algorithm where the actor and the critic-like function, which we named as dual critic, are trained \emph{cooperatively} to optimize the same objective function. The algorithm, called \emph{Dual Actor-Critic}, is derived in a principled way by solving a dual form of the Bellman equation~\citep{Bertsekas96Neuro}. The algorithm can be viewed as a two-player game between the actor and the dual critic, and in principle can be solved by standard optimization algorithms like stochastic gradient descent (Section~\ref{sec:dual_bellman}). We emphasize the dual critic is not fitting the value function for \emph{current policy}, but that of the \emph{optimal policy}. We then show that, when function approximation is used, direct application of standard optimization techniques can result in instability in training, because of the lack of convex-concavity in the objective function (Section~\ref{sec:instability}). Inspired by the augmented Lagrangian method~\citep{LueYe15,BoyParChuPelEtal10}, we propose \emph{path regularization} for enhanced numerical stability. We also generalize the two-player game formulation to the multi-step case to yield a better bias/variance tradeoff. The full algorithm is derived and described in Section~\ref{sec:sac}, and is compared to existing algorithms in Section~\ref{sec:related_work}. Finally, our algorithm is evaluated on several locomotion tasks in the MuJoCo benchmark~\citep{TodEreTas12}, and compares favorably to state-of-the-art algorithms across the board.
\paragraph{Notation.} We denote a discounted MDP by $\Mcal = \rbr{\Scal, \Acal, P, R, \gamma}$, where $\Scal$ is the state space, $\Acal$ the action space, $P(\cdot|s, a)$ the transition probability kernel defining the distribution over the next state upon taking action $a$ in state $s$, $R(s, a)$ the corresponding immediate reward, and $\gamma\in (0, 1)$ the discount factor. If there is no ambiguity, we will use $\sum_{a} f(a)$ and $\int f(a) da$ interchangeably.
\section{Duality of Bellman Optimality Equation}\label{sec:dual_bellman}
In this section, we first describe the linear programming formula of the Bellman optimality equation~\citep{Bertsekas95b,Puterman14}, paving the path for a duality view of reinforcement learning via Lagrangian duality. In the main text, we focus on MDPs with finite state and action spaces for simplicity of exposition. We extend the duality view to continuous state and action spaces in Appendix~\ref{appendix:continous_mdp}.
Given an initial state distribution $\mu(s)$, the reinforcement learning problem aims to find a policy $\pi(\cdot|s): \Scal\to \Pcal(\Acal)$ that maximizes the total expected discounted reward with $\Pcal(\Acal)$ denoting all the probability measures over $\Acal$, \ie,
\begin{equation}\label{eq:control_obj}
\textstyle \EE_{s_0\sim \mu(s)}\EE_{\pi}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i)},
\end{equation}
where $s_{i+1}\sim P(\cdot|s_{i}, a_{i})$, $a_i\sim \pi(\cdot|s_{i})$.
Define
$
V^*(s) := \max_{\pi\in \Pcal(\Acal)}\EE\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i)|s_0=s},
$
the Bellman optimality equation states that:
\begin{equation}\label{eq:V_bellman_opt}
V^*(s) = (\Tcal V^*)(s) := \max_{a\in \Acal} \left\{ {R(s, a)} + \gamma \EE_{s'|s, a}\sbr{V^*(s')} \right\} \,,
\end{equation}
which can be formulated as a linear program~\citep{Puterman14,Bertsekas95b}:
\begin{eqnarray}\label{eq:bellman_lp_primal}
\mathsf{P}^*:= \min_{V}&& (1 - \gamma)\EE_{s\sim \mu(s)}\sbr{V(s)}\\
\st && V(s)\geqslant {R(s, a)} +\gamma \EE_{s'|s, a}\sbr{V(s')}, \quad \forall (s, a)\in \Scal\times \Acal.\nonumber
\end{eqnarray}
For completeness, we provide the derivation of the above equivalence in Appendix~\ref{appendix:dual_proofs}. Without loss of generality, we assume there exists an optimal policy for the given MDP, namely, the linear program is solvable. The optimal policy can be obtained from the solution to the linear program~\eq{eq:bellman_lp_primal} via
\begin{eqnarray}
\pi^*(s) = \argmax_{a\in \Acal} \left\{ R(s, a) + \gamma \EE_{s'|s, a}\sbr{V^*(s')}\right\}\,.
\end{eqnarray}
The dual form of the LP below is often easier to solve and yields more direct relations to the optimal policy.
\begin{eqnarray}\label{eq:bellman_lp_dual}
\textstyle\mathsf{D}^* := \max_{\rho\geqslant 0}&& {\sum_{\rbr{s, a}\in\Scal\times \Acal} R(s, a)\rho(s, a)}\\
\st&& \textstyle\sum_{a\in\Acal} \rho(s', a) = (1 - \gamma)\mu(s') + \gamma \sum_{s, a\in\Scal\times \Acal}\rho(s, a){P(s'|s, a)}, \quad \forall s'\in \Scal\nonumber.
\end{eqnarray}
Since the primal LP is solvable, the dual LP is also solvable, and $\mathsf{P^* - D^*} = 0$.
The optimal dual variables $\rho^*(s, a)$ and optimal policy $\pi^*(a|s)$ are closely related in the following manner:
\begin{theorem}[Policy from dual variables]\label{thm:dual_property}
$\sum_{s, a\in\Scal\times\Acal}\rho^*(s, a)= 1$, and $\pi^*(a|s) = \frac{\rho^*(s, a)}{\sum_{a\in\Acal} \rho^*(s, a)}$.
\end{theorem}
Since the goal of reinforcement learning task is to learn an optimal policy, it is appealing to deal with the Lagrangian dual which optimizes the policy directly, or its equivalent saddle point problem that jointly learns the optimal policy and value function.
\begin{theorem}[Competition in one-step setting]\label{thm:one_step_lagrangian}
The optimal policy $\pi^*$ (the actor) and its corresponding value function $V^*$ (the dual critic) are the solution to the following saddle-point problem
\begin{eqnarray}\label{eq:one_step_lagrangian}
\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\min_{V} \,\, L(V, \alpha, \pi) := \rbr{1 - \gamma}\EE_{s\sim \mu(s)} \sbr{V(s)} + \sum_{\rbr{s, a}\in\Scal\times\Acal}\alpha(s)\pi\rbr{a|s}\Delta[V](s, a),
\end{eqnarray}
where $\Delta[V](s,a) := R(s,a) + \gamma\EE_{s'|s, a}[V(s')]-V(s)$.
\end{theorem}
The saddle point optimization~\eq{eq:one_step_lagrangian} provides a game perspective in understanding the reinforcement learning problem~\citep{GooPouMirXuetal14}. The learning procedure can be thought of as a game between the dual critic, \ie, the value function for the optimal policy, and the weighted actor, \ie, $\alpha(s)\pi(a|s)$: the dual critic $V$ seeks the value function to satisfy the Bellman equation, while the actor $\pi$ tries to generate state-action pairs that break the satisfaction. Such a competition introduces new roles for the actor and the dual critic, and more importantly, bypasses the unnecessary separation of policy evaluation and policy improvement procedures needed in a traditional actor-critic framework.
\section{Sources of Instability}\label{sec:instability}
To solve the dual problem in \eq{eq:one_step_lagrangian}, a straightforward idea is to apply stochastic mirror prox~\citep{NemJudLanSha09} or stochastic primal-dual algorithm~\citep{chen2014optimal} to address the saddle point problem in~\eq{eq:one_step_lagrangian}. Unfortunately, such algorithms have limited use beyond special cases. For example, for an MDP with finite state and action spaces, the one-step saddle-point problem~\eq{eq:one_step_lagrangian} with tabular parametrization is convex-concave, and finite-sample convergence rates can be established; see e.g.,~\citet{ChenWang16} and \citet{Wang17}.
However, when the state/action spaces are large or continuous so that function approximation must be used, such convergence guarantees no longer hold due to lack of convex-concavity. Consequently, directly solving \eq{eq:one_step_lagrangian} can suffer from severe bias and numerical issues, resulting in poor performance in practice (see, \eg, Figure~\ref{fig:ablation_study}):
\begin{enumerate}
\item {\bf Large bias in one-step Bellman operator}: It is well-known that one-step bootstrapping in temporal difference algorithms has lower variance than Monte Carlo methods and often require much fewer samples to learn. But it produces biased estimates, especially when function approximation is used. Such a bias is especially troublesome in our case as it introduces substantial noise in the gradients to update the policy parameters.
\item {\bf Absence of local convexity and duality}: Using nonlinear parametrization will easily break the local convexity and duality between the original LP and the saddle point problem, which are known as the necessary conditions for the success of applying primal-dual algorithm to constrained problems~\citep{LueYe15}. Thus none of the existing primal-dual type algorithms will remain stable and convergent when directly optimizing the saddle point problem without local convexity.
\item {\bf Biased stochastic gradient estimator with under-fitted value function}: In the absence of local convexity, the stochastic gradient w.r.t. the policy $\pi$ constructed from under-fitted value function will presumably be biased and futile to provide any meaningful improvement of the policy. Hence, naively extending the stochastic primal-dual algorithms in~\cite{ChenWang16,Wang17} for the parametrized Lagrangian dual, will also lead to biased estimators and sample inefficiency.
\end{enumerate}
\section{Dual Actor-Critic}\label{sec:sac}
In this section, we will introduce several techniques to bypass the three instability issues in the previous section:
(1) generalization of the minimax game to the multi-step case to achieve a better bias-variance tradeoff; (2) use of \emph{path regularization} in the objective function to promote local convexity and duality; and (3) use of stochastic \emph{dual ascent} to ensure unbiased gradient estimates.
\subsection{Competition in multi-step setting}\label{subsection:dual_kstep}
In this subsection, we will extend the minimax game between the actor and critic to the multi-step setting, which has been widely utilized in temporal-difference algorithms for better bias/variance tradeoffs~\citep{SutBar98,KeaSin00}.
By the definition of the optimal value function, it is easy to derive the $k$-step Bellman optimality equation as
\begin{equation}\label{eq:multi_step_bellman}
\textstyle
V^*(s) = \rbr{\Tcal^kV^*}(s) := \max_{\pi\in \Pcal} \left\{ \EE^\pi\sbr{\sum_{i = 0}^k \gamma^i R(s_i, a_i)} + \gamma^{k+1}\EE^\pi\sbr{V^*(s_{k+1})} \right\} \,.
\end{equation}
Similar to the one-step case, we can reformulate the multi-step Bellman optimality equation into a form similar to the LP formulation, and then we establish the duality, which leads to the following minimax problem:
\begin{theorem}[Competition in multi-step setting]\label{thm:multi_step_lagrangian}
The optimal policy $\pi^*$ and its corresponding value function $V^*$ are the solution to the following saddle point problem
\begin{eqnarray}\label{eq:multi_step_lagrangian}
\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\min_{V} L_k(V, \alpha, \pi) = (1 - \gamma^{k+1}) \EE_{\mu}\sbr{V(s)} + \EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}},
\end{eqnarray}
where $\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}} = \sum_{i=0}^{k} \gamma^i R(s_i, a_i) +\gamma^{k+1} V(s_{k+1}) - V(s)$ and
$$
\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}} = \sum_{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\alpha(s_0)\prod_{i=0}^k\pi(a_i|s_i)p(s_{i+1}|s_i, a_i)\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}.
$$
\end{theorem}
The saddle-point problem~\eq{eq:multi_step_lagrangian} is similar to the one-step Lagrangian~\eq{eq:one_step_lagrangian}: the dual critic, $V$, and weighted $k$-step actor, $\alpha(s_0)\prod_{i=0}^k\pi(a_i|s_i)$, are competing for an equilibrium, in which critic and actor become the optimal value function and optimal policy. However, it should be emphasized that due to the existence of $\max$-operator over the space of distributions $\Pcal(\Acal)$, rather than $\Acal$, in the multi-step Bellman optimality equation~\eq{eq:multi_step_bellman}, the establishment of the competition in multi-step setting in Theorem~\ref{thm:multi_step_lagrangian} is not straightforward: {\bf i)}, its corresponding optimization is no longer a linear programming; {\bf ii)}, the strong duality in~\eq{eq:multi_step_lagrangian} is not obvious because of the lack of the convex-concave structure. We first generalize the duality to multi-step setting. Due to space limit, detailed analyses for generalizing the competition to multi-step setting are provided in Appendix~\ref{appendix:sac}.
\subsection{Path Regularization}\label{subsection:path_reg}
When function approximation is used, the one-step or multi-step saddle point problems~\eq{eq:multi_step_lagrangian} will no longer be convex in the primal parameter space. This could lead to severe instability and even divergence when solved by brute-force stochastic primal-dual algorithms. One then desires to partially convexify the objectives without affecting the optimal solutions. The augmented Lagrangian method~\citep{BoyParChuPelEtal10,LueYe15}, also known as method of multipliers, is designed and widely used for such purposes. However, directly applying this method would require introducing penalty functions of the multi-step Bellman operator, which renders extra complexity and challenges in optimization. Interested readers are referred to Appendix~\ref{appendix:augmented_lagrangian} for details.
Instead, we propose to use \emph{path regularization}, as a stepping stone for promoting local convexity and computation efficiency. The regularization term is motivated by the fact that the optimal value function satisfies the constraint $V(s) = \EE^{\pi^*}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i)|s}$. In the same spirit as augmented Lagrangian, we will introduce to the objective the simple penalty function $\EE_{s\sim\mu(s)}\sbr{\rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i) } - V(s)}^2}$, resulting in
\begin{eqnarray}\label{eq:path_reg_lagrangian}
L_r(V, \alpha, \pi) & := & (1 - \gamma^{k+1}) \EE_{\mu}\sbr{V(s)} \,+\, \EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}} \\ [-2mm]
&&+\,\, {\textstyle \eta_V\EE_{s\sim\mu(s)}\sbr{\rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i)} - V(s)}^2}}.\nonumber
\end{eqnarray}
Note that in the penalty function we use some behavior policy $\pi_b$ instead of the optimal policy, since the latter is unavailable. Adding such a regularization enables local duality in the primal parameter space. Indeed, this can be easily verified by showing the positive definiteness of the Hessian at a local solution. We name the regularization as \emph{path regularization}, since it exploits the rewards in the \emph{sample path} to regularize the \emph{solution path} of value function $V$ in the optimization procedure. As a by-product, the regularization also provides the mechanism to utilize \emph{off-policy} samples from behavior policy $\pi_b$.
One can also see that the regularization indeed provides guidance and preference to search for the solution path. Specifically, in the learning procedure, each update moves $V$ towards the optimal value function while remaining close to the value function of the behavior policy $\pi_b$. Intuitively, such regularization restricts the feasible domain of the candidates $V$ to be a ball centered at $V^{\pi_b}$. Besides enhancing the local convexity, such a penalty also avoids an unbounded $V$ in the learning procedure, which would make the optimization invalid, and is thus more numerically robust. As long as the optimal value function is indeed in such a region, there will be no side-effect introduced. Formally, we can show that with appropriate $\eta_V$, the optimal solution $(V^*, \alpha^*, \pi^*)$ is not affected. The main results of this subsection are summarized by the following theorem.
\begin{theorem}[Property of path regularization]\label{thm:path_reg}
The local duality holds for $L_r(V, \alpha, \pi)$. Denote $(V^*, \alpha^*, \pi^*)$ as the solution to Bellman optimality equation, with some appropriate $\eta_V$,
$$
(V^*, \alpha^*, \pi^*) = \argmax_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\argmin_{V} L_r(V, \alpha, \pi).
$$
\end{theorem}
The proof of the theorem is given in Appendix~\ref{appendix:path_reg}. We emphasize that the theorem holds when $V$ is given enough capacity, \ie, in the nonparametric limit. With parametrization introduced, approximation error will inevitably be introduced, and the valid range of $\eta_V$, which keeps the optimal solution unchanged, will be affected. However, since the function approximation error is still an open problem for general classes of parametrization, we omit such discussion here, as it is out of the scope of this paper.
\subsection{Stochastic Dual Ascent Update}\label{subsection:dual_update}
Rather than the primal form, \ie, $\min_{V}\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)} L_r(V, \alpha, \pi)$, we focus on optimizing the dual form $\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\min_{V} L_r(V, \alpha, \pi)$. The major reason is due to the sample efficiency consideration. In the primal form, to apply the stochastic gradient descent algorithm at $V^t$, one needs to solve $\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)} L_r(V^t, \alpha, \pi)$, which involves sampling from each $\pi$ and $\alpha$ during the solution path for the subproblem. We define the regularized dual function $\ell_r(\alpha, \pi) := \min_{V} L_r(V, \alpha, \pi)$. We first show the unbiased gradient estimator of $\ell_r$ w.r.t. $\theta_\rho = \rbr{\theta_\alpha, \theta_\pi}$, which are parameters associated with $\alpha$ and $\pi$. Then, we incorporate the stochastic update rule into the dual ascent algorithm~\citep{BoyParChuPelEtal10}, resulting in the~\emph{dual actor-critic}~(Dual-AC) algorithm.
The gradient estimators of the dual functions can be derived using chain rule and are provided below.
\begin{theorem}\label{cor:reg_dual_grad}
The regularized dual function $\ell_r(\alpha, \pi)$ has gradients estimators
\begin{eqnarray}\label{eq:reg_alpha_grad}
\nabla_{\theta_\alpha}\ell_r\rbr{\theta_\alpha, \theta_\pi} &=&
\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\nabla_{\theta_\alpha} \log\alpha(s)},
\end{eqnarray}
\begin{eqnarray}\label{eq:reg_pi_grad}
\textstyle
\nabla_{\theta_\pi}\ell_r\rbr{\theta_\alpha, \theta_\pi} =
\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\sum_{i=0}^k\nabla_{\theta_\pi}\log\pi(a_i|s_i)}.
\end{eqnarray}
\end{theorem}
Therefore, we can apply stochastic mirror descent algorithm with the gradient estimator given in Theorem~\ref{cor:reg_dual_grad} to the regularized dual function $\ell_r(\alpha, \pi)$. Since the dual variables are probabilistic distributions, it is natural to use $KL$-divergence as the prox-mapping to characterize the geometry in the family of parameters~\citep{AmaNag93,NemJudLanSha09}. Specifically, in the $t$-th iteration,
\begin{equation}\label{eq:smd_prox}
\textstyle
\theta^t_\rho = \argmin_{\theta_\rho} -{\theta_\rho}^\top{ \hat g^{t-1}_\rho
} + \frac{1}{\zeta_t}KL(\rho_{\theta_\rho}\rbr{s, a}||\rho_{\theta_{\rho^{t-1}}}(s, a)),
\end{equation}
where $\hat g^{t-1}_\rho=\widehat\nabla_{\theta_\rho}\ell_r\rbr{\theta^{t-1}_\alpha, \theta^{t-1}_\pi}$ denotes the stochastic gradients estimated through~\eq{eq:reg_alpha_grad} and~\eq{eq:reg_pi_grad} via given samples and $KL(q(s, a)||p(s, a)) = \int q(s, a)\log \frac{q(s, a)}{p(s, a)}dsda$. Intuitively, such update rule emphasizes the balance between the current policy and the possible improvement based on samples. The update of $\pi$ shares some similarity to the TRPO, which is derived from the purpose for monotonic improvement guarantee~\cite{SchLevAbbJoretal15}. We discussed the details in Section~\ref{subsection:practical_alg}.
Rather than just update $V$ once via the stochastic gradient of $\nabla_V L_r(V, \alpha, \pi)$ in each iteration for solving saddle-point problem~\citep{NemJudLanSha09}, which is only valid in convex-concave setting, Dual-AC~exploits the stochastic dual ascent algorithm which requires $V^t = \argmin_V L_r(V, \alpha^{t-1}, \pi^{t-1})$ in $t$-th iteration for estimating $\nabla_{\theta_\rho}\ell_r\rbr{\theta_\alpha, \theta_\pi}$. As we discussed, such operation will keep the gradient estimator of dual variables unbiased, which provides better direction for convergence.
In Algorithm~\ref{alg:adversarial_control}, we update $V^t$ by solving optimization $\min_V L_r(V, \alpha^{t-1}, \pi^{t-1})$. In fact, the $V$ function in the path-regularized Lagrangian $L_r(V, \alpha, \pi)$ plays two roles: {\bf i)}, inherited from the original Lagrangian, the first two terms in regularized Lagrangian~\eq{eq:path_reg_lagrangian} push the $V$ towards the value function of the optimal policy with \emph{on-policy} samples; {\bf ii)}, on the other hand, the path regularization enforces $V$ to be close to the value function of behavior policy $\pi_b$ with \emph{off-policy} samples. Therefore, the $V$ function in the Dual-AC~algorithm can be understood as an interpolation between these two value functions learned from both on and off policy samples.
\subsection{Practical Implementation}\label{subsection:practical_alg}
In above, we have introduced path regularization for recovering local duality property of the parametrized multi-step Lagrangian dual form and tailored stochastic mirror descent algorithm for optimizing the regularized dual function. Here, we present several strategies for practical computation considerations.
\paragraph{Update rule of $V^t$}. In each iteration, we need to solve
$
V^t = \argmin_{\theta_V} L_r(V, \alpha^{t-1}, \pi^{t-1}),
$
which depends on $\pi_b$ and $\eta_V$, for estimating the gradient for dual variables. In fact, the closer $\pi_b$ is to $\pi^*$, the smaller $\EE_{s\sim\mu(s)}\sbr{\rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i) } - V^*(s)}^2}$ will be. Therefore, we can set $\eta_V$ to be large for better local convexity and faster convergence. Intuitively, $\pi^{t-1}$ approaches $\pi^*$ as the algorithm iterates. Therefore, we can exploit the policy obtained in the previous iteration, \ie, $\pi^{t-1}$, as the behavior policy. Experience replay can also be used.
Furthermore, noticing that $L_r(V, \alpha^{t-1}, \pi^{t-1})$ is an expectation of functions of $V$, we will use the stochastic gradient descent algorithm for the subproblem. Other efficient optimization algorithms can be used too. Specifically, the unbiased gradient estimator for $\nabla_{\theta_V} L_r(V, \alpha^{t-1}, \pi^{t-1})$ is
\begin{eqnarray}
\textstyle
\nabla_{\theta_V} L_r(V, \alpha^{t-1}, \pi^{t-1}) &=& (1 - \gamma^{k+1}) \EE_{\mu}\sbr{\nabla_{\theta_V}V(s)} + \EE_{\alpha}^\pi\sbr{\nabla_{\theta_V}\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}\\
&&\textstyle - 2\eta_V \EE_{\mu}^{\pi_b}\sbr{\rbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i) - V(s)}\nabla_{\theta_V} V(s)}.\nonumber
\end{eqnarray}
We can use $k$-step Monte Carlo approximation for $\EE_{\mu}^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i)}$ in the gradient estimator. As long as $k$ is large enough, the truncation error is negligible~\citep{SutBar98}. We will iterate via $\theta_V^{t, i} = \theta_V^{t, i-1} + \kappa_i\widehat\nabla_{\theta^{t, i-1}_V} L_r(V, \alpha^{t-1}, \pi^{t-1})$ until the algorithm converges.
It should be emphasized that in our algorithm, $V^t$ is not the estimation of the value function of $\pi^t$. Although $V^t$ eventually becomes the estimation of the optimal value function once the algorithm achieves the global optimum, in each update, the $V^t$ is one function which helps the current policy to be improved. From this perspective, the Dual-AC~bypasses the policy evaluation step.
\begin{algorithm}[tb]
\caption{Dual Actor-Critic~(Dual-AC)}
\begin{algorithmic}[1]\label{alg:adversarial_control}
\STATE Initialize $\theta^0_V$, $\theta^0_\alpha$, and $\theta^0_\pi$ randomly, set $\beta\in [\frac{1}{2}, 1]$.
\FOR{episode $t=1,\ldots, T$}
\STATE Start from $s\sim\alpha^{t-1}(s)$, collect samples $\cbr{\tau_l}_{l=1}^m$ follows behavior policy $\pi^{t-1}$.
\STATE Update $\theta^t_V = \argmin_{\theta_V} \hat L_r(V, \alpha^{t-1}, \pi^{t-1})$ by SGD based on $\cbr{\tau_l}_{l=1}^m$.
\STATE Update $\tilde\alpha^t(s)$ according to closed-form~\eq{eq:closed_alpha}.
\STATE Decay the stepsize $\zeta_t$ at rate $\frac{C}{n_0 + t^\beta}$.
\STATE Compute the stochastic gradients for $\theta_\pi$ following~\eq{eq:reg_pi_grad}.
\STATE Update $\theta^t_\pi$ according to the exact prox-mapping~\eq{eq:smd_prox_pi} or the approximate closed-form~\eq{eq:smd_prox_fisher}.
\ENDFOR
\end{algorithmic}
\end{algorithm}
\paragraph{Update rule of $\alpha^t$}. In practice, we may be faced with the situation that the initial sampling distribution is fixed, \eg, in MuJoCo tasks. Therefore, we cannot obtain samples from $\alpha^t(s)$ at each iteration. We assume that $\exists \eta_\mu \in (0, 1]$, such that $\alpha(s) = (1 - \eta_\mu)\beta(s) + \eta_\mu\mu(s)$ with $\beta(s)\in\Pcal(\Scal)$. Hence, we have
$$
\textstyle \EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}} = \EE_{\mu}^\pi\sbr{\rbr{\tilde\alpha(s)+ \eta_\mu}\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}
$$
where $\tilde\alpha(s) = (1 - \eta_\mu)\frac{\beta(s)}{\mu(s)}$. Note that such an assumption is much weaker compared with the requirement of popular policy gradient algorithms (\eg, \citet{SutMcaSinetal99,SilLevHeeetal14}) that assume $\mu(s)$ to be a stationary distribution. In fact, we can obtain a closed-form update for $\tilde\alpha$ if a square-norm regularization term is introduced into the dual function. Specifically,
\begin{theorem}\label{thm:closed_alpha}
In $t$-th iteration, given $V^t$ and $\pi^{t-1}$,
\begin{eqnarray}\label{eq:closed_alpha}
&&\argmax_{\alpha\geqslant 0}\EE_{\mu(s)\pi^{t-1}(s)}\sbr{\rbr{\tilde\alpha(s)+ \eta_\mu}\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}} - \eta_\alpha\nbr{\tilde\alpha}^2_{\mu}\\
&=&\frac{1}{\eta_\alpha}\max\rbr{0,\EE^{\pi^{t-1}}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}}.
\end{eqnarray}
\end{theorem}
Then, we can update $\tilde\alpha^t$ through~\eq{eq:closed_alpha} with Monte Carlo approximation of $\EE^{\pi^{t-1}}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}$, avoiding the parametrization of $\tilde\alpha$. As we can see, the $\tilde\alpha^t(s)$ reweights the samples based on the temporal differences and this offers a principled justification for the heuristic prioritized reweighting trick used in~\citep{SchQuaAntSil15}.
\paragraph{Update rule of $\theta_\pi^t$}. The parameters for dual function, $\theta_\rho$, are updated by the prox-mapping operator~\eq{eq:smd_prox} following the stochastic mirror descent algorithm for the regularized dual function. Specifically, in $t$-th iteration, given $V^t$ and $\alpha^t$, for $\theta_\pi$, the prox-mapping~\eq{eq:smd_prox} reduces to
\begin{equation}\label{eq:smd_prox_pi}
\textstyle
\theta_\pi^t = \argmin_{\theta_\pi} - {\theta_\pi}^\top{\hat g^t_\pi} + \frac{1}{\zeta_t}KL\rbr{\pi_{\theta_\pi}(a|s)||\pi_{\theta_\pi^{t-1}}(a|s)},
\end{equation}
where $\hat g^t_\pi = \widehat\nabla_{\theta_\pi}\ell_r\rbr{\theta^{t}_\alpha, \theta^{t}_\pi}$. Then, the update rule will become exactly the natural policy gradient~\citep{Kakade02} with a principled way to compute the ``policy gradient'' $\hat g^t_\pi$. This can be understood as the penalty version of the trust region policy optimization~\citep{SchLevAbbJoretal15}, in which a conservative update of the policy parameters in terms of $KL$-divergence is achieved by adding explicit constraints.
Exactly solving the prox-mapping for $\theta_\pi$ requires another optimization, which may be expensive. To further accelerate the prox-mapping, we approximate the KL-divergence with the second-order Taylor expansion, and obtain an approximate closed-form update given by
\begin{eqnarray}\label{eq:smd_prox_fisher}
\textstyle
\theta_\pi^{t} &\approx& \argmin_{\theta_\pi} \left\{ - {\theta_\pi}^\top{\hat g^t_\pi} + \frac{1}{2}\nbr{\theta_\pi - \theta^{t-1}_\pi}_{F_t}^2 \right\} = \theta_\pi^{t-1} + \zeta_t F_t^{-1}\hat g^t_\pi
\end{eqnarray}
where $F_t := \EE_{\alpha^{t}\pi^{t-1}}\sbr{\nabla^2\log\pi_{\theta_{\pi}^{t-1}}}$ denotes the Fisher information matrix. Empirically, we may normalize the gradient by its norm $\sqrt{g^t_\pi F_t^{-1}g^t_\pi}$~\citep{RajLowTodKak17} for better performances.
Combining these practical tricks to the stochastic mirror descent update eventually gives rise to the dual actor-critic algorithm outlined in Algorithm~\ref{alg:adversarial_control}.
\section{Related Work}\label{sec:related_work}
The dual actor-critic algorithm includes both the learning of \emph{optimal} value function and \emph{optimal} policy in a \emph{unified} framework based on the duality of the linear programming~(LP) representation of Bellman optimality equation. The linear programming representation of Bellman optimality equation and its duality have been utilized for (approximate) planning problem~\citep{FarRoy04,WanLizBowSch08,PazPar11, ODonWanBoy11, MalAbbBar14, Cogill15}, in which the transition probability of the MDP is known and the value function or policy are in tabular form. \citet{ChenWang16,Wang17} apply stochastic first-order algorithms~\citep{NemJudLanSha09} for the one-step Lagrangian of the LP problem in reinforcement learning setting. However, as we discussed in Section~\ref{sec:instability}, their algorithm is restricted to tabular parametrization and is not applicable to MDPs with large or continuous state/action spaces.
The duality view has also been exploited in~\citet{NeuJonGom17}. Their algorithm is based on the duality of entropy-regularized Bellman equation~\citep{Todorov07,RubShaTis12,FoxPakTis15, HaaTanAbbLev17,NacNorXuSch17}, rather than the exact Bellman optimality equation used in our work. Meanwhile, their algorithm is only derived and tested in tabular form.
Our dual actor-critic algorithm can be understood as a nontrivial extension of the (approximate) dual gradient method~\citep[Chapter 6.3]{Bertsekas99} using stochastic gradient and Bregman divergence, which essentially parallels the view of (approximate) stochastic mirror descent algorithm~\citep{NemJudLanSha09} in the primal space. As a result, the algorithm converges with diminishing stepsizes and decaying errors from solving subproblems.
Particularly, the update rules of $\alpha$ and $\pi$ in the dual actor-critic are related to several existing algorithms. As we see in the update of $\alpha$, the algorithm reweighs the samples which are not fitted well. This is related to the heuristic prioritized experience replay~\citep{SchQuaAntSil15}. For the update in $\pi$, the proposed algorithm bears some similarities with trust region policy optimization (TRPO)~\citep{SchLevAbbJoretal15} and natural policy gradient (NPG)~\citep{Kakade02, RajLowTodKak17}. Indeed, TRPO and NPG solve the same prox-mapping but are derived from different perspectives. We emphasize that although the updating rules share some resemblance to several reinforcement learning algorithms in the literature, they are purely originated from a stochastic dual ascent algorithm for solving the two-player game derived from Bellman optimality equation.
\section{Experiments}\label{sec:experiment}
We evaluated the dual actor-critic~(Dual-AC) algorithm on several continuous control environments from the OpenAI Gym~\citep{BroChePetSchetal16} with MuJoCo physics simulator~\citep{TodEreTas12}. We compared Dual-AC~with several representative actor-critic algorithms, including trust region policy optimization~(TRPO)~\citep{SchLevAbbJoretal15} and proximal policy optimization (PPO)~\citep{SchWolDhaRadetal17}\footnote{As discussed in~\citet{HenIslBacPinetal17}, different implementations of TRPO and PPO can provide different performances. For a fair comparison, we use the codes from \url{https://github.com/joschu/modular\_rl} reported to have achieved the best scores in~\citet{HenIslBacPinetal17}.}. We ran the algorithms with $5$ random seeds and reported the average rewards with $50\%$ confidence interval. Details of the tasks and setups of these experiments including the policy/value function architectures and the hyperparameters values, are provided in Appendix~\ref{appendix:exp_details}.
\subsection{Ablation Study}\label{subsection:exp_ablation}
To justify our analysis in identifying the sources of instability in directly optimizing the parametrized one-step Lagrangian duality and the effect of the corresponding components in the dual actor-critic algorithm, we perform a comprehensive ablation study in the InvertedDoublePendulum-v1, Swimmer-v1, and Hopper-v1 environments. We also considered the effect of $k = \{10, 50\}$ besides the one-step result in the study to demonstrate the benefits of multi-step.
We conducted comparison between the Dual-AC~and its variants, including Dual-AC~w/o multi-step, Dual-AC~w/o path-regularization, Dual-AC~w/o unbiased $V$, and the naive Dual-AC, for demonstrating the three instability sources in Section~\ref{sec:instability}, respectively, as well as varying the $k=\{10, 50\}$ in Dual-AC. Specifically, Dual-AC~w/o path-regularization removes the path-regularization components; Dual-AC~w/o multi-step removes the multi-step extension and the path-regularization; Dual-AC ~w/o unbiased $V$ calculates the stochastic gradient without achieving the convergence of inner optimization on $V$; and the naive Dual-AC~is the one without all components. Moreover, Dual-AC~with $k=10$ and Dual-AC~with $k=50$ denote the length of steps set to be $10$ and $50$, respectively.
\begin{figure*}
\caption{Comparison between the Dual-AC~and its variants for justifying the analysis of the source of instability.}
\label{fig:ablation_study}
\end{figure*}
The empirical performances on InvertedDoublePendulum-v1, Swimmer-v1, and Hopper-v1 tasks are shown in Figure~\ref{fig:ablation_study}. The results are consistent across the tasks with the analysis. The naive Dual-AC~performs the worst. Dual-AC~found the optimal policy that solves the problem much faster than the alternative variants. The Dual-AC~w/o unbiased $V$ converges slower, showing its sample inefficiency caused by the bias in gradient calculation. The Dual-AC~w/o multi-step and Dual-AC~w/o path-regularization cannot converge to the optimal policy, indicating the importance of the path-regularization in recovering the local duality. Meanwhile, the performance of Dual-AC~w/o multi-step is worse than Dual-AC~w/o path-regularization, showing the bias in one-step can be alleviated via multi-step trajectories. The performances of Dual-AC~become better as the length of step $k$ increases on these three tasks. We conjecture that the main reason may be that in these three MuJoCo environments, the bias dominates the variance. Therefore, as $k$ increases, the proposed Dual-AC~obtains higher accumulated rewards.
\subsection{Comparison in Continuous Control Tasks}\label{subsection:exp_comparison}
In this section, we evaluated the Dual-AC~against TRPO and PPO across multiple tasks, including the InvertedDoublePendulum-v1, Hopper-v1, HalfCheetah-v1, Swimmer-v1 and Walker-v1. These tasks have different dynamic properties, ranging from unstable to stable. Therefore, they provide sufficient benchmarks for testing the algorithms. In Figure~\ref{fig:policy_comparison}, we reported the average rewards across $5$ runs of each algorithm with $50\%$ confidence interval during the training stage. We also reported the average final rewards in Table~\ref{table:final_reward}.
\begin{table}[htb]
\caption{The average final performances of the policies learned from Dual-AC~and the competitors.}
\label{table:final_reward}
\begin{center}
\begin{tabular}{c|c|c|c}
\hline
\
Environment &Dual-AC &PPO &TRPO \\
\hline
Pendulum &$\bf{-155.45}$ &$-266.98$ &$-245.11$\\
InvertedDoublePendulum &$\bf{8599.47}$ &$1776.26$ &$3070.96$\\
Swimmer &${234.56}$ &${223.13}$ &${232.89}$\\
Hopper &$\bf{2983.79}$ &${2376.15}$ &$2483.57$\\
HalfCheetah &$\bf{3041.47}$ &$2249.10$ &$2347.19$\\
Walker &$\bf{4103.60}$ &$3315.45$ &$2838.99$\\
\hline
\end{tabular}
\end{center}
\end{table}
The proposed Dual-AC~achieves the best performance in almost all environments, including Pendulum, InvertedDoublePendulum, Hopper, HalfCheetah and Walker. These results demonstrate that Dual-AC~is a viable and competitive RL algorithm for a wide spectrum of RL tasks with different dynamic properties.
A notable case is the InvertedDoublePendulum, where Dual-AC~substantially outperforms TRPO and PPO in terms of the learning speed and sample efficiency, implying that Dual-AC~is preferable for unstable dynamics. We conjecture this advantage might come from the different meaning of $V$ in our algorithm. For an unstable system, failure will happen frequently, resulting in collected data that are far away from the optimal trajectories. Therefore, the policy improvement through the value function corresponding to the current policy is slower, while our algorithm learns the optimal value function and enhances the sample efficiency.
\begin{figure*}
\caption{The results of Dual-AC~against TRPO and PPO baselines. Each plot shows average reward during training across $5$ random seeded runs, with $50\%$ confidence interval. The x-axis is the number of training iterations. The Dual-AC~achieves comparable performances comparing with TRPO and PPO in some tasks, but outperforms on more challenging tasks. }
\label{fig:policy_comparison}
\end{figure*}
\section{Conclusion}\label{sec:conclusion}
In this paper, we revisited the linear program formulation of the Bellman optimality equation, whose Lagrangian dual form yields a game-theoretic view for the roles of the actor and the dual critic. Although such a framework for actor and dual critic allows them to be optimized for the same objective function, parametrizing the actor and dual critic unfortunately induces instability in optimization. We analyze the sources of instability, which is corroborated by numerical experiments. We then propose \emph{Dual Actor-Critic}, which exploits \emph{stochastic dual ascent} algorithm for the \emph{path regularized}, \emph{multi-step bootstrapping} two-player game, to bypass these issues. The algorithm achieves the state-of-the-art performances on several MuJoCo benchmarks.
\begin{thebibliography}{50}
\providecommand{\natexlab}[1]{#1}
\providecommand{\url}[1]{\texttt{#1}}
\expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Amari and Nagaoka(1993)]{AmaNag93}
Shun-ichi Amari and H.~Nagaoka.
\newblock \emph{Methods of Information Geometry}.
\newblock Oxford University Press, 1993.
\bibitem[Bello et~al.(2016)Bello, Pham, Le, Norouzi, and
Bengio]{BelPhaLeNoretal16}
Irwan Bello, Hieu Pham, Quoc~V Le, Mohammad Norouzi, and Samy Bengio.
\newblock Neural combinatorial optimization with reinforcement learning.
\newblock \emph{arXiv preprint arXiv:1611.09940}, 2016.
\bibitem[Bertsekas(1999)]{Bertsekas99}
D.~P. Bertsekas.
\newblock \emph{Nonlinear Programming}.
\newblock Athena Scientific, Belmont, MA, second edition, 1999.
\bibitem[Bertsekas and Tsitsiklis(1996)]{Bertsekas96Neuro}
Dimitri~P. Bertsekas and John~N. Tsitsiklis.
\newblock \emph{Neuro-Dynamic Programming}.
\newblock Athena Scientific, September 1996.
\newblock ISBN 1-886529-10-8.
\bibitem[Bertsekas et~al.(1995)Bertsekas, Bertsekas, Bertsekas, and
Bertsekas]{Bertsekas95b}
Dimitri~P Bertsekas, Dimitri~P Bertsekas, Dimitri~P Bertsekas, and Dimitri~P
Bertsekas.
\newblock \emph{Dynamic programming and optimal control}, volume~1.
\newblock Athena Scientific Belmont, MA, 1995.
\bibitem[Bhatnagar et~al.(2009)Bhatnagar, Sutton, Ghavamzadeh, and
Lee]{Bhatnagar09Natural}
Shalabh Bhatnagar, Richard~S. Sutton, Mohammad Ghavamzadeh, and Mark Lee.
\newblock Natural actor–critic algorithms.
\newblock \emph{Automatica}, 45\penalty0 (11):\penalty0 2471--2482, 2009.
\bibitem[Boyd et~al.(2010)Boyd, Parikh, Chu, Peleato, and
Eckstein]{BoyParChuPelEtal10}
S.~Boyd, N.~Parikh, E.~Chu, B.~Peleato, and J.~Eckstein.
\newblock Distributed optimization and statistical learning via the alternating
direction method of multipliers.
\newblock \emph{Foundations and Trends in Machine Learning}, 3\penalty0
(1):\penalty0 1--123, 2010.
\bibitem[Brockman et~al.(2016)Brockman, Cheung, Pettersson, Schneider,
Schulman, Tang, and Zaremba]{BroChePetSchetal16}
Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman,
Jie Tang, and Wojciech Zaremba.
\newblock Openai gym.
\newblock \emph{arXiv preprint arXiv:1606.01540}, 2016.
\bibitem[Burger(2003)]{Burger03}
Martin Burger.
\newblock Infinite-dimensional optimization and optimal design.
\newblock 2003.
\bibitem[Chen and Wang(2016)]{ChenWang16}
Yichen Chen and Mengdi Wang.
\newblock Stochastic primal-dual methods and sample complexity of reinforcement
learning.
\newblock \emph{arXiv preprint arXiv:1612.02516}, 2016.
\bibitem[Chen et~al.(2014)Chen, Lan, and Ouyang]{chen2014optimal}
Yunmei Chen, Guanghui Lan, and Yuyuan Ouyang.
\newblock Optimal primal-dual methods for a class of saddle point problems.
\newblock \emph{SIAM Journal on Optimization}, 24\penalty0 (4):\penalty0
1779--1814, 2014.
\bibitem[Cogill(2015)]{Cogill15}
Randy Cogill.
\newblock Primal-dual algorithms for discounted markov decision processes.
\newblock In \emph{Control Conference (ECC), 2015 European}, pages 260--265.
IEEE, 2015.
\bibitem[Dai et~al.(2014)Dai, Xie, He, Liang, Raj, Balcan, and
Song]{DaiXieHe14}
Bo~Dai, Bo~Xie, Niao He, Yingyu Liang, Anant Raj, Maria-Florina~F Balcan, and
Le~Song.
\newblock Scalable kernel methods via doubly stochastic gradients.
\newblock In \emph{Advances in Neural Information Processing Systems}, pages
3041--3049, 2014.
\bibitem[de~Farias and Roy(2004)]{FarRoy04}
D.~Pucci de~Farias and B.~Van Roy.
\newblock On constraint sampling in the linear programming approach to
approximate dynamic programming.
\newblock \emph{Mathematics of Operations Research}, 29\penalty0 (3):\penalty0
462--478, 2004.
\bibitem[Deisenroth et~al.(2013)Deisenroth, Neumann, and Peters]{DeiNeuPet13}
Marc~Peter Deisenroth, Gerhard Neumann, and Jan Peters.
\newblock A survey on policy search for robotics.
\newblock \emph{Foundations and Trends{\textregistered} in Robotics},
2\penalty0 (1--2):\penalty0 1--142, 2013.
\bibitem[Fox et~al.(2015)Fox, Pakman, and Tishby]{FoxPakTis15}
Roy Fox, Ari Pakman, and Naftali Tishby.
\newblock Taming the noise in reinforcement learning via soft updates.
\newblock \emph{arXiv preprint arXiv:1512.08562}, 2015.
\bibitem[Goodfellow et~al.(2014)Goodfellow, Pouget-Abadie, Mirza, Xu,
Warde-Farley, Ozair, Courville, and Bengio]{GooPouMirXuetal14}
Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley,
Sherjil Ozair, Aaron Courville, and Yoshua Bengio.
\newblock Generative adversarial nets.
\newblock In \emph{Advances in Neural Information Processing Systems}, pages
2672--2680, 2014.
\bibitem[Haarnoja et~al.(2017)Haarnoja, Tang, Abbeel, and
Levine]{HaaTanAbbLev17}
Tuomas Haarnoja, Haoran Tang, Pieter Abbeel, and Sergey Levine.
\newblock Reinforcement learning with deep energy-based policies.
\newblock \emph{arXiv preprint arXiv:1702.08165}, 2017.
\bibitem[Henderson et~al.(2017)Henderson, Islam, Bachman, Pineau, Precup, and
Meger]{HenIslBacPinetal17}
Peter Henderson, Riashat Islam, Philip Bachman, Joelle Pineau, Doina Precup,
and David Meger.
\newblock Deep reinforcement learning that matters.
\newblock \emph{arXiv preprint arXiv:1709.06560}, 2017.
\bibitem[Kakade(2002)]{Kakade02}
S.~Kakade.
\newblock A natural policy gradient.
\newblock In T.~G. Dietterich, S.~Becker, and Z.~Ghahramani, editors,
\emph{Advances in Neural Information Processing Systems 14}, pages
1531--1538. {MIT} Press, 2002.
\bibitem[Kearns and Singh(2000)]{KeaSin00}
M.~Kearns and S.~Singh.
\newblock Bias-variance error bounds for temporal difference updates.
\newblock In \emph{Proc. 13th Annu. Conference on Comput. Learning Theory},
pages 142--147. Morgan Kaufmann, San Francisco, 2000.
\bibitem[Konda and Tsitsiklis(2003)]{Konda03Actor}
Vijay~R. Konda and John~N. Tsitsiklis.
\newblock On actor-critic algorithms.
\newblock \emph{SIAM Journal on Control and Optimization}, 42\penalty0
(4):\penalty0 1143--1166, 2003.
\bibitem[Luenberger and Ye(2015)]{LueYe15}
David~G. Luenberger and Yinyu Ye.
\newblock \emph{Linear and Nonlinear Programming}.
\newblock Springer Publishing Company, Incorporated, 2015.
\newblock ISBN 3319188410, 9783319188416.
\bibitem[Malek et~al.(2014)Malek, Abbasi-Yadkori, and Bartlett]{MalAbbBar14}
Alan Malek, Yasin Abbasi-Yadkori, and Peter Bartlett.
\newblock Linear programming for large-scale markov decision problems.
\newblock In \emph{International Conference on Machine Learning}, pages
496--504, 2014.
\bibitem[Mnih et~al.(2016)Mnih, Badia, Mirza, Graves, Lillicrap, Harley,
Silver, and Kavukcuoglu]{Mnih16Asynchronous}
Volodymyr Mnih, Adri\`{a}~Puigdom\`{e}nech Badia, Mehdi Mirza, Alex Graves,
Timothy~P. Lillicrap, Tim Harley, David Silver, and Koray Kavukcuoglu.
\newblock Asynchronous methods for deep reinforcement learning.
\newblock In \emph{Proceedings of the 33rd International Conference on Machine
Learning}, pages 1928--1937, 2016.
\bibitem[Nachum et~al.(2017)Nachum, Norouzi, Xu, and Schuurmans]{NacNorXuSch17}
Ofir Nachum, Mohammad Norouzi, Kelvin Xu, and Dale Schuurmans.
\newblock Bridging the gap between value and policy based reinforcement
learning.
\newblock \emph{arXiv preprint arXiv:1702.08892}, 2017.
\bibitem[Nemirovski et~al.(2009)Nemirovski, Juditsky, Lan, and
Shapiro]{NemJudLanSha09}
A.~Nemirovski, A.~Juditsky, G.~Lan, and A.~Shapiro.
\newblock Robust stochastic approximation approach to stochastic programming.
\newblock \emph{SIAM J. on Optimization}, 19\penalty0 (4):\penalty0 1574--1609,
January 2009.
\newblock ISSN 1052-6234.
\bibitem[Nesterov(2005)]{Nesterov05}
Yurii Nesterov.
\newblock Smooth minimization of non-smooth functions.
\newblock \emph{Math. Program.}, 103\penalty0 (1):\penalty0 127--152, 2005.
\bibitem[Neu et~al.(2017)Neu, Jonsson, and G{\'o}mez]{NeuJonGom17}
Gergely Neu, Anders Jonsson, and Vicen{\c{c}} G{\'o}mez.
\newblock A unified view of entropy-regularized markov decision processes.
\newblock \emph{arXiv preprint arXiv:1705.07798}, 2017.
\bibitem[O'Donoghue et~al.(2011)O'Donoghue, Wang, and Boyd]{ODonWanBoy11}
Brendan O'Donoghue, Yang Wang, and Stephen Boyd.
\newblock Min-max approximate dynamic programming.
\newblock In \emph{Computer-Aided Control System Design (CACSD), 2011 IEEE
International Symposium on}, pages 424--431. IEEE, 2011.
\bibitem[Pazis and Parr(2011)]{PazPar11}
Jason Pazis and Ronald Parr.
\newblock Non-parametric approximate linear programming for mdps.
\newblock In \emph{AAAI}, 2011.
\bibitem[Peters et~al.(2005)Peters, Vijayakumar, and Schaal]{PetVijSch05}
Jan Peters, Sethu Vijayakumar, and Stefan Schaal.
\newblock Natural actor-critic.
\newblock In \emph{Machine Learning: ECML 2005, 16th European Conference on
Machine Learning, Porto, Portugal, October 3-7, 2005, Proceedings}, pages
280--291. Springer, 2005.
\bibitem[Puterman(2014)]{Puterman14}
Martin~L Puterman.
\newblock \emph{Markov decision processes: discrete stochastic dynamic
programming}.
\newblock John Wiley \& Sons, 2014.
\bibitem[Rajeswaran et~al.(2017)Rajeswaran, Lowrey, Todorov, and
Kakade]{RajLowTodKak17}
Aravind Rajeswaran, Kendall Lowrey, Emanuel Todorov, and Sham Kakade.
\newblock Towards generalization and simplicity in continuous control.
\newblock \emph{arXiv preprint arXiv:1703.02660}, 2017.
\bibitem[Rubin et~al.(2012)Rubin, Shamir, and Tishby]{RubShaTis12}
Jonathan Rubin, Ohad Shamir, and Naftali Tishby.
\newblock Trading value and information in mdps.
\newblock \emph{Decision Making with Imperfect Decision Makers}, pages 57--74,
2012.
\bibitem[Schaul et~al.(2015)Schaul, Quan, Antonoglou, and
Silver]{SchQuaAntSil15}
Tom Schaul, John Quan, Ioannis Antonoglou, and David Silver.
\newblock Prioritized experience replay.
\newblock \emph{arXiv preprint arXiv:1511.05952}, 2015.
\bibitem[Schulman et~al.(2015{\natexlab{a}})Schulman, Levine, Abbeel, Jordan,
and Moritz]{SchLevAbbJoretal15}
John Schulman, Sergey Levine, Pieter Abbeel, Michael~I Jordan, and Philipp
Moritz.
\newblock Trust region policy optimization.
\newblock In \emph{ICML}, pages 1889--1897, 2015{\natexlab{a}}.
\bibitem[Schulman et~al.(2015{\natexlab{b}})Schulman, Moritz, Levine, Jordan,
and Abbeel]{SchMorLevJoretal15}
John Schulman, Philipp Moritz, Sergey Levine, Michael Jordan, and Pieter
Abbeel.
\newblock High-dimensional continuous control using generalized advantage
estimation.
\newblock \emph{arXiv preprint arXiv:1506.02438}, 2015{\natexlab{b}}.
\bibitem[Schulman et~al.(2017)Schulman, Wolski, Dhariwal, Radford, and
Klimov]{SchWolDhaRadetal17}
John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov.
\newblock Proximal policy optimization algorithms.
\newblock \emph{arXiv preprint arXiv:1707.06347}, 2017.
\bibitem[Silver et~al.(2014)Silver, Lever, Heess, Degris, Wierstra, and
Riedmiller]{SilLevHeeetal14}
David Silver, Guy Lever, Nicolas Heess, Thomas Degris, Daan Wierstra, and
Martin Riedmiller.
\newblock Deterministic policy gradient algorithms.
\newblock In \emph{ICML}, 2014.
\bibitem[Sutton(1988)]{Sutton88}
R.~S. Sutton.
\newblock Learning to predict by the methods of temporal differences.
\newblock \emph{Machine Learning}, 3\penalty0 (1):\penalty0 9--44, 1988.
\bibitem[Sutton et~al.(2000)Sutton, McAllester, Singh, and
Mansour]{SutMcASinMan00}
R.~S. Sutton, David McAllester, S.~Singh, and Yishay Mansour.
\newblock Policy gradient methods for reinforcement learning with function
approximation.
\newblock In S.~A. Solla, T.~K. Leen, and K.-R. M\"uller, editors,
\emph{Advances in Neural Information Processing Systems 12}, pages
1057--1063, Cambridge, MA, 2000. {MIT} Press.
\bibitem[Sutton et~al.(1999)Sutton, McAllester, Singh, Mansour,
et~al.]{SutMcaSinetal99}
Richard~S Sutton, David~A McAllester, Satinder~P Singh, Yishay Mansour, et~al.
\newblock Policy gradient methods for reinforcement learning with function
approximation.
\newblock In \emph{NIPS}, volume~99, pages 1057--1063, 1999.
\bibitem[Sutton and Barto(1998)]{SutBar98}
R.S. Sutton and A.G. Barto.
\newblock \emph{Reinforcement Learning: An Introduction}.
\newblock {MIT} Press, 1998.
\bibitem[Todorov(2007)]{Todorov07}
Emanuel Todorov.
\newblock Linearly-solvable markov decision problems.
\newblock In \emph{Advances in neural information processing systems}, pages
1369--1376, 2007.
\bibitem[Todorov et~al.(2012)Todorov, Erez, and Tassa]{TodEreTas12}
Emanuel Todorov, Tom Erez, and Yuval Tassa.
\newblock Mujoco: A physics engine for model-based control.
\newblock In \emph{Intelligent Robots and Systems (IROS), 2012 IEEE/RSJ
International Conference on}, pages 5026--5033. IEEE, 2012.
\bibitem[Wang(2017)]{Wang17}
Mengdi Wang.
\newblock {Randomized Linear Programming Solves the Discounted Markov Decision
Problem In Nearly-Linear Running Time}.
\newblock \emph{ArXiv e-prints}, 2017.
\bibitem[Wang et~al.(2008)Wang, Lizotte, Bowling, and
Schuurmans]{WanLizBowSch08}
Tao Wang, Daniel Lizotte, Michael Bowling, and Dale Schuurmans.
\newblock Dual representations for dynamic programming.
\newblock 2008.
\bibitem[Watkins(1989)]{Watkins89}
C.~J. C.~H. Watkins.
\newblock \emph{Learning from Delayed Rewards}.
\newblock PhD thesis, King's College, Oxford, May 1989.
\newblock (To be reprinted by MIT Press.).
\bibitem[Williams(1992)]{Williams92}
Ronald~J. Williams.
\newblock Simple statistical gradient-following algorithms for connectionist
reinforcement learning.
\newblock \emph{Machine Learning}, 8:\penalty0 229--256, 1992.
\end{thebibliography}
\appendix
\onecolumn
\begin{appendix}
\thispagestyle{plain}
\begin{center}
{\Large \bf Appendix}
\end{center}
\section{Details of the Proofs for Section~\ref{sec:dual_bellman}}\label{appendix:dual_proofs}
\subsection{Duality of Bellman Optimality Equation}
\citet{Puterman14,Bertsekas95b} provide details in deriving the linear programming form of the Bellman optimality equation. We provide a brief proof here.
\begin{proof}
We rewrite the linear program~\eq{eq:bellman_lp_primal} as
\begin{equation}\label{eq:V_min}
V^* = \argmin_{V \geqslant \Tcal V} \EE_{\mu}\sbr{V(s)}.
\end{equation}
Recall that $\Tcal$ is monotonic, \ie, $V \geqslant \Tcal V \Rightarrow \Tcal V \geqslant \Tcal^2 V$, and that $V^* = \Tcal^\infty V$ for arbitrary $V$. Hence, for every feasible $V$, we have $V\geqslant \Tcal V \geqslant \Tcal^2 V\geqslant \ldots\geqslant \Tcal^\infty V = V^*$.
\end{proof}
\noindent{\bf Theorem~\ref{thm:dual_property} (Optimal policy from occupancy)}
\emph{
$\sum_{s, a\in\Scal\times\Acal}\rho^*(s, a)= 1$, and $\pi^*(a|s) = \frac{\rho^*(s, a)}{\sum_{a\in\Acal} \rho^*(s, a)}$.
}
\begin{proof}
For the optimal occupancy measure, it must satisfy
\begin{eqnarray*}
&&\sum_{a\in\Acal}\rho^*(s', a) = \gamma\sum_{s, a\in\Scal\times\Acal}\rho^*(s, a)p(s'|s, a) + (1 - \gamma)\mu(s'),\quad \forall s'\in \Scal\\
&\Rightarrow& (1 -\gamma)\mu + \sum_{s, a\in\Scal\times\Acal}(\gamma P - I)\rho^*(s, a) = 0,
\end{eqnarray*}
where $P$ denotes the transition distribution and $I$ denotes a $\abr{\Scal}\times\abr{\Scal\Acal}$ matrix where $I_{ij} = 1$ if and only if $j \in [\rbr{i-1}\abr{\Acal}+1,\ldots, i\abr{\Acal}]$. Multiplying both sides by $\one$, and since $\mu$ and $P$ are probability distributions, we have $\langle \one, \rho^*\rangle = 1.$
Without loss of generality, we assume there is only one best action in each state. Then, by the KKT complementary slackness condition of~\eq{eq:bellman_lp_primal}, \ie,
$$
\rho(s, a)\rbr{{R(s, a)} +\gamma \EE_{s'|s, a}\sbr{V(s')} - V(s)} = 0,
$$
which implies $\rho^*(s, a)\neq 0$ if and only if $a = a^*$; therefore, $\pi^*$ is obtained by normalization.
\end{proof}
\noindent{\bf Theorem~\ref{thm:one_step_lagrangian}}
\emph{
The optimal policy $\pi^*$ and its corresponding value function $V^*$ is the solution to the following saddle problem
\begin{eqnarray*}
\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\min_{V} \,\, L(V, \alpha, \pi) := \rbr{1 - \gamma}\EE_{s\sim \mu(s)} \sbr{V(s)} + \sum_{\rbr{s, a}\in\Scal\times\Acal}\alpha(s)\pi\rbr{a|s}\Delta[V](s, a)
\end{eqnarray*}
where $\Delta[V](s,a)= R(s,a) + \gamma\EE_{s'|s, a}[V(s')]-V(s)$.
}
\begin{proof}
Due to the strong duality of the optimization~\eq{eq:bellman_lp_primal}, we have
\begin{eqnarray*}
&&\min_{V}\max_{\rho(s, a)\geqslant 0} \,\, \rbr{1 - \gamma}\EE_{s\sim \mu(s)} \sbr{V(s)} + \sum_{\rbr{s, a}\in\Scal\times\Acal}\rho(s, a)\Delta[V](s, a)\\
&=&\max_{\rho(s, a)\geqslant 0}\min_{V} \,\, \rbr{1 - \gamma}\EE_{s\sim \mu(s)} \sbr{V(s)} + \sum_{\rbr{s, a}\in\Scal\times\Acal}\rho(s, a)\Delta[V](s, a).
\end{eqnarray*}
Then, plugging the property of the optimum in Theorem~\ref{thm:dual_property}, we achieve the final optimization~\eq{eq:one_step_lagrangian}.
\end{proof}
\subsection{Continuous State and Action MDP Extension}\label{appendix:continous_mdp}
In this section, we extend the linear program and its duality to continuous state and action MDPs. In general, only weak duality holds in the presence of infinitely many constraints, \ie, $\mathsf{P^*}\geqslant \mathsf{D^*}$. Under a mild assumption, we will recover strong duality for continuous state and action MDPs, and most of the conclusions for discrete state and action MDPs still hold.
Specifically, without loss of generality, we consider a solvable MDP, \ie, one for which the optimal policy, $\pi^*(a|s)$, exists. If $\nbr{R(s, a)}_\infty\leqslant C_R$, then $\nbr{V^*}_\infty\leqslant \frac{C_R}{1 - \gamma}$. Moreover,
\begin{eqnarray*}
\nbr{V^*}_{2, \mu}^2 &=& \int \rbr{V^*(s)}^2\mu(s)ds = \int \rbr{R(s, a) + \gamma\EE_{s'|s, a}\sbr{V^*(s')}}^2 \pi^*(a|s)\mu(s)d(s, a)\\
&\leqslant&2 \int \rbr{R(s, a)}^2\pi^*(a|s)\mu(s)ds + 2\gamma^2\int \rbr{\EE_{s'|s, a}\sbr{V^*(s')}}^2\pi^*(a|s)\mu(s)ds\\
&\leqslant&2\max_{a\in\Acal}\nbr{R(s, a)}_\mu^2 + 2\gamma^2 \int\rbr{\int P^*(s'|s)\mu(s) ds} \rbr{V^*(s')}^2ds'\\
&\leqslant&2\max_{a\in\Acal}\nbr{R(s, a)}_\mu^2 + 2\gamma^2 \nbr{V^*(s')}_\infty^2\int \int P^*(s'|s)\mu(s) dsds'\\
&\leqslant&2\max_{a\in\Acal}\nbr{R(s, a)}_\mu^2 + 2\gamma^2 \nbr{V^*(s')}_\infty^2,
\end{eqnarray*}
where the first inequality comes from $2\langle f(x), g(x) \rangle_2 \leqslant \nbr{f}_2^2 + \nbr{g}_2^2$. Similarly,
\begin{eqnarray*}
\nbr{V^* - \gamma \EE_{s'|s, a}\sbr{V(s')}}_{\mu\pi_b}^2 \leqslant 2\nbr{V^*}^2_\mu + 2\gamma^2\nbr{\EE_{s'|s, a}\sbr{V^*(s')}}_{\mu\pi_b}^2 \leqslant 2\nbr{V^*}^2_\mu + 2\gamma^2 \nbr{V^*(s')}_\infty^2,
\end{eqnarray*}
for some $\pi_b\in \Pcal$ that $\pi_b(a|s) > 0$ for $\forall \rbr{s,a}\in\Scal\times\Acal$.
Therefore, with the assumption that $\nbr{R(s, a)}_\mu^2\leqslant C^\mu_R, \forall a\in\Acal$, we have $R(s, a)\in\Lcal_{\mu\pi_b}^2\rbr{\Scal\times\Acal}$ and $V^*(s')\in \Lcal_\mu^2(\Scal)$. The constraints in the primal form of linear programming can be written as
$$
\rbr{\Ical - \gamma \Pcal} V - R\succeq_{\Lcal^2_{\mu\pi_b}} 0,
$$
where $\Ical-\gamma \Pcal :\Lcal^2_\mu(\Scal)\rightarrow \Lcal_{\mu\pi_b}^2(\Scal\times\Acal)$ without any effect on the optimality. For simplicity, we denote $\succeq$ as $\succeq_{\Lcal^2_{\mu\pi_b}}$ and $\langle f, g\rangle = \int f(s, a)g(s, a)\mu(s)\pi_b(a|s)dsda$. Applying the Lagrange multiplier theorem for constraints in ordered Banach spaces~\cite{Burger03}, we have
\begin{eqnarray}\label{eq:continuous_lagrangian}
\mathsf{P^*} = \min_{V\in \Lcal}\max_{\varrho \succeq 0}\,\, (1 - \gamma)\EE_{\mu}\sbr{V(s)} - \langle \varrho, \rbr{\Ical - \gamma \Pcal} V - R\rangle.
\end{eqnarray}
The solution $(V^*, \varrho^*)$ also satisfies the KKT conditions,
\begin{eqnarray}
(1 - \gamma)\one - \rbr{\Ical - \gamma \Pcal}^\top \varrho^* &=& 0,\\
\varrho^* &\succeq& 0, \\
\rbr{\Ical - \gamma \Pcal} V^* - R &\succeq& 0,\\
\langle \varrho^*, \rbr{\Ical - \gamma \Pcal} V^* - R\rangle & = &0.
\end{eqnarray}
where $^\top$ denotes the conjugate operation. By the KKT condition, we have
\begin{equation}\label{eq:normalize_condition}
\inner{\one}{(1 - \gamma)\one - \rbr{\Ical - \gamma \Pcal}^\top \varrho^*} = 0\Rightarrow \inner{\one}{\varrho^*} = 1.
\end{equation}
Strong duality also holds, \ie,
\begin{eqnarray}
\mathsf{P^*} = \mathsf{D^*} := \max_{\varrho \succeq 0}&& \langle R(s, a), \varrho(s, a) \rangle\\
\st && (1 - \gamma)\one - \rbr{\Ical - \gamma \Pcal}^\top \varrho = 0
\end{eqnarray}
\begin{proof}
We compute the duality gap
\begin{eqnarray*}
&&(1 - \gamma) \langle \one, V^* \rangle - \langle R, \varrho^*\rangle\\
& = & \langle \varrho^*, \rbr{\Ical - \gamma \Pcal} V^*\rangle - \langle R, \varrho^*\rangle\\
& = & \langle \varrho^*, \rbr{\Ical - \gamma \Pcal} V^* - R \rangle =0,
\end{eqnarray*}
which shows that strong duality holds.
\end{proof}
\section{Details of The Proofs for Section~\ref{sec:sac}}\label{appendix:sac}
\subsection{Competition in Multi-Step Setting}
Once we establish the $k$-step Bellman optimality equation~\eq{eq:multi_step_bellman}, it is easy to derive the $\lambda$-Bellman optimality equation, \ie,
\begin{eqnarray}
V^*(s) = \max_{\pi\in\Pcal}\,\, (1 - \lambda)\sum_{k=0}^\infty \lambda^k \EE^\pi\sbr{\sum_{i=0}^k \gamma^i R(s_i, a_i) +\gamma^{k+1} V^*(s_{k+1})}:= (\Tcal_\lambda V^*)(s).
\end{eqnarray}
\begin{proof}
Denote the optimal policy as $\pi^*(a|s)$, we have
$$
V^*(s) = \EE^{\pi^*}_{\cbr{s_i}_{i=0}^k|s}\sbr{\sum_{i=0}^k \gamma^i R(s_i, a_i)} +\gamma^{k+1} \EE^{\pi^*}_{s_{k+1}|s}\sbr{V^*(s_{k+1})},
$$
holds for arbitrary $k\in\NN$. Then, we draw $k\sim \Gcal eo(\lambda)$ and take the expectation over these countably many equations, resulting in
\begin{eqnarray*}
V^*(s) &=& (1 - \lambda)\sum_{k=0}^\infty \lambda^k \EE^{\pi^*}\sbr{\sum_{i=0}^k \gamma^i R(s_i, a_i) +\gamma^{k+1} V^*(s_{k+1})}\\
&=& \max_{\pi\in\Pcal} \,\,(1 - \lambda)\sum_{k=0}^\infty \lambda^k \EE^\pi\sbr{\sum_{i=0}^k \gamma^i R(s_i, a_i) +\gamma^{k+1} V^*(s_{k+1})}
\end{eqnarray*}
\end{proof}
Next, we investigate the equivalent optimization form of the $k$-step and $\lambda$-Bellman optimality equation, which requires the following monotonic property of $\Tcal_k$ and $\Tcal_\lambda$.
\begin{lemma}\label{lemma:monotonic_prop}
Both $\Tcal_k$ and $\Tcal_\lambda$ are monotonic.
\end{lemma}
\begin{proof}
Assume $U$ and $V$ are the value functions corresponding to $\pi_1$ and $\pi_2$, and $U\geqslant V$, \ie, $U(s)\geqslant V(s)$, $\forall s\in \Scal$, apply the operator $\Tcal_k$ on $U$ and $V$, we have
\begin{eqnarray*}
\rbr{\Tcal_k U}(s) &=& \max_{\pi\in\Pcal}\,\, \EE^\pi_{\{s_i\}_{i=1}^k|s}\sbr{\sum_{i=0}^k \gamma^i R(s_i, a_i)} +\gamma^{k+1}\EE^\pi_{s_{k+1}|s}\sbr{ U(s_{k+1})},\\
\rbr{\Tcal_k V}(s) &=& \max_{\pi\in\Pcal}\,\, \EE^\pi_{\{s_i\}_{i=1}^k|s}\sbr{\sum_{i=0}^k \gamma^i R(s_i, a_i)} +\gamma^{k+1}\EE^\pi_{s_{k+1}|s}\sbr{ V(s_{k+1})}.\\
\end{eqnarray*}
Due to $U\geqslant V$, we have $\EE^\pi_{s_{k+1}|s}\sbr{ U(s_{k+1})}\geqslant \EE^\pi_{s_{k+1}|s}\sbr{ V(s_{k+1})}$, $\forall \pi\in\Pcal$, which leads to the first conclusion, $\Tcal_k U \geqslant \Tcal_k V$.
Since $\Tcal_\lambda = (1 - \lambda)\sum_{k=0}^\infty\lambda^k\Tcal_k = \EE_{k\sim \Gcal eo(\lambda)}\sbr{\Tcal_k}$ is a convex combination of the monotonic operators $\Tcal_k$, $\Tcal_\lambda$ is also monotonic.
\end{proof}
With the monotonicity of $\Tcal_k$ and $\Tcal_\lambda$, we can rewrite the $V^*$ as the solution to an optimization,
\begin{theorem}\label{thm:lp_bellman}
The optimal value function $V^*$ is the solution to the optimization
\begin{eqnarray}\label{eq:multi_step_optimization_form}
V^* = \argmin_{V\geqslant \Tcal_k V} \,\, \rbr{1 - \gamma^{k+1}} \EE_{s\sim \mu(s)}\sbr{V(s)},
\end{eqnarray}
where $\mu(s)$ is an arbitrary distribution over $\Scal$.
\end{theorem}
\begin{proof}
Recall that $\Tcal_k$ is monotonic, \ie, $V \geqslant \Tcal_k V\Rightarrow \Tcal_k V \geqslant \Tcal_k^2 V$, and that $V^* = \Tcal_k^\infty V$ for arbitrary $V$. Hence, for every $V$ satisfying the constraint, $V\geqslant \Tcal_k V \geqslant \Tcal_k^2 V\geqslant \ldots\geqslant \Tcal_k^\infty V = V^*$, where the last equality comes from the Banach fixed point theorem~\citep{Puterman14}. Similarly, we can also show that every such $V$ satisfies $V\geqslant \Tcal^\infty_\lambda V = V^*$. By combining these two inequalities, we achieve the optimization.
\end{proof}
We rewrite the optimization as
\begin{eqnarray}\label{eq:bellman_kstep_primal}
\min_{V}&& (1 - \gamma^{k+1})\EE_{s\sim \mu(s)}\sbr{V(s)}\\
\st && V(s) \geqslant R(s, a) + \max_{\pi\in\Pcal}\,\EE^\pi_{\{s_i\}_{i=1}^{k+1}|s}\sbr{\sum_{i=1}^{k} \gamma^i R(s_i, a_i) +\gamma^{k+1} V(s_{k+1})},\nonumber \\
&& \rbr{s, a}\in \Scal\times\Acal,\nonumber
\end{eqnarray}
We emphasize that this optimization is no longer a linear program due to the existence of the $\max$-operator over the distribution space in the constraints. However, Theorem~\ref{thm:dual_property} still holds for the dual variables in~\eq{eq:full_lagrangian}.
\begin{proof}
Denote the optimal policy as $\tilde\pi_V^* = \argmax_{\pi\in\Pcal}\,\EE^\pi_{\{s_i\}_{i=1}^{k+1}|s}\sbr{\sum_{i=1}^{k} \gamma^i R(s_i, a_i) +\gamma^{k+1} V(s_{k+1})}$, the KKT condition of the optimization~\eq{eq:bellman_kstep_primal} can be written as
\begin{eqnarray*}
&&\rbr{1 - \gamma^{k+1}}\mu(s') + \gamma^{k+1}\sum_{\cbr{s_i, a_i}_{i=0}^k}p(s'|s_k, a_k)\prod_{i=0}^{k-1}p(s_{i+1}|s_i, a_i) \prod_{i=1}^k\tilde\pi_V^*(a_i|s_i)\rho^*(s_0, a_0)\\
&&=\sum_{a_0, \cbr{s_i, a_i}_{i=1}^k}\prod_{i=0}^kp(s_{i+1}|s_i, a_i)\rho^*(s', a)\prod_{i=1}^k\tilde\pi_V^*(a_i|s_i).
\end{eqnarray*}
Denote $P_k^\pi(s_{k+1}|s, a) = \sum_{\cbr{s_i, a_i}_{i=1}^k}p(s_{k+1}|s_k, a_k)\prod_{i=0}^{k-1}p(s_{i+1}|s_i, a_i) \prod_{i=1}^k\pi(a_i|s_i)$, we simplify the condition, \ie,
$$
\rbr{1 - \gamma^{k+1}}\mu(s') + \gamma^{k+1} \sum_{s, a}P_k^{\tilde\pi_V^*}(s'|s, a)\rho^*(s, a) = \sum_{a}\rho^*(s', a).
$$
Since $P_k^{\tilde\pi_V^*}(s'|s, a)$ is a conditional probability for every $V$, by a similar argument to that in Theorem~\ref{thm:dual_property}, we have $\sum_{s, a}\rho^*(s, a) = 1$.
By the KKT complementary condition, the primal and dual solutions, \ie, $V^*$ and $\rho^*$, satisfy
\begin{equation}\label{eq:KKT_complementary}
\rho^*(s, a)\rbr{R(s, a) + \EE^{\tilde\pi_{V^*}^*}_{\{s_i\}_{i=1}^{k+1}|s}\sbr{\sum_{i=1}^{k} \gamma^i R(s_i, a_i) +\gamma^{k+1} V^*(s_{k+1})} - V^*(s)} = 0.
\end{equation}
Recall that $V^*$ denotes the value function of the optimal policy; then, by definition, $\tilde\pi_{V^*}^* = \pi^*$, the optimal policy. The condition~\eq{eq:KKT_complementary} then implies $\rho^*(s, a) \neq 0$ if and only if $a= a^*$; therefore, we can decompose $\rho^*(s, a) = \alpha^*(s)\pi^*(a|s)$.
\end{proof}
The corresponding Lagrangian of optimization~\eq{eq:bellman_kstep_primal} is
\begin{eqnarray}\label{eq:kstep_lagrangian}
\min_{V}\max_{\rho(s, a)\geqslant 0 } L_k(V, \rho) = (1 - \gamma^{k+1}) \EE_{\mu}\sbr{V(s)} + \sum_{\rbr{s, a}\in \Scal\times\Acal}\rho(s, a)\rbr{\max_{\pi\in\Pcal}\Delta^\pi_k[V](s, a)},
\end{eqnarray}
where $\Delta^\pi_k[V](s, a) = R(s, a) +\EE^\pi_{\{s_i\}_{i=1}^{k+1}|s}\sbr{\sum_{i=1}^{k} \gamma^i R(s_i, a_i) +\gamma^{k+1} V(s_{k+1})} - V(s)$.
We further simplify the optimization. Since the dual variables are positive, we have
\begin{eqnarray}\label{eq:full_lagrangian}
\min_{V}\max_{\rho(s, a)\geqslant 0, \pi\in\Pcal} L_k(V, \rho) = (1 - \gamma^{k+1}) \EE_{\mu}\sbr{V(s)} + \sum_{\rbr{s, a}\in \Scal\times\Acal}\rho(s, a)\rbr{\Delta^\pi_k[V](s, a)}.
\end{eqnarray}
After clarifying these properties of the optimization corresponding to the multi-step Bellman optimality equation, we are ready to prove Theorem~\ref{thm:multi_step_lagrangian}.
\noindent{\bf Theorem~\ref{thm:multi_step_lagrangian}}
\emph{
The optimal policy $\pi^*$ and its corresponding value function $V^*$ is the solution to the following saddle point problem
\begin{eqnarray*}
\max_{\alpha\in\Pcal(\Scal),\pi\in\Pcal(\Acal)}\min_{V} L_k(V, \alpha, \pi) &:=& (1 - \gamma^{k+1}) \EE_{\mu}\sbr{V(s)}
\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\eq{eq:multi_step_lagrangian}\\
&+& \hspace{-8mm}\sum_{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\hspace{-6mm}\alpha(s_0)\prod_{i=0}^k\pi(a_i|s_i)p(s_{i+1}|s_i, a_i)\delta[V]\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\nonumber
\end{eqnarray*}
where $\delta[V]\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}} = \sum_{i=0}^{k} \gamma^i R(s_i, a_i) +\gamma^{k+1} V(s_{k+1}) - V(s)$.
}
\begin{proof}
By Theorem~\ref{thm:dual_property} in the multi-step setting, we can decompose $\rho(s, a) = \alpha(s)\pi(a|s)$ without any loss. Plugging this decomposition into the Lagrangian~\eq{eq:full_lagrangian} and realizing the equivalence among the optimal policies, we arrive at the optimization
$
\min_{V}\max_{\alpha\in\Pcal(\Scal),\pi\in\Pcal(\Acal)} L_k(V, \alpha, \pi).
$
Then, because of the strong duality, as we proved in Lemma~\ref{lemma:multi_step_strong_dual}, we can switch the $\min$ and $\max$ operators in optimization~\eq{eq:multi_step_lagrangian} without any loss.
\end{proof}
\begin{lemma}\label{lemma:multi_step_strong_dual}
The strong duality holds in optimization~\eq{eq:multi_step_lagrangian}.
\end{lemma}
\begin{proof}
Specifically, for every $\alpha\in\Pcal(\Scal),\pi\in\Pcal(\Acal)$,
\begin{eqnarray*}
\ell(\alpha, \pi) &=& \min_{V} L_k(V, \alpha, \pi)\leqslant \min_{V} \cbr{L_k(V, \alpha, \pi); \,\,\delta[V]\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\leqslant 0}
\\
&\leqslant& \min_{V}\cbr{\begin{matrix}
(1 - \gamma^{k+1})\EE_{s\sim \mu(s)}\sbr{V(s)},\\
\st\,\delta[V]\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}} \leqslant 0
\end{matrix}} = (1 - \gamma^{k+1})\EE_{s\sim \mu(s)}\sbr{V^*(s)}.
\end{eqnarray*}
On the other hand, since $L_k(V, \alpha^*, \pi^*)$ is convex w.r.t. $V$, we have
$
V^* \in \argmin_{V} L_k(V, \alpha^*, \pi^*),
$
by checking the first-order optimality. Therefore, we have
\begin{eqnarray*}
\max_{\alpha\in\Pcal(\Scal),\pi\in\Pcal(\Acal)}\ell(\alpha, \pi) &=& \max_{\alpha\in\Pcal(\Scal),\pi\in\Pcal(\Acal), V\in \argmin_{V} L_k(V, \alpha, \pi)} L_k(V, \alpha, \pi) \\
&\geqslant& L(V^*, \alpha^*, \pi^*) = (1 - \gamma^{k+1})\EE_{s\sim \mu(s)}\sbr{V^*(s)}.
\end{eqnarray*}
Combining these two conditions, we achieve strong duality even without the convex-concave property:
$$
(1 - \gamma^{k+1})\EE_{s\sim \mu(s)}\sbr{V^*(s)}\leqslant \max_{\alpha\in\Pcal(\Scal),\pi\in\Pcal(\Acal)}\ell(\alpha, \pi)\leqslant (1 - \gamma^{k+1})\EE_{s\sim \mu(s)}\sbr{V^*(s)}.
$$
\end{proof}
\subsection{The Composition in Applying Augmented Lagrangian Method}\label{appendix:augmented_lagrangian}
We consider the one-step Lagrangian duality first. Following the vanilla augmented Lagrangian method, one can achieve the dual function as
\begin{eqnarray*}
\ell(\alpha, \pi) = \min_{V} \rbr{1 - \gamma}\EE_{s\sim \mu(s)} \sbr{V(s)} + \sum_{\rbr{s, a}\in\Scal\times\Acal}P_c\rbr{\Delta[V](s, a), \alpha(s)\pi(a|s)},
\end{eqnarray*}
where
$$
P_c\rbr{\Delta[V](s, a), \alpha(s)\pi(a|s)} = \frac{1}{2c}\cbr{\sbr{\max\rbr{0, \alpha(s)\pi(a|s) + c\Delta[V](s, a)}}^2 - \alpha^2(s)\pi^2(a|s)}.
$$
The computation of $P_c$ is in general intractable due to the composition of the $\max$ and the conditional expectation in $\Delta[V](s, a)$, which makes the optimization in the augmented Lagrangian method difficult.
For the multi-step Lagrangian duality, the objective becomes even more difficult since the constraints are over the distribution families $\Pcal(\Scal)$ and $\Pcal(\Acal)$, rather than over $\Scal\times\Acal$.
\subsection{Path Regularization}\label{appendix:path_reg}
{\bf Theorem~\ref{thm:path_reg}}
\emph{
The local duality holds for $L_r(V, \alpha, \pi)$. Denote $(V^*, \alpha^*, \pi^*)$ as the solution to Bellman optimality equation, with some appropriate $\eta_V$,
$
(V^*, \alpha^*, \pi^*) = \argmax_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\argmin_{V} L_r(V, \alpha, \pi).
$}
\begin{proof}
The local duality can be verified by checking the Hessian of $L_r(\theta_{V^*})$. We apply the local duality theorem~\citep[Chapter 14]{LueYe15}. Suppose $
(\Vtil^*, \tilde\alpha^*, \tilde\pi^*)$ is a local solution to $\min_{V}\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)} L_r(V, \alpha, \pi)$, then, $\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}\min_{V} L_r(V, \alpha, \pi)$ has a local solution $\Vtil^*$ with corresponding $\tilde\alpha^*, \tilde\pi^*$.
Next, we show that with some appropriate $\eta_V$, the path regularization does not change the optimum. Let $U^\pi(s) = \EE^{\pi}\sbr{\sum_{i=0}^\infty\gamma^i R(s_i, a_i)|s}$, and thus, $U^{\pi^*} = V^*$. We first show that for $\forall \pi_b\in \Pcal(\Acal)$, we have
\begin{eqnarray*}
&&\textstyle
\EE\sbr{\rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty\gamma^i R(s_i, a_i)} - V^*(s)}^2}= \EE\sbr{\rbr{U^{\pi_b}(s) - U^{\pi^*}(s) + U^{\pi^*}(s) - V^*(s)}^2}\\
&=& \textstyle\EE\sbr{\rbr{U^{\pi_b}(s) - U^{\pi^*}(s)}^2}\\
&\leqslant& \EE\sbr{\rbr{\int \rbr{\prod_{i=0}^\infty\pi_b(a_i|s_i) - \prod_{i=0}^\infty\pi^*(a_i|s_i)}{\prod_{i=0}^\infty p(s_{i+1}|s_i, a_i)}\rbr{\sum_{i=1}^\infty\gamma^i R(s_i, a_i)}d\cbr{s_i, a_i}_{i=0}^\infty}^2}\\
&\leqslant&\EE\sbr{\nbr{\sum_{i=1}^\infty\gamma^i R(s_i, a_i)}^2_\infty\nbr{\rbr{\prod_{i=0}^\infty\pi_b(a_i|s_i) - \prod_{i=0}^\infty\pi^*(a_i|s_i)}{\prod_{i=0}^\infty p(s_{i+1}|s_i, a_i)}}^2_1}\\
&\leqslant&\textstyle4\nbr{\sum_{i=1}^\infty\gamma^i R(s_i, a_i)}^2_\infty\leqslant \frac{4}{\rbr{1 - \gamma}^2}\nbr{R(s, a)}^2_\infty
\end{eqnarray*}
where the second-to-last inequality comes from the fact that $\pi_b(a_i|s_i)p(s_{i+1}|s_i, a_i)$ is a probability distribution.
We then rewrite the optimization $\min_{V}\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)} L_r(V, \alpha, \pi)$ as
\begin{eqnarray*}
\min_{V}\max_{\alpha\in\Pcal(\Scal), \pi\in\Pcal(\Acal)}&& L_k(V, \alpha, \pi)\\[-3mm]
\st && \textstyle
V\in \Omega_{\epsilon, \pi_b} := \cbr{ V: \EE_{s\sim\mu(s)}\sbr{\rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i)} - V(s)}^2}\leqslant \epsilon},
\end{eqnarray*}
due to the well-known one-to-one correspondence between the regularization parameter $\eta_V$ and $\epsilon$~\cite{Nesterov05}. If we set $\eta_V$ to an appropriate value so that its corresponding $\epsilon(\eta_V) \geqslant \frac{4}{\rbr{1 - \gamma}^2}\nbr{R(s, a)}^2_\infty$, we will have $V^*\in \Omega_{\epsilon(\eta_V)}$, which means that adding such a constraint, or equivalently, adding the path regularization, does not affect the optimality. Combining this with the local duality, we achieve the conclusion.
\end{proof}
In fact, based on the proof, the closer $\pi_b$ is to $\pi^*$, the smaller $\EE_{s\sim\mu(s)}\sbr{\rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i) } - V^*(s)}^2}$ will be. Therefore, we can set $\eta_V$ larger for better local convexity, which results in faster convergence.
\subsection{Stochastic Dual Ascent Update}\label{appendix:dual_update}
{\bf Corollary~\ref{cor:reg_dual_grad}}
\emph{
The regularized dual function $\ell_r(\alpha, \pi)$ has gradients estimators
\begin{eqnarray*}
\nabla_{\theta_\alpha}\ell_r\rbr{\theta_\alpha, \theta_\pi} &=&
\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\nabla_{\theta_\alpha} \log\alpha(s)},
\end{eqnarray*}
\begin{eqnarray*}
\textstyle
\nabla_{\theta_\pi}\ell_r\rbr{\theta_\alpha, \theta_\pi} =
\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\sum_{i=0}^k\nabla_{\theta_\pi}\log\pi(a|s)}.
\end{eqnarray*}
}
\begin{proof}
We mainly focus on deriving $\nabla_{\theta_\pi}\ell_r\rbr{\theta_\alpha, \theta_\pi}$. The derivation of $\nabla_{\theta_\alpha} \ell_r\rbr{\theta_\alpha, \theta_\pi}$ is similar.
By chain rule, we have
\begin{eqnarray*}
\nabla_{\theta_\pi}\ell_r\rbr{\theta_\alpha, \theta_\pi} &=& \underbrace{\textstyle \rbr{\nabla_{V} L_k(V(\alpha, \theta), \alpha, \theta) - 2\eta_V \rbr{\EE^{\pi_b}\sbr{\sum_{i=0}^\infty \gamma^{i} R(s_i, a_i) } - V^*(s)}}}_{0}\nabla_{\theta_\pi} V(\alpha, \theta)\\
&&+\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\sum_{i=0}^k\nabla_{\theta_\pi}\log\pi(a|s)}\\
&=&\EE_{\alpha}^\pi\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}\sum_{i=0}^k\nabla_{\theta_\pi}\log\pi(a|s)}.
\end{eqnarray*}
The first term on the RHS equals zero due to the first-order optimality condition for $V(\alpha, \pi) = \argmin_V L_r(V, \alpha, \pi)$.
\end{proof}
\subsection{Practical Algorithm}\label{appendix:practical_alg}
{\bf Theorem~\ref{thm:closed_alpha}}
\emph{
In the $t$-th iteration, given $V^t$ and $\pi^{t-1}$,
\begin{eqnarray*}
&&\argmax_{\alpha\geqslant 0}\EE_{\mu(s)\pi^{t-1}(s)}\sbr{\rbr{\tilde\alpha(s)+ \eta_\mu}\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}} - \eta_\alpha\nbr{\tilde\alpha}^2_{\mu}\\
&=&\frac{1}{\eta_\alpha}\max\rbr{0,\EE^{\pi^{t-1}}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}}.
\end{eqnarray*}
}
\begin{proof}
Recall the optimization w.r.t. $\tilde\alpha$ is
$
\max_{\tilde\alpha \geqslant 0} \EE_{\mu}\sbr{{\tilde\alpha(s) }{\EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}} - \eta_\alpha \tilde\alpha^2(s)},
$
denote $\tau(s)$ as the dual variables of the optimization, we have the KKT condition as
\[
\begin{cases}
\eta_\alpha\tilde\alpha &= \tau + \EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}},\\
\tau(s)\tilde\alpha(s)&= 0,\\
\tilde\alpha&\geqslant 0, \\
\tau&\geqslant 0,
\end{cases}
\]
\[
\Rightarrow
\begin{cases}
\tilde\alpha &= \frac{\tau + \EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}} }{\eta_\alpha},\\
\tau(s)\rbr{\tau(s) + \EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}}&= 0,\\
\tilde\alpha&\geqslant 0, \\
\tau&\geqslant 0,
\end{cases}\]
\[\Rightarrow
\tau(s) =
\begin{cases}
-\EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}&\quad \EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}< 0\\
0&\quad \EE^{\pi}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}\geqslant 0
\end{cases}.
\]Therefore, in the $t$-th iteration, $\tilde\alpha^{t}(s)= \frac{1}{\eta_\alpha}\max\rbr{0,\EE^{\pi^{t-1}}\sbr{\delta\rbr{\cbr{s_i, a_i}_{i=0}^k, s_{k+1}}}}.
$
\end{proof}
\section{Experiment Details}\label{appendix:exp_details}
\noindent{\bf Policy and value function parametrization.} For fairness, we use the same parametrization across all the algorithms. The parametrization of the policy and value functions is largely based on the recent paper by~\citet{RajLowTodKak17}, which shows that the natural policy gradient with an RBF neural network achieves the state-of-the-art performance of TRPO on MuJoCo. For the policy distribution, we parametrize it as $\pi_{\theta_\pi}(a|s) = \Ncal(\mu_{\theta_\pi}(s), \Sigma_{\theta_\pi})$, where $\mu_{\theta_\pi}(s)$ is a two-layer neural network with the random features of an RBF kernel as the hidden layer and $\Sigma_{\theta_\pi}$ is a diagonal matrix. The RBF kernel bandwidth is chosen via the median trick~\citep{DaiXieHe14,RajLowTodKak17}. The same as~\citet{RajLowTodKak17}, we use $100$ hidden nodes in Pendulum, InvertedDoublePendulum, Swimmer, and Hopper, and use $500$ hidden nodes in HalfCheetah. Since TRPO and PPO use GAE~\citep{SchMorLevJoretal15} with a linear baseline as $V$, we also use this parametrization for $V$ in our algorithm. However, the Dual-AC~can adopt an arbitrary function approximator without any change.
\noindent{\bf Training details.} We report the hyperparameters for each algorithm here. We use $\gamma = 0.995$ for all the algorithms. We keep a constant stepsize, tuned for TRPO, PPO, and Dual-AC~over $\cbr{0.001, 0.01, 0.1}$. The batch size is set to $52$ trajectories for the comparison to the competitors in Section~\ref{subsection:exp_comparison}. For the ablation study, we set the batch size to $24$ trajectories to accelerate the experiments. The CG damping parameter for TRPO is set to $10^{-4}$. We iterate $20$ steps for the Fisher information matrix computation. The $\eta_V$, $\eta_\mu$, and $\frac{1}{\eta_\alpha}$ in Dual-AC~are selected from $\cbr{0.001, 0.01, 0.1, 1}$.
\end{appendix}
\end{document} |
\begin{document}
\title{Diameter two properties and the Radon-Nikod\'ym property in Orlicz spaces}
\keywords{Banach function space, Orlicz space, Daugavet property, (local, strong) diameter two property, Radon-Nikod\'ym property, octahedral norm, uniformly non-$\ell_1^2$ points}
\subjclass[2010]{46B20, 46E30, 47B38}
\author{Anna Kami\'{n}ska}
\address{Department of Mathematical Sciences,
The University of Memphis, TN 38152-3240}
\email{[email protected]}
\author{Han Ju Lee}
\address{Department of Mathematics Education, Dongguk University - Seoul, 04620 (Seoul), Republic of Korea}
\email{[email protected]}
\author{Hyung Joon Tag}
\address{Department of Mathematical Sciences,
The University of Memphis, TN 38152-3240}
\email{[email protected]}
\date{\today}
\thanks{
The second author was supported by Basic Science Research Program through the National Research Foundation of Korea(NRF) funded by the Ministry of Education, Science and Technology [NRF-2020R1A2C1A01010377].}
\begin{abstract}
Some necessary and sufficient conditions are found for Banach function lattices to have the Radon-Nikod\'ym property. Consequently it is shown that an Orlicz function space $L_\varphi$ over a non-atomic $\sigma$-finite measure space $(\Omega, \Sigma,\mu)$, not necessarily separable, has the Radon-Nikod\'ym property if and only if $\varphi$ is an $N$-function at infinity and satisfies the appropriate $\Delta_2$ condition. For an Orlicz sequence space $\ell_\varphi$, it has the Radon-Nikod\'ym property if and only if $\varphi$ satisfies the $\Delta_2^0$ condition. In the second part a relationship between uniformly $\ell_1^2$ points of the unit sphere of a Banach space and the diameter of the slices are studied. Using these results, a quick proof is given that an Orlicz space $L_\varphi$ has the Daugavet property only if $\varphi$ is linear, so when $L_\varphi$ is isometric to $L_1$. Another consequence is that Orlicz spaces equipped with the Orlicz norm generated by $N$-functions never have the local diameter two property, while it is well-known that when equipped with the Luxemburg norm, it may have that property. Finally, it is shown that the local diameter two property, the diameter two property, and the strong diameter two property are equivalent in Orlicz function and sequence spaces with the Luxemburg norm under appropriate conditions on $\varphi$.
\end{abstract}
\maketitle
\begin{center}{Dedicated to the memory of Professor W.A.J. Luxemburg}\end{center}
\section{Introduction}
The objective of this paper is to study geometrical properties in real Banach spaces, in particular in Banach function spaces and Orlicz spaces.
A Banach space $(X, \|\cdot\|)$ is said to have the {\it Daugavet property} if every rank one operator $T: X\to X$ satisfies the equation
\[
\|I + T\| = 1 + \|T\|.
\]
It is well-known that $C[0,1]$ has the Daugavet property. Also, a rearrangement invariant space $X$ over a finite non-atomic measure space with the Fatou property satisfies the Daugavet property if the space is isometrically isomorphic to either $L_1$ or $L_{\infty}$ \cite{AKM, AKM2}. If a rearrangement invariant space $X$ over an infinite non-atomic measure space is uniformly monotone, then it is isometrically isomorphic to $L_1$ \cite{AKM2}. Furthermore, the only separable rearrangement invariant space over $[0,1]$ with the Daugavet property is $L_1[0,1]$ with the standard $L_1$-norm \cite{KMMW}. In \cite{KK}, a characterization of Musielak-Orlicz spaces with the Daugavet property has been provided. We refer to \cite{KSSW, W} for further information on the Daugavet property.
Let $S_X$ and $B_X$ be the unit sphere and the unit ball of a Banach space $X$ and let $X^*$ be the dual space of $X$. A slice of $B_X$ determined by $x^*\in S_{X^*}$ and $\epsilon>0$ is defined by the set
\[
S(x^*; \epsilon) = \{x\in B_X : x^*(x) > 1 - \epsilon\}.
\]
Analogously, for $x\in S_X$ and $\epsilon >0$, a weak$^*$-slice $S(x; \epsilon)$ of $B_{X^*}$ is defined by the set
\[
S(x; \epsilon) = \{x^*\in B_{X^*}: x^*(x) > 1 - \epsilon\}.
\]
There are several geometrical properties related to slices and weak$^*$-slices. We say that $X$ has
\begin{enumerate}[{\rm(i)}]
\item the {\it local diameter two property} (LD2P) if every slice of $B_X$ has the diameter two.
\item the {\it diameter two property} (D2P) if every non-empty relatively weakly open subset of $B_X$ has the diameter two.
\item the {\it strong diameter two property} (SD2P) if every finite convex combination of slices of $B_X$ has the diameter two.
\item the {\it weak$^*$-local diameter two property} (weak$^{*}$-LD2P) if every weak$^*$-slice of $B_{X^*}$ has the diameter two.
\item the {\it Radon-Nikod\'ym property} (RNP) if there exist slices of $B_X$ with arbitrarily small diameter.
\end{enumerate}
A few remarks are in order now. Condition $\rm(v)$ is a geometrical interpretation of the classical Radon-Nikod\'ym property \cite[Theorem 3, p. 202]{DU}. By the definitions, we see that properties (i), (ii) and (iii) are on the opposite spectrum of $\rm(v)$. It is clear that $\rm(ii) \implies \rm(i)$, and the implication $\rm(iii) \implies \rm(ii)$ results from
\cite[Lemma II.1 p. 26]{GGMS}.
It is also well-known that these three properties are not equivalent in general \cite{ALN, BLR}; see also introductory remarks in \cite{HLP}. A Banach space $X$ with the Daugavet property satisfies the SD2P \cite[Theorem 4.4]{ALN}.
After Preliminaries, in Section 3, we show first that if a Banach function space $X$ over a $\sigma$-finite measure space has the RNP then it must be order continuous. The opposite implication is not true in general. However we prove it under additional assumptions when $X$ satisfies the Fatou property, and when the subspaces of order continuous elements and the closure of simple functions coincide in its K\"othe dual space $X'$. We will provide some examples to see that this assumption is necessary in order to show the converse. Applying the obtained results further, we conclude the section with the necessary and sufficient condition for the RNP in Orlicz spaces. There is a well-known criterion for the RNP in Orlicz spaces $L_\varphi$ over a separable complete non-atomic measure space $(\Omega, \Sigma, \mu)$, generated by an $N$-function $\varphi$. Here we drop the assumption of separability of a measure space and show that necessary and sufficient conditions for the RNP is that $\varphi$ satisfies appropriate $\Delta_2$ condition and that $\varphi$ is an $N$-function at infinity. In sequence spaces $\ell_\varphi$ we drop the assumption that $\varphi$ is an $N$-function.
In section 4 the Daugavet and various diameter two properties are studied.
In the first main theorem we give a local characterization of uniformly $\ell_1^2$ points $x\in S_X$, where $X$ is a Banach space, and the diameter of the weak$^*$-slice $S(x;\epsilon)$ generated by $x$. Analogously we describe a relationship between $x^*\in S_{X^*}$ and the diameter of the slice $S(x^*,\epsilon)$ given by $x^*$. Consequently, we obtain a description of global properties of $X$ or $X^*$ being locally octahedral and $X^*$ or $X$ respectively, having (weak$^*$) local diameter two property. We also obtain relationships among the Daugavet property of $X$ and the D2P of $X$ and weak$^*$-LD2P of $X^*$. In Theorem \ref{th:KamKub} we provide sufficient conditions for the existence of uniformly non-$\ell_1^2$ points in $L_\varphi$ and $\ell_\varphi$ both equipped with the Luxemburg norms. Combining this with the previous general facts we recover instantly that the only Orlicz space $L_\varphi$ generated by a finite function $\varphi$ with the Daugavet property must coincide with $L_1$ as sets with equivalent norm. The other consequences are that large class of the Orlicz spaces $L_\varphi^0$ and $\ell_\varphi^0$ equipped with the Orlicz norm, does not have the LD2P, in the striking opposition to the same Orlicz spaces equipped with the Luxemburg norm.
In the final result we show that the LD2P, D2P, SD2P and the appropriate condition $\Delta_2$ are equivalent in $L_\varphi$ and $\ell_\varphi$.
\section{Preliminaries}
Let $(\Omega, \Sigma, \mu)$ be a measure space with a $\sigma$-finite complete measure $\mu$ and let $L^0(\Omega)$ be the set of all equivalence classes of $\mu$-measurable functions $f:\Omega\to \mathbb{R}$ modulo a.e. equivalence. We denote by $L^0=L^0(\Omega)$ if $\Omega$ is a non-atomic measure space and by $ \ell^0=L^0(\mathbb{N})$ if $\Omega = \mathbb{N}$ with the counting measure $\mu$. That is, $\ell^0$ consists of all real-valued sequences $x = \{x(n)\}$. A Banach space $(X, \|\cdot\|) \subset L^0(\Omega)$ is called a {\it Banach function lattice} if for $f\in L^0(\Omega)$ and $g \in X$, $0 \leq f \leq g$ implies $f \in X$ and $\|f\| \leq \|g\|$. We call $X$ a {\it Banach function space} if $\Omega$ is a non-atomic measure space and a {\it Banach sequence space} if $\Omega = \mathbb{N}$ with the counting measure $\mu$. A Banach function lattice $(X, \| \cdot \|)$ is said to have the {\it Fatou property} if whenever a sequence $(f_n) \subset X$ satisfies $\sup_n \|f_n\| < \infty$ and $f_n \uparrow f\in L^0(\Omega)$ a.e., we have $f \in X$ and $\|f_n\| \uparrow \|f\|$. An element $f\in X$ is said to be {\it order continuous} if for every $(f_n) \subset L^0(\Omega)$ such that $0\le f_n \le f$, $f_n \downarrow 0$ a.e. implies $\|f_n\| \downarrow 0$. The set of all order continuous elements in $X$ is denoted by $X_a$, and the closure in $X$ of all simple functions belonging to $X$ is denoted by $X_b$. If $X=X_a$ then we say that $X$ is order continuous. In this paper, a simple function is a finitely many valued function whose support is of finite measure. It is well-known that $X_a \subset X_b$ \cite[Theorem 3.11, p. 18]{BS}\cite{ Lux}.
The {\it K\"{o}the dual space}, denoted by $X^{\prime}$, of a Banach function lattice $X$ is a set of $x \in L^0(\Omega)$, such that
\[
\|x\|_{X^{\prime}}= \sup\left\{\int_\Omega xy : \|y\| \leq 1\right\} < \infty.
\]
The space $X^{\prime}$, equipped with the norm $\| \cdot \|_{X^{\prime}}$, is a Banach function lattice satisfying the Fatou property. It is well known that $X = X''$ if and only if $X$ satisfies the Fatou property \cite[Theorem 1, Ch. 15, p. 71]{Z}.
We say $f,g \in X$ are {\it equimeasurable}, denoted by $f \sim g$, if $\mu\{t: |f(t)| > \lambda\} = \mu\{t: |g(t)| > \lambda\}$ for every $\lambda > 0$. A Banach function lattice $(X, \|\cdot\|)$ is said to be {\it rearrangement invariant} (r.i.) if $f \sim g$ ($f,g \in X$) implies $\|f\| = \|g\|$. The Lebesgue, Orlicz and Lorentz spaces are classical examples of r.i. spaces. The fundamental function of a r.i. space $X$ over a non-atomic measure space is defined by $\phi_X(t) = \|\chi_{E}\|_X$, $t\ge 0$, where $E\in \Sigma$ is such that $\mu(E) = t$. It is known that $\lim_{t \rightarrow 0^+} \phi_X(t) = 0$ if and only if $X_a = X_b$ \cite[Theorem 5.5, p. 67]{BS}. For the purely atomic case where each atom has the same measure, $X_a = X_b$ is always true \cite[Theorem 5.4, p. 67]{BS}.
Recall that a measure space $(\Omega, \Sigma, \mu)$ is said to be {\it separable} if there is a countable family $\mathcal{T}$ of measurable subsets such that for given $\epsilon>0$ and for each $E\in \Sigma$ of finite measure there is $A\in \mathcal{T}$ such that $\mu(A\Delta E)<\epsilon$, where $A\Delta E$ is the symmetric difference of $A$ and $E$. It is easy to check that if $\Sigma$ is a $\sigma$-algebra generated by countable subsets then $(\Omega, \Sigma, \mu)$ is separable \cite{Hal}.
A function $\varphi:\mathbb{R}_+\to [0,\infty]$ is called an {\it Orlicz function} if $\varphi$ is convex, $\varphi(0)=0$, and $\varphi$ is left-continuous, not identically zero nor infinite on $(0,\infty)$. The complementary function $\varphi_{\ast}$ to $\varphi$ is defined by
\[
\varphi_{\ast}(u) = \sup_{v \geq 0}\{uv- \varphi(v) \}, \ \ \ u\ge 0.
\]
The complementary function $\varphi_*$ is also an Orlicz function and $\varphi_{**} = \varphi$.
An Orlicz function $\varphi$ is an {\it $N$-function at zero} if $\lim_{u \rightarrow 0^+} \frac{\varphi(u)}{u} = 0$ and {\it at infinity} if $\lim_{u \rightarrow \infty} \frac{\varphi(u)}{u} = \infty$. If $\varphi$ is an $N$-function at both zero and infinity then we say that $\varphi$ is an {\it $N$-function}. A function $\varphi$ is an $N$-function if and only if $\varphi_*$ is an $N$-function.
An Orlicz function $\varphi$ satisfies the {\it $\Delta_2$ condition} if there exists $K>2$ such that $\varphi(2u) \leq K \varphi(u)$ for all $u\geq 0$, the $\Delta_2^\infty$ condition if there exist $K>2$ and $u_0\ge 0$ such that $\varphi(u_0) < \infty$ and for all $u\geq u_0$, $\varphi(2u) \leq K \varphi(u)$, and the $\Delta_2^0$ condition if there exist $K>2$ and $u_0$ such that $\infty > \varphi(u_0) > 0$ and for all $0\le u \leq u_0$, $\varphi(2u) \leq K \varphi(u)$. When we use the term {\it the appropriate $\Delta_2$ condition}, it means $\Delta_2$ in the case of a non-atomic measure $\mu$ with $\mu(\Omega) = \infty$, $\Delta_2^\infty$ for a non-atomic measure $\mu$ with $\mu(\Omega) < \infty$, and $\Delta_2^0$ for $\Omega = \mathbb{N}$ with the counting measure i.e. $\mu\{n\} =1$ for every $n\in \mathbb{N}$.
The Orlicz space $L_\varphi(\Omega)$ is a collection of all $f\in L^0(\Omega)$ such that for some $\lambda > 0$,
\[
I_\varphi(\lambda f):= \int_\Omega\varphi(\lambda |f(t)|)\,d\mu(t) = \int_\Omega\varphi(\lambda |f|)\,d\mu < \infty.
\]
The Orlicz spaces are equipped with either the Luxemburg norm
\[
\|f\|_\varphi= \inf\left\{\epsilon > 0: I_\varphi\left(\frac{f}{\epsilon}\right) \le 1\right\},
\]
or the Orlicz (or Amemiya) norm
\[
\|f\|_\varphi^0= \sup\left\{\int_\Omega fg : I_{\varphi_*} (g)\le 1\right\} = \inf_{k>0} \frac{1}{k}(1 + I_\varphi(kf)).
\]
It is well-known that $\|f\|_\varphi \le \|f\|_\varphi^0 \le 2\|f\|_\varphi$ for $f\in L_\varphi(\Omega)$.
By $L_\varphi(\Omega)$ we denote an Orlicz space equipped with the Luxemburg norm and by $L_\varphi^0(\Omega)$ with the Orlicz norm.
The Orlicz spaces with either norms are rearrangement invariant spaces and have the Fatou property.
If $\varphi$ is finite, i.e. $\varphi(u)< \infty$ for all $u>0$, then $(L_\varphi(\Omega))_a \ne \{0\}$ and it contains all simple functions. Therefore
\[
(L_\varphi(\Omega))_a = (L_\varphi(\Omega))_b = \{x\in L^0: I_\varphi(\lambda x) < \infty \ \ \text{for all} \ \ \lambda > 0\}.
\]
It is also well-known that $L_\varphi(\Omega) = (L_\varphi(\Omega))_a$ if and only if $\varphi$ satisfies the appropriate $\Delta_2$ condition. The K\"othe duals of $L_\varphi(\Omega)$ and $L_\varphi^0(\Omega)$ are described by Orlicz spaces induced by $\varphi_*$ \cite{Chen, BS}. In fact,
\[
(L_\varphi(\Omega))' = L_{\varphi_*}^0(\Omega)\ \ \text{ and} \ \ \
(L_\varphi^0(\Omega))' = L_{\varphi_*}(\Omega).
\]
In the case of non-atomic measure (resp., counting measure), we use the symbols $L_\varphi$ and $L_\varphi^0$, (resp., $\ell_\varphi$ and $\ell_{\varphi}^0$) for Orlicz spaces equipped with the Luxemburg and the Orlicz norm, respectively. For complete information on Orlicz spaces we refer the reader to the monographs \cite{BS, Chen, KR, LT1, LT2, Lux}.
\section{The Radon-Nikod\'ym property}
We start with a general result on the Radon-Nikod\'ym property in Banach function spaces.
\begin{Theorem}\label{th:RNKothe}
Let $X$ be a Banach function space over a complete $\sigma$-finite measure space $(\Omega, \Sigma, \mu)$.
\begin{itemize}
\item[(i)]
If $X$ has the RNP then $X$ is order continuous.
\item[(ii)]
Assume that $X$ has the Fatou property and $(X')_a = (X')_b$. Then if $X$ is order continuous then $X$ has the RNP.
\end{itemize}
\end{Theorem}
\begin{proof}
(i) If $X$ is not order continuous then it contains an order isomorphic copy of $\ell_\infty$ \cite[Theorem 14.4, p.220]{AB}. Since $\ell_\infty$ does not have the RNP, $X$ does not have this property either.
(ii) Suppose that $(X')_a = (X')_b$ and that $X$ is order continuous with the Fatou property. It is well-known that every separable dual space possesses the RNP \cite{DU}.
Since $((X')_a)' = ((X')_b)'$, $((X')_a)'=(X')' = X''$ and $((X')_a)^*\simeq ((X')_a)'$ by Corollary 1.4.2 in \cite{BS}. It follows by the Fatou property that $X'' = X$.
Therefore
\[
((X')_a)' \simeq ((X')_a)^* \simeq X ''=X.
\]
Hence $X$ is the dual space of $(X')_a$. If the measure space $(\Omega, \Sigma,\mu)$ is separable, then the order continuous space $X$ is also separable by Theorem 2.5.5 in \cite{BS}. Thus in this case, $X$ has the RNP.
Now, suppose that $(\Omega, \Sigma,\mu)$ is not separable and we show that $X$ still has the RNP. We will use the fact that a Banach space $X$ satisfies the RNP if and only if every separable closed subspace $Y\subset X$ has the RNP \cite[Theorem 2, p. 81]{DU}.
Since $X$ is order continuous $X=X_a=X_b$. Let $Y\subset X$ be a closed separable subspace of $X$. Then there exists a dense and countable set $\mathcal{Y} \subset Y$. For every $y\in \mathcal{Y} \subset X=X_b$, there exists a sequence of simple functions $(y_n) \subset X$ with supports of finite measure and such that $\|y-y_n\|_X \to 0$. Each $y_n$ can be expressed as $y_n = \sum_{i=1}^{m_n} a_i^{(n)} \chi_{A_i^{(n)}}$, where $a_i^{(n)}\in \mathbb{R}$, $A_i^{(n)} \in \Sigma$ with $\mu(A_i^{(n)}) < \infty$, so $y \in \overline{span} \{\chi_{A_i^{(n)}}, i=1,\dots,m_n, \ n\in \mathbb{N}\}$. Letting $\mathcal{A}_y = \{A_i^{(n)}: i=1,\dots,m_n, n\in\mathbb{N}\}$ and $\mathcal{A} = \cup_{ y\in \mathcal{Y}}\mathcal{A}_y$, the family $\mathcal{A}$ is countable.
For our convenience, let $\mathcal{A} = \{E_i: i\in \mathbb{N}\}$. For each $i\in \mathbb{N}$ we have $\mu(E_i) < \infty$. Then we have
\[
Y = \overline{\mathcal{Y}}\subset \overline{span} \{\chi_{E_i}, \ E_i \in \mathcal{A}\} \subset X.
\]
Let $\widetilde\Omega = \cup_{i=1}^\infty E_i$, $\sigma(\mathcal{A})$ be the smallest $\sigma$-algebra of $\Omega$ containing $\mathcal{A}$, $\widetilde{\Sigma} = \{\widetilde{\Omega} \cap E: E \in \sigma(\mathcal{A})\}$ and $\widetilde\mu = \mu|_{\widetilde\Sigma}$ the measure $\mu$ restricted to $\widetilde{\Sigma}$. In fact, it is easy to show that $\widetilde{\Sigma} = \sigma(\mathcal{A})$. Hence $\widetilde{\Sigma}$ is generated by a countable set, namely $\mathcal{A}$, so the measure space $(\widetilde\Omega, \widetilde\Sigma,\widetilde\mu)$ is separable \cite[Theorem B, p. 168]{Hal}. Now define the set
\[
\widetilde X = \{x\chi_{\widetilde\Omega}: x\in X, \ x \ \text{is} \ \widetilde{\Sigma} - \text{measurable}\}.
\]
It is straightforward to check that $\widetilde X$ is a closed subspace of $X$ such that it is an order continuous Banach function space on $(\widetilde\Omega,\widetilde\Sigma, \widetilde\mu)$ with the Fatou property. So $\widetilde X$ is separable. The K\"othe dual of $\widetilde{X}$ is
\[
\widetilde X' := (\widetilde X)' = \{y\chi_{\widetilde\Omega}: y\in X', \ y \ \text{is} \ \widetilde{\Sigma} - \text{measurable}\}.
\]
Clearly $\widetilde X' \subset L^0(\widetilde\Omega, \widetilde\Sigma, \widetilde\mu)$. From the assumption we have $(\widetilde X')_a = (\widetilde X')_b$. Hence
$\widetilde X = \widetilde{X}'' \simeq ((\widetilde{X}')_a)^*$ by Corollary 1.4.2 in \cite{BS} again. Therefore, $\widetilde X$ is a separable dual space such that $Y \subset \overline{span} \{\chi_{E_i}, \ E_i\in \mathcal{A}\}\subset \widetilde X$, which implies that $\widetilde X$ and hence $Y$ has the RNP. Since the choice of $Y$ was arbitrary, $X$ has the RNP.
\end{proof}
\begin{Remark}
$(1)$ The Fatou property in (ii) is a necessary assumption. For example, take $X=c_0$. This space does not have the RNP \cite[p. 61]{DU} and clearly does not satisfy the Fatou property. However, since $(c_0)'= \ell_1$ and $\ell_1$ is order continuous we have $((c_0)')_a = (\ell_1)_a = (\ell_1)_b = ((c_0)')_b$, which is the second assumption of the theorem.
$(2)$ The assumption $(X')_a = (X')_b$ in (ii) is also necessary. Consider $X=L_1[0,1]$ that is clearly order continuous. Moreover
$(X')_a = (L_\infty[0,1])_a = \{0\}$ and $(X')_b = (L_\infty[0,1])_b = L_\infty[0,1]$. Hence $(X')_a \ne (X')_b$ and it is well-known that $L_1[0,1]$ does not have the RNP \cite[p. 60]{DU}.
\end{Remark}
\begin{Proposition}\label{pro1}
Let $\mu$ be a non-atomic measure and $\varphi$ be a finite Orlicz function. If $\varphi$ is not an $N$-function at infinity, then $L_\varphi$ contains a subspace isomorphic to $L_1[0,1]$.
\end{Proposition}
\begin{proof}
Suppose $\varphi$ is not an $N$-function at infinity. We will show that given $A\in\Sigma$ with $\mu(A) < \infty$, the space $L_\varphi(A) = \{x\chi_A: x\in L_\varphi\}$ is equal to $ L_1(A)$ with equivalent norms. By the assumption $\lim_{u\to\infty} \varphi(u)/u =K< \infty$, and by the fact that
the function $\varphi(u)/u$ is increasing, there exist $M> 0$ and $u_0 >0$ such that
\begin{equation}\label{eq:31}
\varphi(u) \le Ku \ \ \text{for} \ \ u\ge 0, \ \ \ \text{and} \ \ \ \varphi(u)\ge M u \ \ \ \text{for} \ \ u \geq u_0.
\end{equation}
Let $f\in L_\varphi(A)$ with $\|f\|_\varphi = 1$. Then $\supp f\subset A$ and $I_\varphi(f) \le 1$. Set $A_1 = \{t\in A: |f(t)| \le u_0\}$. Thus in view of the second part of inequality (\ref{eq:31}) we get
\[
\|f\|_1 = \int_{A_1} |f| \, d\mu + \int_{A\setminus A_1} |f|\, d\mu \le u_0 \mu(A) + \frac1M I_\varphi(f) \le C,
\]
where $C = u_0 \mu(A) + \frac1M.$
Therefore for any $f$ from $L_\varphi(A)$, $\|f\|_1 \le C \|f\|_\varphi$.
On the other hand, by the first part of inequality (\ref{eq:31}), for any $E\subset A$ we have
\[
\int_A\varphi\left(\frac{\chi_E}{K\mu(E)}\right)\, d\mu = \int_E\varphi\left(\frac{1}{K\mu(E)}\right) \, d\mu \le 1.
\]
It follows that $\|\chi_E\|_\varphi \le K\mu(E) =K \|\chi_E\|_1$. Hence for any simple function $x = \sum_{i=1}^n a_i \chi_{E_i}\in L_\varphi(A)$ with $E_i \cap E_j = \emptyset$ for $i\ne j$,
\[
\|x\|_\varphi \le \sum_{i=1}^n |a_i| \|\chi_{E_i}\|_\varphi \le K \sum_{i=1}^n |a_i| \mu(E_i) = K \|x\|_1.
\]
Now by the Fatou property of $L_1(A)$ and $L_\varphi(A)$, $\|x\|_\varphi \le K \|x\|_1$ for every $x \in L_1(A)$.
Hence $L_\varphi(A) = L_1(A)$ with equivalent norms, and the proof is completed since $L_1(A)$ contains a subspace isomorphic to $L_1[0,1]$ (see \cite[p. 127, Theorem 9 (1)]{Lac}).
\end{proof}
Recall that an Orlicz function is said to be finite if its range does not contain the infinity.
\begin{Lemma}\label{lem:finite}
\rm{(a)} A finite Orlicz function $\varphi$ is an $N$-function at infinity if and only if $\varphi_*$ is finite.
\rm{(b)} Let $\mu$ be a non-atomic measure. If $\varphi$ is a finite Orlicz function then $\|\chi_A\|_\varphi = 1/\varphi^{-1}(1/t)$, where $t>0$, $\mu(A) = t$. Consequently
\[
\lim_{t\to 0+} \phi_{L_\varphi}(t) = \lim_{t\to 0+} 1/\varphi^{-1}(1/t) = 0.
\]
\end{Lemma}
\begin{proof}
(a) Suppose $\varphi$ is not an $N$-function at infinity. Then there exists $K>0$ such that for every $u >0$, $\varphi(u) \leq Ku$. Hence
\[
\varphi_*(v) = \sup_{u >0}\{uv - \varphi(u)\} \geq \sup_{u >0}\{(v-K)u\}.
\]
Therefore if $v > K$ then $\varphi_*(v) \ge \sup_{u>0}\{(v-K)u\} = \infty$, so $\varphi_*(v) = \infty$.
Conversely, suppose there exists $K > 0$ such that for every $v > K$, $\varphi_*(v) = \infty$. Then
\[
\varphi(u) = \sup\{uv - \varphi_*(v) : v \in (0,K)\}.
\]
By $\frac{\varphi(u)}{u} = \sup\{v - \frac{\varphi_*(v)}{u}: v \in (0, K)\}$, we have $\lim_{u \rightarrow \infty} \frac{\varphi(u)}{u} \leq K < \infty$, which shows that $\varphi$ is not an $N$-function at infinity.
(b) Let $a_\varphi=\sup\{t: \varphi(t)=0\}$, and let $A\in \Sigma$, $\mu(A) = t$, $t > 0$. Then $I_\varphi(\chi_A/\epsilon) = 0$ if $\epsilon \ge 1/a_\varphi$, and $I_\varphi(\chi_A/\epsilon) = \varphi(1/\epsilon) t$ if $ \epsilon < 1/a_\varphi$. By the latter condition if $I_\varphi(\chi_A/\epsilon) = 1$, we get that $\|\chi_A\|_\varphi = \epsilon = 1/\varphi^{-1}(1/t)$. Clearly for $t\to 0+$ we get that $1/\varphi^{-1}(1/t) \to 0$.
\end{proof}
The next result provides a criterion of the Radon-Nikod\'ym property of Orlicz spaces over non-atomic measure spaces. We do not need the assumption of separability of the measure space \cite[Theorem 3.32]{Chen}.
\begin{Theorem}\label{th:OrRN-funct}
Let $\mu$ be a complete $\sigma$-finite, non-atomic measure on $\Sigma$ and $\varphi$ be a finite Orlicz function. Then the Orlicz spaces $L_\varphi$ {\rm (}and $L_\varphi^0${\rm )} over
$(\Omega, \Sigma, \mu)$ have the Radon-Nikod\'ym property if and only if $\varphi$ is an $N$-function at infinity and satisfies the appropriate $\Delta_2$ condition.
\end{Theorem}
\begin{proof}
Since the Luxemburg and Orlicz norms are equivalent we consider only $L_\varphi$ equipped with the Luxemburg norm. By the assumption that $\varphi$ is an $N$-function at infinity and Lemma \ref{lem:finite}(a) we get that $\varphi_*$ is finite on $(0,\infty)$.
Applying now Lemma \ref{lem:finite}(b) to the function $\varphi_*$ we get that $\phi_{L_{\varphi_*}}(t) \to 0$ if $t\to 0+$. Hence $\lim_{t\to 0+} \phi_{L^0_{\varphi_*}}(t) = 0$. Applying now Theorem 2.5.5 in \cite{BS} we
get $(L_{\varphi_*}^0)_a = (L_{\varphi_*}^0)_b$ and in view of $(L_\varphi)' = L_{\varphi_*}^0$ \cite[Corollary 8.15, p. 275]{BS} \cite{KR}, we have $((L_\varphi)')_a = ((L_\varphi)')_b$.
It is well-known that $L_\varphi$ has the Fatou property and that $L_{\varphi}$ is order continuous if and only if $\varphi$ satisfies the appropriate $\Delta_2$ condition. Therefore, by Theorem \ref{th:RNKothe}(ii) the Orlicz space $L_\varphi$ has the RNP.
For the converse, assume that $L_\varphi$ has the RNP. Since $L_1[0,1]$ does not have the RNP, $\varphi$ needs to be an $N$-function at infinity by Proposition~\ref{pro1}. If $\varphi$ does not satisfy the appropriate $\Delta_2$ condition, then $L_\varphi$ is not order continuous, and by Theorem \ref{th:RNKothe}(i) it does not have the RNP.
\end{proof}
By Theorem 2.5.4 in \cite{BS}, $X_a = X_b$ holds for every rearrangement invariant sequence space $X$. Consequently we obtain a characterization of the RNP in Orlicz sequence spaces $\ell_\varphi$ as a consequence of Theorem \ref{th:RNKothe}. This result is well-known for $\varphi$ being an $N$-function \cite[Theorem 3.32]{Chen}.
\begin{Theorem} \label{th:RNP-ORseq}
Let $\varphi$ be a finite Orlicz function. An Orlicz sequence space $\ell_{\varphi}$ has the Radon-Nikod\'ym property if and only if $\varphi$ satisfies the $\Delta_2^0$ condition.
\end{Theorem}
\begin{proof}
Since any Orlicz sequence space is an r.i. space with the Fatou property, we always have $((\ell_{\varphi})')_a = (\ell_{\varphi_*}^0)_a = (\ell_{\varphi_*}^0)_b = ((\ell_{\varphi})')_b$. Moreover it is well-known that $\ell_{\varphi}$ is order continuous if and only if $\varphi$ satisfies the $\Delta_2^0$ condition \cite[Proposition 4.a.4]{LT1}. Hence, $\ell_{\varphi}$ has the RNP by Theorem \ref{th:RNKothe}.
Conversely, suppose that $\ell_{\varphi}$ has the RNP. Then $\ell_{\varphi}$ is order continuous by Theorem \ref{th:RNKothe}. This implies that $\varphi$ satisfies the $\Delta_2^0$ condition.
\end{proof}
\section{Locally octahedral norm, uniformly non-$\ell_1^2$ points, diameter two properties and the Daugavet property}
In this section, we first examine the relationship between locally octahedral norms and the Daugavet property.
\begin{Definition}\cite{G, BLR2, HLP}
A Banach space $X$ is locally octahedral if for every $x \in X$ and $\epsilon >0$, there exists $y \in S_X$ such that $\|\lambda x + y\| \geq (1 - \epsilon) (|\lambda| \|x\| + \|y\|)$ for all $\lambda \in \mathbb{R}$.
\end{Definition}
A point $x\in S_X$ is called a \emph{uniformly non-$\ell_1^2$} point if there exists $\delta>0$ such that $\min\{\|x + y\|, \|x - y\|\} \leq 2-\delta$ for all $y\in S_X$. Motivated by this, we introduce the following.
\begin{Definition}
A point $x\in S_X$ is called a \emph{uniformly $\ell_1^2$} point if, given $\delta>0$, there is $y\in S_X$ such that $\min\{\|x + y\|, \|x - y\|\} > 2-\delta$.
\end{Definition}
By Proposition 2.1 in \cite{HLP} we get immediately the following corollary.
\begin{Corollary}\label{cor:octah-non}
Every point $x \in S_X$ is a uniformly $\ell_1^2$ point if and only if the Banach space $X$ is locally octahedral.
\end{Corollary}
\begin{Lemma}\cite{HLP}\label{lem:aux}
If $x, y \in S_X$ satisfy $\|x \pm y\| > 2 - \delta$ and $\alpha, \beta \in \mathbb{R}$, then
\[
(1-\delta)(|\alpha| + |\beta|) < \|\alpha x \pm \beta y\| \leq |\alpha| + |\beta|.
\]
\end{Lemma}
\begin{proof} See the proof of the implication from (iii) to (ii) in Proposition 2.1 in \cite{HLP}.
\end{proof}
In the next theorem we give a local characterization of uniformly $\ell_1^2$ points $x\in S_X$ (resp. $x^*\in S_{X^*}$) and the diameter of the slice $S(x;\epsilon)$ (resp. the diameter of the weak$^{*}$-slice $S(x^*,\epsilon)$). The techniques used in the proof are somewhat similar to the proof of Theorem 3.1 in \cite{HLP}, but the key ideas are more subtle emphasizing the local nature of discussed properties.
This yields a corollary relating the global property of $X$ having the local diameter two property to $X^*$ being locally octahedral, as well as the weak$^*$-local diameter two property of $X^*$ to $X$ being locally octahedral.
\begin{Theorem}\label{th:unif}
\rm(a) An element $x \in S_{X}$ is a uniformly $\ell_1^2$ point if and only if the diameter of a weak$^{*}$-slice $S(x;\epsilon)$ is two for every $\epsilon > 0$.
\rm(b) An element $x^* \in S_{X^*}$ is a uniformly $\ell_1^2$ point if and only if ${\rm{diam}}\,S(x^*;\epsilon)=2$ for every $\epsilon > 0$.
\end{Theorem}
\begin{proof}
We will prove only (a) since (b) follows analogously. Suppose that for all $0<\epsilon < 1$, ${\rm{diam}} \, S(x; \epsilon) = 2$. Then there exist $x_1^*, x_2^*\in S(x;\epsilon)$ such that
\begin{equation}\label{eq:11}
x_1^*(x)> 1-\epsilon, \ \ \ x_2^* (x) > 1 - \epsilon, \ \ \|x_1^* - x_2^*\| > 2- \epsilon.
\end{equation}
Hence we can find $y \in S_X$ with $(x_1^* - x_2^*)(y) > 2 - \epsilon$.
Thus
\[
2 \ge x_1^*(y) - x_2^*(y) > 2-\epsilon \ \ \ \text{and} \ \ \ x_1^*(y)\le1,\ -x_2^*(y) \le 1,
\]
and so $x_1^*(y) > 1-\epsilon$ and $-x_2^*(y) > 1-\epsilon$. Combining this with (\ref{eq:11}) we get that $x_1^*(x+y) > 2-2\epsilon$, $x_2^*(x-y) > 2 - 2\epsilon$, and so $\|x + y\| > 2 - 2\epsilon$ and $\|x - y\| > 2 -2\epsilon$. We showed that for every $0< \epsilon< 1$ there exists $y\in S_X$ such that
\[
\min\{\|x + y\|, \|x - y\|\} > 2 -2\epsilon,
\]
which means that $x$ is a uniformly $\ell_1^2$ point.
Conversely, suppose that $x \in S_X$ is a uniformly $\ell_1^2$ point. Then for any $\epsilon>0$, there exists $y \in S_X$ such that $\|x \pm y\| > 2 - \epsilon$. Define bounded linear functionals $x_1^*, x_2^*$ on the subspace ${\rm span}\{x,y\}$ such that
\[
x_1^*(x) = 1,\ \ x_1^*(y) = 0,\ \ x_2^*(x) = 0 \ \ \text{and} \ \ x_2^*(y) = 1.
\]
\noindent Note that $\|x_1^*\| \geq 1$ and $\|x_2^*\| \geq 1$. By Lemma \ref{lem:aux}, for $\alpha, \beta \in \mathbb{R}$ we have
\[
|(x_1^* \pm x_2^*)(\alpha x + \beta y)| = |\alpha \pm \beta| \leq |\alpha| + |\beta| \leq (1-\epsilon)^{-1}\|\alpha x + \beta y\|,
\]
\noindent so $\|x_1^* \pm x_2^*\| \leq (1-\epsilon)^{-1}$.
Now, let $\widetilde{x_1}^* = \frac{x_1^* + x_2^*}{\|x_1^* + x_2^*\|}$ and $\widetilde{x_2}^* = \frac{x_1^* - x_2^*}{\|x_1^* - x_2^*\|}$. Then
\[
\|\widetilde{x_1}^* - (x_1^* + x_2^*)\| = |\|x_1^* + x_2^*\| - 1| \leq \left|\frac{1}{1-\epsilon} - 1 \right| = \frac{\epsilon}{1 - \epsilon}.
\]
Similarly,
\[
\|\widetilde{x_2}^* - (x_1^* - x_2^*)\| \leq \frac{\epsilon}{1 - \epsilon}.
\]
Since $(x_1^* \pm x_2^*)(x)=1$, we have $\widetilde{x_1}^*(x) = \frac{1}{\|x_1^* + x_2^*\|} \geq 1 - \epsilon$ and $\widetilde{x_2}^*(x) = \frac{1}{\|x_1^* - x_2^*\|} \geq 1 - \epsilon$. Hence $\widetilde{x_1}^*, \widetilde{x_2}^* \in S(x;\epsilon)$. Furthermore,
\begin{eqnarray*}
\|\widetilde{x_1}^* - \widetilde{x_2}^*\| &=& \|\widetilde{x_1}^* + (x_1^* + x_2^*) - (x_1^* + x_2^*) + (x_1^* - x_2^*) - (x_1^* - x_2^*) - \widetilde{x_2}^*\| \\
&\geq& 2 \|x_2^*\| -\|\widetilde{x_1}^* - (x_1^* + x_2^*)\| -\|\widetilde{x_2}^* - (x_1^* - x_2^*)\| \geq 2 - \frac{2\epsilon}{1-\epsilon}.
\end{eqnarray*}
\noindent Since $\epsilon > 0$ is arbitrary, ${\rm{diam}}\, S(x; \epsilon) = 2$. Finally by the Hahn-Banach theorem, we can extend the bounded linear functionals $x_1^*$ and $x_2^*$ from ${\rm span}\{x,y\}$ to $X$ and the proof is completed.
\end{proof}
Combining Corollary \ref{cor:octah-non} and Theorem \ref{th:unif} we obtain the following result proved earlier in \cite{HLP}.
\begin{Corollary} \cite[Theorem 3.2, 3.4]{HLP}\label{HLP} Let $X$ be a Banach space. Then the following hold.
\begin{enumerate}
\item[$(1)$] $X$ is locally octahedral if and only if $X^*$ satisfies the weak$^{*}$ local diameter two property.
\item[$(2)$] $X^*$ is locally octahedral if and only if $X$ satisfies the local diameter two property.
\end{enumerate}
\end{Corollary}
Recall the equivalent geometric interpretation of the Daugavet property.
\begin{Lemma}\cite[Lemma 2.2]{KSSW}
\label{lem:Daug}
The following are equivalent.
\begin{enumerate}[{\rm(i)}]
\item A Banach space $(X,\|\cdot\|)$ has the Daugavet property,
\item\label{Daugii} For every slice $S = S(x^*,\epsilon)$ where $x^*\in S_{X^*}$, every $x \in S_X$ and every $\epsilon>0$, there exists $y\in S_{X}\cap S$ such that $\|x+y\|>2-\epsilon$,
\item\label{Daugiii} For every weak$^{*}$-slice $S^* = S(x,\epsilon)$ where $x\in S_{X}$, every $x^* \in S_{X^*}$ and every $\epsilon>0$, there exists $y^*\in S_{X^*}\cap S^*$ such that $\|x^*+y^*\|>2-\epsilon$.
\end{enumerate}
\end{Lemma}
The next result is known in a stronger form \cite[Theorem 4.4]{ALN} \cite[Corollary 2.5]{BLR2} \cite{HLP}, namely, if $X$ has the Daugavet property then it has the SD2P as well as its dual $X^*$ has the weak$^*$-SD2P.
\begin{Proposition}\label{prop:slice}
If a Banach space $X$ has the Daugavet property, then $X$ has the local diameter two property and $X^*$ has the weak$^*$-local diameter two property.
\end{Proposition}
\begin{proof} Let $x^*\in S_{X^*}$ and $S(x^*; \epsilon)$ be a slice of $B_X$. Then there is $x\in S_X$ such that $-x^*(x) > 1- \epsilon$. By (ii) of Lemma \ref{lem:Daug} we find $y\in S_X$ with $x^*(y) > 1-\epsilon$ and $\|x + y\| > 2 - 2\epsilon$. Clearly $-x, y \in S(x^*;\epsilon)$, and so ${\rm diam} \, S(x^*;\epsilon) = 2$.
Now let $x\in S_X$ and $S(x; \epsilon)$ be a weak$^*$-slice of $B_{X^*}$. There exists $y^*\in S_{X^*}$ such that $-y^*(x) > 1 - \epsilon$. By (iii) of Lemma \ref{lem:Daug} there is $x^* \in S_{X^*}$ with $x^*(x) > 1 - \epsilon$ and $\|x^* + y^*\| > 2 - 2\epsilon$. Since both $x^*, -y^* \in S(x;\epsilon)$, $\epsilon > 0$ is arbitrary, we have that ${\rm diam} \, S(x,\epsilon) = 2$.
\end{proof}
The next result is an instant corollary of Theorem \ref{th:unif} and Proposition \ref{prop:slice}.
\begin{Corollary}\cite[Proposition 4.4]{KK}
\label{prop}
If $(X,\|\cdot\|)$ has the Daugavet property, then all elements in $S_X$ and $S_{X^*}$ are uniformly $\ell_1^2$ points.
\end{Corollary}
Next we shall consider Orlicz spaces $L_{\varphi}, \,\ell_\varphi$ and $L_{\varphi}^0, \, \ell_\varphi^0$. Let us define first the following numbers related to Orlicz function $\varphi:\mathbb{R}_+ \to [0,\infty]$. Recall the Orlicz function $\varphi$ is called a {\it linear function} if $\varphi(u)= ku$ on $\mathbb{R}_+$ for some $k>0$. Set
\[
d_\varphi =\sup\{u: \varphi(u)\ \ \text{is linear}\}, \ \ c_\varphi = \sup\{u: \varphi(u) \le 1\},\ \ \
b_\varphi = \sup\{u: \varphi(u) < \infty\}.
\]
\begin{Lemma} \cite[Lemma 4.1]{KK} \label{lem:1}
Let $\varphi$ be an Orlicz function. For every closed and bounded interval $I \subset (d_{\varphi}, b_{\varphi})$ there is a constant $\sigma \in (0,1)$ such that $2 \varphi(u/2)/\varphi(u) \leq \sigma$ for $u \in I$. Moreover, if $\varphi(b_\varphi) < \infty$ then the same statement holds true for closed intervals $I \subset (d_\varphi, b_\varphi]$.
\end{Lemma}
\begin{Theorem}\label{th:KamKub}
\begin{enumerate}[{\rm(1)}]
\item[\rm(i)] Let $\mu$ be non-atomic. Let $\varphi$ be an Orlicz function such that $\varphi(b_\varphi)\mu(\Omega) >1$ and $d_\varphi < b_\varphi$. Then there exist $a> 0$ and $A \in \Sigma$ such that $x = a \chi_A$, $\|x\|_{\varphi} =1$ and $x$ is a uniformly non-$\ell_1^2$ point in $L_{\varphi}$. If $b_\varphi = \infty$ then $x\in (L_\varphi)_a$.
\item[\rm(ii)] Let $\mu$ be the counting measure on $\mathbb{N}$ and $\varphi$ be an Orlicz function such that $d_\varphi < c_\varphi$ and $\varphi(c_\varphi) = 1$. Then there exist $a > 0$ and $A \subset \mathbb{N}$ such that $x = a \chi_A$, $\|x\|_{\varphi} =1$ and $x$ is a uniformly non-$\ell_1^2$ point in $\ell_{\varphi}$. If $b_\varphi = \infty$ then $x\in (\ell_\varphi)_a$.
\end{enumerate}
\end{Theorem}
\begin{proof}
(i): By the assumptions on $\varphi$ and non-atomicity of $\mu$, there exist $A\in \Sigma$ and $a\in (d_\varphi,b_\varphi)$ such that $\varphi(a) \mu(A) = 1$.
Letting $x=a \chi_A$, we get $I_{\varphi}(x) =1$, and $\|x\|_{\varphi} = 1$. Clearly $x\in (L_\varphi)_a$ if $b_\varphi = \infty$.
Let $y \in S_{L_\varphi}$ be arbitrary. Hence for a.e. $t\in \Omega$, $|y(t)| < b_\varphi$ if $b_\varphi=\infty$ or $|y(t)| \le b_\varphi$ if $b_\varphi<\infty$. Then, for any $\lambda >1$, $I_{\varphi}({y}/{\lambda}) \leq 1$. We claim that
\begin{equation}\label{cond1}
\text{there exist }\, d\in (a,b_\varphi) \,\, \text{and} \,\, B=\{t \in \Omega : |y(t)| \leq d \chi_A (t)\} \,\, \text{such that} \,\, \mu(A \cap B) >0.
\end{equation}
Indeed, let first $b_\varphi = \infty$. Define $B_k = \{t \in \Omega : |y(t)| \leq k \chi_A (t)\}$ for $k \in \mathbb{N}$. The sequence of sets $\{B_k\}$ is increasing, and so
$0 < \mu(A) = \mu (A \cap (\cup_{k=1}^{\infty} B_k)) = \lim_{k \rightarrow \infty} \mu (A \cap B_k)$, and this implies that there exists $m \in \mathbb{N}$ such that $2a <m$ and $\mu (A \cap B_{m}) >0$. Letting $B = B_{m}$, $d=m$, we get (\ref{cond1}).
Let now $b_\varphi < \infty$. Define $C_k = \{t \in \Omega : |y(t)| \leq (b_\varphi - 1/k) \chi_A (t)\}$ for $k \in \mathbb{N}$. Like before, $\{C_k\}$ is increasing and $\lim_{k \rightarrow \infty} \mu(A \cap C_k)>0$. So there exists $m$ such that $b_\varphi - 1/m > a$. Let now $d = b_\varphi - 1/m$ and $B = C_m$, and so (\ref{cond1}) is satisfied.
Set
\begin{equation}\label{eq:111}
\gamma = I_{\varphi} (a \chi_{A \setminus B}).
\end{equation}
Clearly $\gamma\in [0,1)$.
For any $\delta>0$, there exists $1>\epsilon>0$ such that $I_{\varphi}((1+ \epsilon)x) = \varphi((1+\epsilon)a)\mu(A) \leq 1 + \delta$. We can choose $\epsilon$ so small that we also have $(1+\epsilon)a < d$.
Let $z = (1+\epsilon) x = (1 + \epsilon)a \chi_A$. Thus
\begin{equation}\label{eq:11}
I_\varphi(z) \le 1+\delta.
\end{equation}
Define
\[
D = \{t \in A \cap B : x(t)y(t) \geq 0 \},\,\, E = (A \cap B) \setminus D.
\]
For $t \in A \cap B$, $\max \{ |z(t)|, |y(t)| \} = \max \{ |(1+\epsilon)a|, |y(t)| \} \in [a,d]$.
Since $D \subset A \cap B$, we have $|z(t) - y(t)|/2 \le \max\{|z(t)|, |y(t)|\}$ for $t\in D$. Moreover by Lemma \ref{lem:1}, there exists $\sigma \in (0,1)$ such that $2\varphi(u/2)/\varphi(u) \le \sigma$ for $u\in [a,d] \subset (d_{\varphi}, b_{\varphi})$. Therefore
\begin{eqnarray*}
I_{\varphi}\left(\frac{z-y}{2} \chi_D \right) \leq I_{\varphi}\left(\frac{\max\{|z|, |y| \}}{2} \chi_D \right) &\leq& \frac{\sigma}{2} I_{\varphi}(\max\{|z|, |y| \}\chi_D)\\
&\leq& \frac{\sigma}{2}(I_{\varphi}(z \chi_D)+ I_{\varphi}(y \chi_D)).
\end{eqnarray*}
\noindent Analogously we can also show that
\[
I_{\varphi} \left(\frac{z+y}{2} \chi_E \right) \leq \frac{\sigma}{2}(I_{\varphi}(z \chi_E)+ I_{\varphi}(y \chi_E)).
\]
Then, by the convexity of $\varphi$ and $A \cap B = D\cup E$,
\begin{eqnarray*}
&\,&I_{\varphi}\left(\frac{z-y}{2} \chi_{A \cap B} \right) + I_{\varphi} \left(\frac{z+y}{2} \chi_{A \cap B} \right)\\ &=& I_{\varphi} \left(\frac{z-y}{2} \chi_D \right) + I_{\varphi} \left(\frac{z+y}{2} \chi_D \right) + I_{\varphi} \left(\frac{z-y}{2} \chi_E \right) + I_{\varphi} \left(\frac{z+y}{2} \chi_E \right)\\
&\leq& \frac{\sigma}{2}(I_{\varphi}(z\chi_D) + I_{\varphi}(y\chi_D)) + \frac{1}{2}(I_{\varphi}(z\chi_D) + I_{\varphi}(y\chi_D))+ \frac{1}{2}(I_{\varphi}(z\chi_E) + I_{\varphi}(y\chi_E)) \\
&+& \frac{\sigma}{2}(I_{\varphi}(z\chi_E) + I_{\varphi}(y\chi_E))
= \frac{1+ \sigma}{2}(I_{\varphi}(z \chi_{A \cap B}) + I_{\varphi}(y \chi_{A \cap B})).
\end{eqnarray*}
Now, choose $\delta \in (0, \frac{(1-\sigma)(1- \gamma)}{2})$. By the assumption $I_{\varphi}(y) \leq 1$ and by (\ref{eq:111}), (\ref{eq:11}) we have
\[
2+ \delta \geq I_{\varphi}(y) + 1 + \delta \geq I_{\varphi}(y) + I_{\varphi}(z),
\]
and so
\begin{eqnarray*}
2+ \delta - I_{\varphi}\left(\frac{z+y}{2} \right) - I_{\varphi}\left(\frac{z-y}{2}\right) &\geq& I_{\varphi}(y)+ I_{\varphi}(z) - I_{\varphi}\left(\frac{z+y}{2}\right) - I_{\varphi}\left(\frac{z-y}{2}\right)\\
&\geq& I_{\varphi}(y)+ I_{\varphi}(z) - \frac{1+ \sigma}{2}(I_{\varphi}(z \chi_{A \cap B}) + I_{\varphi}(y \chi_{A \cap B}))\\
&\geq& \frac{1- \sigma}{2}(I_{\varphi}(z \chi_{A \cap B}) + I_{\varphi}(y \chi_{A \cap B}))\\
&\geq& \frac{1- \sigma}{2}I_{\varphi}(a \chi_{A \cap B}) = \frac{(1- \sigma)(1-\gamma)}{2},
\end{eqnarray*}
\noindent which implies that
\[
I_{\varphi}\left(\frac{z+y}{2} \right) + I_{\varphi}\left(\frac{z-y}{2} \right) \leq 2+ \delta - \frac{(1- \sigma)(1-\gamma)}{2} \le 2.
\]
It follows
\[
\min \left \{I_{\varphi}\left(\frac{z+y}{2} \right), I_{\varphi}\left(\frac{z-y}{2} \right) \right\} \leq 1.
\]
If $I_{\varphi}\left(\frac{z+y}{2}\right) \leq 1$, then $\left \| \frac{z+y}{2} \right \|_{\varphi} \leq 1$, and so $\left \| \frac{x+(y/(1+\epsilon))}{2} \right \|_{\varphi} \leq \frac{1}{1+\epsilon}$. Moreover,
\begin{equation*}
\left | \left\| \frac{x+y}{2}\right \|_{\varphi} - \left \|\frac{x+(y/(1+\epsilon))}{2} \right \|_{\varphi} \right | \leq \left \| \frac{x+y}{2} - \frac{x+(y/(1+\epsilon))}{2} \right \|_{\varphi} = \frac{\epsilon}{2(1+\epsilon)}.
\end{equation*}
Hence
\[
\left \| \frac{x+y}{2} \right \|_{\varphi} \leq \left \|\frac{x+(y/(1+\epsilon))}{2} \right \|_{\varphi} + \frac{\epsilon}{2(1+\epsilon)} \leq \frac{1}{1+ \epsilon} + \frac{\epsilon}{2(1+ \epsilon)} = 1 - \frac{\epsilon}{2(1+\epsilon)}.
\]
In a similar way, if $I_{\varphi} \left(\frac{z-y}{2} \right) \leq 1$, then $\left \| \frac{x-y}{2} \right \|_{\varphi} \leq 1 - \frac{\epsilon}{2(1+\epsilon)}$. Thus, we just showed that for any $y \in S_{L_\varphi}$, $\min \left\{ \left \| \frac{x+y}{2} \right \|_{\varphi}, \left \| \frac{x-y}{2} \right \|_{\varphi} \right \} \leq 1 - \frac{\epsilon}{2(1+\epsilon)}$, which means
\begin{equation*}
\min \{ \| x+y\|_{\varphi}, \| x-y\|_{\varphi}\} \leq 2 - \frac{\epsilon}{1+ \epsilon} < 2.
\end{equation*}
Therefore, $x= a \chi_A$, $\|x\|_{\varphi} = 1$ is a uniformly non-$\ell_1^2$ point in $L_\varphi$.
(ii): If $x \in S_{\ell_{\varphi}}$, then $I_\varphi(x) = \sum_{i=1}^{\infty} \varphi(|x(i)|) \leq 1$. So for every $i \in \mathbb{N}$, $\varphi(|x(i)|) \leq 1$. Hence for any element of $S_{\ell_\varphi}$, we only consider $u \geq 0$ such that $\varphi(u)\leq 1$. Then by the assumptions $1= \frac{1}{\varphi(c_\varphi)}< \frac{1}{\varphi(d_\varphi)}$, there exist $a \in (d_{\varphi}, c_\varphi]$ and $A \subset \mathbb{N}$ such that $\varphi(a) = 1/\mu(A)$. Let $x = a\chi_A$. Then $I_\varphi(x) = \varphi(a)\mu(A) = 1$ and $\|x\|_\varphi = 1$. If $b_\varphi = \infty$ then $x \in (\ell_\varphi)_a$.
Now for $y \in S_{\ell_\varphi}$, we want to show that there exists $d \in (a, c_\varphi)$ and $B = \{i \in \mathbb{N} : |y(i)| \leq d\}$ such that $\mu(A \cap B) > 0$, which corresponds to (\ref{cond1}) in function case.
Since $y$ is in the unit ball of $\ell_\varphi$, for each $i\in \mathbb{N}$, $|y(i)| \le c_\varphi$.
Define $C_k = \{i \in \mathbb{N} : |y(i)| \leq (c_\varphi - 1/k) \chi_A (i)\}$ for $k \in \mathbb{N}$. The sequence $\{C_k\}$ is increasing and
\[
0 < \mu (A) = \mu (A \cap (\cup_{k=1}^{\infty} C_k)) = \lim_{k \rightarrow \infty} \mu (A \cap C_k).
\]
So there exists $m$ such that $c_\varphi - 1/m > a$. Let now $d = c_\varphi - 1/m$ and $B = C_m$. Then
$d\in (a,c_\varphi)$, $|y(i)| \le d \chi_A(i)$ for $i\in A\cap B$ and $\mu(A\cap B) > 0$.
Further we proceed analogously as in the proof for function spaces starting from (\ref{eq:111}). We apply Lemma \ref{lem:1} for the interval $I=[a,d] \subset (d_\varphi, c_\varphi)\subset (d_\varphi, b_\varphi)$.
\end{proof}
Concerning the Daugavet property we will consider only the case of non-atomic measure since it is not difficult to show that any rearrangement invariant sequence space never has the Daugavet property.
In \cite{AKM2}, it was shown that an Orlicz space $L_\varphi$ generated by a finite Orlicz function $\varphi$ has the Daugavet property if and only if the space is isometrically isomorphic to $L_1$. Similar result
can be derived also from \cite{KK} where it was proved for Musielak-Orlicz spaces. Below the given proof for Orlicz spaces $L_\varphi$ is much simpler than those in \cite{AKM2, KK}. In fact it is a direct corollary of Theorem \ref{th:KamKub}(i).
\begin{Theorem}\label{thm:DaugOrlicz}
Let $\mu$ be a non-atomic measure.
If $\varphi$ is a finite Orlicz function then the only Orlicz space $L_\varphi$ having the Daugavet property
corresponds to a linear function $\varphi$, that is, $L_\varphi = L_1$ isometrically.
\end{Theorem}
\begin{proof}
If $L_\varphi = L_1$ isometrically, clearly the Orlicz space has the Daugavet property.
Supposing $L_\varphi$ has the Daugavet property, by Corollary \ref{prop}, every point of the unit sphere of $L_\varphi$ is a uniformly $\ell_1^2$ point. Applying now Theorem \ref{th:KamKub}(i), $d_\varphi = b_\varphi$, where $b_\varphi = \infty$ by the assumption that $\varphi$ assumes finite values. Therefore $\varphi(u) = ku$, for some $k>0$ and all $u\ge 0$. Consequently, $L_\varphi = L_1$ and $\|\cdot\|_\varphi = k\|\cdot\|_1$.
\end{proof}
\begin{Theorem}\label{thm:Orlicznormdiam} \rm(i) Let $\mu$ be a non-atomic measure. If $d_{\varphi_*} < b_{\varphi_*}$ and $\varphi_*(b_{\varphi_*})
\mu(\Omega) > 1$ then $L_\varphi^0$ does not have the local diameter two property.
\rm(ii) Let $\mu$ be the counting measure on $\mathbb{N}$. If $d_{\varphi_*} < c_{\varphi_*}$ and $\varphi_*(c_{\varphi_*}) =1$
then $\ell_\varphi^0$ does not have the local diameter two property.
\end{Theorem}
\begin{proof} We will show only (i), since the sequence case is proved similarly. By the assumptions in view of Theorem \ref{th:KamKub}(i), the space $L_{\varphi_*}$ has a uniformly non-$\ell_1^2$ point. In view of Theorem \ref{th:unif} it is equivalent to that the dual space $(L_{\varphi_*})^*$ does not have the weak$^*$ local diameter two property. It is well-known that the dual space to Orlicz space $L_\varphi$ is isometrically isomorphic to the direct sum $L_{\varphi_*}^0 \oplus_1 \mathcal{S}$, where $\mathcal{S}$ is a set of the singular functionals on $L_\varphi$ (\cite{Chen}).
Therefore the dual space $ (L_{\varphi_*})^*$ is isometrically isomorphic to $L_\varphi^0 \oplus_1 \mathcal{S}$ due to $\varphi_{**} = \varphi$ \cite{KR}. By Theorem \ref{th:KamKub}(i), there exists a uniformly non-$\ell_1^2$ point $x\in S_{L_{\varphi_*}}$ of a unit ball in $L_{\varphi_*}$. Hence in view of Theorem \ref{th:unif}, there exists $\epsilon > 0$ such that ${\rm diam}\, S(x;\epsilon) < 2$ where $S(x;\epsilon) = \{x^* \in B_{(L_{\varphi_*})^*} : x^*(x) > 1 - \epsilon\}$ is a weak$^*$-slice. Now, let $J: L_{\varphi_*} \rightarrow (L_{\varphi_*})^{**}$ be the canonical mapping so that $J(x)(x^*) = x^*(x)$. Letting $i: L_{\varphi}^0 \rightarrow (L_{\varphi_*})^*$ be isometric embedding, $T: = J(x) \circ i\in B_{(L_\varphi^0)^*}$ and $S(T;\epsilon) = \{y \in B_{L_{\varphi}^0} : T(y) > 1 - \epsilon\} $ is a slice of the unit ball in $L_\varphi^0$. Moreover,
\[
S(T;\epsilon) \subset \{x^* \in B_{(L_{\varphi_*})^*} : J(x)(x^*) > 1 - \epsilon\} = \{x^* \in B_{(L_{\varphi_*})^*} : x^*(x) > 1 - \epsilon\} = S(x; \epsilon).
\]
Therefore, ${\rm diam} \,S(T;\epsilon)<2$, and the space $L_\varphi^0$ does not have the local diameter two property.
\end{proof}
In \cite{AKM2} it has been proved that if $\varphi$ does not satisfy the appropriate $\Delta_2$ condition then $L_\varphi$ or $\ell_\varphi$ has the local diameter two property. This result was generalized later to Orlicz-Lorentz spaces \cite{KT}. For the Orlicz spaces equipped with the Orlicz norm the situation is different. As shown below, for a large class of finite Orlicz functions the spaces $L_\varphi^0$ or $\ell_\varphi^0$ have no local diameter two property.
\begin{Corollary}\label{Cor:Orlicznormdiam}
Let $\mu$ be a non-atomic measure on $\Sigma$ or $\mu$ be the counting measure on $\mathbb{N}$. Let $\varphi$ be a finite $N$-function at infinity. Then there exists a slice of $B_{L_\varphi^0}$, respectively of $B_{\ell_\varphi^0}$, with diameter less than two. Consequently, the Orlicz spaces $L_\varphi^0$ or $\ell_\varphi^0$ equipped with the Orlicz norm do not have the local diameter two property.
\end{Corollary}
\begin{proof}
Since $\varphi$ is an $N$-function at infinity then in view of Lemma \ref{lem:finite}, $\varphi_*$ is a finite function and so $b_{\varphi_*} = \infty$. We also have that $d_{\varphi_*} <\infty$. Indeed, if for a contrary $d_{\varphi_*} =\infty$ then $\varphi_*(v) = kv$ for some $k>0$ and all $v\ge 0$. Then it is easy to show that $\varphi = \varphi_{**}$ assumes only zero or infinity values, which contradicts the assumption that $\varphi$ is a finite Orlicz function. We complete the proof by
application of Theorem \ref{thm:Orlicznormdiam}.\end{proof}
We conclude this paper by showing that the SD2P, D2P and LD2P are equivalent in $L_\varphi$ or $\ell_\varphi$ when $\varphi$ does not satisfy the appropriate $\Delta_2$ condition. Recall a subspace $Y$ of $X^*$ is said to be {\it norming} if for every $x \in X$,
\[
\|x\| = \sup\{|x^*(x)| : \|x^*\|_{X^*} \leq 1, x^* \in Y\}.
\]
\begin{Proposition}\cite[Proposition 1.b.18]{LT2}\label{LT2}
If $X$ is a Banach function space with the Fatou property, then the K\"othe dual space $X'$ is order isometric to a norming subspace of $X^*$.
\end{Proposition}
We say a closed subspace $Y$ is an \emph{$M$-ideal} in $X$ if $Y^\perp$ is the range of the bounded projection $P: X^* \rightarrow X^*$ such that $\|x^*\| = \|Px^*\| + \|(I-P)x^*\|$, that is $X^* = Y^{\perp} \oplus_1 Z$ for some subspace $Z$ of $X^*$. In fact, there is a connection between $M$-ideals and the SD2P.
\begin{Theorem} \cite[Theorem 4.10]{ALN}\label{SD2P}
Let $Y$ be a proper subspace of $X$ and let $Y$ be an $M$-ideal in $X$ i.e. $X^* = Y^{\perp} \oplus_1 Z$. If $Z$ is a norming subspace of $X^*$, then both $X$ and $Y$ have the strong diameter two property.
\end{Theorem}
\begin{Corollary}\label{th:Mideal}
Let $\mu$ be a non-atomic measure on $\Sigma$ or the counting measure on $\mathbb{N}$. Given a finite Orlicz function $\varphi$ which does not satisfy the appropriate $\Delta_2$ condition, the spaces $L_{\varphi}$ or $\ell_\varphi$ and their proper subspaces $(L_{\varphi})_a\ne \{0\}$ or $(\ell_\varphi)_a\ne \{0\}$ have the strong diameter two property.
\end{Corollary}
\begin{proof}
Let $\mu$ be non-atomic. By the assumption that $\varphi$ is finite, the subspace $(L_{\varphi})_a$ is non-trivial. Moreover it is well-known that it is an $M$-ideal in $L_{\varphi}$ \cite{HWW}. It is a proper subspace if $(L_{\varphi})_a\ne L_{\varphi}$, which is equivalent to that $\varphi$ does not satisfy the appropriate $\Delta_2$ condition. By Proposition \ref{LT2}, $(L_{\varphi})' \simeq ((L_{\varphi})_a)^*$ is a norming subspace of $(L_\varphi)^*$. Hence by Theorem \ref{SD2P}, both $(L_{\varphi})_a$ and $L_{\varphi}$ have the strong diameter two property. The proof in sequence case is similar.
\end{proof}
The $M$-ideal property of the order continuous subspace of an Orlicz-Lorentz space has been studied \cite{KLT}. In our final result, we obtain full characterization of (local, strong) diameter two properties in Orlicz spaces equipped with the Luxemburg norm. It is completion and extension of Theorems 2.5 and 2.6 from \cite{AKM}, where it was shown that $L_\varphi$ or $\ell_\varphi$ have the D2P whenever $\varphi$ does not satisfy appropriate condition $\Delta_2$.
\begin{Theorem}\label{OReq} Let $\mu$ be a non-atomic measure on $\Sigma$ or the counting measure on $\mathbb{N}$ and let $\varphi$ be a finite Orlicz function. Consider the following properties.
\begin{itemize}
\item[(i)] $L_\varphi$ or $\ell_\varphi$ has the local diameter two property.
\item[(ii)] $L_\varphi$ or $\ell_\varphi$ has the diameter two property.
\item[(iii)] $L_\varphi$ or $\ell_\varphi$ has the strong diameter two property.
\item[(iv)] $\varphi$ does not satisfy the appropriate $\Delta_2$ condition.
\end{itemize}
Then $\rm(iii) \implies (ii) \implies (i)$. For the sequence space $\ell_\varphi$ all properties $\rm(i)-(iv)$ are equivalent. If in addition $\varphi$ is $N$-function at infinity then all $\rm(i)-(iv)$ are also equivalent for the function space $L_\varphi$.
\end{Theorem}
\begin{proof}
The fact $\rm(iii) \implies (ii) \implies (i)$ is well-known in general Banach spaces \cite{ALN, GGMS}. The implication $\rm(iv) \implies (iii)$ follows from Corollary \ref{th:Mideal}. If $L_\varphi$ has the local diameter two property then the space can not have the RNP. Thus from Theorems \ref{th:OrRN-funct} and \ref{th:RNP-ORseq}, (i) $\implies$ (iv).
\end{proof}
\end{document} |
\begin{document}
\author{Ciprian Demeter}
\address{Department of Mathematics, Indiana University, Bloomington IN}
\email{demeterc@@indiana.edu}
\author{Shaoming Guo}
\address{Department of Mathematics, Indiana University, Bloomington IN}
\email{shaoguo@@indiana.edu}
\thanks{The first author is partially supported by the NSF grant DMS-1161752}
\title[Schr\"odinger maximal function]{Schr\"odinger maximal function estimates via the pseudoconformal transformation}
\begin{abstract}
We present an alternative way to recover the recent result from \cite{LR} using the pseudoconformal transformation.
\end{abstract}
\maketitle
\section{Introduction}
Recall that the solution of the Schr\"odinger equation
\begin{equation}
\label{e2}
i\partial_t u(x,t)+\Delta u(x,t)=0,\;x\in\R^n,\;t\ge 0
\end{equation}
with initial data $u_0\in L^2(\R^n)$ is given by
$$u(x,t)=e^{it\Delta}u_0(x)=\int_{\R^n}\widehat{u}_0(\xi)e^{2\pi ix\cdot\xi-4\pi^2it|\xi|^2}d\xi.$$
A fundamental open question for $n\ge 2$ is identifying the smallest Sobolev index $s>0$ for which
$$\lim_{t\to 0}u(x,t)=u_0(x)\; a.e., \text{ for each }u_0\in H^s(\R^n).$$
The main goal of this note is to give an alternative argument for the following recent result of Luc\`a and Rogers, which proves a lower bound on the Sobolev regularity index $s$.
\begin{theorem}\label{main}
Let $n\ge 2$ and $s<\frac{n}{2(n+2)}$. Then there exist $R_k\to\infty$ and $f_k\in L^2(\R^n)$ with $\widehat{f_k}$ supported in the annulus $|\xi|\sim R_k$ such that
\begin{equation}
\label{e1}
\lim_{k\to\infty}\frac{\|\sup_{0<t\lesssim 1}|e^{it\Delta}f_k(x)|\|_{L^2(B(0,1))}}{R_k^{s}\|f_k\|_{L^2(\R^n)}}=\infty.
\end{equation}
\end{theorem}
We use the pseudoconformal symmetry, according to which, if $u(x,t)$ solves \eqref{e2} then so does
$$v(x,t)=\frac1{t^{n/2}}\bar{u}(\frac{x}{t},\frac1{t})e^{i\frac{|x|^2}{4t}}.$$
Moreover, the initial data of the two solutions will have comparable $L^2$ norms. See the Appendix.
We will start with a solution $u$ (the same as the one in \cite{LR}) that is big on a cartesian set $X\times T$ of $(x,t)$. The set $X$ will be a small neighborhood of a rescaled copy of ${\mathbb Z}^n$ inside $[-1,1]^n$, while $T$ will be a discrete lattice inside $t\sim 1$. The measure of $X$ will be significantly smaller than 1, of order $R_k^{-\alpha_n}$, for some $\alpha_n>0$. The property that our construction exploits is that the set $Y=\frac{X}{T}$ can be made much larger than $X$, in fact it can be made to have measure comparable to 1. Note that the new solution $v$ will now be big for each $x\in Y$ (for some $t$ depending on $x$). This will be enough to prove Theorem \ref{main}.
Let us compare our approach with other recent ones. Luc\`a and Rogers \cite{LR} use the Galilean symmetry, according to which
if $u(x,t)$ solves \eqref{e2} then so does
$$v(x,t)=u(x-t\theta,t)e^{it\frac{|\theta|^2}{4}}e^{i\frac{x\cdot\theta}2}$$
for arbitrary $\theta\in\R^n$. Moreover, the initial data of the two solutions will have comparable $L^2$ norms. As mentioned before, they start with the same $u$, and thus have the same $X,T$. Their observation is that, for appropriate $\theta$, the set $Y=X-\theta T$ will have measure comparable to 1.
Bourgain \cite{Bo} constructs a solution $u$ which has two attributes. On the one hand, it is big on a cartesian product $X\times \{0\}$. So $T=\{0\}$. The second property of $u$ is that it is very symmetrical, almost invariant under a certain space-time translation. More precisely, for an appropriate $\nu\in\R^{n+1}$
\begin{equation}
\label{e3}
u(x,t)\approx u((x,t)+s\nu)
\end{equation}
will hold for all $x\in B(0,1)$ and all $t,s\sim 1$. The original small set $X$ where $u$ was large gets amplified from the fact that the $x$ projection of the set $$Y=(X\times \{0\})+[\frac1{10},10]\nu$$ has measure comparable to $1$.
In both our example and the one from \cite{LR}, the Fourier transform $\widehat{u}_0$ of the initial data is essentially the characteristic function of a small neighborhood of a rescaled (and truncated) copy of ${\mathbb Z}^n$. In Bourgain's construction, the mass lives on a small portion of this set, where lattice points are restricted to a sphere. The key is that the lift of this sphere to the paraboloid $(\xi,|\xi|^2)$ is a collection of points that live in a hyperplane $H\subset \R^{n+1}$. The existence of a nonzero vector $\nu\in H^{\perp}$ is what makes the remarkable symmetry \eqref{e3} possible.
In terms of the actual mathematics that is involved in proving that the enhanced set $Y$ has measure comparable to 1, the three methods described above are at least superficially different. Luc\`a and Rogers derive a quantitative version of the ergodic theorem involving the Funk-Hecke theorem. Bourgain uses a bit of Fourier analysis but his argument also has diophantine flavor. Our argument elaborates on a quantitative version of the multidimensional Dirichlet principle, which in its simplest form can be stated as follows.
\begin{lemma}
\label{8}
Given $y_1,\ldots,y_n\in [0,1]$ and a real number $N\ge 1$, there is $1\le p\le N+2$ such that
$$\max_{1\le i\le n}\|py_i\|\le \frac1{N^{1/n}}.$$
\end{lemma}
Here and in the following, $\|x\|$ will denote the distance of $x$ to ${\mathbb Z}$. The proof of this lemma is an immediate application of pigeonholing.
It is hard to conjecture what the optimal $s$ in Theorem \ref{main} should be. The authors feel that the likeliest possibility is $s=\frac{n}{2(n+1)}$. If one runs a multilinear type Bourgain--Guth argument for this problem (as was done in \cite{Bo}), the $n+1$ linear term has a favorable estimate consistent with this value of $s$. Another interesting question is whether the optimal $s$ is the same for a larger class of curved hyper-surfaces $(\xi,\varphi(\xi))$ generalizing the paraboloid $(\xi,|\xi|^2)$. It is worth mentioning that Bourgain exhibits a surface
$$\varphi(\xi)=\langle A\xi,\xi\rangle+O(|\xi|^3)$$
with $A$ positive definite, for which a stronger result is proved: Theorem \ref{main} will hold even with $s<\frac{n-1}{2n}$ ($n\ge 3$) and $s<\frac{5}{16}$ ($n=2$).
\begin{ack*}
The authors thank J. Bennett, R. Luc\`a, K. Rogers and A. Vargas for a few interesting discussions on this topic.
\end{ack*}
\section{The main construction and the proof of Theorem \ref{main}}
Via rescaling, Theorem \ref{main} will follow from the following result.
\begin{theorem}\label{main1}
Let $n\ge 2$ and $s<\frac{n}{2(n+2)}$. Then there exist $R_k\to\infty$ and $v_k\in L^2(\R^n)$ with $\widehat{v_k}$ supported in the annulus $|\xi|\sim 1$ such that
\begin{equation}
\label{e25}
\lim_{k\to\infty}\frac{\|\sup_{0<t\lesssim R_k}|e^{it\Delta}v_k(x)|\|_{L^2(B(0,R_k))}}{R_k^{s}\|v_k\|_{L^2(\R^n)}}=\infty.
\end{equation}
\end{theorem}
We will prove this in the end of the section, using some elementary number theoretical results derived in Section \ref{50}.
For $0<u<v$ define the annuli
$${\mathbb A}_{u,v}=\{x\in\R^n:\;u<|x|<v\}.$$
Fix $\sigma<\frac1{n+2}$. Fix a Schwartz function $\theta$ on $\R^n$ whose Fourier transform is supported inside ${\mathbb A}_{4^{-n-3},4\sqrt{n}}$ and equals 1 on ${\mathbb A}_{4^{-n-2},2\sqrt{n}}$. The next three lemmas are used to align the phases of an exponential sum so that the absolute value of the sum is comparable to the number of exponentials in the sum.
\begin{lemma}
\label{4}
There exists $\epsilon_1>0$ so that for each $R$ large enough, the following holds:
For each $x\in {\mathbb A}_{4^{-n-2},2\sqrt{n}}$, each $t\in (0,1)$ and each $\xi'\in\R^n$ with $|\xi'|\le \epsilon_1R$ we have
$$|\int e^{2\pi i[(x-\frac{2t\xi'}{R})\cdot \xi-\frac{t}{R}|\xi|^2]}\theta(\xi)d\xi-1|<\frac12.$$
\end{lemma}
\begin{proof}
Let $$\psi(\xi)=-2\pi [\frac{2t\xi'}{R}\cdot \xi+\frac{t}{R}|\xi|^2].$$
Use the fact that $$\int e^{2\pi ix\cdot \xi}\theta(\xi)d\xi=1.$$
Then estimate $$|\int e^{2\pi ix\cdot \xi}\theta(\xi)[e^{i\psi(\xi)}-1]d\xi|\le 2\int_{|\xi|>C}|\theta(\xi)|d\xi+2\sup_{|\xi|\le C}|\psi(\xi)|\int_{\R^n}|\theta|.$$
Choose first $C$ so that $$\int_{|\xi|>C}|\theta(\xi)|d\xi<\frac14.$$
Then note that $$\sup_{|\xi|\le C}|\psi(\xi)|\le 2\pi(\frac{C^2}{R}+2\epsilon_1C).$$
Choose $\epsilon_1$ so small that $$4\pi(\frac{C^2}{R}+2\epsilon_1C)\int_{\R^n}|\theta|<\frac14.$$
for all $R$ large enough.
\end{proof}
The following lemma is rather trivial.
\begin{lemma}
\label{5}
Let $\Omega$ be a finite set.
Consider $a_{\xi'},b_{\xi'}\in{\mathbb C}$ for $\xi'\in\Omega$ such that
$$\max|a_{\xi'}-1|\le \delta_1$$
$$\max|b_{\xi'}-1|\le \delta_2.$$
Then
$$|\sum_{\xi'\in \Omega}a_{\xi'}b_{\xi'}-|\Omega||\le |\Omega|(\delta_1\max|b_{\xi'}|+\delta_2\max|a_{\xi'}|+\delta_1\delta_2).$$
\end{lemma}
For $\epsilon_2>0$ small enough (depending only on $\theta$, as revealed in the proof of Proposition \ref{40}), define
$$X:=(R^{\sigma-1}{\mathbb Z}^n+B(0,\frac{\epsilon_2}R))\cap {\mathbb A}_{4^{-n-2},2\sqrt{n}}$$
$$T:=(R^{2\sigma-1}{\mathbb Z})\cap (4^{-n-1},1),$$
and
$$\Omega:=(R^{1-\sigma}{\mathbb Z}^n)\cap B(0,\epsilon_1R).$$
Define also the Fourier transform of the initial data
$$\widehat{u_0}(\xi)=\sum_{\xi'\in\Omega}\theta(\xi-\xi').$$
Note that
\begin{equation}
\label{24}
\|u_0\|_2\sim R^{\frac{\sigma n}{2}}.
\end{equation}
and
\begin{equation}
\label{41}
{\operatorname{supp}}\; u_0\subset {\mathbb A}_{4^{-n-3},4\sqrt{n}}.
\end{equation}
The following is essentially proved in (3.2) from \cite{LR}.
\begin{proposition}
\label{40}
We have the following lower bound for each $(x,t)\in X\times T$
\begin{equation}
\label{20}
|e^{i\frac{t}{2\pi R}\Delta}u_0(x)|\gtrsim R^{\sigma n}.
\end{equation}
\end{proposition}
\begin{proof}
Note first that
$$e^{i\frac{t}{2\pi R}\Delta}u_0(x)=\sum_{\xi'\in\Omega}e^{2\pi i[x\cdot \xi'-\frac{t}{R}|\xi'|^2]}\int e^{2\pi i[(x-\frac{2t\xi'}{R})\cdot \xi-\frac{t}{R}|\xi|^2]}\theta(\xi)d\xi$$
One easily checks that for $(x,t)\in X\times T$ and $\xi'\in\Omega$ we have
$$x\cdot \xi'-\frac{t}{R}|\xi'|^2\in {\mathbb Z}+B(0,\epsilon_3),$$
where $\epsilon_3$ can be chosen as small as desired by choosing $\epsilon_2$ small enough. In particular, we can make sure that
$$|e^{2\pi i[x\cdot \xi'-\frac{t}{R}|\xi'|^2]}-1|<\min\{\frac1{100}, \frac{1}{100}\int|\theta|\}.$$
It suffices now to combine this with Lemma \ref{4} and Lemma \ref{5}, once we also note that $$|\Omega|\sim R^{\sigma n}.$$
\end{proof}
Recall that $u_0$ depends on $R$, so we might as well write $u_0=u_{0,R}$.
Let now $u_R(x,t)=e^{it\Delta}u_{0,R}(x)$ and let
$$v_R(x,t)=\frac1{t^{n/2}}\bar{u}_R(\frac{x}{t},\frac1{t})e^{i\frac{|x|^2}{4t}}$$
be its pseudoconformal transformation. The proposition in the Appendix shows that $v_R$ solves the Schr\"odinger equation with some initial data that we call $v_{0,R}$.
We record the properties of $v_R$ in the following proposition.
\begin{proposition}
\label{11}
We have for each large enough $R$ such that $R^{\sigma}$ is an integer
\begin{equation}
|\{x\in B(0,R):\sup_{0<t\lesssim R}|v_R(x,t)|\gtrsim R^{\sigma n-\frac{n}2}\}|\gtrsim R^n
\end{equation}
\begin{equation}
\|v_{0,R}\|_2\sim R^{\frac{\sigma n}2}
\end{equation}
\begin{equation}
\frac{\|\sup_{0<t\lesssim R}|v_R(x,t)|\|_{L^2(B_R)}}{\|v_{0,R}\|_2}\gtrsim R^{\frac{\sigma n}2}
\end{equation}
\begin{equation}
\operatorname{supp} \;\widehat{v_{0,R}}\subset 4\pi (\operatorname{supp} \;u_{0,R})\subset {\mathbb A}_{4^{-n-2}\pi, 16\pi\sqrt{n}}.
\end{equation}
\end{proposition}
\begin{proof}
The first property follows from \eqref{20} and \eqref{21}. The second one follows from \eqref{24} and \eqref{25}. The third one is a consequence of the first two. The fourth one also follows from \eqref{25} and \eqref{41}.
\end{proof}
\bigskip
Let now $s<\frac{n}{2(n+2)}$. The proof of Theorem \ref{main1} for this $s$ will now immediately follow by choosing $\sigma<\frac1{n+2}$ such that $\frac{\sigma n}{2}>s$, and by using $v_k=v_{0, R_k}$ from Proposition \ref{11}, with $R_k^\sigma$ an integer that grows to infinity with $k$.
\section{Number theoretical considerations}
\label{50}
\begin{lemma}
\label{7}
For each large enough real number $N$ there is $S=S_N\subset [0,1]^n$ with $|S|\ge \frac34$ such that for each $(y_1,\ldots,y_n)\in S$ there exists $p\in [4^{-n-1}N,N+2]$ satisfying
\begin{equation}
\label{9}
\max_{1\le i\le n}\|py_i\|\le \frac1{N^{\frac1n}}.
\end{equation}
\end{lemma}
\begin{proof}
Using Lemma \ref{8}, we know that \eqref{9} holds for each $(y_1,\ldots,y_n)\in [0,1]^n$, if we allow $p\in [1,N+2]$. We need an upper bound for those $(y_1,\ldots,y_n)$ corresponding to $1\le p\le 4^{-n-1}N$. For each $p$ define
$$A_p=\{(y_1,\ldots,y_n)\in [0,1]^n:\;\max_{1\le i\le n}\|py_i\|\le \frac1{N^{\frac1n}}\}=$$
$$=\bigcup_{0\le k_i\le p}\{(y_1,\ldots,y_n)\in [0,1]^n:\;\max_{1\le i\le n}|py_i-k_i|\le \frac1{N^{\frac1n}}\}.$$
The crude estimate
$$|A_p|\le (p+1)^n(\frac{2}{N^{\frac1n}p})^n<\frac{4^n}{N}$$
leads to
$$|\bigcup_{1\le p\le 4^{-n-1}N}A_p|\le \frac14.$$
\end{proof}
\begin{proposition}
Assume $R^{\sigma}$ is a large enough integer.
Let $$\frac{XR}{T}=\{\frac{xR}{t}:x\in X, t\in T\}.$$
Then
\begin{equation}
\label{21}
|\frac{XR}{T}\cap B(0,R)|\gtrsim R^n.
\end{equation}
\end{proposition}
\begin{proof}
It suffices to prove that
$$|\frac{X}{T}\cap [0,1]^n|\ge \frac12.$$
This can be written as $|U|\ge \frac12$ where
$$U=\{x\in[0,1]^n:\max_{1\le i\le n}|x_i-R^{-\sigma}\frac{k_i}{p}|\le \frac{\epsilon_2}R,\text{ with }$$$$4^{-n-2}R^{1-\sigma}\le |k|\le 2\sqrt{n}R^{1-\sigma},\;4^{-n-1}R^{1-2\sigma}\le p\le R^{1-2\sigma} \}.$$
This will follow if we prove that the larger set (we have dropped the restriction on $k_i$)
$$V=\{x\in[0,1]^n:\max_{1\le i\le n}|x_i-R^{-\sigma}\frac{k_i}{p}|\le \frac{\epsilon_2}R,\text{ with }4^{-n-1}R^{1-2\sigma}\le p\le R^{1-2\sigma} \}$$
satisfies
$|V|\ge \frac34.$ Indeed, note first that the restriction
$$|k|\le 2\sqrt{n}R^{1-\sigma}$$is redundant, as the inequality $\max|k_i|\le R^{1-\sigma}+1\le 2R^{1-\sigma}$ is forced by the combination of $x\in[0,1]^n$, $\max_{1\le i\le n}|x_i-R^{-\sigma}\frac{k_i}{p}|\le \frac{\epsilon_2}R$ and $p\le R^{1-2\sigma}$.
Second, note that
$$W=\{x\in[0,1]^n:\max_{1\le i\le n}|x_i-R^{-\sigma}\frac{k_i}{p}|\le \frac{\epsilon_2}R,\text{ with }$$$$|k|\le 4^{-n-2}R^{1-\sigma},\;4^{-n-1}R^{1-2\sigma}\le p\le R^{1-2\sigma} \}$$
satisfies $W\subset[0,\frac12]^n$ and thus $|W|<\frac14$.
We next focus on showing that $|V|\ge \frac34$. The inequality $$\max_{1\le i\le n}|x_i-R^{-\sigma}\frac{k_i}{p}|\le \frac{\epsilon_2}R$$
can be written as
$$\max_{1\le i\le n}\|p\{R^{\sigma}x_i\}\|\le \frac{\epsilon_2pR^{\sigma}}R,$$
where $\{z\}$ is the fractional part of $z$. Note that given the lower bound $p\ge 4^{-n-1}R^{1-2\sigma}$ and that $\sigma<\frac1{n+2}$, for $R$ large enough we have
$$\frac{\epsilon_2pR^{\sigma}}R\ge \frac1{(R^{1-2\sigma}-2)^{\frac1n}}.$$
Let $N:=R^{1-2\sigma}-2$. Then $V_0\subset V$ where
$$V_0=\{x\in[0,1]^n:\max_{1\le i\le n}\|p\{R^{\sigma}x_i\}\|\le \frac1{N^{\frac1n}},\text{ for some }4^{-n-1}N\le p\le N+2 \}.$$
Since $R^{\sigma}$ is an integer, the map
$$(x_1,\ldots,x_n)\mapsto (\{R^{\sigma}x_1\},\ldots, \{R^{\sigma}x_n\})$$
is a measure preserving transformation on $[0,1]^n$. The fact that $|V_0|\ge \frac34$ now follows from Lemma \ref{7}.
\end{proof}
\section{Appendix}
We record below the following classical result. See for example \cite{Ta}, page 72.
\begin{proposition}
If $u(x,t)$ solves \eqref{e2} then so does its pseudoconformal transformation
$$v(x,t)=\frac1{t^{n/2}}\bar{u}(\frac{x}{t},\frac1{t})e^{i\frac{|x|^2}{4t}}.$$
Moreover, the initial data $u_0(x)=u(x,0)$, $v_0(x)=v(x,0)$ are related by the formula
\begin{equation}
\label{25}
\widehat{v_0}(y)=Cu_0(4\pi y).
\end{equation}
In particular, $$\|u_0\|_2\sim\|v_0\|_2.$$
\end{proposition}
\end{document} |
\begin{document}
\begin{abstract}
We study the strong instability of standing waves for a system of nonlinear Schr\"odinger equations with quadratic interaction under the mass resonance condition in dimension $d=5$.
\end{abstract}
\maketitle
\section{Introduction}
\label{S:0}
We consider the following system of nonlinear Schr\"odinger (NLS) equations
\begin{equation} \label{Syst-NLS}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
i\partial_t u + \frac{1}{2m} \Delta u & = & \lambda v \overline{u}, \\
i\partial_t v + \frac{1}{2M} \Delta v & = & \mu u^2,
\end{array}
\right.
\end{equation}
where $u, v: \mathbb{R} \times \mathbb{R}^d \rightarrow \mathbb{C}$, $m$ and $M$ are positive constants, $\Delta$ is the Laplacian in $\mathbb{R}^d$ and $\lambda, \mu$ are complex constants.
The system \eqref{Syst-NLS} is regarded as a non-relativistic limit of the system of nonlinear Klein-Gordon equations
\[
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
\frac{1}{2c^2m}\partial^2_t u - \frac{1}{2m} \Delta u + \frac{mc^2}{2} u& = & -\lambda v \overline{u}, \\
\frac{1}{2c^2M}\partial^2_t v - \frac{1}{2M} \Delta v + \frac{Mc^2}{2} v& = & -\mu u^2,
\end{array}
\right.
\]
under the mass resonance condition
\begin{align} \label{mas-res}
M=2m.
\end{align}
Indeed, the modulated wave functions $(u_c,v_c):= (e^{itmc^2} u, e^{itMc^2} v)$ satisfy
\begin{align}\label{klei-gord}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
\frac{1}{2c^2m} \partial^2_t u_c - i\partial_t u_c - \frac{1}{2m} \Delta u_c &=& - e^{itc^2(2m-M)} \lambda v_c \overline{u}_c,\\
\frac{1}{2c^2M} \partial^2_t v_c - i\partial_t v_c - \frac{1}{2M} \Delta v_c &=& - e^{itc^2(M-2m)} \mu u^2_c.
\end{array}
\right.
\end{align}
We see that the phase oscillations on the right hand sides vanish if and only if \eqref{mas-res} holds, and the system \eqref{klei-gord} formally yields \eqref{Syst-NLS} as the speed of light $c$ tends to infinity. The system \eqref{Syst-NLS} also appears in the interaction process for wave propagation in quadratic media (see e.g. \cite{CMS}).
The system \eqref{Syst-NLS} has attracted a lot of interest in the past several years. The scattering theory and the asymptotic behavior of solutions have been studied in \cite{HLN, HLN-modi, HLO, OU}. The Cauchy problem for \eqref{Syst-NLS} in $L^2$, $H^1$ and in the weighted $L^2$ space $\langle x \rangle^{-1} L^2 = \mathcal{F}(H^1)$ under the mass resonance condition has been studied in \cite{HOT}. The space-time analytic smoothing effect has been studied in \cite{HO-1, HO-2, Hoshino}. The sharp threshold for scattering and blow-up for \eqref{Syst-NLS} under the mass resonance condition in dimension $d=5$ has been studied in \cite{Hamano}. The existence, stability of standing waves and the characterization of finite time blow-up solutions with minimal mass have been studied recently in \cite{Dinh}.
Let us recall the local well-posedness in $H^1$ for \eqref{Syst-NLS} due to \cite{HOT}. To ensure the conservation law of total charge, it is natural to consider the following condition:
\begin{align} \label{mas-con}
\exists ~ c \in \mathbb{R} \backslash \{0\} \ : \ \lambda = c \overline{\mu}.
\end{align}
\begin{proposition}[LWP in $H^1$ \cite{HOT}] \label{prop-lwp-h1}
Let $d\leq 6$ and let $\lambda$ and $\mu$ satisfy \eqref{mas-con}. Then for any $(u_0,v_0) \in H^1\times H^1$, there exists a unique pair of local solutions $(u,v) \in Y(I)\times Y(I)$ of \eqref{Syst-NLS} with initial data $(u(0), v(0))=(u_0,v_0)$, where
\begin{align*}
Y(I) = (C\cap L^\infty)(I,H^1) \cap L^4(I,W^{1,\infty}) &\text{ for } d=1, \\
Y(I) = (C\cap L^\infty)(I,H^1) \cap L^{q_0}(I,W^{1,{r_0}}) &\text{ for } d=2,
\end{align*}
where $0<\frac{2}{q_0}=1-\frac{2}{r_0}<1$ with $r_0$ sufficiently large,
\begin{align*}
Y(I) = (C\cap L^\infty)(I, H^1) \cap L^2(I, W^{1,\frac{2d}{d-2}}) &\text{ for } d\geq 3.
\end{align*}
Moreover, the solution satisfies the conservation of mass and energy: for all $t\in I$,
\begin{align*}
M(u(t),v(t))&:= \|u(t)\|^2_{L^2}+ c\|v(t)\|^2_{L^2} = M(u_0,v_0), \\
E(u(t),v(t))&:= \frac{1}{2m}\|\nabla u(t)\|^2_{L^2} + \frac{c}{4M} \|\nabla v(t)\|^2_{L^2} + \emph{Re} (\lambda \langle v(t), u^2(t) \rangle ) = E(u_0,v_0),
\end{align*}
where $\langle \cdot, \cdot \rangle$ is the scalar product in $L^2$.
\end{proposition}
We now assume that $\lambda$ and $\mu$ satisfy \eqref{mas-con} with $c>0$ and $\lambda \ne 0, \mu \ne 0$. By change of variables
\[
u(t,x) \mapsto \sqrt{\frac{c}{2}} |\mu| u \left(t,\sqrt{\frac{1}{2m}} x \right), \quad v(t,x) \mapsto -\frac{\lambda}{2} v\left( t, \sqrt{\frac{1}{2m}} x\right),
\]
the system \eqref{Syst-NLS} becomes
\begin{equation} \label{cha-Syst}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
i\partial_t u + \Delta u & = & -2 v \overline{u}, \\
i\partial_t v + \kappa \Delta v & = & - u^2,
\end{array}
\right.
\end{equation}
where $\kappa=\frac{m}{M}$ is the mass ratio. Note that the mass and the energy now become
\begin{align*}
M(u(t),v(t)) &= \|u(t)\|^2_{L^2} + 2 \|v(t)\|^2_{L^2}, \\
E(u(t),v(t)) &= \frac{1}{2} (\|\nabla u(t)\|^2_{L^2} + \kappa \|\nabla v(t)\|^2_{L^2} ) - \text{Re}( \langle v(t), u^2(t)\rangle).
\end{align*}
The local well-posedness in $H^1$ for \eqref{cha-Syst} reads as follows.
\begin{proposition} [LWP in $H^1$] \label{prop-lwp-wor}
Let $d\leq 6$. Then for any $(u_0, v_0) \in H^1 \times H^1$, there exists a unique pair of local solutions $(u,v) \in Y(I) \times Y(I)$ of \eqref{cha-Syst} with initial data $(u(0), v(0))=(u_0,v_0)$. Moreover, the solution satisfies the conservation of mass and energy: for all $t \in I$,
\begin{align*}
M(u(t),v(t)) &:= \|u(t)\|^2_{L^2} + 2 \|v(t)\|^2_{L^2} = M(u_0,v_0), \\
E(u(t),v(t)) &:= \frac{1}{2} (\|\nabla u(t)\|^2_{L^2} + \kappa \|\nabla v(t)\|^2_{L^2}) - \emph{Re} (\langle v(t),u^2(t)\rangle) = E(u_0,v_0).
\end{align*}
\end{proposition}
The main purpose of this paper is to study the strong instability of standing waves for the system \eqref{cha-Syst} under the mass resonance condition $\kappa=\frac{1}{2}$ in dimension $d=5$. Let $d=5$ and consider
\begin{equation} \label{mas-res-Syst}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl}
i\partial_t u + \Delta u & = & -2 v \overline{u}, \\
i\partial_t v + \frac{1}{2} \Delta v & = & - u^2,
\end{array}
\right.
\end{equation}
We call a standing wave a solution to \eqref{mas-res-Syst} of the form $(e^{i\omega t} \phi_\omega, e^{i2\omega t} \psi_\omega)$, where $\omega \in \mathbb{R}$ is a frequency and $(\phi_\omega, \psi_\omega) \in H^1 \times H^1$ is a non-trivial solution to the elliptic system
\begin{equation} \label{ell-equ}
\left\{
\renewcommand*{\arraystretch}{1.3}
\begin{array}{rcl} -\Delta \phi_\omega + \omega \phi_\omega & = & 2 \psi_\omega \overline{\phi}_\omega, \\ -\frac{1}{2} \Delta \psi_\omega + 2\omega \psi_\omega & = & \phi_\omega^2.\end{array}
\right.
\end{equation}
We are interested in showing the strong instability of ground state standing waves for \eqref{mas-res-Syst}. Let us first introduce the notion of ground states related to \eqref{mas-res-Syst}. Denote
\[
S_\omega(u,v):= E(u,v) + \frac{\omega}{2} M(u,v) = \frac{1}{2} K(u,v) + \frac{\omega}{2} M(u,v) - P(u,v),
\]
where
\[
K(u,v) = \|\nabla u\|^2_{L^2} + \frac{1}{2} \|\nabla v\|^2_{L^2}, \quad M(u,v) = \|u\|^2_{L^2} + 2 \|v\|^2_{L^2}, \quad P(u,v) = \text{Re} \int \overline{v} u^2 dx.
\]
We also denote the set of non-trivial solutions of \eqref{ell-equ} by
\[
\mathcal{A}_\omega:= \{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ S'_\omega(u,v) =0 \}.
\]
\begin{definition} \label{def-gro-sta-ins}
A pair of functions $(\phi,\psi) \in H^1 \times H^1$ is called ground state for \eqref{ell-equ} if it is a minimizer of $S_\omega$ over the set $\mathcal{A}_\omega$. The set of ground states is denoted by $\mathcal{G}_\omega$. In particular,
\[
\mathcal{G}_\omega= \{(\phi,\psi) \in \mathcal{A}_\omega \ : \ S_\omega(\phi,\psi) \leq S_\omega(u,v), \forall (u,v) \in \mathcal{A}_\omega \}.
\]
\end{definition}
We have the following result on the existence of ground states for \eqref{ell-equ}.
\begin{proposition} \label{prop-exi-gro-sta-ins}
Let $d=5$, $\kappa = \frac{1}{2}$ and $\omega>0$. Then the set $\mathcal{G}_\omega$ is not empty, and it is characterized by
\[
\mathcal{G}_\omega = \{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ S_\omega(u,v) = d(\omega), K_\omega(u,v) =0 \},
\]
where
\[
K_\omega(u,v) = \left. \partial_\gamma S_\omega(\gamma u, \gamma v) \right|_{\gamma=1} = K(u,v) + \omega M(u,v) - 3 P(u,v)
\]
is the Nehari functional and
\begin{align} \label{d-ome}
d(\omega) := \inf \{ S_\omega(u,v) \ : \ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\}, K_\omega(u,v) =0 \}.
\end{align}
\end{proposition}
The existence of real-valued ground states for \eqref{ell-equ} was proved in \cite{HOT} (actually for $d\leq 5$ and $\kappa>0$). Here we proved the existence of ground states (not necessary real-valued) and proved its characterization. This characterization plays an important role in the study of strong instability of ground states standing waves for \eqref{mas-res-Syst}. We only state and prove Proposition $\ref{prop-exi-gro-sta-ins}$ for $d=5$ and $\kappa=\frac{1}{2}$. However, it is still available for $d\leq 5$ and $\kappa>0$.
We also recall the definition of the strong instability of standing waves.
\begin{definition} \label{def-str-ins}
We say that the standing wave $(e^{i\omega t} \phi_\omega, e^{i2\omega t} \psi_\omega)$ is strongly unstable if for any ${\varepsilon}>0$, there exists $(u_0,v_0) \in H^1 \times H^1$ such that $\|(u_0,v_0) - (\phi_\omega, \psi_\omega)\|_{H^1 \times H^1} <{\varepsilon}$ and the corresponding solution $(u(t),v(t))$ to \eqref{mas-res-Syst} with initial data $(u(0), v(0))=(u_0,v_0)$ blows up in finite time.
\end{definition}
Our main result of this paper is the following.
\begin{theorem} \label{theo-str-ins}
Let $d=5$, $\kappa=\frac{1}{2}$, $\omega>0$ and $(\phi_\omega, \psi_\omega) \in \mathcal{G}_\omega$. Then the ground state standing waves $(e^{i\omega t} \phi_\omega, e^{i2\omega t} \psi_\omega)$ for \eqref{mas-res-Syst} is strongly unstable.
\end{theorem}
To our knowledge, this paper is the first one that addresses the strong instability of standing waves for a system of nonlinear Schr\"odinger equations with quadratic interaction. In \cite{CCO-ins}, Colin-Colin-Ohta proved the instability of standing waves for a system of nonlinear Schr\"odinger equations with three waves interaction. However, they only studied the orbital instability, not the strong instability by blow-up, and they only considered a special standing wave solution $(0,0,e^{2i\omega t} \varphi)$, where $\varphi$ is the unique positive radial solution to the elliptic equation
\[
-\Delta \varphi + 2 \omega \varphi - |\varphi|^{p-1} \varphi=0.
\]
This paper is organized as follows. In Section $\ref{S:1}$, we show the existence of ground states and its characterization given in Proposition $\ref{prop-exi-gro-sta-ins}$. In Section $\ref{S:2}$, we give the proof of the strong instability of standing waves given in Theorem $\ref{theo-str-ins}$.
\section{Existence of ground states}
\label{S:1}
We first show the existence of ground states given in Proposition $\ref{prop-exi-gro-sta-ins}$. To do so, we need the following profile decomposition.
\begin{proposition}[Profile decomposition] \label{prop-pro-dec-5D}
Let $d=5$ and $\kappa=\frac{1}{2}$. Let $(u_n,v_n)_{n\geq 1}$ be a bounded sequence in $H^1 \times H^1$. Then there exist a subsequence, still denoted by $(u_n,v_n)_{n\geq 1}$, a family $(x^j_n)_{n\geq 1}$ of sequences in $\mathbb{R}^5$ and a sequence $(U^j, V^j)_{j\geq 1}$ of $H^1\times H^1$-functions such that
\begin{itemize}
\item[(1)] for every $j\ne k$,
\begin{align} \label{ort-pro-dec-5D}
|x^j_n-x^k_n| \rightarrow \infty \text{ as } n\rightarrow \infty;
\end{align}
\item[(2)] for every $l\geq 1$ and every $x \in \mathbb{R}^5$,
\[
u_n(x) = \sum_{j=1}^l U^j(x-x^j_n) + u^l_n(x), \quad v_n(x)= \sum_{j=1}^l V^j(x-x^j_n) + v^l_n(x),
\]
with
\begin{align} \label{err-pro-dec-5D}
\limsup_{n\rightarrow \infty} \|(u^l_n, v^l_n)\|_{L^q\times L^q} \rightarrow 0 \text{ as } l \rightarrow \infty,
\end{align}
for every $q\in (2, 10/3)$.
\end{itemize}
Moreover, for every $l\geq 1$,
\begin{align}
M(u_n,v_n) &= \sum_{j=1}^l M(U^j, V^j) + M(u^l_n,v^l_n) + o_n(1), \label{mas-pro-dec-5D} \\
K(u_n,v_n) &= \sum_{j=1}^l K(U^j,V^j) + K(u^l_n, v^l_n) + o_n(1), \label{kin-pro-dec-5D} \\
P(u_n,v_n) &= \sum_{j=1}^l P(U^j,V^j) + P(u^l_n, v^l_n) + o_n(1), \label{sca-pro-dec-5D}
\end{align}
where $o_n(1) \rightarrow 0$ as $n\rightarrow \infty$.
\end{proposition}
We refer the reader to \cite[Proposition 3.5]{Dinh} for the proof of this profile decomposition.
The proof of Proposition $\ref{prop-exi-gro-sta-ins}$ is done by several lemmas. To simplify the notation, we denote for $\omega>0$,
\[
H_\omega(u,v):= K(u,v) + \omega M(u,v).
\]
It is easy to see that for $\omega>0$ fixed,
\begin{align} \label{equ-nor}
H_\omega(u,v) \sim \|(u,v)\|_{H^1 \times H^1}.
\end{align}
Note also that
\[
S_\omega(u,v)=\frac{1}{2} K_\omega(u,v)+\frac{1}{2}P(u,v)= \frac{1}{3}K_\omega(u,v)+\frac{1}{6} H_\omega(u,v).
\]
\begin{lemma} \label{lem-pos-d-ome}
$d(\omega)>0$.
\end{lemma}
\begin{proof}
Let $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$ be such that $K_\omega(u,v)=0$, or equivalently $H_\omega(u,v) = 3 P(u,v)$. We have from the Sobolev embedding $H^1(\mathbb{R}^5) \hookrightarrow L^3(\mathbb{R}^5)$ and \eqref{equ-nor} that
\begin{align*}
P(u,v) \leq \int |v| |u|^2 dx \lesssim \|v\|_{L^3} \|u\|^2_{L^3} \lesssim \|v\|_{H^1} \|u\|^2_{H^1} \lesssim [H_\omega(u,v)]^{\frac{3}{2}} \lesssim [P(u,v)]^{\frac{3}{2}}.
\end{align*}
This implies that there exists $C>0$ such that
\[
P(u,v) \geq \frac{1}{C^2}>0.
\]
Thus
\[
S_\omega(u,v) = \frac{1}{2} K_\omega(u,v) + \frac{1}{2} P(u,v) = \frac{1}{2} P(u,v) \geq \frac{1}{2C^2}>0.
\]
Taking the infimum over all $(u,v)\in H^1 \times H^1 \backslash \{(0,0)\}$ satisfying $K_\omega(u,v)=0$, we get the result.
\end{proof}
We now denote the set of all minimizers for $d(\omega)$ by
\[
\mathcal{M}_\omega := \left\{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ K_\omega(u,v) =0, S_\omega(u,v) = d(\omega) \right\}.
\]
\begin{lemma} \label{lem-no-emp-M-ome}
The set $\mathcal{M}_\omega$ is not empty.
\end{lemma}
\begin{proof}
Let $(u_n,v_n)_{n\geq 1}$ be a minimizing sequence for $d(\omega)$, i.e. $(u_n, v_n) \in H^1 \times H^1 \backslash \{(0,0)\}$, $K_\omega(u_n,v_n) =0$ for any $n\geq 1$ and $\lim_{n\rightarrow \infty} S_\omega(u_n,v_n) = d(\omega)$. Since $K_\omega(u_n,v_n) = 0$, we have that $H_\omega(u_n,v_n) = 3 P(u_n,v_n)$ for any $n\geq 1$. We also have that
\[
S_\omega (u_n, v_n) = \frac{1}{3} K_\omega(u_n,v_n) + \frac{1}{6}H_\omega(u_n,v_n) \rightarrow d(\omega) \text{ as } n\rightarrow \infty.
\]
This yields that there exists $C>0$ such that
\[
H_\omega(u_n,v_n) \leq 6 d(\omega) + C
\]
for all $n\geq 1$. By \eqref{equ-nor}, $(u_n,v_n)_{n\geq 1}$ is a bounded sequence in $H^1 \times H^1$. We apply the profile decomposition given in Proposition $\ref{prop-pro-dec-5D}$ to get up to a subsequence,
\[
u_n(x) = \sum_{j=1}^l U^j(x-x^j_n) + u^l_n(x), \quad v_n(x) = \sum_{j=1}^l V^j(x-x^j_n) + v^l_n(x)
\]
for some family of sequences $(x^j_n)_{n\geq 1}$ in $\mathbb{R}^5$ and $(U^j,V^j)_{j\geq 1}$ a sequence of $H^1 \times H^1$-functions satisfying \eqref{err-pro-dec-5D} -- \eqref{sca-pro-dec-5D}. We see that
\[
H_\omega (u_n,v_n)=\sum_{j=1}^l H_\omega (U^j,V^j) + H_\omega(u^l_n, v^l_n) + o_n(1).
\]
This implies that
\begin{align*}
K_\omega(u_n,v_n)&=H_\omega(u_n,v_n)-3P(u_n,v_n)\\
&=\sum_{j=1}^l H_\omega(U^j,V^j) + H_\omega(u^l_n,v^l_n) - 3P(u_n,v_n)+ o_n(1) \\
&=\sum_{j=1}^l K_\omega(U^j,V^j) + 3\sum_{j=1}^l P(U^j,V^j)- 3 P(u_n,v_n) + H_\omega(u^l_n,v^l_n)+o_n(1).
\end{align*}
Since $K_\omega(u_n,v_n)=0$ for any $n\geq 1$, $P(u_n,v_n) \rightarrow 2 d(\omega)$ as $n\rightarrow \infty$ and $H_\omega(u^l_n,v^l_n) \geq 0$ for any $n\geq 1$, we infer that
\[
\sum_{j=1}^l K_\omega(U^j,V^j) + 3 \sum_{j=1}^l P(U^j,V^j) - 6 d(\omega) \leq 0
\]
or
\[
\sum_{j=1}^l H_\omega (U^j,V^j) - 6d(\omega) \leq 0.
\]
By H\"older's inequality and \eqref{err-pro-dec-5D}, it is easy to see that $\limsup_{n\rightarrow \infty} P(u_n^l, v^l_n) =0$ as $l\rightarrow \infty$. Thanks to \eqref{sca-pro-dec-5D}, we have that
\[
2d(\omega) = \lim_{n\rightarrow \infty} P(u_n,v_n) = \sum_{j=1}^\infty P(U^j,V^j).
\]
We thus obtain
\begin{align} \label{pro-dec-app-5D}
\sum_{j=1}^\infty K_\omega(U^j,V^j) \leq 0 \text{ and } \sum_{j=1}^\infty H_\omega(U^j,V^j) \leq 6d(\omega).
\end{align}
We now claim that $K_\omega(U^j,V^j) =0$ for all $j\geq 1$. Indeed, suppose that there exists $j_0 \geq 1$ such that $K_\omega(U^{j_0},V^{j_0}) <0$. Then we see that the equation $K_\omega(\gamma U^{j_0}, \gamma V^{j_0}) = \gamma^2 H_\omega(U^{j_0},V^{j_0}) - 3 \gamma^3 P(U^{j_0},V^{j_0})=0$ admits a unique non-zero solution
\[
\gamma_0 := \frac{H_\omega(U^{j_0},V^{j_0})}{3 P(U^{j_0}, V^{j_0})} \in (0,1).
\]
By the definition of $d(\omega)$, we have
\[
d(\omega) \leq S_\omega(\gamma_0 U^{j_0}, \gamma_0 V^{j_0}) = \frac{1}{6} H_\omega(\gamma_0 U^{j_0}, \gamma_0 V^{j_0}) = \frac{\gamma_0^2}{6} H_\omega(U^{j_0},V^{j_0}) <\frac{1}{6} H_\omega(U^{j_0},V^{j_0})
\]
which contradicts to the second inequality in \eqref{pro-dec-app-5D}. We next claim that there exists only one $j$ such that $(U^j,V^j)$ is non-zero. Indeed, if there are $(U^{j_1},V^{j_1})$ and $(U^{j_2},V^{j_2})$ non-zero, then by \eqref{pro-dec-app-5D}, both $H_\omega(U^{j_1},V^{j_1})$ and $H_\omega(U^{j_2},V^{j_2})$ are strictly smaller than $6d(\omega)$. Moreover, since $K_\omega(U^{j_1},V^{j_1}) =0$,
\[
d(\omega) \leq S_\omega(U^{j_1},V^{j_1}) = \frac{1}{6} H_\omega(U^{j_1},V^{j_1}) <d(\omega)
\]
which is absurd. Therefore, without loss of generality we may assume that the only one non-zero profile is $(U^1,V^1)$. We will show that $(U^1,V^1) \in \mathcal{M}_\omega$. Indeed, we have $P(U^1,V^1) = 2d(\omega)>0$ which implies $(U^1,V^1) \ne (0,0)$. We also have
\[
K_\omega(U^1,V^1) =0 \text{ and } S_\omega(U^1,V^1) = \frac{1}{2} P(U^1,V^1) =d(\omega).
\]
This shows that $(U^1,V^1)$ is a minimizer for $d(\omega)$. The proof is complete.
\end{proof}
\begin{lemma} \label{lem-inc-M-G}
$\mathcal{M}_\omega \subset \mathcal{G}_\omega$.
\end{lemma}
\begin{proof}
Let $(\phi,\psi) \in \mathcal{M}_\omega$. Since $K_\omega(\phi,\psi) =0$, we have $H_\omega(\phi,\psi) = 3 P(\phi,\psi)$. On the other hand, since $(\phi,\psi)$ is a minimizer for $d(\omega)$, there exists a Lagrange multiplier $\gamma \in \mathbb{R}$ such that
\[
S'_\omega(\phi,\psi) = \gamma K'_\omega(\phi,\psi).
\]
This implies that
\[
0 = K_\omega(\phi,\psi) = \langle S'_\omega(\phi,\psi), (\phi,\psi)\rangle = \gamma \langle K'_\omega(\phi,\psi), (\phi,\psi)\rangle.
\]
A direct computation shows that
\[
\langle K'_\omega(\phi,\psi), (\phi,\psi)\rangle = 2 K(\phi,\psi) + 2 \omega M(\phi,\psi) - 9 P(\phi,\psi) = 2 H_\omega(\phi,\psi) - 9 P(\phi,\psi) = - 3P(\phi,\psi) <0.
\]
Therefore, $\gamma = 0$ and $S'_\omega(\phi,\psi) =0$ or $(\phi,\psi) \in \mathcal{A}_\omega$. It remains to show that $S_\omega(\phi,\psi) \leq S_\omega(u,v)$ for all $(u,v) \in \mathcal{A}_\omega$. Let $(u,v) \in\mathcal{A}_\omega$. We have $K_\omega(u,v) = \langle S'_\omega(u,v), (u,v) \rangle =0$. By the definition of $d(\omega)$, we get $S_\omega(\phi,\psi) \leq S_\omega(u,v)$. The proof is complete.
\end{proof}
\begin{lemma} \label{lem-inc-G-M}
$\mathcal{G}_\omega \subset \mathcal{M}_\omega$.
\end{lemma}
\begin{proof}
Let $(\phi_\omega, \psi_\omega) \in \mathcal{G}_\omega$. Since $\mathcal{M}_\omega$ is not empty, we take $(\phi,\psi) \in \mathcal{M}_\omega$. We have from Lemma $\ref{lem-inc-M-G}$ that $(\phi,\psi) \in \mathcal{G}_\omega$. Thus, $S_\omega(\phi_\omega,\psi_\omega) = S_\omega(\phi, \psi)=d(\omega)$. It remains to show that $K_\omega(\phi_\omega,\psi_\omega)=0$. Since $(\phi_\omega,\psi_\omega) \in \mathcal{A}_\omega$, $S'_\omega(\phi_\omega,\psi_\omega)=0$. This implies that
\[
K_\omega(\phi_\omega,\psi_\omega) = \langle S'_\omega(\phi_\omega,\psi_\omega), (\phi_\omega,\psi_\omega) \rangle =0.
\]
The proof is complete.
\end{proof}
\noindent \textit{Proof of Proposition $\ref{prop-exi-gro-sta-ins}$.}
The proof of Proposition $\ref{prop-exi-gro-sta-ins}$ follows immediately from Lemmas $\ref{lem-no-emp-M-ome}$, $\ref{lem-inc-M-G}$ and $\ref{lem-inc-G-M}$.
$\Box$
\section{Strong instability of standing waves}
\label{S:2}
We are now able to study the strong instability of standing waves for \eqref{mas-res-Syst}. Note that the local well-posedness in $H^1 \times H^1$ for \eqref{mas-res-Syst} in 5D is given in Proposition $\ref{prop-lwp-wor}$. Let us start with the following so-called Pohozaev's identities.
\begin{lemma} \label{lem-poh-ide}
Let $d=5$, $\kappa=\frac{1}{2}$ and $\omega>0$. Let $(\phi_\omega,\psi_\omega) \in H^1 \times H^1$ be a solution to \eqref{ell-equ}. Then the following identities hold
\[
2 K(\phi_\omega, \psi_\omega) = 5 P(\phi_\omega, \psi_\omega), \quad 2\omega M(\phi_\omega, \psi_\omega) = P(\phi_\omega,\psi_\omega).
\]
\end{lemma}
\begin{proof}
We only make a formal calculation. The rigorous proof follows from a standard approximation argument. Multiplying both sides of the first equation in \eqref{ell-equ} with $\overline{\phi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we have
\[
\|\nabla \phi_\omega\|^2_{L^2} + \omega \|\phi_\omega\|^2_{L^2}= 2 \text{Re} \int \overline{\psi}_\omega \phi_\omega^2 dx.
\]
Multiplying both sides of the second equation in \eqref{ell-equ} with $\overline{\psi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we get
\[
\frac{1}{2} \|\nabla \psi_\omega\|^2_{L^2} + 2 \omega \|\psi_\omega\|^2_{L^2} = \text{Re} \int \overline{\psi}_\omega \phi_\omega^2 dx.
\]
We thus obtain
\begin{align} \label{poh-ide-pro-1}
K(\phi_\omega,\psi_\omega) + \omega M(\phi_\omega,\psi_\omega) = 3 P(\phi_\omega,\psi_\omega).
\end{align}
Multiplying both sides of the first equation in \eqref{ell-equ} with $x \cdot \nabla \overline{\phi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we see that
\[
-\text{Re} \int \Delta \phi_\omega x \cdot \nabla \overline{\phi}_\omega dx + \omega \text{Re} \int \phi_\omega x \cdot \nabla \overline{\phi}_\omega dx = 2 \text{Re} \int \psi_\omega \overline{\phi}_\omega x \cdot \nabla \overline{\phi}_\omega dx.
\]
A direct computation shows that
\begin{align*}
\text{Re} \int \Delta\phi_\omega x \cdot \nabla \overline{\phi}_\omega dx &=\frac{3}{2} \|\nabla \phi_\omega\|^2_{L^2}, \\
\text{Re} \int \phi_\omega x \cdot \nabla \overline{\phi}_\omega dx &= -\frac{5}{2} \|\phi_\omega\|^2_{L^2}, \\
\text{Re} \int \psi_\omega \overline{\phi}_\omega x \cdot \nabla \overline{\phi}_\omega dx &= -\frac{5}{2} \text{Re} \int \overline{\psi}_\omega (\phi_\omega)^2 dx - \frac{1}{2} \text{Re} \int \phi_\omega^2 x\cdot \nabla \overline{\psi}_\omega dx.
\end{align*}
It follows that
\[
-\frac{3}{2} \|\nabla \phi_\omega\|^2_{L^2} - \frac{5}{2} \omega \|\phi_\omega\|^2_{L^2} = - 5 \text{Re} \int \overline{\psi}_\omega \phi^2_\omega dx - \text{Re} \int \phi^2_\omega x \cdot \nabla \overline{\psi}_\omega dx.
\]
Similarly, multiplying both sides of the second equation in \eqref{ell-equ} with $x \cdot \nabla \overline{\psi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we have
\[
-\frac{3}{4} \|\nabla \psi_\omega\|^2_{L^2} - 5 \omega \|\psi_\omega\|^2_{L^2} = \text{Re} \int \phi^2_\omega x\cdot \nabla \overline{\psi}_\omega dx.
\]
We thus get
\begin{align} \label{poh-ide-pro-2}
\frac{3}{2} K(\phi_\omega,\psi_\omega) + \frac{5}{2} \omega M(\phi_\omega,\psi_\omega) = 5 P(\phi_\omega,\psi_\omega).
\end{align}
Combining \eqref{poh-ide-pro-1} and \eqref{poh-ide-pro-2}, we prove the result.
\end{proof}
We also have the following exponential decay of solutions to \eqref{ell-equ}.
\begin{lemma} \label{lem-dec-pro-gro-sta}
Let $d=5$, $\kappa=\frac{1}{2}$ and $\omega>0$. Let $(\phi_\omega,\psi_\omega) \in H^1 \times H^1$ be a solution to \eqref{ell-equ}. Then the following properties hold
\begin{itemize}
\item $(\phi_\omega,\psi_\omega) \in W^{3,p} \times W^{3,p}$ for every $2 \leq p <\infty$. In particular, $(\phi_\omega,\psi_\omega) \in C^2 \times C^2$ and $|D^\beta \phi_\omega(x)| + |D^\beta \psi_\omega (x)| \rightarrow 0$ as $|x| \rightarrow \infty$ for all $|\beta| \leq 2$;
\item
\[
\int e^{|x|} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx <\infty, \quad \int e^{|x|} (|\nabla \psi_\omega|^2 + 4|\psi_\omega|^2) dx <\infty.
\]
In particular, $(|x| \phi_\omega, |x| \psi_\omega) \in L^2 \times L^2$.
\end{itemize}
\end{lemma}
\begin{proof}
The proof follows the argument of \cite[Theorem 8.1.1]{Cazenave}. Let us prove the first item. We note that if $(\phi_\omega, \psi_\omega) \in L^p \times L^p$ for some $2 \leq p<\infty$, then $\psi_\omega \overline{\phi}_\omega, \phi^2_\omega \in L^{\frac{p}{2}}$. It follows that $(\phi_\omega, \psi_\omega) \in W^{2,\frac{p}{2}} \times W^{2,\frac{p}{2}}$. By Sobolev embedding, we see that
\begin{align} \label{dec-pro-pro-1}
(\phi_\omega,\psi_\omega) \in L^q \times L^q \text{ for some } q \geq \frac{p}{2} \text{ satisfying } \frac{1}{q} \geq \frac{2}{p} - \frac{2}{5}.
\end{align}
We claim that $(\phi_\omega,\psi_\omega) \in L^p \times L^p$ for any $2 \leq p<\infty$. Since $(\phi_\omega,\psi_\omega) \in H^1 \times H^1$, the Sobolev embedding implies that $(\phi_\omega, \psi_\omega) \in L^p \times L^p$ for any $2 \leq p<\frac{10}{3}$. It remains to show the claim for any $p$ sufficiently large. To see it, we define the sequence
\[
\frac{1}{q_n} = 2^n \left( -\frac{1}{15} + \frac{2}{5 \times 2^n} \right).
\]
We have
\[
\frac{1}{q_{n+1}} -\frac{1}{q_n} = -\frac{1}{15} \times 2^n <0.
\]
This implies that $\frac{1}{q_n}$ is decreasing and $\frac{1}{q_n} \rightarrow -\infty$ as $n\rightarrow \infty$. Since $q_0= 3$ (we take $(\phi_\omega, \psi_\omega) \in L^3 \times L^3$ to prove our claim), it follows that there exists $k \geq 0$ such that
\[
\frac{1}{q_n} >0 \text{ for } 0 \leq n \leq k \text{ and } \frac{1}{q_{n+1}} \leq 0.
\]
We will show that $(\phi_\omega, \psi_\omega) \in L^{q_k} \times L^{q_k}$. If $(\phi_\omega, \psi_\omega) \in L^{q_{n_0}} \times L^{q_{n_0}}$ for some $0 \leq n_0 \leq k-1$, then by \eqref{dec-pro-pro-1}, $(\phi_\omega,\psi_\omega) \in L^q \times L^q$ for some $q \geq \frac{q_{n_0}}{2}$ satisfying $\frac{1}{q} \geq \frac{2}{q_{n_0}} - \frac{2}{5}$. By the choice of $q_n$, it is easy to check that $\frac{2}{q_{n_0}} - \frac{2}{5} = \frac{1}{q_{n_0+1}}$. In particular, $(\phi_\omega,\psi_\omega) \in L^{q_{n_0+1}} \times L^{q_{n_0+1}}$. By induction, we prove $(\phi_\omega, \psi_\omega) \in L^{q_k} \times L^{q_k}$. Applying again \eqref{dec-pro-pro-1}, we have
\[
(\phi_\omega,\psi_\omega) \in L^q \times L^q \text{ for all } q \geq \frac{q_k}{2} \text{ such that } \frac{1}{q} \geq \frac{1}{q_{k+1}}.
\]
This shows that $(\phi_\omega, \psi_\omega)$ belongs to $L^p \times L^p$ for any $p$ sufficiently large. The claim follows. Using the claim, we have in particular $\psi_\omega \overline{\phi}_\omega, \phi^2_\omega \in L^p$ for any $2 \leq p<\infty$. Hence $(\phi_\omega, \psi_\omega) \in W^{2,p} \times W^{2,p}$ for any $2 \leq p<\infty$. By H\"older's inequality, we see that $\partial_j(\psi_\omega \overline{\phi}_\omega), \partial_j(\phi^2_\omega) \in L^p$ for any $2 \leq p<\infty$ and any $ 1\leq j \leq 5$. Thus $(\partial_j \phi_\omega, \partial_j \psi_\omega) \in W^{2,p} \times W^{2,p}$ for any $2 \leq p<\infty$ and any $1 \leq j \leq 5$, or $(\phi_\omega,\psi_\omega) \in W^{3,p} \times W^{3,p}$ for any $2 \leq p<\infty$. By Sobolev embedding, $(\phi_\omega,\psi_\omega) \in C^{2,\delta} \times C^{2,\delta}$ for all $0<\delta <1$. In particular, $|D^\beta \phi_\omega(x)| + |D^\beta \psi_\omega(x)| \rightarrow 0$ as $|x| \rightarrow \infty$ for all $|\beta| \leq 2$.
We now prove the second item. Let ${\varepsilon}>0$ and set $\chi_{\varepsilon}(x) := e^{\frac{|x|}{1+{\varepsilon} |x|}}$. For each ${\varepsilon}>0$, the function $\chi_{\varepsilon}$ is bounded, Lipschitz continuous and satisfies $|\nabla \chi_{\varepsilon}| \leq \chi_{\varepsilon}$ a.e. Multiplying both sides of the first equation in \eqref{ell-equ} by $\chi_{\varepsilon} \overline{\phi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we have
\[
\text{Re} \int \nabla \phi_\omega \cdot \nabla (\chi_{\varepsilon} \overline{\phi}_\omega) dx + \int \chi_{\varepsilon} |\phi_\omega|^2 dx = 2 \text{Re} \int \chi_{\varepsilon} \psi_\omega \overline{\phi}^2_\omega dx.
\]
Since $\nabla(\chi_{\varepsilon} \overline{\phi}_\omega) = \chi_{\varepsilon} \nabla \overline{\phi}_\omega + \nabla \chi_{\varepsilon} \overline{\phi}_\omega$, the Cauchy-Schwarz inequality implies that
\begin{align*}
\text{Re} \int \nabla \phi_\omega \cdot \nabla (\chi_{\varepsilon} \overline{\phi}_\omega) dx &= \int \chi_{\varepsilon} |\nabla \phi_\omega|^2 dx + \text{Re} \int \nabla \chi_{\varepsilon} \nabla \phi_\omega \overline{\phi}_\omega dx \\
&\geq \int \chi_{\varepsilon} |\nabla \phi_\omega|^2 dx - \int |\nabla \chi_{\varepsilon}| |\nabla \phi_\omega| |\phi_\omega| dx \\
& \geq \int \chi_{\varepsilon} |\nabla \phi_\omega|^2 dx - \frac{1}{2} \int \chi_{\varepsilon} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx.
\end{align*}
We thus get
\begin{align} \label{dec-pro-pro-2}
\int \chi_{\varepsilon} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx \leq 4 \text{Re} \int \chi_{\varepsilon} \psi_\omega \overline{\phi}^2_\omega dx.
\end{align}
Similarly, multiplying both sides of the second equation in \eqref{ell-equ} with $\chi_{\varepsilon} \overline{\psi}_\omega$, integrating over $\mathbb{R}^5$ and taking the real part, we get
\begin{align} \label{dec-pro-pro-3}
\int \chi_{\varepsilon}(|\nabla \psi_\omega|^2 + 4 |\psi_\omega|^2) dx \leq \frac{8}{3} \text{Re} \int \chi_{\varepsilon} \overline{\psi}_\omega \phi^2_\omega dx.
\end{align}
By the first item, there exists $R>0$ large enough such that $|\psi_\omega(x)| \leq \frac{1}{8}$ for $|x| \geq R$. We have that
\begin{align*}
4 \text{Re} \int \chi_{\varepsilon} \psi_\omega \overline{\phi}^2_\omega dx & \leq 4 \int \chi_{\varepsilon} |\psi_\omega| |\phi_\omega|^2 dx \\
&=4 \int_{|x| \leq R} \chi_{\varepsilon} |\psi_\omega| |\phi_\omega|^2 dx + 4\int_{|x| \geq R} \chi_{\varepsilon} |\psi_\omega| |\phi_\omega|^2 dx \\
&\leq 4 \int_{|x| \leq R} e^{|x|} |\psi_\omega| |\phi_\omega|^2 dx + \frac{1}{2} \int \chi_{\varepsilon} |\phi_\omega|^2 dx.
\end{align*}
We thus get from \eqref{dec-pro-pro-2} that
\begin{align} \label{dec-pro-pro-4}
\int \chi_{\varepsilon} (|\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx \leq 8 \int_{|x| \leq R} e^{|x|} |\psi_\omega||\phi_\omega|^2 dx.
\end{align}
Letting ${\varepsilon} \rightarrow 0$, we obtain
\[
\int e^{|x|}( |\nabla \phi_\omega|^2 + |\phi_\omega|^2) dx \leq 8 \int_{|x| \leq R} e^{|x|} |\psi_\omega||\phi_\omega|^2 dx <\infty.
\]
Similarly, by \eqref{dec-pro-pro-3} and \eqref{dec-pro-pro-4},
\begin{align*}
\int \chi_{\varepsilon} (|\nabla \psi_\omega|^2 + 4 |\psi_\omega|^2) dx &\leq \frac{2}{3} \left(4 \int_{|x| \leq R} e^{|x|} |\psi_\omega| |\phi_\omega|^2 dx + \frac{1}{2} \int \chi_{\varepsilon} |\phi_\omega|^2 dx \right) \\
&\leq \frac{16}{3} \int_{|x| \leq R} e^{|x|} |\psi_\omega| |\phi_\omega|^2 dx.
\end{align*}
Letting ${\varepsilon} \rightarrow 0$, we get
\[
\int e^{|x|}( |\nabla \psi_\omega|^2 + 4|\psi_\omega|^2) dx \leq \frac{16}{3} \int_{|x| \leq R} e^{|x|} |\psi_\omega||\phi_\omega|^2 dx <\infty.
\]
The proof is complete.
\end{proof}
We also need the following virial identity related to \eqref{mas-res-Syst}.
\begin{lemma} \label{lem-vir-ide-ins}
Let $d=5$ and $\kappa=\frac{1}{2}$. Let $(u_0,v_0) \in H^1 \times H^1$ be such that $(|x|u_0, |x| v_0) \in L^2 \times L^2$. Then the corresponding solution to \eqref{mas-res-Syst} with initial data $(u(0),v(0)) = (u_0,v_0)$ satisfies
\begin{align*}
\frac{d^2}{dt^2} (\|xu(t)\|^2_{L^2} + 2 \|xv(t)\|^2_{L^2}) = 8 \left(\|\nabla u(t)\|^2_{L^2} + \frac{1}{2} \|\nabla v(t)\|^2_{L^2}\right) - 20 \emph{Re} \int \overline{v}(t) u^2(t) dx.
\end{align*}
\end{lemma}
\begin{proof}
The above identity follows immediately from \cite[Lemma 3.1]{Dinh} with $\chi(x) = |x|^2$.
\end{proof}
Now let us denote for $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$,
\[
Q(u,v) := K(u,v) - \frac{5}{2} P(u,v).
\]
It is obvious that
\begin{align} \label{vir-ide-ins}
\frac{d^2}{dt^2} (\|xu(t)\|^2_{L^2} + 2 \|xv(t)\|^2_{L^2}) = 8 Q(u(t),v(t)).
\end{align}
Note that if we take
\begin{align} \label{scaling}
u^\gamma(x) = \gamma^{\frac{5}{2}} u (\gamma x), \quad v^\gamma(x) = \gamma^{\frac{5}{2}} v(\gamma x),
\end{align}
then
\begin{align*}
S_\omega(u^\gamma, v^\gamma) &= \frac{1}{2} K(u^\gamma,v^\gamma) + \frac{\omega}{2} M(u^\gamma,v^\gamma) - P(u^\gamma,v^\gamma) \\
&=\frac{\gamma^2}{2} K(u,v) + \frac{\omega}{2} M(u,v) - \gamma^{\frac{5}{2}} P(u,v).
\end{align*}
It is easy to see that
\[
Q(u,v) = \left. \partial_\gamma S_\omega(u^\gamma, v^\gamma) \right|_{\gamma=1}.
\]
\begin{lemma} \label{lem-cha-gro-sta-5D}
Let $d=5$, $\kappa=\frac{1}{2}$ and $\omega>0$. Let $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. Then
\[
S_\omega(\phi_\omega,\psi_\omega) = \inf \left\{ S_\omega(u,v) \ : \ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\}, Q(u,v)=0 \right\}.
\]
\end{lemma}
\begin{proof}
Denote $m:= \inf \left\{ S_\omega(u,v) \ : \ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\}, Q(u,v)=0 \right\}$. Since $(\phi_\omega,\psi_\omega)$ is a solution of \eqref{ell-equ}, it follows from Lemma $\ref{lem-poh-ide}$ that $Q(\phi_\omega,\psi_\omega) =K_\omega(\phi_\omega,\psi_\omega)=0$. Thus
\begin{align} \label{cha-gro-sta-5D-pro-1}
S_\omega(\phi_\omega,\psi_\omega) \geq m.
\end{align}
Now let $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$ be such that $Q(u,v) =0$. If $K_\omega(u,v) =0$, then by Proposition $\ref{prop-exi-gro-sta-ins}$, $S_\omega(u,v) \geq S_\omega(\phi_\omega,\psi_\omega)$. If $K_\omega(u,v) \ne 0$, we consider $K_\omega(u^\gamma, v^\gamma) = \gamma^2 K(u,v) + \omega M(u,v) - \gamma^{\frac{5}{2}} P(u,v)$, where $(u^\gamma, v^\gamma)$ is as in \eqref{scaling}. Since $\lim_{\gamma \rightarrow 0} K_\omega(u^\gamma, v^\gamma)= \omega M(u,v) >0$ and $\lim_{\gamma \rightarrow \infty} K_\omega(u^\gamma, v^\gamma) = -\infty$, there exists $\gamma_0>0$ such that $K_\omega(u^{\gamma_0},v^{\gamma_0}) =0$. It again follows from Proposition $\ref{prop-exi-gro-sta-ins}$, $S_\omega(u^{\gamma_0},v^{\gamma_0}) \geq S_\omega(\phi_\omega,\psi_\omega)$. On the other hand,
\[
\partial_\gamma S_\omega(u^\gamma,v^\gamma) = \gamma K(u,v) - \frac{5}{2} \gamma^{\frac{3}{2}} P(u,v).
\]
We see that the equation $\partial_\gamma S_\omega(u^\gamma, v^\gamma) =0$ admits a unique non-zero solution
\[
\left( \frac{2K(u,v)}{5P(u,v)} \right)^2=1
\]
since $Q(u,v) =0$. This implies that $\partial_\gamma S_\omega(u^\gamma, v^\gamma)>0$ if $\gamma \in (0,1)$ and $\partial_\gamma S_\omega(u^\gamma,v^\gamma)<0$ if $\gamma \in (1,\infty)$. In particular, $S_\omega(u^\gamma,v^\gamma) \leq S_\omega(u,v)$ for all $\gamma >0$. Hence $S_\omega(u^{\gamma_0},v^{\gamma_0}) \leq S_\omega(u,v)$. We thus obtain $S_\omega(\phi_\omega,\psi_\omega) \leq S_\omega(u,v)$ for any $(u,v) \in H^1 \times H^1 \backslash \{(0,0)\}$ satisfying $Q(u,v)=0$. Therefore,
\begin{align} \label{cha-gro-sta-5D-pro-2}
S_\omega(\phi_\omega,\psi_\omega) \leq m.
\end{align}
Combining \eqref{cha-gro-sta-5D-pro-1} and \eqref{cha-gro-sta-5D-pro-2}, we prove the result.
\end{proof}
Let $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. Define
\[
\mathcal{B}_\omega:= \left\{ (u,v) \in H^1 \times H^1 \backslash \{(0,0)\} \ : \ S_\omega(u,v) < S_\omega(\phi_\omega,\psi_\omega), Q(u,v) <0 \right\}.
\]
\begin{lemma} \label{lem-inv-set}
Let $d=5$, $\kappa=\frac{1}{2}$, $\omega>0$ and $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. The set $\mathcal{B}_\omega$ is invariant under the flow of \eqref{mas-res-Syst}.
\end{lemma}
\begin{proof}
Let $(u_0,v_0) \in \mathcal{B}_\omega$. We will show that the corresponding solution $(u(t),v(t))$ to \eqref{mas-res-Syst} with initial data $(u(0),v(0)) = (u_0,v_0)$ satisfies $(u(t),v(t)) \in \mathcal{B}_\omega$ for any $t$ in the existence time. Indeed, by the conservation of mass and energy, we have
\begin{align} \label{inv-set-pro}
S_\omega(u(t),v(t)) = S_\omega(u_0,v_0) < S_\omega (\phi_\omega,\psi_\omega)
\end{align}
for any $t$ in the existence time. It remains to show that $Q(u(t),v(t))<0$ for any $t$ as long as the solution exists. Suppose that there exists $t_0 >0$ such that $Q(u(t_0),v(t_0)) \geq 0$. By the continuity of the function $t\mapsto Q(u(t),v(t))$, there exists $t_1 \in (0,t_0]$ such that $Q(u(t_1),v(t_1)) =0$. It follows from Lemma $\ref{lem-cha-gro-sta-5D}$ that $S_\omega(u(t_1),v(t_1)) \geq S_\omega(\phi_\omega,\psi_\omega)$ which contradicts to \eqref{inv-set-pro}. The proof is complete.
\end{proof}
\begin{lemma} \label{lem-key-lem}
Let $d=5$, $\kappa=\frac{1}{2}$, $\omega>0$ and $(\phi_\omega,\psi_\omega) \in \mathcal{G}_\omega$. If $(u,v) \in \mathcal{B}_\omega$, then
\[
Q(u,v) \leq 2 (S_\omega(u,v) - S_\omega(\phi_\omega,\psi_\omega)).
\]
\end{lemma}
\begin{proof}
Let $(u,v) \in \mathcal{B}_\omega$. Set
\[
f(\gamma):= S_\omega(u^\gamma,v^\gamma) = \frac{\gamma^2}{2} K(u,v) + \frac{\omega}{2} M(u,v) - \gamma^{\frac{5}{2}} P(u,v).
\]
We have
\[
f'(\gamma) = \gamma K(u,v) - \frac{5}{2} \gamma^{\frac{3}{2}} P(u,v) = \frac{Q(u^\gamma, v^\gamma)}{\gamma}.
\]
We see that
\begin{align} \label{key-lem-pro}
(\gamma f'(\gamma))' &= 2\gamma K(u,v) - \frac{25}{4} \gamma^{\frac{3}{2}} P(u,v) \nonumber \\
&= 2 \left(\gamma K(u,v) - \frac{5}{2} \gamma^{\frac{3}{2}} P(u,v) \right) - \frac{5}{4} \gamma^{\frac{3}{2}} P(u,v) \nonumber \\
&\leq 2f'(\gamma)
\end{align}
for all $\gamma >0$. Note that $P(u,v) \geq 0$ which follows from the fact $Q(u,v) <0$. We also note that since $Q(u,v) <0$, the equation $\partial_\gamma S_\omega(u^\gamma, v^\gamma)=0$ admits a unique non-zero solution
\[
\gamma_0 = \left(\frac{2K(u,v)}{5P(u,v)} \right)^2 \in (0,1),
\]
and $Q(u^{\gamma_0},v^{\gamma_0}) = \gamma_0 \times \left.\partial_\gamma S_\omega(u^\gamma,v^\gamma)\right|_{\gamma=\gamma_0} =0$. Taking integration of \eqref{key-lem-pro} over $(\gamma_0,1)$ and using the fact $\gamma f'(\gamma) = Q(u^\gamma,v^\gamma)$, we get
\[
Q(u,v) - Q(u^{\gamma_0},v^{\gamma_0}) \leq 2 (S_\omega(u,v) - S_\omega(u^{\gamma_0},v^{\gamma_0})).
\]
The result then follows from the fact that $S_\omega(\phi_\omega,\psi_\omega) \leq S_\omega(u^{\gamma_0},v^{\gamma_0})$ since $Q(u^{\gamma_0},v^{\gamma_0}) = 0$.
\end{proof}
We are now able to prove the strong instability of standing waves given in Theorem $\ref{theo-str-ins}$.
\noindent \textit{Proof of Theorem $\ref{theo-str-ins}$.}
Let ${\varepsilon}>0$. Since $(\phi^{\gamma}_\omega, \psi^{\gamma}_\omega) \rightarrow (\phi_\omega,\psi_\omega)$ as $\gamma \rightarrow 1$, there exists $\gamma_0>1$ such that $\|(\phi^{\gamma_0}_\omega,\psi^{\gamma_0}_\omega) - (\phi_\omega,\psi_\omega)\|_{H^1 \times H^1} <{\varepsilon}$. We claim that $(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega) \in \mathcal{B}_\omega$. Indeed, we have
\begin{align*}
S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) &= \frac{\gamma^2}{2} K(\phi_\omega,\psi_\omega) +\frac{\omega}{2} M(\phi_\omega,\psi_\omega) -\gamma^{\frac{5}{2}} P(\phi_\omega,\psi_\omega), \\
\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) &= \gamma K(\phi_\omega, \psi_\omega) - \frac{5}{2} \gamma^{\frac{3}{2}} P(\phi_\omega,\psi_\omega) = \frac{Q(\phi^\gamma_\omega, \psi^\gamma_\omega)}{\gamma}.
\end{align*}
Since $Q(\phi_\omega,\psi_\omega)=0$, the equation $\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) =0$ admits a unique non-zero solution
\[
\left( \frac{2K(\phi_\omega,\psi_\omega)}{5P(\phi_\omega,\psi_\omega)} \right)^2 =1.
\]
This implies that $\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega) >0$ if $\gamma \in (0,1)$ and $\partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega)<0$ if $\gamma \in (1,\infty)$. In particular, $S_\omega(\phi^\gamma_\omega,\psi^\gamma_\omega)<S_\omega(\phi_\omega,\psi_\omega)$ for any $\gamma>0$ and $\gamma \ne 1$. On the other hand, since $Q(\phi^\gamma_\omega,\psi^\gamma_\omega)= \gamma \partial_\gamma S_\omega(\phi^\gamma_\omega, \psi^\gamma_\omega)$, we see that $Q(\phi^\gamma_\omega, \psi^\gamma_\omega) >0$ if $\gamma \in (0,1)$ and $Q(\phi^\gamma_\omega, \psi^\gamma_\omega)<0$ if $\gamma \in (1,\infty)$. Since $\gamma_0>1$, we see that
\[
S_\omega(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega)< S_\omega(\phi_\omega,\psi_\omega) \text{ and } Q(\phi^{\gamma_0}_\omega,\psi^{\gamma_0}_\omega) <0.
\]
Therefore, $(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega) \in \mathcal{B}_\omega$ and the claim follows.
By the local well-posedness, there exists a unique solution $(u(t), v(t)) \in C([0,T), H^1 \times H^1)$ to \eqref{mas-res-Syst} with initial data $(u(0),v(0)) = (\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega)$, where $T>0$ is the maximal time of existence. By Lemma $\ref{lem-inv-set}$, we see that $(u(t),v(t)) \in \mathcal{B}_\omega$ for any $t\in [0,T)$. Thus, applying Lemma $\ref{lem-key-lem}$, we get
\[
Q(u(t),v(t)) \leq 2 (S_\omega(u(t),v(t)) - S_\omega (\phi_\omega,\psi_\omega)) = 2(S_\omega(\phi^{\gamma_0}_\omega,\psi^{\gamma_0}_\omega) - S_\omega(\phi_\omega, \psi_\omega)) =- \delta
\]
for any $t\in [0,T)$, where $\delta= 2 (S_\omega(\phi_\omega, \psi_\omega) - S_\omega(\phi^{\gamma_0}_\omega, \psi^{\gamma_0}_\omega)) >0$. Since $(|x|\phi_\omega, |x|\psi_\omega) \in L^2 \times L^2$, it follows that $(|x| \phi^{\gamma_0}_\omega, |x| \psi^{\gamma_0}_\omega) \in L^2 \times L^2$. Thanks to the virial identity \eqref{vir-ide-ins}, we obtain
\[
\frac{d^2}{dt^2} \left( \|xu(t)\|^2_{L^2} + 2 \|xv(t)\|^2_{L^2} \right) = 8 Q(u(t),v(t)) \leq -8 \delta <0,
\]
for any $t\in [0,T)$. The classical argument of Glassey \cite{Glassey} implies that the solution blows up in finite time. The proof is complete.
$\Box$
\end{document} |
\begin{document}
\title[Quantum Nondemolition Principle]{Nondemolition Principle of Quantum
Measurement Theory}
\author{V.P. Belavkin}
\address{Philipps Universit\"{a}t, Fachbereich Physik D--3550, Marburg,
Germany and University of Nottingham, NG7 2RD, UK}
\email{ [email protected]}
\date{Received August 31, 1992 }
\subjclass{}
\keywords{Quantum measurement problem, Quantum nondemolition measurements,
Quantum posterior states, Quantum state diffusion, Quantum spontaneous
localization.}
\thanks{ Published in:\emph{\ }\textit{Foundations of Physics}, \textbf{24}
(1994) No 5, 685--714}
\begin{abstract}
\noindent We give an explicit axiomatic formulation of the quantum
measurement theory which is free of the projection postulate. It is based on
the generalized nondemolition principle applicable also to the unsharp,
continuous--spectrum and continuous-in-time observations. The
\textquotedblleft collapsed state--vector\textquotedblright\ after the
\textquotedblleft objectification\textquotedblright\ is simply treated as a
random vector of the \textit{a posteriori}\emph{\/} state given by the
quantum filtering, i.e., the conditioning of the \textit{a priori\/} induced
state on the corresponding reduced algebra. The nonlinear phenomenological
equation of \textquotedblleft continuous spontaneous
localization\textquotedblright\ has been derived from the Schr\"{o}dinger
equation as a case of the quantum filtering equation for the diffusive
nondemolition measurement. The quantum theory of measurement and filtering
suggests also another type of the stochastic equation for the dynamical
theory of continuous reduction, corresponding to the counting nondemolition
measurement, which is more relevant for the quantum experiments.
\end{abstract}
\maketitle
\section{The status of quantum measurement theory}
Quantum measurement theory, based on the ordinary von Neumann or a
generalized reduction postulate, was never an essential part of quantum
physics but rather of metaphysics. First, this was because the orthodox
quantum theory had always dealt with a closed quantum system while the
object of measurement is an open system due to the interaction with the
measurement apparatus. Second, the superposition principle of quantum
mechanics, having dealt with simple algebras of observables, is in
contradiction with the von Neumann projection postulate while it may be not
so in the algebraic quantum theory with the corresponding superselection
rules. Third, due to the dynamical tradition in quantum theory going on from
the deterministic mechanics, the process of the measurement was always
considered by theoretical physicists as simply just an ordinary interaction
between two objects while any experimentalist or statistician knows that
this is a stochastic process, giving rise to the essential difference
between an \textit{a priori\/} and an \textit{a posteriori\/} description of the
states.
The last and most essential reason for such an unsatisfactory status of the
quantum measurement theory was the limitations of the projection postulate
applicable only to the instantaneous measurement of the observables with the
discrete spectra, while the real experiments always have a finite duration
and the most important observation is the measurement of the position having
the continuous spectrum.
There are many approaches to the theory of quantum measurement ranging from
purely philosophical to qualitative and even quantitative theories in which
the projection postulate apparently is not needed or is generalized to meet
the indirect, or unsharp, measurements [1--10].\nocite{bib:1,2,3}\nocite
{bib:4,5,6}\nocite{bib:7,8,9,10}
The most general, the philosophical level, of the discussion of these
problems is of course the simplest and the appropriate one for the largest
audience. But it provides room for unprofessional applications of the more
sophisticated theoretical arguments, giving rise to different kinds of the
speculations and paradoxes. I believe that the professional standard of
quantum measurement theory ought to be an axiomatic and rigorous one and the
quantum measurement problems must be formulated within it and solved
properly instead of making speculations.
In order to examine the quantum paradoxes of Zeno type related to the
continuous measurements, the study must be based on advanced mathematical
methods of the quantum theory of compound systems with not regular but
rather singular interaction, and this has recently received a stochastic
treatment in the quantum theory of open systems and noise. It must use the
tools of the quantum algebraic theory for the calculus of input fields of
the apparatus, i.e., the quantum noises which usually have an infinite
number of degrees of freedom, and for the superselection of output fields,
i.e., commutative (classical) pointer processes which are usually the
stochastic processes in continuous time.
Perhaps some philosophers and physicists would not like such a treatment of
quantum measurement theory; the more mathematical a theory is the less
philosophical it is, and the more rigorous it is, the less alive it is. But
this is just an objective process of the development of any scientific
theory and has already happened with the classical information and
measurement theory.
The corresponding classical dynamical measurement theory, called the
stochastic filtering theory, was developed in the beginning of the 60's by
Stratonovich [11] and for the particular linear case by Kalman [12]. This
theory, based on the notion of the partial (unsharp) observation and the
stochastic calculus method, is optional for the classical deterministic
physics, having dealt with the complete (sharp) observations of the phase
trajectories and ordinary differential calculus, and is usually regarded as
a part of the stochastic systems theory or, more precisely, the classical
information and control theory. The main task of the filtering theory is to
derive and solve a stochastic reduction equation for the present posterior
state of the object of incomplete measurement, giving a means to calculate
the conditional probabilities of the future observations with respect to the
results of the past measurements. The corresponding filtering equation
describes, for example, the continuous spontaneous localization of the
classical Brownian particle under an unsharp observation as the result of
the dynamical reduction of the statistical posterior state given by the
classical conditional expectations under the continuous increase of the
interval of the observation. The stochasticity of this nonlinear equation is
generated either by the Wiener process or by the Poisson process, or by
mixture of them, corresponding to the diffusive, counting, or mixed type of
continuous measurement on the fixed output. It can be also written in the
linear form in terms of the classical renormalized state vector (probability
density), and is sometimes called ``the Schr\"odinger equation of the
classical systems theory'' to emphasize its importance and the probabilistic
interpretation.
Recently the corresponding quantum filtering theory was developed for the
different types of continuous observations, [13,14], although the particular
linear case of quantum Kalman filter was proposed by the author much earlier
[6,7]. This gives rise to an axiomatic quantum measurement theory based on
the new quantum calculus method to handle rigorously the singular
interactions of the quantum object and input fields, and based on the
generalized nondemolition principle to select properly the output observable
processes. The mathematical quantum measurement theory plays the same
central role in the general quantum theory of compound systems containing
the information and control channels, as in the classical systems theory.
But in distinction to the classical case it is not optional for the quantum
physics due to the irreducible probabilistic nature of quantum mechanics
which results in the absence of the phase trajectories. There is no need in
this theory to use the projection or any other reduction postulate. But it
does not contradict the quantum theory, as claimed in Ref. [15], and its
application can be derived in the relevant cases simply as the result of
state vector filtering by means of which the conditional probabilities of
the future observation with respect to the results of the past measurements
are calculated.
There is no need to postulate different nonlinear stochastic modifications
of the Schr\"{o}dinger equation in the phenomenological theories of
spontaneous localization or of the nonstandard quantum theories of dynamical
reduction and continuous collapse, [16--20] and to argue which type is more
universal. They all are given as particular cases [21--24] of the general
diffusive type quantum filtering equation, [25], rigorously derived by
conditioning the corresponding Schr\"{o}dinger equation for the uniquely
determined minimal compound quantum system in Fock--Hilbert space.
The quantum filtering theory gives also a new type of phenomenological
stochastic equations which are relevant to the quantum mechanics with
spontaneous localization, [19,20], corresponding to the random quantum
jumps, [26--28]. This pure discontinuous type is also rigorously derived
from the Schr\"{o}dinger equation [29] by conditioning the
continuous-in-time counting measurement which contains the diffusive type as
the central limit case [30].
Thus, the stochastic nature of measurement processes is reconciled with
unitarity and deterministic interaction on the level of the compound system.
But to account for the unavoidable noise in the continuous observation the
unitary model necessarily involves a quantum system with infinitely many
degrees of freedom and a singular interaction.
The purpose of this paper is to describe explicitly a new universal
nondemolition principle for quantum measurement theory which makes possible
the derivations of the reduction postulates from the quantum interactions.
We show on simple examples what it means to derive rigorously the quantum
filtering equation (thus the Hilbert stochastic process) by conditioning a
Schr\"{o}dinger equation for a compound system. Here, we demonstrate these
derivations from the corresponding unitary interactions with the apparatus
for the particular cases of the measurement of a single observable with the
trivial Hamiltonian $H=0$ of the object using the operator quantum calculus
method instead of the quantum stochastic one [21--23]. But if one wants to
obtain such results in nontrivial cases related to the dynamical observables
that are continuous in time and continuous in spectra and that do not
commute with $H\neq 0$, one needs to use the appropriate mathematical tools,
such as quantum differential calculus and quantum conditional expectations,
recently developed within the algebraic approach in quantum probability
theory. Otherwise, one would be in the same situation as trying to study the
Newton mechanics in nontrivial cases without using the ordinary differential
calculus.
Note that the quantum filtering equation was first obtained in a global form
[9] and then in the differential form [30] within the operational approach,
[1,2], giving the reduced description of the open quantum systems and
quantum continuous measurements. This was done by the stochastic
representation of the continuous instrument, described by the semigroup of
the operational valued measures which are absolutely continuous with respect
to the standard Wiener or Poisson process. The most general approach [31] to
these problems is based on the quantum stochastic calculus of nondemolition
measurements and quantum conditional expectations. It clearly shows that the
operational semigroup approach is restricted to only the Markovian case of
the quantum stochastic object as an open system and to the conditionally
independent nondemolition observations describing the output of the compound
system.
\section{Causality and nondemolition principle}
Let us begin with the discussion of the quantum nondemolition principle
which forms the basis of the axiomatic formulation of the quantum
measurement theory without the projection postulate, and which has been
implicitly explored also in other approaches [1--10]. The term
``nondemolition measurement'' was first introduced into the theory of
ultrasensitive gravitational experiments by Braginski and others [32--34] to
describe the sequential observations in a quantum Weber antenna as a
simultaneous measurement of some quantum observables. But the property of
nondemolition has never been formalized or even carefully described other
than by requiring the commutativity of the sequential observables in the
Heisenberg picture, which simply means that the measurement process can be
represented as a classical stochastic one by the Gelfand transformation.
Therefore no essentially quantum, noncommutative results have been obtained,
and no theorems showing the existence of such measurements in nontrivial
time continuous models have been proved.
An operator $X$ in a Hilbert space $\mathcal{H}$ is said to be demolished by
an observable $Y=Y^{\dagger }$ in $\mathcal{H}$ if the expectation $\langle
X\rangle $ is changed for $\langle \tilde{X}\rangle \neq \langle X\rangle $
in an initial state when $Y$ has been measured, although without reading.
According to the projection postulate the demolished observable $\tilde{X}
=\delta \lbrack X]$ is described by the reduction operation $\delta \lbrack
X]=\sum P_{i}XP_{i}$ for a discrete observable $Y=\sum y_{i}P_{i}$ given by
the orthoprojectors $P_{i}^{2}=P_{i}=P_{i}^{\dagger }$, $\sum P_{i}=I$ and
eigenvalues $\{y_{i}\}$. The observable $Y$ is nondemolition with respect to
$X$ if $\delta \lbrack X]$ is compatible, $\langle \delta \lbrack X]\rangle
=\langle X\rangle $, with respect to each initial state, i.e., iff $\delta
\lbrack X]=X$. It follows immediately in this discrete case that the
nondemolition condition is $XY=YX$, as the main filtering theorem says [30]
even in the general case. Moreover, for each demolition observable $Y$ there
exists a nondemolition representation $\tilde{Y}=\varrho \lbrack Y]$ in an
extended Hilbert space $\mathcal{H}\otimes \mathcal{F}$, which is
statistically equivalent to $Y$ in the sense that $\langle \tilde{X}Y\rangle
=\langle X\tilde{Y}\rangle $ for each input state in $\mathcal{H}$ and
corresponding output state in $\mathcal{H}\otimes \mathcal{F}$. This follows
from the reconstruction theorem [35] for quantum measurements giving the
existence of the nondemolition representation for any kind of observations,
which might be even continuously distributed in the relativistic space-times
$\mathbb{R}^{1+d}$. In the case of a single discrete observable $Y$ it
proves the unitary reconstruction of the projection postulate, which is
given in section 3.
Now we give several equivalent formulations of the dynamical nondemolition
considered not just as a \emph{possible} property for the quantum
measurements but rather as the \emph{universal} condition to handle such
problems as the modeling of the unsharp measurements, the generalized
reduction and instantaneous collapse for the continuous spectrum
observables, the quantum sequential measurements, and the dynamical
reduction and spontaneous localization under the continuous-in-time
observation. This condition, based on the reconstruction theorem, was
discovered in Ref. [7] and consists of a new principle of quantum axiomatic
measurement theory for the proper representation of the observable process
in a Hilbert space, such as the interaction representation of the object
with the measurement apparatus.
On the philosophical level, one can say that the nondemolition principle is
equivalent to the quantum causality principle of the statistical
predictability for the present and all possible future observations and for
all possible initial states from the a posteriori probability distributions
which are conditioned by the results of the past measurements. This should
be regarded rather as the physical content and purpose of this principle and
not as a definition.
On the mathematical level the nondemolition principle must be formulated as
a necessary and sufficient condition for the existence of the conditional
expectations on the algebras generated by the present and future Heisenberg
operators of the object of the measurement and all the output observables
with respect to the subalgebras of the past measurements and arbitrary input
states.
In the most general algebraic approach this formulation was first obtained
in Ref. [7], (see also Refs. [13] and [14]) as the condition
\begin{equation}
[X(t),Y(s)]:=X(t)Y(s)-Y(s)X(t)=0\ ,\qquad \forall s\leq t \label{eq:1.1}
\end{equation}
of compatibility of all system operators $X(t)$ considered as the possible
observables at a time instant $t$ with all past observables $Y(s)$, $s\leq t$
, which have been measured up to $t$. It says that the Heisenberg operators $
X(t)$ of the quantum object of the measurement given, say, in the
interaction representation with the apparatus must commute with all past
output observables $Y(s)$, $s\leq t$, of the pointer for any instant $t$.
And according to the causality principle there is no restriction on the
choice of the future observables $Y(r)$, $r\geq t$, with respect to the
present operators $X(t)$ except the self-nondemolition $[Y(r),Y(s)]=0$ for
the compatibility of the family $\{Y(t)\}$. Generalized then in \nocite
{bib:21,22,23}\nocite{bib:24,25,26}\nocite{bib:27,28} [21--28] for arbitrary
$X$ and $Y$, these conditions define a stochastic process $Y(t)$ which is
nondemolition with respect to a given quantum process $X(t)$. Note that the
condition (\ref{eq:1.1}) for clearly distinguished object and pointer
observables does not reduce completely the algebra of the compound system to
the commutative one as it does in the case of the direct observations $Y=X$
when it reads as the self-nondemolition condition $[X(t),X(s)]=0$, $\forall
t, s$. The nondemolition measurements considered in Refs. [32--34] were
defined only by the self-nondemolition condition, corresponding to this
trivial (Abelian) case $X(t)=Y(t)$.
In the operational approach [1,2], applicable for the reduced description of
the quantum Markov open system, one might prefer to have a condition that is
equivalent to the nondemolition principle in that case. It can be given in
terms of the induced states on the reduced algebra, i.e., of the states
given by the expectations $\phi (Z)=\langle \psi ,Z(t)\psi \rangle $ on the
algebra of observables $Z$ generated in the Heisenberg picture $
Z(t)=U^{\dagger }(t)ZU(t)$ by all $X(t)$ and $Y(t)$ for a given initial
state vector $\psi $. The nondemolition principle simply means that the
induced current quantum state of the object coincides with the \textit{a
priori} one, as a statistical mixture of a posteriori states with respect to
the past, but not the future, observations [30]. The a posteriori state as a
quantum state of the object after the measurement, when a result has been
read mathematically, will be defined in the next section. Here we only point
out that the coincidence means that the induced state is not demolished by
the measurement if the results have not been read. This justifies the use of
the word nondemolition in the generalized sense.
One can call this coincidence the generalized reduction principle because it
does not restrict the consideration to the projection valued operations
only, corresponding to the von Neumann reduction of the quantum states,
which is not applicable even for the relatively simple case of instantaneous
measurements of the quantum observables with the continuous spectrum.
The equivalence of these two formulations in the quantum Markovian case and
their relation to the projection postulate (see the next section) can be
illustrated even in the case of the single operation corresponding to an
instantaneous measurement, or a measurement with fixed duration.
Let $\mathcal{H}$ and $\mathcal{F}$ be the Hilbert spaces of state vectors $
\eta \in \mathcal{H}$, and $\varphi \in \mathcal{F}$ for the quantum object
and the measurement apparatus, respectively, and let $R$ be a self-adjoint
operator in $\mathcal{H}$, representing a dynamical variable with the
spectral values $x\in \mathbb{R}$ to be measured by means of the measurement
apparatus with a given observable $\hat{y}$, representing the pointer of the
apparatus as a self-adjoint operator in $\mathcal{F}$ with either discrete
or continuous spectrum $\Lambda \subseteq \mathbb{R}$. The measurement
apparatus has the fixed initial state $\varphi _{0}\in \mathcal{F}$, $\Vert
\varphi _{0}\Vert =1$ and is coupled to the object by an interaction
operator $S^{\dagger }=V_{0}U^{\dagger }V_{1}$, where $U$ is a unitary
evolution operator of the system in the product space $\mathcal{G}=\mathcal{H
}\otimes \mathcal{F}$, $U^{\dagger }=U^{-1}$, and $V_{0}=V\otimes \hat{1}$, $
V_{1}=I\otimes \hat{v}$ are the unitarities given by the free evolution
operators $V:\mathcal{H}\rightarrow \mathcal{H}$, $\hat{v}:\mathcal{F}
\rightarrow \mathcal{F}$ of the object and the apparatus, respectively,
during the fixed measurement interval $[0,t]$. It is natural to suppose that
the interaction does not disturb the variable $R$ in the sense $
R_{0}:=R\otimes \hat{1}=S^{\dagger }R_{0}S$, or equivalently, $\langle x|S=
\hat{s}_{x}\langle x|$, i.e.,
\begin{equation}
S:|x\rangle \otimes \varphi _{0}\mapsto |x\rangle \otimes \varphi _{x}\
,\quad \forall x\in \mathbb{R} \label{eq:1.2}
\end{equation}
in terms of (generalized) eigenvectors $|x\rangle $ of $R$, where $\varphi
_{x}=\hat{s}_{x}\varphi _{0}$. But it must disturb the input observable $
\hat{q}=\hat{v}^{\dagger }\hat{y}\hat{v}$ in order to get the
distinguishable probability densities $f_{x}(y)=|\varphi _{x}(y)|^{2}$ of
the output observable $Y=S^{\dagger }(\kappa I\otimes \hat{q})S$,
corresponding to the different eigenvalues $x\in \mathbb{R}$ of the input
states $|x\rangle $ to be tested by the usual methods of mathematical
statistics. Here $\kappa >0$ is a scaling parameter and we have assumed, for
simplicity, that the observable $\hat{y}$ and hence $\hat{q}$ has the
nondegenerate spectral values $y\in \Lambda $, so that $\varphi \in \mathcal{
F}$ in the input representation is described by the (generalized)
eigenvectors $|y\rangle $ of $\hat{q}:|y\rangle \mapsto y|y\rangle $ as a
square integrable function $\varphi (y)=\langle y|\varphi $, $\Vert \varphi
\Vert ^{2}=\int |\varphi (y)|^{2}\mathrm{d}\nu <\infty $ with respect to a
given measure $\nu $ on $\Lambda $.
The positive measure $\nu $ is either discrete or continuous or can even be
of mixed type normalizing the probability densities $g(y)=\langle \psi
(y),\psi (y)\rangle $ for the state vectors $\psi \in \mathcal{G}$:
\begin{equation}
\Vert \psi \Vert ^{2}=\int_{\Lambda }\langle \psi (y),\ \psi (y)\rangle
\mathrm{d}\nu =\int_{\Lambda }g(y)\mathrm{d}\nu =1 \label{eq:1.4}
\end{equation}
where $\psi (y)=\langle y|\psi $ are the $\mathcal{H}$-valued wavefunctions
of the system \textquotedblleft quantum object plus measurement
apparatus.\textquotedblright\ One can consider, for example, the standard
Lebesgue measures $\mathrm{d}\nu =\mathrm{d}\lambda $ on $\Lambda =\mathbf{Z}
$, $\mathrm{d}\lambda =1$ and on $\Lambda =\mathbb{R}$, $\mathrm{d}\lambda =
\mathrm{d}y$:
\begin{equation*}
\Vert \psi \Vert ^{2}=\sum \langle \psi (k),\psi (k)\rangle \;(\mathrm{d}
\lambda =1)\ ;\quad \Vert \psi \Vert ^{2}=\int \langle \psi (y),\psi
(y)\rangle \mathrm{d}y\;(\mathrm{d}\lambda =\mathrm{d}y)
\end{equation*}
respectively for the discrete spectrum $y\in \mathbf{Z}$ and for the
continuous one $y\in \mathbb{R}$, given by the distributions $f(y)=\sum
\delta (y-k)$ and $f(y)=1$ as $\mathrm{d}\lambda =f(y)\mathrm{d}y$.
The output state vectors $\chi =S(\xi \otimes \varphi _{0})\in \mathcal{G}$,
corresponding to the arbitrary input ones $\xi =V\eta $, $\langle \xi ,\xi
\rangle =1$, are given by the vector-functions $\chi :y\mapsto \chi (y)\in
\mathcal{H}$ of $y\in \Lambda $ with values
\begin{equation*}
\chi (y)=\langle y|S(\xi \otimes \varphi _{0})=\langle y|\chi .
\end{equation*}
The operators $\langle y|S:\mathcal{G}\rightarrow \mathcal{H}$ correspond to
the adjoint ones $S^{\dagger }|y\rangle :\eta \mapsto S^{\dagger }(\eta
\otimes |y\rangle )$,
\begin{equation}
\langle \eta ,\langle y|S(\xi \otimes \varphi )\rangle =\langle S^{\dagger
}(\eta \otimes |y\rangle ),\xi \otimes \varphi \rangle \label{eq:1.3}
\end{equation}
defining the (generalized) vector-functions $S^{\dagger }|y\rangle \eta $ by
\begin{equation*}
\int S^{\dagger }|y\rangle \eta \varphi _{0}(y)\mathrm{d}\nu =S^{\dagger
}(\eta \otimes \varphi _{0})\;\;\;\;\forall \eta ,\varphi .
\end{equation*}
The operator $(R_{0}\chi )(y)=R\chi (y)$ commutes with $Q=\kappa I\otimes
\hat{q}$ as well as with any other operator $C_{0}=C\otimes \hat{1}$
representing an object variable $C:\mathcal{H}\rightarrow \mathcal{H}$ in $
\mathcal{H}\otimes \mathcal{F}$ as the constant function $Z(y)=C$. This is
because the general operator $Z$ in $\mathcal{H}\otimes \mathcal{F}$
commuting with $Q$ corresponds to an operator--valued function $Z(y):
\mathcal{H}\rightarrow \mathcal{H}$, which is defined by the operator $Z$ as
\begin{equation}
\langle y|Z\psi =Z(y)\langle y|\psi \ ,\quad \forall \psi \in \mathcal{G}\
,\quad y\in \Lambda \label{eq:1.5}
\end{equation}
in the case $Z=Q$ it corresponds to $Z(y)=\kappa yI$: $\ \langle y|Q\psi
=\kappa y\langle y|\psi $. It is trivial in this case that the Heisenberg
operators $X=S^{\dagger }ZS$ satisfy the nondemolition condition $[X,Y]=0$
with respect to the output observable $Y=S^{\dagger }QS$, but not the
initial operators $Z:[Z,Y]\not=0$ if $[Z(y),R]\not=0$. This makes it
possible to condition, by the observation of $Y$, the future measurements of
any dynamical variable of the quantum object, but not the potential
measurements of $Z$ in the past with respect to the present observation of $Y
$ if they have not been done initially.
Indeed, let $P_{\Delta }=S^{\dagger }I_{\Delta }S$ be the spectral
orthoprojector of $Y$, given for a measurable $\Delta \subseteq \Lambda $ by
$I_{\Delta }=I\otimes \hat{1}_{\Delta }$ as
\begin{equation}
\langle y|I_{\Delta }\chi =1_{\Delta }(y)\chi (y)=1_{\Delta }(y)\langle
y|\chi \ ,\quad 1_{\Delta }(y)=\left\{
\begin{array}{cc}
1 & y\in \Delta \\
0 & y\notin \Delta
\end{array}
\right.
\label{eq:1.6}
\end{equation}
and $p_{\Delta }=\langle \eta \otimes \varphi ,P_{\Delta }(\eta \otimes
\varphi )\rangle \not=0$. Then the formula
\begin{equation}
\varepsilon _{\Delta }[X]=\langle \eta ,\omega \lbrack XP_{\Delta }]\eta
\rangle /\langle \eta ,\omega \lbrack P_{\Delta }]\eta \rangle \,
\label{eq:1.7}
\end{equation}
where $\langle \eta ,\omega \lbrack X]\eta \rangle =\langle \eta \otimes
\varphi ,X(\eta \otimes \varphi )\rangle $, $\forall \eta \in \mathcal{H}$,
defines the conditional expectation of $X=S^{\dagger }ZS$ with respect to $Y$
. It gives the conditional probability $\varepsilon _{\Delta }[X]\in \lbrack
0,1]$ for any orthoprojector $X=O$, while $\varepsilon _{\Delta }[Z]$
defined by the same formula for $Z=\{Z(y)\}$ may not be the conditional
expectation due to the lack of positivity $\omega \lbrack EP_{\Delta }]\geq
0$ for all $\varphi \in \mathcal{F}$ if the orthoprojector $Z=E$ does not
commute with $P_{\Delta }$. The necessity of the nondemolition principle for
the existence of the conditional probabilities is the consequence of the
main filtering theorem consistent with the causality principle according to
which the conditioning with respect to the current observation has the sense
of preparation for future measurements but not for past ones.
This theorem proved in the general algebraic form in Ref. [30] reads in the
simplest formulation as
\noindent \textsc{Main Measurement Theorem.} Let $O$ be an orthoprojector in
$\mathcal{G}=\mathcal{H}\otimes \mathcal{F}$. Then for each state vector $
\psi =\xi \otimes \varphi $ there exists the conditional probability $
\varepsilon _{\Delta }[O]\in \lbrack 0,1]$, defined by the compatibility
condition
\begin{equation}
\varepsilon _{\Delta }[O]\langle \xi \otimes \varphi ,P_{\Delta }(\xi
\otimes \varphi )\rangle =\langle \xi \otimes \varphi ,\ OP_{\Delta }(\xi
\otimes \varphi )\rangle \label{eq:1.8}
\end{equation}
if and only if $OP_{\Delta }=P_{\Delta }O$. It is uniquely defined for any
measurable $\Delta \subset \Lambda $ with respect to $P_{\Delta }=S^{\dagger
}I_{\Delta }S$, $\varphi =\varphi _{0}$ as
\begin{equation}
\varepsilon _{\Delta }[O]={\frac{1}{\mu _{\Delta }}}\int_{\Delta }\langle
\chi _{y},E(y)\chi _{y}\rangle \mathrm{d}\mu \label{eq:1.9}
\end{equation}
Here $E(y):\mathcal{H}\rightarrow \mathcal{H}$ is the orthoprojector valued
function, describing $O$, commuting with all $P_{\Delta }$ in the Schr\"{o}
dinger picture as $O=S^{\dagger }ES$, $\mu _{\Delta }=\int_{\Delta }g_{\xi
}(y)\mathrm{d}\nu $ is the probability distribution of $y\in \Lambda $,
absolutely continuous with respect to $\nu $, $g_{\xi }(y)=\Vert \chi
(y)\Vert ^{2}$, $\chi (y)=\langle y|S(\xi \otimes \varphi _{0})$, and $
y\mapsto \chi _{y}$ is the random state vector $\chi _{y}\in \mathcal{H}$ of
the object uniquely defined for almost all $y:g_{\xi }(y)\not=0$ up to the
random phase $\theta (y)=\mathrm{arg}c_{\xi }(y)$ by the normalization
\begin{equation}
\chi _{y}=\chi (y)/c_{\xi }(y)\ ,\quad |c_{\xi }(y)|^{2}=g_{\xi }(y)
\label{eq:1.10}
\end{equation}
\section{The generalized \emph{a posteriori} reduction}
It follows immediately from the main theorem that the input state vector $
\xi :\Vert \xi \Vert =1$ of the object of measurement has to be changed for $
\chi _{y}\in \mathcal{H}$ due to the preparation $\xi \mapsto \{\chi
(y):y\in \Lambda \}$ of the \textit{a priori\/} state vector $\chi =S(\xi
\otimes \varphi _{0})$ of the meter and the object after the objectification
$\hat{q}=y$. The former is given by the dynamical interaction in the pointer
representation $\chi (y)=\langle y\mid \chi $ due to the choice of the
measurement apparatus and the output observables, and the latter is caused
by statistical filtering $\chi \mapsto \chi (y)$ due to the registration of
the measurement result $y\in \Lambda $ and the normalization $\chi _{y}=\chi
(y)/\Vert \chi (y)\Vert $.
While the process of preparation described by a unitary operator applied to
a fixed initial state of the meter encounters no objection among physicists,
the process of objectification encounters objection because of the
nonunitarity of the filtering and nonlinearity of the normalization. But the
main theorem shows clearly that there is nothing mysterious in the
objectification. It is not a physical process but only a mathematical
operation to evaluate the \textit{conditional state}
\begin{equation}
\pi _{y}[Z]=\varepsilon _{y}[S^{\dagger }ZS]=\langle \chi _{y},Z(y){\chi _{y}
}\rangle \label{eq:2.1}
\end{equation}
which is defined by the conditional expectations $\varepsilon
_{y}[X]=\lim_{\Delta \downarrow y}\varepsilon _{\Delta }[X]$ of the
Heisenberg operators $X$ for $Z=\{Z(y)\}$. The linear random operator
\begin{equation}
G(y):\xi \in {\mathcal{H}}\mapsto \langle y|S(\xi \otimes \varphi _{0})\
,\quad y\in \Lambda \label{eq:2.2}
\end{equation}
defines the reduction transformations $G(y)$ as the partial matrix elements $
\langle y|S\varphi _{0}$ of the unitary operator $S$. They map the
normalized vectors $\xi \in \mathcal{H}$ into the \emph{a posteriori\/} ones
$\chi (y)=G(y)\xi $, renormalized to the probability density
\begin{equation*}
g_{\xi }(y)=\Vert G(y)\xi \Vert ^{2}=\langle \xi ,E(y)\xi \rangle ,\quad
E=G^{\dagger }G\ .
\end{equation*}
If the condition (\ref{eq:1.2}) holds, then only the eigenvectors $
|x\rangle $ of $R$ remain unchanged up to a phase multiplier:
\begin{equation}
G(y)|x\rangle =|x\rangle \varphi _{x}(y),\ \varphi _{x}(y)=\langle y|\hat{s}
_{x}\varphi _{0}=\langle y|\varphi _{x} \label{eq:2.3}
\end{equation}
and hence $\chi _{y}=e^{\mathrm{i}\theta _{x}(y)}|x\rangle $, where $\theta
_{x}(y)=\arg \,\varphi _{x}(y)$. The superpositions $\xi =\int |x\rangle \xi
(x)\mathrm{d}\lambda $ change their amplitudes $\xi (x)=\langle x|\xi $ for $
\chi _{y}(x)=\langle x|\chi _{y}$
\begin{equation}
\langle x|\chi _{y}=c_{\xi }^{-1}(y)\chi (x,y)\ ,\quad \chi (x,y)=\langle
x|G(y)\xi =\varphi _{x}(y)\xi (x) \label{eq:2.4}
\end{equation}
where $c_{\xi }(y)=(\int |\varphi _{x}(y)|^{2}h(x)\mathrm{d}\lambda )^{1/2}$
, $h(x)=|\xi (x)|^{2}$.
In the case of a purely continuous spectrum of $R$ there are no invariant
state vectors at all because the generalized eigenvectors cannot be
considered as input ones due to $|x\rangle \notin \mathcal{H}$ as $\langle
x|x\rangle=\infty$ in that case.
The generalized reduction (\ref{eq:2.1}) of the state-vector corresponds to
the limit case $\Delta\downarrow y$ when the accuracy of the instrument $
\Delta\ni y$ tends to the single-point subset $\{y\}\subset\Lambda$. It is
not even the mathematical idealization of the real physical experiment if
the observable $\hat q$ has the discrete spectrum $\Lambda=\{y_i\}$.
Prior to discussing why the generalized reduction does not contradict the
main postulates of the quantum theory, let us show how to derive the von
Neumann projection postulate in this way, corresponding to the orthogonal
transformations $G(y_{i})=F_{i}$ given by a partition $\sum A_{i}=\mathbb{R}$
of the spectrum of $R$ as $F_{i}=E_{A_{i}}$. Here $A\mapsto E_{A}$, $
E_{A}^{\dagger }E_{A^{\prime }}=E_{A\cap A^{\prime }}$, $\sum E_{A_{i}}=I$
is the spectral measure of $R=\int x\mathrm{d}E$ which might be either of
discrete or of continuous type as in the cases
\begin{equation*}
E_{A}=\sum_{x\in A}|x\rangle \langle x|\ ,\quad E_{A}=\int_{A}|x\rangle
\langle x|\mathrm{d}x\ ,
\end{equation*}
corresponding to the nondegenerate spectrum of $R:\mathrm{d}E=|x\rangle
\langle x|\mathrm{d}\lambda $.
Considering the indices $i$ of $y_{i}$ in $\mathbf{Z}$ it is always possible
to find the unitary interaction in the Hilbert space ${\mathcal{G}=\mathcal{H
}}\otimes l^{2}(\mathbf{Z})$ of the two--sided sequences $\psi =\{\eta
^{k}|k=0,\pm 1,\pm 2,\dots \}$ with $\eta ^{k}\in \mathcal{H}$ such that $
\Vert \psi \Vert ^{2}=\sum_{-\infty }^{\infty }\langle \eta ^{k},\eta
^{k}\rangle <\infty $. Indeed, we can define the interaction as the
block-matrix $S^{\dagger }=[W_{k}^{i}]$ acting in $\mathcal{G}$ as $
W^{i}\psi =\sum_{k=-\infty }^{\infty }W_{k}^{i}\eta ^{k}$, by $
W_{k}^{i}=F_{k-i}$, where $F_{k}=0$ if there is no point $y_{k}$ in $\Lambda
$ numbered by a $k\in \mathbf{Z}$. It is the unitary one because $S=[F_{i-k}]
$ is inverse to $S^{\dagger }=[F_{k-i}]$ as
\begin{equation*}
\sum_{j=-\infty }^{\infty }F_{i-j}F_{k-j}=\delta _{k}^{i}\sum_{j=-\infty
}^{\infty }F_{-j}=\delta _{k}^{i}\sum F_{i}=\delta _{i}^{k}I
\end{equation*}
due to the orthogonality $F_{i}F_{k}=0$, $i\not=k$, and completeness $\sum
F_{i}=I$ of $\{F_{i}\}$.
Let us fix the initial sequence $\varphi _{0}\in l^{2}(\mathbf{Z})$ as the
eigenstate $\varphi _{0}=\{\delta _{0}^{k}\}=|0\rangle $ of the input
observable $\hat{k}$ in $l^{2}(\mathbf{Z})$ as the counting operator
\begin{equation}
\hat{k}=\sum_{k=-\infty }^{\infty }k|k\rangle \langle k|\ ,\quad |i\rangle
=\{\delta _{i}^{k}\}\in l^{2}(\mathbf{Z}) \label{eq:2.5}
\end{equation}
with the spectrum $\mathbf{Z}$. Then we obtain the conditional states (\ref
{eq:2.1}) defined as
\begin{equation*}
\pi _{i}[Z]={\frac{1}{p_{i}}}\langle F_{i}\eta ,Z_{i}F_{i}\eta \rangle
=\langle \eta _{i},Z_{i}\eta _{i}\rangle ,\;\eta _{i}=F_{i}\eta /p_{i}^{1/2}
\end{equation*}
up to the normalizations $p_{i}=\langle F_{i}\eta ,F_{i}\eta \rangle \not=0$
by the linear operations $\sigma \mapsto W_{i}^{0}\sigma W_{i}^{0}$,
\begin{equation}
W_{i}^{0}\eta =\langle i|S(\eta \otimes \varphi _{0})=\sum_{k=-\infty
}^{\infty }F_{i-k}\delta _{0}^{k}\eta =F_{i}\eta \ . \label{eq:2.6}
\end{equation}
It is only in that case that the \emph{a posteriori\/} state always remains
unchanged under the repetitions of the measurement. Such an interaction
satisfies the condition (\ref{eq:1.2}) with $\varphi _{x}=\hat{s}_{x}\varphi
_{0}$ given by the sequences $\varphi _{x}=\{\delta
_{i(x)}^{k}\}=|i(x)\rangle $ because
\begin{equation*}
F_{i-k}|x\rangle =|x\rangle \delta _{i-k}^{i(x)}=W_{i}^{k}|x\rangle \quad
(=|x\rangle \,,\quad \forall x\in A_{i-k})\,,
\end{equation*}
where $i(x)=i$ if $x\in A_{i}$ is the index map of the coarse-graining $
\{A_{i}\}$ of the spectrum of $R$. Hence in the $x$-representation $\psi
=\int |x\rangle \psi (x)\mathrm{d}\lambda $, $\psi (x)=\langle x|\psi $ it
can be described by the shifts $\hat{s}_{x}^{\dagger }=[\delta _{k-i}^{i(x)}]
$ in $l^{2}(\mathbf{Z})$
\begin{equation}
\hat{s}_{x}^{\dagger }:\psi (x)=\{\eta ^{k}(x)\}\mapsto \{\langle x|\eta
^{i(x)+k}\}\,\quad \eta ^{k}(x)=\langle x|\eta ^{k} \label{eq:2.7}
\end{equation}
replacing the initial state $\varphi _{0}=|0\rangle $ of the meter for each $
x$ by another eigenstate $|i(x)\rangle =\hat{s}_{x}|0\rangle $ if $x\notin
A_{0}$.
This realizes the coarse-grained measurement of $R$ by means of the
nondemolition observation of the output
\begin{equation}
Y=S^{\dagger }(I\otimes \hat{k})S=i(R)\otimes \hat{1}+I\otimes \hat{k}\,,
\label{eq:2.8}
\end{equation}
where $i(R)=\int i(x)\mathrm{d}E=\sum iF_{i}$. If $q(R)=\hbar i(R)$ is the
quantized operator $R$ given, say, by the integer $i(x)=\lfloor x/\hbar
\rfloor $, then the rescaled model $\hat{y}_{x}=\hbar \hat{s}_{x}^{\dagger }
\hat{k}\hat{s}_{x}=q(x)\hat{1}+\hbar \hat{k}$ of the nondemolition
measurement has the classical limit $\lim \hat{y}_{x}=x\hat{1}$ if $\hbar
\rightarrow 0$, corresponding to the direct observation of a continuous
variable $R$ by means of $\lim \hbar Y=R\otimes \hat{1}$.
Note that the observable $Y$ commutes with the arbitrary Heisenberg operator
$A=S^{\dagger }(C\otimes \hat{1})S$ of the object, but not with the initial
operators $C_{0}=C\otimes \hat{1}$ if $[C,i(R)]\not=0$.
The unitary operator $S^{\dagger }$ is given by the interaction potential $
q(R)\otimes \hat{p}$ as $S^{\dagger }=\exp \{(\mathrm{i}/\hbar )q(R)\otimes
\hat{p}\}$, where $\mathrm{i}=\sqrt{-1}$, and $\hat{p}=[\langle i|\hat{p}
|k\rangle ]$, $\langle i|\hat{p}|k\rangle =(1/2\pi )\int_{-\pi }^{\pi }pe^{-
\mathrm{i}(i-k)p}\mathrm{d}p$ is the matrix of the momentum operator in $
l^{2}(\mathbf{Z})$, generating the shifts $\hat{s}_{x}^{\dagger }=[\langle i|
\hat{s}_{x}^{\dagger }|k\rangle ]$ as $\hat{s}_{x}^{\dagger }=e^{i(x)\mathrm{
i}\hat{p}} $:
\begin{equation*}
\langle i|\hat{s}_{x}^{\dagger }|k\rangle ={\frac{1}{2\pi }}\int_{-\pi
}^{\pi }e^{i(x)\mathrm{i}p}e^{-\mathrm{i}(i-k)p}\mathrm{d}p=\delta
_{i-k}^{i(x)}\,.
\end{equation*}
The nondemolition observation reproduces the statistics of the
\textquotedblleft demolition\textquotedblright\ measurement of $R$ by the
direct observation of $q(R)$ because the output observable $Y$ has the same
characteristic function with respect to the state vector $\xi \otimes
\varphi _{0}$ as $i(R)$ with respect to $\xi $:
\begin{eqnarray*}
&\langle \xi \otimes \varphi _{0},\exp \{\mathrm{i}pY\}(\xi \otimes \varphi
_{0})\rangle =&\langle S(\xi \otimes \varphi _{0}),e^{\mathrm{i}pQ}S(\xi
\otimes \varphi _{0})\rangle \\
&&\qquad =\sum \langle F_{i}\xi ,e^{i\mathrm{i}p}F_{i}\xi \rangle =\langle
\xi ,\exp \{\mathrm{i}pi(R)\}\xi \rangle \,.
\end{eqnarray*}
Here $p$ is the parameter of the characteristic function, $Q=I\otimes \hat{k}
$, and $F_{i}=\langle i|S\varphi _{0}=F_{i}^{\dagger }$ are the
orthoprojectors, such that $\sum_{i}iF_{i}^{\dagger }F_{i}=\int i(x)\mathrm{d}
E=i(R)$. If the observable $R$ is discrete, then the nondemolition
observation (\ref{eq:2.8}) realizes the precise measurement of $R$, if the
partition $\{A_{i}\}$ separates all the eigenvalues $\{x_{i}\}$ as in the
case $x_{i}\in A_{i}$, $\forall i$, corresponding to $x_{i}=\hbar i$, $
A_{i}=[\hbar i,\hbar (i+1)[$, $i=0,1,2,\ldots $.
The nondemolition principle helps not only to derive the projection
postulate as a reduced description of the shift interaction in the enlarged
Hilbert space $\mathcal{G}$ with respect to the initial eigenvector $\varphi
_{0}=|0\rangle $ of the discrete meter $\hat{q}$, but also extends it to the
generalized reductions under the unsharp measurements with arbitrary
spectrum $\Lambda $, corresponding to the nonrepeatable instruments [1,2]
\begin{equation}
\Pi _{\Delta }[C]=\int_{\Delta }\Psi \lbrack C](y)\mathrm{d}\nu \,,\quad
\Psi \lbrack C](y)=G(y)^{\dagger }CG(y)\,. \label{eq:2.9}
\end{equation}
The density $\Psi (y)$ of the instrument defines completely positive but not
necessarily orthoprojective operations $E(y)=\Psi \lbrack I](y)$, called the
effects for the probability densities $g(y)=\sigma \lbrack E(y)]$, and also
the nonlinear operation $\sigma \mapsto \sigma \circ \Psi (y)/\sigma \lbrack
E(y)]$ of the generalized reduction, mapping the pure input states $\sigma
_{\xi }[C]=\langle \xi |C|\xi \rangle $ into the \emph{a posteriori\/} ones
\begin{equation}
\rho _{y}[C]={\frac{1}{g_{\xi }(y)}}\rho \lbrack C](y)=\pi _{y}[C_{0}]\
,\quad \rho \lbrack C](y)=\langle \chi (y),\ C\chi (y)\rangle \,.
\label{eq:2.10}
\end{equation}
They are also pure because of the completeness of the nondemolition
measurement, i.e., nondegeneracy of the spectrum of the observable $\hat{q}$
in $\mathcal{F}$. Thus, the reduction of the state-vector is simply the way
of representing in the form (\ref{eq:2.1}) the \emph{a posteriori\/} pure
states (\ref{eq:2.10}) given at the limit $\Delta \rightarrow 0$ by the
usual (in the statistics) Bayesian formula (\ref{eq:1.7}) for $X=S^{\dagger
}C_{0}S=A$, which is applicable due to the commutativity of $A$ and $
P_{\Delta }$.
The reduction $\sigma _{1}\rightarrow \rho _{y}$ of the prepared state $
\sigma _{1}=\sigma \circ \Psi $ for the object measurement is given as the
evaluation of the conditional expectations which are the standard attributes
of any statistical theory. All the attempts to derive the reduction as a
result of deterministic interaction only are essentially the doomed attempts
to derive the probabilistic interpretation of quantum theory. There is no
physical explanation of the stochasticity of the measurement process as
there is no adequate explanation of the randomness of an observable in a
pure quantum state.
It is not a dynamical but a purely statistical effect because the input and
output state-vectors of this process are not the observables of the
individual object of the statistical ensemble but only the means for
calculating the \textit{a priori}\emph{\/} and the \textit{a posteriori}
\emph{\/} probabilities of the observables of this object. Hence there is no
observation involving just a single quantum object which can confirm the
reduction of its state. The reduction of the state-vector can be treated as
an observable process only for an infinite ensemble of similar object plus
meter systems. But the measurements for the corresponding collective
observables also involve preparation and objectification procedures, this
time for the ensemble, i.e., for a second quantized compound system. So the
desirable treatment of all the reductions as some objective stochastic
process can never be reached in this way. They are only secondarily stochastic,
since they depend on the random information that has been gained up
to a given time instant $t$.
The reduction of the state-vector is not at variance with the coherent
superposition principle, because a vector $\eta \in \mathcal{H}$ is not yet
a pure quantum state but defines it rather up to a constant $c\in \mathbb{C}$
as the one-dimensional subspace $\{c\eta |c\in \mathbb{C}\}\subset \mathcal{H
}$ which is a point of the projective space over $\mathcal{H}$. For every
reduced state-vector $\chi _{y}$ there exists an equivalent one, namely $
\chi (y)=\sqrt{g_{\xi }(y)}\chi _{y}$, defining the same quantum pure state,
given by the linear transformation $G(y):\xi \mapsto \chi (y)$, so that the
superposition principle holds: $\chi (y)=\sum c_{i}\chi ^{i}(y)$ if $\xi
=\sum c_{i}\xi ^{i}$. The pure state transformation $G(y)$ does not need to
be unitary, but as an operator $G:\mathcal{H}\rightarrow \mathcal{G}$ with
\begin{equation*}
G^{\dagger }G=\int G(y)^{\dagger }G(y)\mathrm{d}\nu =\int \varphi
_{0}^{\dagger }S^{\dagger }|y\rangle \langle y|S\varphi _{0}\mathrm{d}\nu
=\varphi _{0}^{\dagger }S^{\dagger }S\varphi _{0}=I
\end{equation*}
it preserves the total probability by mapping the normalized $\xi \in
\mathcal{H}$ into the $\chi (y)=G(y)\xi $, normalized to the probability
density $g_{\xi }(y)$.
According to the nondemolition principle it makes sense to apply the vector $
\chi=\{\chi(y)\}$ of the system after the measurement preparation only
against the reduced observables $Z=\{Z(y)\}$ which commute with $Q=\kappa
I\otimes \hat q$. Otherwise according to the main theorem the conditional
probabilities of the future observations may not exist for an initial
state-vector $\chi_0=\eta\otimes\varphi$ and a given result $y\in\Lambda$ of
the measurement. It is against the physical causality to consider the
unreduced operators as the observables for the future measurements since the
causality means that the future observations must be statistically
predictable from the data of a measurement and such prediction can be given
only by the conditional probabilities (\ref{eq:1.9}). Once the output
observables are selected as a part of a preparation, the algebra of the
actual observables is reduced and there is no way to measure an observable $
Z $ which is not compatible with $Q$. It could be measured in the past if
another preparation had been made but the irreversibility of the time arrow
does not give this possibility. Thus, the quantum measurement theory implies
a kind of time-dependent superselection rule for algebras such as those of
the observables $Z$ chosen as the actual observable at the moment $t$. But
it does not prevent one from considering other operators as the virtual
observables defining super operators, i.e., the subsidiary operators for the
description of some meaningful operations, although an evaluation of their
expectations does not make any sense as it does for the differential
operators in the classical theory.
The \textit{a priori}\emph{\/} states are the induced ones
\begin{equation*}
\sigma _{1}(C)=\int \langle \chi _{y},C\chi _{y}\rangle \mathrm{d}\mu
=\langle \chi ,C_{0}\chi \rangle \ ,\quad C_{0}=C\otimes \hat{1}
\end{equation*}
on the algebra generated by the operators in $\mathcal{H}$ of the object
only. They are given as the statistical mixtures of the \textit{a posteriori}
\emph{\/} pure states (\ref{eq:2.10}) of the object even if the initial
state $\sigma $ was pure. But it does not contradict quantum mechanics
because the prepared state $\phi (Z)=\langle \chi ,Z\chi \rangle $ of the
quantum system after the measurement is reduced to the object plus pointer
but is still given uniquely by the state-vector $\chi \in \mathcal{G}$, up
to a random phase. Namely, the vector $\chi $ is a coherent superposition
\begin{equation*}
\chi =\sum \chi _{i}\otimes |y_{i}\rangle c_{i}\ ,\quad \chi _{i}=\chi
(y_{i})/c_{i}\ ,\quad |c_{i}|^{2}=p_{i}
\end{equation*}
of the \textit{a posteriori}\emph{\/} states $\chi _{i}\otimes |y_{i}\rangle
$ of the system, if $\hat{q}$ has the spectral decomposition $\hat{q}=\sum
y_{i}|y_{i}\rangle \langle y_{i}|$ and $p_{i}$ are the probabilities of $
y_{i}$.
This uniqueness does not hold for the density-matrix representations $\phi
\lbrack Z]=\mathrm{Tr}\{\hat{\phi}Z\}$; among the equivalent density
matrices $\hat{\phi}\geq 0$ there exists always the projector $\hat{\phi}
=|\chi \rangle \langle \chi |$, but there are also mixtures such as the
diagonal one
\begin{equation*}
\hat{\phi}_{1}=\sum p_{i}|\eta _{i}\rangle \langle \eta _{i}|\ ,\quad |\eta
_{i}\rangle =\eta _{i}\otimes |y_{i}\rangle
\end{equation*}
in the discrete case $\Lambda =\{y_{i}\}$. Hence, the diagonalization $\hat{
\phi}\mapsto \hat{\phi}_{1}$ of the density matrix due to the measurement of
$\hat{q}$ is only the rule to choose the most mixed one $\hat{\phi}_{1}$
which is equivalent to the coherent choice $\hat{\phi}$ due to
\begin{equation*}
\mathrm{Tr}\{\hat{\phi}Z\}=\sum p_{i}\langle \eta _{i},Z_{i}\eta _{i}\rangle
=\mathrm{Tr}\{\hat{\phi}_{1}Z\}
\end{equation*}
for all reduced operators $Z=\sum Z_{i}\otimes |y_{i}\rangle \langle y_{i}|$
. There is no special need to fix such a choice, which is even impossible in
the continuous spectrum case. This is because the continuous observable $
\hat{q}$ has no ordinary eigenvectors, $\langle y|y\rangle =\infty $ and
hence $\chi _{y}\otimes |y\rangle \notin \mathcal{G}$, but there exist the
eigenstates $\omega _{y}[\hat{z}]=z(y)$ on the algebra of complex functions $
z(y)$, defining the conditional expectations $\varepsilon _{y}[X]$ for $
X=S^{\dagger }ZS$ as
\begin{equation*}
\varepsilon _{y}[X]=\pi _{y}[SXS^{\dagger }]\ ,\quad \pi _{y}=\rho
_{y}\otimes \omega _{y}\ ,\quad \forall y\in \Lambda \,.
\end{equation*}
Thus, the nondemolition principle abandons the collapse problem, reducing it
to the evaluation of the \emph{a posteriori\/} state. The decrease of the
observable algebra is the only reason for the irreversibility of the linear
transformation $\phi _{0}\mapsto \phi $ of the initial states $\phi
_{0}(X)=\langle \chi _{0},X\chi _{0}\rangle $, which are pure on the algebra
of all operators $X$ into the prepared (mixed) ones on the algebra of the
reduced operators $Z$.
\section{The main measurement problem}
As was shown using an instantaneous measurement as an example, the
nondemolition principle leads to the notion of the instrument, described by
the operational-valued measure (\ref{eq:2.9}), and gives rise to the
generalized reduction (\ref{eq:2.10}) of the quantum statistical states. In
the operational approach [1,2] one starts from the instrumental description $
\sigma\mapsto\sigma\circ\Phi(y)=\rho(y)$ of the measurement, which is
equivalent to postulating the generalized reduction (\ref{eq:2.10}) given up
to the probabilistic normalization $g(y)=\rho[I](y)$ by the linear map $
\sigma\mapsto\sigma\circ\Psi(y)$ due to $\Psi_y(\sigma)=(1/g(y))\sigma\circ
\Psi(y)=\rho_y$.
The main measurement problem is the reconstruction of an interaction
representation of the quantum measurement, that is, finding a proper
dilation $\mathcal{G}$ of the Hilbert space $\mathcal{H}$ and the output
process $Y $, satisfying the nondemolition (and self-nondemolition)
condition (\ref{eq:1.1}) with respect to the Heisenberg operators $X$ of the
object of measurement in order to derive the same reduction as the result of
conditional expectation.
The minimal dilation giving, in principle, the solution of this problem even
for non-Markovian relativistic cases was constructed in [35], but it is
worth finding also more realistic, nonminimal dilations defining the object
of measurement as a quantum stochastic process in the strong sense for the
particular Markovian cases.
In the case of a single instantaneous measurement described by an instrument
$\Pi _{\Delta }$, this can be formulated as the problem of finding the
unitary dilation $U\varphi _{0}:\eta \in \mathcal{H}\mapsto U(\eta \otimes
\varphi _{0})$ in a tensor product $\mathcal{G}=\mathcal{H}\otimes \mathcal{F
}$ and an observable $\hat{y}=\int y\mathrm{d}\hat{1}$ in $\mathcal{F}$,
giving $\Pi _{\Delta }$ as the conditional expectation
\begin{equation*}
\Pi _{\Delta }[C]=\omega _{0}[AE_{\Delta }]\ ,\quad \langle \eta ,\omega
_{0}[X]\eta \rangle =\langle \eta \otimes \varphi _{0},X\eta \otimes \varphi
_{0}\rangle
\end{equation*}
of $AE_{\Delta }=U^{\dagger }(C\otimes \hat{1}_{\Delta })U$. In principle,
such a quadruple $(\mathcal{F},\varphi _{0},\hat{y},U)$ was constructed in
[36] and [37] for the normal completely positive $\Pi _{\Delta }$, giving a
justification of the general reduction postulate as described above for the
case of the projective $\Pi _{\Delta }$. For the continuous observation this
problem was solved~[39] on the infinitesimal level in terms of the quantum
stochastic unitary dilation of a differential evolution equation for
characteristic operations
\begin{equation*}
\tilde{\Psi}(t,q)=\int e^{\mathrm{i}qy}\Psi (t,y)\mathrm{d}\nu \ ,\quad \Psi
(t,y)=\lim_{\Delta \downarrow y}{\frac{1}{\nu _{\Delta }}}\Pi _{\Delta
}^{t}\ ,
\end{equation*}
where $\mathrm{d}\nu $ is a standard probability measure on $\mathrm{d}
y\subset \Lambda $. This corresponds to the stationary Markovian evolution
of the convolutional instrumental semigroups $\{\Pi _{\Delta }^{t}|t\geq 0\}$
giving the reduced description of the continuous measurement, with the data $
y(t)$ having the values in an additive group.
Unfortunately the characteristic operational description of the quantum
measurement is not relevant to the sample-paths representation. It is not
suitable for the conditioning of the quantum evolution under the given data
of the observations and hence does not allow one to obtain explicitly the
corresponding dynamical reduction. Moreover, the continuous measurements
have the data $y$ not necessary in a group, and in the nonstationary cases
they cannot be described by the convolution instrumental semigroups and the
corresponding evolution equations.
Recently a new differential description of continual nondemolition
measurements was developed within the noncommutative stochastic calculus
method [13,14,31]. A general stochastic filtering equation was derived for
the infinitesimal sample-paths representation of the quantum conditional
expectations, giving the continuous generalized reduction of the \emph{a
posteriori\/} states [25,26,29].
Simultaneously, some particular cases of the filtering equation for the
stochastic state-vector $\varphi (t,\omega )=\chi _{y^{t}}(\omega )$,
corresponding to the functional spectrum $\Lambda ^{t}$ of the diffusion
trajectories $y^{t}(\omega )=\{y(s,\omega )|s\leq t\}$, were discovered
within the phenomenological theories of the dynamical reduction and
spontaneous localization [16--18]. As was shown in [21,27] and [29], the
nonlinearity of such equations is related only to the normalization $\Vert
\varphi (t,\omega )\Vert =1$ and after the proper renormalization $\chi
_{t}(\omega )=\sqrt{g_{t}(\omega )}\varphi (t,\omega )$, where $g_{t}(\omega
)$ is the probability density of the process
\begin{equation*}
y(s,\omega )={\frac{1}{s}}\int_{0}^{s}\langle \varphi (t,\omega ),R\varphi
(t,\omega )\rangle \mathrm{d}t+s^{-1}w_{s}\ ,\quad s\in \lbrack 0,t)
\end{equation*}
generated by the standard Wiener process $\omega =\{w_{t}\}$ with respect to
the Wiener probability measure $\mathrm{d}\pi $ on the continuous
trajectories $\omega \in \Omega $, they become the linear ones
\begin{equation}
\mathrm{d}\chi _{t}+\left( {\frac{\mathrm{i}}{\hbar }}H+{\frac{1}{2}}
L^{\dagger }L\right) \chi _{t}\mathrm{d}t=L\chi _{t}\mathrm{d}w\ .
\label{eq:3.1}
\end{equation}
Here $H$ is the Hamiltonian of the object, $L$ is an arbitrary operator in $
\mathcal{H}$ defining the variable $R=\sqrt{\hbar }(L+L^{\dagger })$, under
the continuous measurement, and $\mathrm{d}w=w_{t+\mathrm{d}t}-w_{t}$ is the
forward increment, such that the stochastic equation (\ref{eq:3.1}) has to
be solved in the It\^{o} sense. This solution can be explicitly written as
\begin{equation}
\chi _{t}(\omega )=T_{t}(\omega )\xi ,\quad T_{t}(\omega )=\exp
\{w_{t}L-tL^{2}\} \label{eq:3.2}
\end{equation}
in the case $L=\sqrt{\pi /2h}\,R$, $(h=2\pi \hbar )$, $H=0$, corresponding
to the unsharp measurement of the self-adjoint operator $R$ during the time
interval $[0,t)$ with the trivial free Hamiltonian evolution of the object.
In the case $H\not=0$ this can be used for the approximate solution of (\ref
{eq:3.1}) with $L^{\dagger }=L$, $\chi (0)=\eta $ as $\chi _{t}(\omega
)\simeq T_{t}(\omega )\xi (t)$, where $\xi (t)=V(t)\eta $ is the unitary
evolution $V(t)=\exp \left\{ -{\frac{\mathrm{i}}{\hbar }}Ht\right\} $
without the measurement.
The stochastic transformation (\ref{eq:3.2}) defines the operational density
\begin{equation*}
\Theta _{t}[C](\omega )=T_{t}^{\dagger }(\omega )CT_{t}(\omega )
\end{equation*}
of an instrument as in (\ref{eq:2.9}) with respect to the standard Wiener
probability measure $\mathrm{d}\pi $ on $\omega ^{t}=\{w_{s}\}_{s\leq t}\in
\Omega ^{t}$ having the Gaussian marginal distribution of $q_{t}=\sqrt{\hbar
}w_{t}$
\begin{equation*}
\mathrm{d}\nu :=\int_{q_{t}\in \mathrm{d}q}\mathrm{d}\pi =(ht)^{-1/2}\exp
\{-\pi q^{2}/ht\}\mathrm{d}q\,.
\end{equation*}
Hence $\Psi (t,q)\mathrm{d}\nu :=\int\limits_{q_{t}\in \mathrm{d}q}\Theta
_{t}(\omega )\mathrm{d}\pi =\Phi (t,y)\mathrm{d}y$, where $y={\frac{1}{t}}q$
,
\begin{equation}
\Phi \lbrack C](t,y)=\sqrt{\frac{t}{h}}\exp \left\{ -{\frac{\pi t}{2h}}
(y-R)^{2}\right\} C\exp \left\{ -{\frac{\pi t}{2h}}(y-R)^{2}\right\} \,,
\label{eq:3.3}
\end{equation}
because $\Theta _{t}(\omega )$ depends only on $w_{t}$: $\Theta _{t}(\omega
)=\Psi (t,\sqrt{\hbar }w_{t})$, and
\begin{equation*}
\Psi \lbrack C](t,q)=G(t,q)^{\dagger }CG(t,q)\,,\quad G(t,q)=\exp \left\{ -{
\frac{\pi }{h}}\left( qR-{\frac{t}{2}}R^{2}\right) \right\} \,.
\end{equation*}
The operator $E(t,y)=\Phi \lbrack I](t,y)=f_{R}(t,y)$,
\begin{equation*}
f_{R}(t,y)=\sqrt{\frac{t}{h}}\exp \left\{ -\pi {\frac{t}{h}}
(y-R)^{2}\right\} =F(t,y)^{\dagger }F(t,y)
\end{equation*}
defines the probability density of the unsharp measurement of $R$ with
respect to the ordinary Lebesgue measure $\mathrm{d}y$ as the convolution
\begin{equation*}
g_{\xi }(t,y)=\int \sqrt{\frac{t}{h}}\exp \left\{ -\pi {\frac{t}{h}}
(y-x)^{2}\right\} h_{\xi }(x)\mathrm{d}\lambda =(f_{0}\ast h_{\xi })(y)\,,
\end{equation*}
where $h_{\xi }(x)=|\xi (x)|^{2}$, $\xi (x)=\langle x|\xi $, $\mathrm{d}
\lambda =\sum \delta (x-x_{i})\mathrm{d}x$ in the case of discrete spectrum $
\{x_{i}\}$ of $R$, and $\mathrm{d}\lambda =\mathrm{d}x$ in the case of
purely continuous spectrum of $R$.
This means that the continuous unsharp measurement of $R$ can be described
by the observation model $y_x(t)=x+(1/t)q_t$ of signal $x$ plus Gaussian
error $e(t)=(1/t)q_t$ with independent increments as
\begin{equation}
y_R(t)=R+e(t)I\,,\quad e(t)={\frac{\sqrt\hbar}{t}}w_t\,. \label{eq:3.4}
\end{equation}
The noise $e(t)$ with the mean value $\langle e(t)\rangle=0$ gives a
decreasing unsharpness $\langle e(t)^2\rangle=\hbar/t$ of the measurement
from infinity to zero that is inversely proportional to the duration of the
observation interval $t>0$.
In general, such a model can be realized [21]--[25] \nocite{bib:21,22,23}
\nocite{bib:24,25} as the nondemolition observation within the quantum
stochastic theory of unitary evolution of the compound system on the product
${\mathcal{G}=\mathcal{H}}\otimes \mathcal{F}$ with the Fock space $\mathcal{
F}$ over the one-particle space $L^{2}(\mathbb{R}_{+})$ for a
one-dimensional bosonic field, modeling the measurement apparatus of the
continuous observation.
Let us illustrate this general construction for our particular case $H=0$, $
L=L^{\dagger }$. The unitary interaction $S(t)$ in $\mathcal{G}$, defining
the transformations (\ref{eq:3.2}) as (\ref{eq:2.2}) with respect to the
vacuum state-vector $\varphi _{0}\in \mathcal{F}$, is generated by the field
momenta operators
\begin{equation}
\hat{p}_{s}={\frac{\mathrm{i}}{2}}\sqrt{\hbar }(\hat{a}_{s}^{\dagger }-\hat{a
}_{s})\,,\quad s\in \mathbb{R}_{+} \label{eq:3.5}
\end{equation}
as $S(t)=\exp \left\{ -{\frac{\mathrm{i}}{\hbar }}R\otimes \hat{p}
_{t}\right\} $.
Here $\hat{a}_{s}$ and $\hat{a}_{s}^{\dagger }$ are the canonical
annihilation and creation operators in $\mathcal{F}$, localized on the
intervals $[0,s]$ according to the commutation relations
\begin{equation*}
\lbrack \hat{a}_{r},\hat{a}_{s}]=0,\quad \lbrack \hat{a}_{r},\hat{a}
_{s}^{\dagger }]=s\hat{1}\ ,\quad \forall r\geq s\,.
\end{equation*}
The pointer of the apparatus for the measurement of $R$ is defined by the
field coordinate observables
\begin{equation}
\hat{q}_{s}=\sqrt{\hbar }(\hat{a}_{s}+\hat{a}_{s}^{\dagger })\,,\quad s\in
\mathbb{R}_{+} \label{eq:3.6}
\end{equation}
which are mutually compatible, $[\hat{q}_{r},\hat{q}_{s}]=0$, as are $[
\hat{p}_{r},\hat{p}_{s}]=0$, but incompatible with (\ref{eq:3.5}):
\begin{equation*}
\lbrack \hat{p}_{r},\hat{q}_{s}]=s{\frac{\hbar }{\mathrm{i}}}\hat{1}\ ,\quad
\forall r\geq s\,.
\end{equation*}
The operators $S^{\dagger }(t)$ satisfy the condition (\ref{eq:1.2}): $
\langle x|S(t)=\hat{s}_{x}(t)\langle x|$, where the unitary operators $\hat{s
}_{x}^{\dagger }(t):\mathcal{F}\rightarrow \mathcal{F}$ can be described by
the shifts
\begin{equation}
\hat{s}_{x}^{\dagger }(t):|q,t\rangle \mapsto |xt+q,t\rangle \,,\quad
\forall x,q,t
\end{equation}
similarly to (\ref{eq:2.7}). Here $|q,t\rangle $ is the (generalized)
marginal eigenvector of the self-adjoint operator
\begin{equation*}
\hat{e}(t)=t^{-1}\hat{q}_{t}\ ,\quad \hat{q}_{t}|q,t\rangle =q|q,t\rangle
\,,
\end{equation*}
uniquely defined up to the phase by an eigenvalue $q\in \mathbb{R}$ as the
Dirac $\delta $-function $\delta _{q}$ in the $\hat{q}_{t}$-representation $
L^{2}(\mathbb{R})$ of the Hilbert subspace $\mathcal{A}(t)\varphi
_{0}\subset \mathcal{F}$, where $\mathcal{A}(t)$ is the Abelian algebra
generated by $\hat{q}_{t}$, and $\varphi _{0}\in \mathcal{F}$ is the
vacuum--vector of the Fock space $\mathcal{F}$. Due to this,
\begin{equation*}
\hat{y}_{x}(t)=\hat{s}_{x}^{\dagger }(t)\hat{e}(t)\hat{s}_{x}(t)=x\hat{1}+
\hat{e}(t)\,,
\end{equation*}
which gives the quantum stochastic realization of the observation model (\ref
{eq:3.4}) in terms of the output nondemolition process $\hat{y}_{R}(t)={
\frac{1}{t}}Y(t)$,
\begin{equation}
Y(t)=S^{\dagger }(t)(I\otimes \hat{q}_{t})S(t)=tR\otimes \hat{1}+I\otimes
\hat{q}_{t} \label{eq:3.8}
\end{equation}
similarly to (\ref{eq:2.8}) with $\hat{q}_{t}$ represented by the operator $
\sqrt{\hbar }(\hat{a}_{t}+\hat{a}_{t}^{\dagger })$. Indeed, the classical
noise $q_{t}=\sqrt{\hbar }w_{t}$ is statistically equivalent to the quantum
one $\hat{q}_{t}=\sqrt{\hbar }(\hat{a}_{t}+\hat{a}_{t}^{\dagger })$ with
respect to the vacuum state, as can be seen by a comparison of their
characteristic functionals:
\begin{eqnarray*}
\langle e^{\mathrm{i}\int f(s)\mathrm{d}q}\rangle &:&=\int \exp \{\mathrm{i}
\sqrt{\hbar }\int_{0}^{\infty }f(s)\mathrm{d}w\}\mathrm{d}\pi =\exp \left\{ -
{\frac{\hbar }{2}}\int_{0}^{\infty }f(s)^{2}\mathrm{d}s\right\} \\
&=&\langle \varphi _{0},e^{\mathrm{i}\int f(s)\mathrm{d}\hat{a}^{\dagger
}}e^{-{\frac{\hbar }{2}}\int_{0}^{\infty }f(s)^{2}\mathrm{d}s}e^{\mathrm{i}
\int f(s)\mathrm{d}\hat{a}}\varphi _{0}\rangle =\langle \varphi _{0},e^{
\mathrm{i}\int f(s)\mathrm{d}\hat{q}}\varphi _{0}\rangle \,.
\end{eqnarray*}
Here we used the annihilation property $\hat{a}_{s}\varphi _{0}=0$ and the
Wick ordering formula
\begin{equation}
\exp \{z^{\prime }\hat{a}_{s}+\hat{a}_{s}^{\dagger }z\}=e^{z\hat{a}
_{s}^{\dagger }}\exp \left\{ z^{\prime }{\frac{s}{2}}z\right\} e^{z^{\prime }
\hat{a}_{s}}\ . \label{eq:3.9}
\end{equation}
The observable process (\ref{eq:3.8}) satisfies the nondemolition condition (
\ref{eq:1.1}) (and self-nondemolition) with respect to any quantum process $
X(t)=\left( S^{\dagger }ZS\right) (t)$ given by the operators $Z(t)$,
commuting with all $Q(s)=I\otimes \hat{q}(s)$, $s\leq t$, because
\begin{equation*}
Y(s)=S^{\dagger }(t)(I\otimes \hat{q}(s))S(t)\,,\quad \forall s\leq t\ ,
\end{equation*}
as follows from the commutation relations
\begin{equation*}
\hat{s}_{x}^{\dagger }(t)\hat{q}_{s}=(sx\hat{1}+\hat{q}_{s})\hat{s}
_{x}^{\dagger }(t)\ ,\quad \forall s\leq t
\end{equation*}
for $\hat{s}_{x}^{\dagger }(t)=\exp \left\{ {\frac{\mathrm{i}}{\hbar }}x\hat{
p}_{t}\right\} $. Indeed, due to this
\begin{equation*}
\lbrack X(t),Y(s)]=W(t)[Z(t),Q(s)]W^{\dagger }(t)=0\,,
\end{equation*}
if $t>s$ and $[Z(t),Q(s)]=0$, as in the cases $Z(t)=C\otimes \hat{1}$ and $
Z(t)=Q(t)$, where $Q(t)=I\otimes \hat{q}_{t}$.
Now we can find the transform
\begin{equation*}
\langle q,t|S\varphi _{0}=G(t,q)\varphi _{0}(t,q)={\frac{1}{\sqrt{t}}}
T\left( t,{\frac{1}{t}}q\right) \,,
\end{equation*}
where $\varphi _{0}(t,q)=\langle q,t|\varphi _{0}$ is the vacuum-vector $
\varphi _{0}\in \mathcal{F}$ in the marginal $\hat{q}_{t}=q$ representation
\begin{equation*}
\varphi _{0}(t,q)=(ht)^{-1/4}\exp \{-\pi q^{2}/2ht\}\,,\quad q\in \mathbb{R}
\end{equation*}
normalized with respect to the Lebesgue measure $\mathrm{d}q$ on $\mathbb{R}$
. To this end, let us apply the formula (\ref{eq:3.9}) to $S^{\dagger
}(t)=\exp \left\{ {\frac{\mathrm{i}}{\hbar }}R\otimes \hat{p}_{t}\right\} $:
\begin{equation*}
\exp \{-L\otimes \hat{a}_{t}+L\otimes \hat{a}_{t}^{\dagger }\}=e^{L\otimes
\hat{a}_{t}^{\dagger }}\exp \left\{ -{\frac{t}{2}}L^{2}\right\} e^{-L\otimes
\hat{a}_{t}}\,,
\end{equation*}
where $L=R/2\sqrt{\hbar }$. Using the annihilation property $\exp \{\pm
L\otimes \hat{a}_{t}\}\varphi _{0}=\varphi _{0}$, we obtain
\begin{eqnarray*}
W(t)^{\dagger }\varphi _{0} &=&e^{L\otimes \hat{a}_{t}^{\dagger }}\exp
\left\{ -{\frac{t}{2}}L^{2}\right\} e^{-L\otimes \hat{a}_{t}}\varphi _{0} \\
&=&e^{L\otimes \hat{a}_{t}^{\dagger }}\exp \left\{ -{\frac{t}{2}}
L^{2}\right\} e^{L\otimes \hat{a}_{t}}\varphi _{0}=e^{L\otimes \hat{w}
_{t}-tL^{2}}\varphi _{0}\,.
\end{eqnarray*}
This is equivalent to (\ref{eq:3.2}) because of the Segal isometry of the
vectors $\exp \{x\hat{w}_{t}\}\varphi _{0}\in \mathcal{F}$, where $x\in
\mathbb{R} $, $\hat{w}_{t}=\hat{a}_{t}+\hat{a}_{t}^{\dagger }$, and the
stochastic functions $\exp \{xw_{t}\}\in L_{\pi }^{2}(\Omega )$ in the
Hilbert space of the Wiener measure $\pi $ on $\Omega $. Hence the transform
$F\left( t,{\frac{1}{t}}q\right) =\sqrt{t}G(t,q)\varphi _{0}(t,q)$ defining
the density $\Phi (t,y)=F(t,y)^{\dagger }[\cdot ]F(t,y)$ of the instrument (
\ref{eq:2.9}) with respect to $\mathrm{d}y$ has the same form, as in (\ref
{eq:3.3}):
\begin{equation}
F(t,y)=(t/h)^{1/4}\exp \left\{ -{\frac{\pi t}{2h}}(y-R)^{2}\right\} \,.
\label{eq:3.10}
\end{equation}
\section{A Hamiltonian model for continuous reduction}
As we have shown in the previous section the continuous reduction equation (
\ref{eq:3.1}) for the non-normalized stochastic state-vector $\chi(t,\omega)$
can be obtained from an interaction model of the object of measurement with
a bosonic field. This can be done by conditioning with respect to a
nondemolition continuous observation of field coordinate observables (\ref
{eq:3.6}) in the vacuum state.
The unitary evolution $\psi (t)=U(t)\psi _{0}$ in the tensor product $
\mathcal{G}=\mathcal{H}\otimes \mathcal{F}$ with the Fock space $\mathcal{F}$
corresponding to (\ref{eq:3.1}) can be written as the generalized Schr\"{o}
dinger equation
\begin{equation}
\mathrm{d}\psi (t)+K_{0}\psi (t)\mathrm{d}t=(L\otimes \mathrm{d}\hat{a}
_{t}^{\dagger }-L^{\dagger }\otimes \mathrm{d}\hat{a}_{t})\psi (t)
\label{eq:4.1}
\end{equation}
in terms of the annihilation and creation canonical field operators $\hat{a}
_{s}$, $\hat{a}_{s}^{\dagger }$. This is a singular differential equation
which has to be treated as a quantum stochastic one [29] in terms of the
forward increments $\mathrm{d}\psi (t)=\psi (t+\mathrm{d}t)-\psi (t)$ with $
K_{0}=K\otimes \hat{1}$, $K=(\mathrm{i}/\hbar )H+{\frac{1}{2}}L^{\dagger }L$
. In the particular case $L=R/2\sqrt{\hbar }=L^{\dagger }$ of interest, eq. (
\ref{eq:4.1}) can be written simply as a classical stochastic one, $\mathrm{d
}\psi +K\psi \mathrm{d}t=(\mathrm{i}/\hbar )R\mathrm{d}p$, in It\^{o} sense
with respect to a Wiener process $p_{t}$ of the same intensity $(\mathrm{d}
p_{t})^{2}=\hbar \mathrm{d}t/4$ as the field momenta operators (\ref{eq:3.5}
) with respect to the vacuum state. But the standard Wiener process $
v_{t}=2p_{t}/\sqrt{\hbar }$ cannot be identified with the Wiener process $
w_{t}$ in the reduction equation (\ref{eq:3.1}) because of the nondemolition
principle. Moreover, there is no way to get the nondemolition property (\ref
{eq:1.1}) for
\begin{equation*}
X(t)=U(t)^{\dagger }X_{0}U(t)\ ,\quad Y(s)=U(s)^{\dagger }Y_{0}(s)U(s)
\end{equation*}
with independent or even merely commuting $v_{t}$ and $w_{t}$, as one can
see in the simplest case $H=0$, $X_{0}={\frac{\hbar }{\mathrm{i}}}{\frac{
\mathrm{d}}{\mathrm{d}x}}\otimes \hat{1}$, $R=x$, $Y_{0}(s)=I\otimes \hat{q}
_{s}$.
Indeed, the error process $q_{t}=\sqrt{\hbar }w_{t}$ is appearing in (\ref
{eq:3.4}) as a classical representation of the field coordinate observables (
\ref{eq:3.6}) which do not commute with (\ref{eq:3.5}). In this case, eq. (
\ref{eq:4.1}) gives the unitary operator $U(t)=\exp \left\{ -{\frac{\mathrm{i
}}{\hbar }}x\otimes \hat{p}_{t}\right\} $ and the Heisenberg operators
\begin{equation*}
X(t)={\frac{\hbar }{\mathrm{i}}}{\frac{\mathrm{d}}{\mathrm{d}x}}\otimes \hat{
1}-I\otimes \hat{p}_{t}\ ,\quad Y(s)=sx\otimes \hat{1}+I\otimes \hat{q}_{s}
\end{equation*}
commute for all $t\geq s$ only because
\begin{equation*}
\left[ {\frac{\hbar }{\mathrm{i}}}{\frac{\mathrm{d}}{\mathrm{d}x}},sx\right]
\otimes \hat{1}=s{\frac{\hbar }{\mathrm{i}}}I\otimes \hat{1}=[\hat{p}_{t},
\hat{q}_{s}]\ ,\quad \forall t\geq s\,.
\end{equation*}
Hence, there is no way to obtain (\ref{eq:1.1}) for the classical stochastic
processes $p_{t}$, $q_{s}$ by replacing simultaneously $\hat{p}_{t}$ and $
\hat{q}_{s}$ for commuting $\sqrt{\hbar }v_{t}/2$ and $\sqrt{\hbar }w_{t}$
even though $p_{t}$ is statistically identical to $\hat{p}_{t}$ and
separately $q_{s}$ to $\hat{q}_{s}$.
Let us show now how one can get a completely different type of the reduction
equation than that postulated in [16]--[20] simply by fixing another
nondemolition process for the same interaction, corresponding to the Schr
\"{o}dinger stochastic equation (\ref{eq:4.1}) with $L=L^{\dagger }$ and $
H=0 $.
We fix the discrete pointer of the measurement apparatus, which is described
by the observable $\hat{n}_{s}={\frac{1}{s}}\hat{a}_{s}^{\dagger }\hat{a}_{s}
$, by counting the quanta of the Bosonic field in the mode $1_{s}(r)=1$ if $
r\in \lbrack 0,s)$ and $1_{s}(r)=0$ if $r\notin \lbrack 0,s)$. The operators
$\hat{n}_{t}$ have the integer eigenvalues $0,1,2,\dots $ corresponding to
the eigen-vectors
\begin{equation*}
|n,t\rangle =e^{t/2}(\hat{a}_{t}^{\dagger }/t)^{n}\varphi _{0}\ ,\quad \hat{a
}_{t}\varphi _{0}=0
\end{equation*}
which we have normalized with respect to the standard Poissonian
distribution
\begin{equation}
\nu _{n}=e^{-t}t^{n}/n!\ ,\quad n=0,1,\dots \ \label{eq:4.2}
\end{equation}
as $\langle n,t|n,t\rangle =1/\nu _{n}$. Let us find the matrix elements
\begin{equation*}
\langle n,t|S(t)\varphi _{0}=G(t,n)
\end{equation*}
for the unitary evolution operators
\begin{equation}
S(t)=\exp \{-L\otimes \hat{a}_{t}+L\otimes \hat{a}_{t}^{\dagger }\}\,,
\label{eq:4.3}
\end{equation}
by resolving eq. (\ref{eq:4.1}) in the considered case. This can be done
again by representing $S(t)$ in the form (\ref{eq:3.9}) for $z^{\prime }=L$,
$z=-L$ and the commutation rule
\begin{equation*}
(I\otimes \hat{a}_{t})e^{L\otimes \hat{a}_{t}^{\dagger }}=e^{L\otimes \hat{a}
_{t}^{\dagger }}(tL\otimes \hat{1}+I\otimes \hat{a}_{t})\ .
\end{equation*}
Due to the annihilation property, this gives
\begin{equation}
\varphi _{0}^{\dagger }(\hat{a}_{t}/t)^{n}e^{L\otimes \hat{a}_{t}^{\dagger
}}\exp \left\{ {\frac{t}{2}}(1-L^{2})\right\} e^{-L\otimes \hat{a}
_{t}}\varphi _{0}=L^{n}\exp \left\{ {\frac{t}{2}}(1-L^{2})\right\} =G(t,n)\ .
\label{eq:4.4}
\end{equation}
The obtained reduction transformations are not unitary and not projective
for any $n=0,1,2,\dots $, but they define the nonorthogonal identity
resolution
\begin{equation*}
\sum_{n=0}^{\infty }G(t,n)^{\dagger }G(t,n)e^{-t}t^{n}/n!=I
\end{equation*}
corresponding to the operational density
\begin{equation}
\Psi \lbrack C](t,n)=e^{t}L^{n}e^{-tL^{2}/2}CL^{n}e^{-tL^{2}/2} \label{eq:4.5}
\end{equation}
with respect to the measure (\ref{eq:4.2}). Now we can easily obtain the
stochastic reduction equation for $\chi (t,\omega )=T(t,\omega )\eta $ if we
replace the eigenvalue $n$ of $\hat{n}_{t}$ by the standard Poissonian
process $n_{t}(\omega )$ with the marginal distributions (\ref{eq:4.2}).
Such a process $n_{t}$ describes the trajectories $t\mapsto n_{t}(\omega )$
that spontaneously increase by $\mathrm{d}n_{t}(\omega )=1$ at random time
instants $\omega =\{t_{1}<t_{2}<\dots \}$ as the spectral functions $
\{n_{t}(\omega )\}$ for the commutative family $\{\hat{n}_{t}\}$. The
corresponding equation for the stochastic state-vector $\chi (t,\omega
)=\chi (t,n_{t}(\omega ))$ can be written in the It\^{o} sense as
\begin{equation}
\mathrm{d}\chi (t)+{\frac{1}{2}}(L^{2}-I)\chi (t)\mathrm{d}t=(L-1)\chi (t)
\mathrm{d}n_{t}\,. \label{eq:4.6}
\end{equation}
Obviously Eq. (\ref{eq:4.6}) has the unique solution $\chi (t)=T(t)\eta $
written for a given $\eta \in \mathcal{H}$ as
\begin{equation}
\chi (t)=L^{n_{t}}\exp \left\{ {\frac{t}{2}}(1-L^{2})\right\} \eta
=G(t,n_{t})\eta \label{eq:4.7}
\end{equation}
because of $\mathrm{d}\chi (t)=(L-1)\chi (t)$ when $\mathrm{d}n_{t}=1$,
otherwise $\mathrm{d}\chi (t)={\frac{1}{2}}(1-L^{2})\chi (t)\mathrm{d}t$ in
terms of the forward differential $\mathrm{d}\chi (t)=\chi (t+\mathrm{d}
t)-\chi (t)$.
Such an equation was derived in [26]--[30] also for the general quantum
stochastic equation (\ref{eq:4.1}) on the basis of quantum stochastic
calculus and filtering theory [31]. Moreover, it was proved that any other
stochastic reduction equation can be obtained as a mixture of Eq. (\ref
{eq:3.1}) and (\ref{eq:4.6}), which are of fundamentally different types.
Finally let us write down a Hamiltonian interaction model corresponding to
the quantum stochastic Schr\"{o}dinger equation (\ref{eq:4.1}). Using the
notion of chronologically ordered exponential
\begin{equation}
U(t)=\exp ^{\leftarrow }\left\{ -{\frac{\mathrm{i}}{\hbar }}\int_{0}^{t}H(r)
\mathrm{d}r\right\} \label{eq:4.8}
\end{equation}
one can extend its solutions $\psi (t)=\exp \left\{ -{\frac{\mathrm{i}}{
\hbar }}R\otimes \hat{p}_{t}\right\} \psi _{0}$ also to the general case, $
H\not=0$, $L^{\dagger }\not=L$ in terms of the generalized Hamiltonian
\begin{equation*}
H(t)=H_{0}+{\frac{\hbar }{\mathrm{i}}}(L^{\dagger }\otimes \hat{a}
(t)-L\otimes \hat{a}(t)^{\dagger })\,,
\end{equation*}
where $\hat{a}(t)=\mathrm{d}\hat{a}_{t}/\mathrm{d}t$, $\hat{a}^{\dagger }(t)=
\mathrm{d}\hat{a}_{t}^{\dagger }/\mathrm{d}t$, $H_{0}=H\otimes \hat{1}$. The
time-dependent Hamiltonian $H(t)$ can be treated as the object interaction
Hamiltonian
\begin{equation*}
H(t)=H_{0}+{\frac{\hbar }{\mathrm{i}}}e^{{\frac{\mathrm{i}}{\hbar }}
H_{1}t}(L^{\dagger }\otimes \hat{a}(0)-L\otimes \hat{a}(0)^{\dagger })e^{-{
\frac{\mathrm{i}}{\hbar }}H_{1}t}
\end{equation*}
for a special free evolution Hamiltonian $H_{1}=I\otimes \hat{h}$ of the
quantum bosonic field $\hat{a}(r)$, $r\in \mathbb{R}$ described by the
canonical commutation relations
\begin{equation*}
\lbrack \hat{a}(r),\hat{a}(s)]=0,\quad \lbrack \hat{a}(r),\hat{a}
(s)^{\dagger }]=\delta (r-s)\hat{1}\,,\quad \forall \,r,s\in \mathbb{R}\,.
\end{equation*}
This free evolution in the Fock space $\mathcal{F}$ over one particle space $
L^{2}(\mathbb{R})$ is simply given by the shifts
\begin{equation*}
e^{{\frac{\mathrm{i}}{\hbar }}\hat{h}t}\hat{a}(r)e^{-{\frac{\mathrm{i}}{
\hbar }}\hat{h}t}=\hat{a}(r+t)\,,\quad \forall \,r,t\in \mathbb{R}\,,
\end{equation*}
corresponding to the second quantization $\hat{h}=\hat{a}^{\dagger }\hat{
\varepsilon}\hat{a}$ of the one-particle Hamiltonian $\hat{\varepsilon}={
\frac{\hbar }{\mathrm{i}}}{\frac{\partial }{\partial r}}$ in $L^{2}(\mathbb{R
})$. Hence, the total Hamiltonian of the system \textquotedblleft object
plus measurement apparatus\textquotedblright\ can be written as
\begin{equation}
H_{s}=H\otimes \hat{1}+{\frac{\hbar }{\mathrm{i}}}(L^{\dagger }\otimes \hat{a
}(0)-L\otimes \hat{a}(0)^{\dagger }+I\otimes \hat{a}^{\dagger }\hat{a}
^{\prime })\,, \label{eq:4.9}
\end{equation}
where $\hat{a}^{\dagger }\hat{a}^{\prime }=\int_{-\infty }^{\infty }\hat{a}
(r)^{\dagger }\hat{a}(r)^{\prime }\,\mathrm{d}r$, $\hat{a}(r)^{\prime }=
\mathrm{d}\hat{a}(r)/\mathrm{d}r$. Of course, the free field Hamiltonian $
\hat{h}=\hbar \hat{a}^{\dagger }\hat{a}^{\prime }/\mathrm{i}$ is rather
unusual, as it corresponds to the single-particle energy $\varepsilon (p)=p$ in
the momentum representation giving the unbounded (from below) spectrum of $
\hat{\varepsilon}$.
But one can consider such an energy as an approximation
\begin{equation}
\varepsilon (p)=\lim_{p_{0}\rightarrow \infty }c\left( \sqrt{
(p+p_{0})^{2}+(m_{0}c)^{2}}-\sqrt{p_{0}^{2}+(m_{0}c)^{2}}\right) =v_{0}p
\label{eq:4.10}
\end{equation}
in the velocity units $v_{0}=c/\sqrt{1+(m_{0}c/p_{0})^{2}}=1$ for the shift $
\varepsilon _{0}(p)-\varepsilon _{0}(0)$ of the standard relativistic energy
$\varepsilon _{0}(p)=c\sqrt{(p+p_{0})^{2}+(m_{0}c)^{2}}$ as the function of
small deviations $|p|\ll p_{0}$ from the initially fixed momentum $p_{0}>0$.
This corresponds to the treatment of the measurement apparatus as a beam of
bosons with mean momentum $p_{0}\rightarrow \infty $ given in an initial
coherent state by a plane wave
\begin{equation*}
f_{0}(r)=c\exp \{\mathrm{i}p_{0}r/\hbar \}\,.
\end{equation*}
This input beam of bosons illuminates the position $R=\sqrt{\hbar }
(L+L^{\dagger })$ of the object of measurement via the observation of the
commuting position operators $Y(t)$, $t\in \mathbb{R}$ of the output field
given by the generalized Heisenberg operator-process,
\begin{eqnarray*}
\dot{Y}(t) &=&e^{{\frac{\mathrm{i}}{\hbar }}H_{s}t}(I\otimes \hat{q}(0))e^{-{
\frac{\mathrm{i}}{\hbar }}H_{s}t} \\
&=&U(t)^{\dagger }(I\otimes \hat{q}(t))U(t)=R(t)+I\otimes \hat{q}(t)\,.
\end{eqnarray*}
This is the simplest quantum Hamiltonian model for the continuous
nondemolition measurement of the physical quantity $R$ of a quantum object.
Thus the unitary evolution group $U_{s}(t)=e^{{\frac{-\mathrm{i}}{\hbar }}
H_{s}t}$ of the compound system is defined on the product $\mathcal{H}
\otimes \mathcal{F}$ with the two--sided Fock space $\mathcal{F}=\Gamma
(L^{2}(\mathbb{R}))$ by $U_{s}(t)=V_{1}(t)U(t)$, where $V_{1}=I\otimes \hat{v
}$ is the free evolution group $\hat{v}(t)=e^{{\frac{-\mathrm{i}}{\hbar }}
\hat{h}t}$ of the field, corresponding to the shifts
\begin{equation*}
f\in L^{2}(\mathbb{R})\mapsto f^{t}(s)=f(s-t)
\end{equation*}
of the one-particle space $L^{2}(\mathbb{R})$. To obtain such an evolution
from a realistic Hamiltonian of a system of atoms interacting with an
electromagnetic field one has to use a Markovian approximation,
corresponding to the weak-coupling or low density limits [39].
Thus, the problem of unitary dilation of the continuous reduction and
spontaneous collapse was solved in [25] even for infinite-dimensional Wiener
noise in a stochastic equation of type (\ref{eq:3.2}).
\section*{Conclusion}
Analysis [1] of the quantum measurement notion shows that it is a complex
process, consisting of the stage of preparation [15] and the stage of
registration, i.e., fixing of the pointer and its output state and the
objectification [40].
The dynamical process of the interaction is properly treated within the
quantum theory of singular coupling to get the nontrivial models of
continuous nondemolition observation while the statistical process of the
objectification is properly treated within the quantum theory of stochastic
filtering to get the nonlinear models of continuous spontaneous localization
[21--31].
The nondemolition principle plays the role of superselection for the
observable processes provided the quantum dynamics is given and restricts
the dynamics provided the observation is given. It is a necessary and
sufficient condition for the statistical interpretation of quantum
causality, giving rise to the quantum noise environment but not to the
classical noise environment of the phenomenological continuous reduction and
spontaneous localization theories [16--20].
The axiomatic quantum measurement theory based on the nondemolition
principle abandons the projection postulate as the redundancy given by a
unitary interaction with a meter in the initial eigen-state. It treats the
reduction of the wave packet not as a real dynamical process but as the
statistical evaluation of the \emph{a posteriori\/} states for the
prediction of the probabilities of the future measurements conditioned by
the past observation.
There is no need to postulate a nonstandard, nonunitary, and nonlinear
evolution for the continuous state-vector reduction in the phenomenological
quantum theories of spontaneous localization, and there is no universal
reduction modification of the fundamental Schr\"odinger equation. The
nonunitary stochastic evolution giving the continuous reduction and the
spontaneous localization of the state-vector can be and has been rigorously
derived within the quantum stochastic theory of unitary evolution of the
corresponding compound system, the object of the measurement and an input
Bose field in the vacuum state.
The statistical treatment of the quantum measurement as nondemolition
observation is possible only in the framework of open systems theory in the
spirit of the modern astrophysical theory of the spreading universe. The
open systems theory assumes the possibility of producing for each quantum
object an arbitrary time series of its copies and enlarges these objects
into an environment, a quantum field, innovating the measurement apparatus
by means of a singular interaction for a continuous observation.
It is nonsense to consider seriously a complete observation in the closed
universe; there is no universal quantum observation, no universal reduction
and spontaneous localization for the wave function of the world. Nobody can
prepare an \textit{a priori} state compatible with a complete world
observation and reduce the \textit{a posteriori} state, except God.
But acceptance of God as an external subject of the physical world is at
variance with the closeness assumption of the universe. Thus, the world
state-vector has no statistical interpretation, and the humanitarian
validity of these interpretations would, in any case, be zero. The
probabilistic interpretation of the state-vector is relevant to only the
induced states of the quantum open objects being prepared by
experimentalists in an appropriate compound system for the nondemolition
observation to produce the reduced states after the registration.
\section*{Acknowledgment}
This work was supported by Deutsche Forschung Gemeinschaft at Philipps
Universit\"at, Marburg. I am deeply grateful to Professors L. Accardi, O.
Melsheimer, and H. Neumann for stimulating discussions and encouragement.
\section*{References}
\begin{description}
\item {[1]} \textsc{G. Ludwig}, \textit{Math. Phys.}, 4:331 (1967); 9, 1
(1968).
\item {[2]} \textsc{E.B. Davies, J. Lewis}, \textit{Commun. Math. Phys.},
17:239--260, (1970).
\item {[3]} \textsc{L.E. Ballentine}, \textit{Rev. Mod. Phys.}, 42:358--381,
(1970).
\item {[4]} \textsc{A. Shimony}, \textit{Phys. Rev. D}, 9:2321--2323, (1974).
\item {[5]} \textsc{V.P. Belavkin}, Optimal linear random filtration of
quantum Boson signals. Problems of Control and Inform. Theory, 3:47--62,
(1974).
\item {[6]} \textsc{V.P. Belavkin}, Optimal quantum filtration of Markovian
signals. Problems of Control and Inform. Theory, 7(5):345--360, (1978).
\item {[7]} \textsc{V.P. Belavkin}, Optimal filtering of Markov signals with
quantum noise, \textit{Radio Eng. Electron. Physics}, 25:1445--1453, (1980).
\item {[8]} \textsc{A. Barchielli, L. Lanz, G.M. Prosperi}, \textit{Nuovo
Cimento}, 72B:79, (1982).
\item {[9]} \textsc{V.P. Belavkin}, Theory of Control of Observable Quantum
Systems, \textit{Automatica and Remote Control}, 44(2):178--188, (1983).
\item {[10]} \textsc{A. Peres}, \textit{Am. J. Phys.}, 52:644, (1984).
\item {[11]} \textsc{R.L. Stratonovich}, Conditional Markov processes and
their applications to optimal control, \textit{MGU}, Moscow 1966.
\item {[12]} \textsc{R.E. Kalman, R.S. Bucy}, New results in linear
filtering theory and prediction problems, \textit{J. Basic Engineering,
Trans. ASME}, 83:95--108, (1961).
\item {[13]} \textsc{V.P. Belavkin}, Nondemolition measurement and control
in quantum dynamical systems. In: Proc. of CISM seminar on \textquotedblleft \textit{Inform.
Compl. and Control in Quantum Physics\textquotedblright }, A. Blaquiere,
ed., Udine 1985, 311--239, Springer--Verlag, Wien, 1987.
\item {[14]} \textsc{V.P. Belavkin}, Nondemolition measurements, nonlinear
filtering and dynamical programming of quantum stochastic processes. In:
Proc. of Bellmann Continuum Workshop \textquotedblleft \textit{Modelling and
Control of Systems}\textquotedblright , A. Blaquiere, ed., Sophia--Antipolis
1988, 245--265, \textit{Lect. Not. Cont. Inf. Sci.}, 121, Springer--Verlag,
Berlin, 1988.
\item {[15]} \textsc{L.E. Ballentine}, \textit{Int. J. Theor. Phys.},
27:211--218, (1987).
\item {[16]} \textsc{P. Pearle}, \textit{Phys. Rev.}, D29:235, (1984).
\item {[17]} \textsc{N. Gisin}, \textit{J. Phys. A.: Math. Gen.},
19:205--210, (1986).
\item {[18]} \textsc{L. Diosi}, \textit{Phys. Rev.}, A40:1165--1174, (1988).
\item {[19]} \textsc{G.C. Ghirardi, A. Rimini, T. Weber}, \textit{Phys. Rev.},
D34(2):470--491, (1986).
\item {[20]} \textsc{G.C. Ghirardi, P. Pearle, A. Rimini}, \textit{Phys. Rev.
}, A42:478--89, (1990).
\item {[21]} \textsc{V.P. Belavkin}, A new wave equation for a continuous
non--demolition measurement, \textit{Phys. Lett.}, A140:355--358, (1989).
\item {[22]} \textsc{V.P. Belavkin, P. Staszewski}, A quantum particle
undergoing continuous observation, \textit{Phys. Lett.}, A140:359--362,
(1989).
\item {[23]} \textsc{V.P. Belavkin}, A posterior Schr\"{o}dinger equation
for continuous non--demolition measurement, \textit{J. Math. Phys},
31(12):2930--2934, (1990).
\item {[24]} \textsc{V.P. Belavkin, P. Staszewski}, Nondemolition
observation of a free quantum particle, \textit{Phys. Rev. A.\/},
45(3):1347--1356, (1992).
\item {[25]} \textsc{V.P. Belavkin}, Quantum continual measurements and a
posteriori collapse on CCR, \textit{Commun. Math. Phys.}, 146, 611--635,
(1992).
\item {[26]} \textsc{V.P. Belavkin}, A continuous counting observation and
posterior quantum dynamics, \textit{J. Phys. A, Math. Gen.}, 22: L
1109--1114, (1989).
\item {[27]} \textsc{V.P. Belavkin}, A stochastic posterior Schr\"{o}dinger
equation for counting non--demolition measurement, \textit{Letters in Math.
Phys.}, 20:85--89, (1990).
\item {[28]} \textsc{V.P. Belavkin, P. Staszewski}, \textit{Rep. Math. Phys.}
, 29:213--225, (1991).
\item {[29]} \textsc{V.P. Belavkin}, Stochastic posterior equations for
quantum nonlinear filtering. Probab., \textit{Theory and Math. Stat.}, ed.
B. Grigelionis, 1:91--109, VSP/Mokslas 1990.
\item {[30]} \textsc{A. Barchielli, V.P. Belavkin}, Measurements continuous
in time and a posteriori states in quantum mechanics, \textit{J. Phys. A,
Math. Gen.}, 24:1495--1514, (1991).
\item {[31]} \textsc{V.P. Belavkin}, Quantum stochastic calculus and quantum
nonlinear filtering, \textit{J. of Multivar. Analysis}, 42(2):171--201,
(1992).
\item {[32]} \textsc{V.B. Braginski, Y.I. Vorontzov, F.J. Halili}, \textit{
Sov. Phys.--JETP}, 46(2):765, 4:171--201, (1977).
\item {[33]} \textsc{K.S. Thorne, R.W.P. Drever, C.M. Caves, M. Zimmermann,
V.D. Sandberg}, \textit{Phys. Rev. Lett.}, 40:667, (1978).
\item {[34]} \textsc{A.S. Holevo}, Quantum estimation. In Advances in
statistical signal processing, 1:157--202, (1987).
\item {[35]} \textsc{V.P. Belavkin}, Reconstruction theorem for quantum
stochastic processes, \textit{Theoret. Math. Phys.}, 3:409--431, (1985).
\item {[36]} \textsc{K. Kraus}, States, Effects and operations,
Springer--Verlag, Berlin 1983.
\item {[37]} \textsc{E.B. Ozawa}, \textit{J. Math. Phys.}, 25:79--87, (1984).
\item {[38]} \textsc{A. Barchielli, G. Lupieri}, \textit{J. Math. Phys.},
26:2222--2230, (1985).
\item {[39]} \textsc{L. Accardi, R. Alicki, A. Frigerio, Y.G. Lu}, An
invitation to weak coupling and low density limits, Quantum probability and
re. topics VI, ed. L. Accardi, 3--62, World Scientific, Singapore 1991.
\item {[40]} \textsc{P. Busch, P.J. Lahti, P. Mittelstaedt}, The quantum
theory of measurement, \textit{Lecture Notes in Physics}, Springer--Verlag,
Berlin 1991.
\end{description}
\end{document} |
\begin{document}
\title{Comment on ``Generators of matrix algebras in dimension 2 and 3''}
\begin{abstract}
Theorem 7 in \cite{AslaksenSletsjoe_WrongPaper_2009} states sufficient conditions to determine whether a pair generates the algebra of $3 \times 3$ matrices over an algebraically closed field of characteristic zero. In that case, an explicit basis for the full algebra is provided, which is composed of words of small length on such pair. However, we show that this theorem is wrong, since it is based on the validity of an identity which is not true in general.
\end{abstract}
\section{Discussion}
Let $M_n (\mathbb{K})$ denote the set of all $n \times n $ matrices over a field $\mathbb{K}$. Let $S$ be a subset of $M_n (\mathbb{K})$ and denote by $S^m$ the set of all products of the form $A_1 \cdots A_m$, with $A_i \in S\cup\{I_n\}$ for all $i=1, \ldots , m$, where $I_n$ is the $n \times n$ identity matrix. We say that a generating set $S$ has length $k \in \mathbb{N}$ if
\begin{equation*}
\text{span}\left\lbrace S^{k} \right\rbrace = M_n (\mathbb{K}) \, , \quad \text{and} \quad \text{span}\left\lbrace S^{k-1} \right\rbrace \subsetneq \text{span}\left\lbrace S^{k} \right\rbrace \, .
\end{equation*}
The problem of finding bounds on the length of generating sets, and in particular generating pairs, has been thoroughly studied in the past decades. For arbitrary order $n$, the best known bound on the length of any generating set is $O(n\log n)$ \cite{shitov2019improved}, although it was conjectured years before that the optimal bound might be $2n-2$ \cite{Paz_Conjecture_1984}. Indeed, this is the case at least for $n \leq 6$ \cite{LongstaffNiemeyerPanaia_LengthDimension5_2006, LambrouLongstaff_LengthDimension6_2009}, for which the bound is shown to be sharp. Moreover, this bound also holds for arbitrary $n$ under different conditions on one of the generators \cite{LogstaffRosenthal_LengthsIrreduciblePairs_2011, GutermanLaffeyMarkovaSmigoc_PazConjecture_2018}, even though it is always possible to find $n$ matrices in $M_n (\mathbb{K})$ such that the words of length $2$ in those matrices span the whole algebra \cite{Rosenthal_WordBasis_2012}. Finally, when the problem is reduced to the study of generic matrices, the bound can be arguably improved to $O(\log n)$ \cite{KlepSpenko_SweepingWords_2016}.
In \cite{AslaksenSletsjoe_WrongPaper_2009}, the problem of providing conditions under which a set of $2 \times 2$ or $3 \times 3$ matrices over an algebraically closed field of characteristic zero generate the full matrix algebra is addressed. However, Theorem 7 in that paper is wrong, since Equation (3) in that reference does not hold in general. In this note, we give some comments on that result and present numerical evidence to show the falseness of the aforementioned expression for arbitrary matrices $A$ and $B$. Unless stated otherwise, we follow \cite{AslaksenSletsjoe_WrongPaper_2009} for terminology and notations.
Before recalling Theorem 7 of \cite{AslaksenSletsjoe_WrongPaper_2009}, we denote by $ M_3$ the algebra of $3 \times 3$ matrices over a certain algebraically closed field of characteristic zero. Moreover, we write $[A,B]$ for the commutator of two matrices $A$ and $B$ and we define
\begin{equation*}
H(M):= \frac{ \tr[M]^2 - \tr[M^2]}{2} \, ,
\end{equation*}
where $\tr[M]$ denotes the trace of a matrix $M$. We can now state the aforementioned result, namely Theorem 7 of \cite{AslaksenSletsjoe_WrongPaper_2009}.
\begin{specthm2*}\label{thm:wrong}
Let $A,B\in M_3$. Then
\begin{align}\label{eq:false-expression}
\det(I,A,A^2,B,B^2,AB,BA,[A,[A,B]],[B,[B,A]])=9\det [A,B]H([A,B]),
\end{align}
so if $\det [A,B]\neq0$ and $H([A,B])\neq 0$, then
\begin{align}
\{I,A,A^2,B,B^2,AB,BA,[A,[A,B]],[B,[B,A]]\}
\end{align}
form a basis for $M_3$.
\end{specthm2*}
Note that this result intends to provide a sufficient condition for a pair of matrices $\left\lbrace A, B \right\rbrace$ to generate the full algebra $M_3$, as well as construct a basis for the algebra from words in such matrices. In particular, if the result was correct, it would yield the fact that for pairs $\left\lbrace A, B \right\rbrace$ such that $\det [A,B]\neq0$ and $H([A,B])\neq 0$, the necessary length of words to generate the full $M_3$ is 3, improving thus for this subclass of pairs the bound of $2n-2$ obtained in \cite{LongstaffNiemeyerPanaia_LengthDimension5_2006}. Moreover, such matrices are in particular generic, so they belong to the case studied in \cite{KlepSpenko_SweepingWords_2016}, in which the tighter bound for the length of words in dimension $3$ is $2 \lceil \log_2 (3) \rceil =4$. Thus, Theorem 7 would also provide an improvement to the latter result in dimension $3$.
To show that Equation \eqref{eq:false-expression} is false, and thus Theorem 7 does not hold in general, it is enough to construct a counterexample. For any $\delta >0 , \, \varepsilon>0$, consider the following pair of matrices:
\begin{align}
A=\begin{pmatrix}
1&0&1\\0&-1&\delta\\1&0&1
\end{pmatrix},\quad
B=\begin{pmatrix}
-1&0&1\\0&1&0\\1&\varepsilon&-1
\end{pmatrix}.
\end{align}
Then, a short computation yields the following:
\begin{align}
&\det(I,A,A^2,B,B^2,AB,BA,[A,[A,B]],[B,[B,A]])=-27 \, \delta^5 \varepsilon^5+9 \, \delta^6 \varepsilon^6 \, ,\\
&9\det [A,B]H([A,B])=27 \, \delta^3 \varepsilon^3- 9 \, \delta^4 \varepsilon^4.
\end{align}
Clearly, we can conclude that, for $A$ and $B$ as above, the left- and right-hand sides of Equation \eqref{eq:false-expression} are different, although we appreciate that both vanish if, and only if, either $\varepsilon$ or $\delta$ is zero. Thus, despite being different, both sides of the equation present a strong correlation. This is noticeable in Figure \ref{commenttest}.
\begin{figure}
\caption{Test for 5000 randomly chosen $3\times3$ real matrix pairs. There exists no line such that every point fits to it, leading to the result that the factorization cannot be exact. However, there exists a strong correlation between the absolute values of both sides of Equation \eqref{eq:false-expression}.}
\label{fig:numericaltest}
\label{commenttest}
\end{figure}
In Figure \ref{fig:numericaltest}, we generate 5000 random matrix pairs and compare their value when we insert the pair into the LHS and the RHS of Equation \eqref{eq:false-expression}. The points do not lie on some line $f(x)=ax+b$, especially not on $f(x)=x$. Therefore, we exclude the possibility that the incorrectness is caused by the improper calculation of coefficient $9$ in the RHS of Equation \eqref{eq:false-expression}. Moreover, if we restrict the data to the first and the third quadrants close to zero, the best fitted line has slope $a\approx9$ and intercept $b\approx 0$. For example, for the data in Figure \ref{fig:numericaltest}, the program returns $f(x)\approx9.3138x+0.0044$ for the fitted line, illustrated by the green line. If we consider the data that is further from the origin, the absolute value of the slope decreases and the points are not so concentrated as the image near 0, which is similar to the shape ``x''.
To conclude, we have shown that Theorem 7 in \cite{AslaksenSletsjoe_WrongPaper_2009} is false. For a correct upper bound on the necessary length of words on a pair to generate $M_3$, we refer the reader to \cite{LongstaffNiemeyerPanaia_LengthDimension5_2006} for the general case and to \cite{KlepSpenko_SweepingWords_2016} for the generic case.
\noindent
\hrulefill
After the completion of this note, we realized that for Equation \eqref{eq:false-expression} to be true, there were a factor $\det [A,B]$ and a minus sign missing in the right hand side. Therefore, we present the correct form of Theorem 7 below.
\begin{specthm*}\label{thm:true}
Let $A,B\in M_3$. Then
\begin{align}\label{eq:true-expression}
\det(I,A,A^2,B,B^2,AB,BA,[A,[A,B]],[B,[B,A]])=-9(\det [A,B])^2 H([A,B]),
\end{align}
so if $\det [A,B]\neq0$ and $H([A,B])\neq 0$, then
\begin{align}
\{I,A,A^2,B,B^2,AB,BA,[A,[A,B]],[B,[B,A]]\}
\end{align}
form a basis for $M_3$.
\end{specthm*}
This yields the fact that whenever $\det [A,B]\neq0$ and $H([A,B])\neq 0$, the set composed of words on matrices $A, B$, which was presented in the original paper \cite{AslaksenSletsjoe_WrongPaper_2009}, actually spans the whole matrix algebra $M_3$. In particular, the necessary length of words to generate the full $M_3$ is 3. As mentioned above, this indeed improves the bound $2n-2$ obtained in \cite{LongstaffNiemeyerPanaia_LengthDimension5_2006}, as well as the bound $2 \lceil \log_2 (n) \rceil$ provided in \cite{KlepSpenko_SweepingWords_2016} for generic matrices in dimension $n=3$.
\noindent {\it Acknowledgments.}
We thank Michael Wolf for his comments and suggestions. This work has been partially supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy EXC-2111 390814868. YJ acknowledges support from the TopMath Graduate Center of the TUM Graduate School and the TopMath Program of the Elite Network of Bavaria.
\end{document} |
\begin{document}
\title{Sampling Arbitrary Subgraphs Exactly Uniformly in Sublinear Time}
\begin{abstract}
We present a simple sublinear-time algorithm for sampling an arbitrary subgraph $H$ \emph{exactly uniformly} from a graph $G$ with $m$ edges, to which the algorithm has access by performing the following types of queries: (1) degree queries, (2) neighbor queries, (3) pair queries and (4) edge sampling queries. The query complexity and running time of our algorithm are $\tilde{O}(\min\{m, \frac{m^{\rho(H)}}{\# H}\})$ and $\tilde{O}(\frac{m^{\rho(H)}}{\# H})$, respectively, where $\rho(H)$ is the fractional edge-cover of $H$ and $\# H$ is the number of copies of $H$ in $G$. For any clique on $r$ vertices, i.e., $H=K_r$, our algorithm is almost optimal as any algorithm that samples an $H$ from any distribution that has $\Omega(1)$ total probability mass on the set of all copies of $H$ must perform $\Omega(\min\{m, \frac{m^{\rho(H)}}{\# H\cdot (cr)^r}\})$ queries.
Together with the query and time complexities of the $(1\pm \varepsilon)$-approximation algorithm for the number of subgraphs $H$ by Assadi, Kapralov and Khanna~\cite{assadi2018simple} and the lower bound by Eden and Rosenbaum \cite{total-lower-bound} for approximately counting cliques, our results suggest that in our query model, approximately counting cliques is ``equivalent to'' exactly uniformly sampling cliques, in the sense that the query and time complexities of exactly uniform sampling and randomized approximate counting are within polylogarithmic factor of each other. This stands in interesting contrast to an analogous relation between approximate counting and almost uniformly sampling for self-reducible problems in the polynomial-time regime by Jerrum, Valiant and Vazirani~\cite{jerrum1986random}.
\end{abstract}
\section{Introduction}
``\emph{Given a huge real graph, how can we derive a representative sample?}'' is a first question asked by Leskovec and Faloutsos in their seminal work on graph mining~\cite{leskovec2006sampling}, which is motivated by the practical concern that most classical graph algorithms are too expensive for massive graphs (with millions or billions of vertices), and graph sampling seems essential for lifting the dilemma.
In this paper, we study the question of how to sample a subgraph $H$ uniformly at random from the set of all subgraphs that are isomorphic to $H$ contained in a large graph $G$ in \emph{sublinear time}, where the algorithm is given query access to the graph $G$. That is, the algorithm only probes a small portion of the graph while still returning a sample with provable performance guarantee.
Such a question is relevant for statistical reasons: we might need a few representative and unbiased motifs from a large network \cite{triangle_counting_app_3}, or edge-colored subgraphs in a structured database~\cite{atserias2008size}, in a limited time. A subroutine for extracting a uniform sample of $H$ is also useful in streaming (e.g., \cite{ahmed2017sampling}), parallel and distributed computing (e.g., \cite{feng2017can}) and other randomized graph algorithms (e.g., \cite{hu2013survey}).
Currently, our understanding of the above question is still rather limited. Kaufman, Krivelevich and Ron gave the first algorithm for sampling an edge almost uniformly at random \cite{edge_sampling_begin}. Eden and Rosenbaum gave a simpler and faster algorithm~\cite{edge_sampling_1}. Both works considered the \emph{general graph model}, where an algorithm is allowed to perform the following queries, where each query will be answered in constant time:
\begin{description}
\item[uniform vertex query] the algorithm can sample a vertex uniformly at random;
\item[degree query] for any vertex $v$, the algorithm can query its degree $d_v$;
\item[neighbor query] for any vertex $v$ and index $i\leq d_v$, the algorithm can query the $i$-th neighbor of $v$;
\item[pair query] for any two vertices $u,v$, the algorithm can query if there is an edge between $u,v$.
\end{description}
In \cite{edge_sampling_1}, Eden and Rosenbaum gave an algorithm that takes as input a graph with $n$ vertices and $m$ edges (where $m$ is unknown to the algorithm), uses $\tilde{O}(n/\sqrt{m})$ queries\footnote{Throughout the paper, we use $\tilde{O}(\cdot)$ to suppress any dependencies on the parameter $\varepsilon$, the size of the corresponding subgraph $H$ and $\log(n)$-terms.} in expectation and returns an edge $e$ that is sampled with probability $(1\pm \varepsilon)/m$ (i.e., almost uniformly at random). This is almost optimal in the sense that any algorithm that samples an edge from an almost-uniform distribution requires $\Omega(n/\sqrt{m})$ queries. In their sublinear-time algorithm for approximately counting the number of cliques \cite{eden2018approximating} (see below), Eden, Ron and Seshadhri use a procedure to sample cliques incident to a suitable vertex subset $S$ almost uniformly at random. However, for an arbitrary subgraph $H$, it is still unclear how to obtain an almost uniform sample in sublinear time.
\subparagraph{Approximate counting in sublinear-time} In contrast to sampling subgraphs (almost) uniformly at random, the very related line of research on approximate counting the number of subgraphs in sublinear time has made some remarkable progress in the past few years. Feige gave a $(2+\varepsilon)$-approximation algorithm with $\tilde{O}(n/\sqrt{m})$ queries for the average degree, which is equivalent to estimating the number of edges, of a graph in the model that only uses vertex sampling and degree queries \cite{edge_counting_app_1}. He also showed that any $(2-o(1))$-approximation for the average degree using only vertex and degree queries requires
$\Omega(n)$ queries. Goldreich and Ron then gave a $(1+\varepsilon)$-approximation algorithm with $\tilde{O}(n/\sqrt{m})$ queries for the average degree in the model that allows vertex sampling, degree and neighbor queries~\cite{edge_counting_0}.
Eden et al. recently gave the first sublinear-time algorithm for $(1\pm \varepsilon)$-approximating the number of triangles~\cite{triangle_counting_1}. Later, Eden, Ron and Seshadhri generalized it to $(1\pm \varepsilon)$-approximating the number of $r$-cliques $K_r$ \cite{eden2018approximating} in the general graph model that allows vertex sampling, degree, neighbor and vertex-pair queries. The query complexity and running time of their algorithms for $r$-clique $K_r$ counting are $\tilde{O}(\frac{n}{(\# {K_r})^{1/3}} +\min\{m,\frac{m^{r/2}}{\# {K_r}}\})$ and $\tilde{O}(\frac{n}{(\# {K_r})^{1/3}} +\frac{m^{r/2}}{\# {K_r}})$ respectively, for any $r\geq 3$, where $\# {K_r}$ is the number of copies of $K_r$ in $G$. Furthermore, in both works it was proved that the query complexities of the respective algorithms are optimal up to polylogarithmic dependencies on $n, \epsilon$ and $r$.
Later, Assadi et al.~\cite{assadi2018simple} gave a sublinear-time algorithm for $(1\pm \varepsilon)$-approximating the number of copies of an arbitrary subgraph $H$ in the \emph{augmented general graph model}~\cite{AliSub17}. That is, besides the aforementioned vertex sampling, degree, neighbor and pair queries, the algorithm is allowed to perform the following type of queries:
\begin{description}
\item[edge sampling query] the algorithm can sample an edge uniformly at random.
\end{description}
The algorithm in \cite{assadi2018simple} uses $\tilde{O}(\min\{m, \frac{m^{\rho(H)}}{\# H}\})$ queries and $\tilde{O}(\frac{m^{\rho(H)}}{\# H})$ time, where $\rho(H)$ is the fractional edge-cover of $H$ and $\# H$ is the number of copies of $H$ in $G$. For the special case $H=K_r$, their algorithm performs $\tilde{O}(\min\{m,\frac{m^{r/2}}{\# {K_r}}\})$ queries and runs in $\tilde{O}(\frac{m^{r/2}}{\# {K_r}})$ time, which do not have the additive term $\frac{n}{(\# K_r)^{1/3}}$ in the query complexity and running time of the algorithms in \cite{triangle_counting_1,eden2018approximating}.
Eden and Rosenbaum provided simple proofs that most of the aforementioned results are nearly optimal in terms of their query complexities by reducing from communication complexity problems~\cite{total-lower-bound}. Further investigation of sampling an edge and estimating subgraphs in low arboricity graphs~\cite{eden_et_al:LIPIcs:2019:10628,ERS20} and approximately counting stars~\cite{AliSub17} has also been performed.
\subparagraph{Relation of approximate counting and almost uniform sampling}
One of our original motivations is to investigate the relation of approximate counting and almost uniform sampling in the sublinear-time regime. That is, we are interested in the question whether \emph{in the sublinear-time regime, is almost uniform sampling ``computationally comparable'' to approximate counting, or is it strictly harder or easier, in terms of the query and/or time complexities for solving these two problems?} Indeed, in the polynomial-time regime, Jerrum, Valiant and Vazirani showed that for self-reducible problems (e.g., counting the number of perfect matchings of a graph), approximating counting is ``equivalent to'' almost uniform sampling~\cite{jerrum1986random}, in the sense that the time complexities of almost uniform sampling and randomized approximate counting are within polynomial factor of each other. Such a result has been instrumental for the development of the area of approximate counting (e.g., \cite{sinclair1989approximate}). It is natural to ask if similar relations between approximate counting and sampling hold in the sublinear-time regime. In \cite{eden_et_al:LIPIcs:2019:10628}, the authors mentioned that in the general graph model, the query complexities of approximate counting and almost uniformly sampling \emph{edges} are the same (up to $\log n, 1/\varepsilon$ dependencies), while there exist constant-arboricity graphs from which sampling \emph{triangles} almost uniformly requires $\Omega(n^{1/4})$ queries while approximately counting triangles can be done with $\tilde{O}(1)$ queries.
\subsection{Our Results}
In this paper, we consider the problem of (almost) uniformly sampling a subgraph in the augmented general graph model. As mentioned above, this model has been studied in \cite{AliSub17,assadi2018simple}, in which the authors find that ``allowing edge-sample queries results in considerably simpler and more general algorithms for
subgraph counting and is hence worth studying on its own''. On the other hand, allowing edge sampling queries is also natural in models where neighbor queries are allowed, e.g., in the well-studied bounded-degree model and the general model: most graph representations that allow efficient neighbor queries (e.g., GEXF, GML or GraphML) store edges in linear data structures, which often allows efficient (nearly) uniformly sampling of edges. We refer to \cite{assadi2018simple} for a deeper discussion on allowing edge sampling queries from both theoretical and practical perspectives.
We prove the following upper bound on sampling subgraphs (exactly) uniformly at random and provide a corresponding algorithm in \cref{sec:upperbound}.
\begin{theorem}\label{thm:main}
Let $H$ be an arbitrary subgraph of constant size. There exists an algorithm in the augmented general graph model that given query access to the input graph $G=(V,E)$ and the number of edges $m$ in $G$ and uses $\tilde{O}(\min\{m, \frac{m^{\rho(H)}}{\# H}\})$ queries in expectation, and with probability at least $2/3$, returns a copy of $H$, if $\# H>0$. Each returned $H$ is sampled according to the uniform distribution over all copies of $H$ in $G$. The expected running time of the algorithm is $\tilde{O}(\frac{m^{\rho(H)}}{\# H})$.
\end{theorem}
We stress that our sampler is an exactly uniform sampler, i.e., the returned $H$ is sampled from the uniform distribution, while to the best of our knowledge, the previous sublinear-time subgraph sampling algorithms are only \emph{almost} uniform samplers. That is, they return an edge or a clique that is sampled from a distribution that is \emph{close} to the corresponding uniform distribution. Indeed, it has been cast as an open question if it is possible to sample an edge exactly uniformly at random in the general graph model in \cite{edge-sampling}. Furthermore, we remark that our algorithm actually does not perform any uniform vertex sampling query, a feature that might be preferable in practice.
Our algorithm is based on one idea from \cite{assadi2018simple} (see also \cite{atserias2008size}) that uses the fractional edge cover to partition a subgraph $H$ into stars and odd cycles (i.e., Lemma \ref{decomposition-lemma}). The authors of \cite{assadi2018simple} also provided a scheme called \emph{subgraph-sampler trees} for recursively sampling stars and odd cycles that compose $H$, while the resulting distribution is not (almost) uniform distribution. Instead, we show that one can sample stars and odd cycles in parallel (or, more precisely, sequentially but independently of each other) and check whether they form a copy of $H$.
To complement our algorithmic result, we give a lower bound on the query complexity for sampling a clique in sublinear time by using a simple reduction from \cite{total-lower-bound}. We show the following theorem and present its proof in \cref{sec:lowerbound}.
\begin{theorem}\label{thm:lowerbound}
Let $r\geq 3$ be an integer. Suppose $\mathcal{A}$ is an algorithm in the augmented general graph model that for any graph $G=(V,E)$ on $n$ vertices and $m$ edges returns an arbitrary $r$-clique $K_r$, if one exists; furthermore, each returned clique $K_r$ is sampled according to a distribution $\mathcal{D}$, such that the total probability mass of $\mathcal{D}$ on the set of all copies of $K_r$ is $\Omega(1)$. Then $\mathcal{A}$ requires $\Omega(\min\{m, \frac{m^{r/2}}{\# K_r\cdot (cr)^r}\})$ queries, for some absolute constant $c>0$.
\end{theorem}
Note that the above theorem gives a lower bound for sampling $K_r$ from almost every non-trivial distribution $\mathcal{D}$. In particular, it holds if $\# K_r>0$ and $\mathcal{D}$ is a distribution that is only supported on the set of all copies of $K_r$, e.g., the (almost) uniform distribution on these copies. Together with the query and time complexities of the $(1\pm \varepsilon)$-approximation algorithm for the number of subgraphs $H$ by Assadi, Kapralov and Khanna~\cite{assadi2018simple} and the lower bound by Eden and Rosenbaum \cite{total-lower-bound} for approximately counting cliques, our \cref{thm:main,thm:lowerbound} imply that in the augmented general graph model, \emph{approximately} counting the number of cliques is equivalent to \emph{exactly} sampling cliques in the sense that the query and time complexities of them are within a polylogarithmic factor of each other.
\subparagraph{Future Work} Considering real-world applications, it would be interesting to relax the guarantees of the queries available to the algorithm. In particular, one may not be able to sample edges \emph{exactly} uniformly at random, but only \emph{approximately} uniformly. For example, there exist works that consider weaker query models in which they sample vertices or edges almost uniformly at random by performing random walks from some fixed vertex (see, e.g., \cite{Ben-Hamou2018,Chiericetti2016}). Implementing these changes in the model would result in a weaker guarantee for the distribution of sampled subgraphs in \cref{thm:main} but would be potentially more practical.
\section{Preliminaries}
Let $G=(V,E)$ be a simple graph with $|V|=n$ vertices and $|E|=m$ edges. For a vertex $v \in V$, we denote by $d_v$ the degree of the vertex, by $\Gamma_v$ the set of all the neighbors of $v$, and by $E_v$ the set of edges incident to $v$. We fix a total order on vertices denoted by $\prec$ as follows:
\begin{definition}
For any two vertices $u$ and $v$, we say that $u \prec v$ if $d_u < d_v$, or $d_u = d_v$ and $u$ appears before $v$ in the lexicographic order.
\end{definition}
For any two vertices, we denote by $\Gamma_{uv}$ the set of the shared neighbors of $u$ and $v$ that are larger than $u$ with respect to ``$\prec$'', i.e., $\Gamma_{uv} = \{ w \mid w \in \Gamma_u \cap \Gamma_v \wedge u \prec w \}$. Sometimes, we view our graph $G=(V,E)$ as a directed graph $(V,\vec{E})$ by treating each undirected edge $e=\{u,v\}\in E$ as two directed edges $\vec{e}_1=(u,v)$ and $\vec{e}_2=(v,u)$. The following was proven in \cite{triangle_counting_1}.
\begin{lemma}[\cite{triangle_counting_1}]
\label{lemma: tu-upper-bound}
For any vertex $v$, the number of neighbors $w$ of $v$ such that $v\prec w$ is at most $\sqrt{2 m}$.
\end{lemma}
Given a graph $H$, we say that a subgraph $H'$ of $G$ is a \emph{copy} or an \emph{instance} of $H$ if $H'$ is isomorphic to $H$. An isomorphism-preserving mapping from $H$ to a copy of $H$ in $G$ is called an \emph{embedding} of $H$ in $G$.
\subparagraph{Edge Cover and Graph Decomposition}
We use the following definition of the fractional edge cover of a graph and a decomposition result based on it by Assadi, Kapralov and Khanna~\cite{assadi2018simple}.
\begin{definition}[Fractional Edge-Cover Number]
A fractional edge-cover of $H(V_H,E_H)$ is a mapping $\psi: E_H \rightarrow [0,1]$ such that for each vertex $v\in V_H$, $\sum_{e\in E_H, v\in e} \psi(e)\geq 1$. The fractional edge-cover number $\rho(H)$ of $H$ is the minimum value of $\sum_{e\in E_H}\psi(e)$ among all fractional edge-covers $\psi$.
\end{definition}
Let $C_k$ denote the cycle of length $k$. Let $S_k$ denote a star with $k$ petals, i.e., $S_k = (\{u, v_1, \ldots, v_k\}, \{\{u, v_i\} \mid i \in [k]\})$. Let $K_k$ denote a clique on $k$ vertices. It is known that $\rho(C_{2k+1})=k+1/2$, $\rho(S_k)=k$ and $\rho(K_k)=k/2$.
\begin{lemma}[\cite{assadi2018simple}]
\label{decomposition-lemma}
Any subgraph $H$ can be decomposed into a collection of vertex-disjoint odd cycles $\overline{C_1},\ldots,\overline{C_o}$ and star graphs $\overline{S_1},\ldots,\overline{S_s}$ such that
$$\rho(H)=\sum_{i=1}^o\rho(\overline{C_i})+\sum_{j=1}^s\rho(\overline{S_j}).$$
\end{lemma}
By a result of Atserias, Grohe and Marx \cite{atserias2008size}, the number of instances of $H$ in a graph $G$ with $m$ edges is $O(m^{\rho(H)})$.
\section{Sampling an Arbitrary Subgraph $H$}
\label{sec:upperbound}
In this section, we present sampling algorithms for odd cycles and stars and show how to combine them to obtain a sampling algorithm for arbitrary subgraphs.
\subsection{Sampling an Odd-Length Cycle}
\begin{algorithm}
\caption{Sampling a wedge}
\label{wedge-sample}
\begin{algorithmic}[1]
\Procedure{\textsc{SampleWedge}}{$G,u,v$}
\If{$d_u \leq \sqrt{2 m}$} \label{wedge-degree}
\State sample a number $i \in \{ 1, \ldots, \sqrt{2 m} \}$ uniformly at random
\If{$i > d_u$}
\State \Return \textbf{Fail}
\EndIf
\State $w$ $\leftarrow$ $i^{th}$ neighbor of $u$
\Else
\State
sample a vertex $w$ with probability proportional to its degree
\State sample a number $t \in [0,1]$ uniformly at random
\If{$t > \sqrt{2m} / d_w$}
\State \Return \textbf{Fail}
\EndIf
\EndIf
\State \Return $w$
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{algorithm}
\caption{Sampling a cycle of length $2k+1$}
\label{odd-cycle-sample}
\begin{algorithmic}[1]
\Procedure{\textsc{SampleOddCycle}}{$G,2k+1$}
\State sample $k$ directed edges $\diedge{u_1,v_1}, \ldots, \diedge{u_k,v_k}$ u.a.r. and i.i.d. \label{odd-cycle-loop}
\State $w$ $\leftarrow$ \textsc{SampleWedge}($G,u_1,v_{k}$)
\If{$u_1 \prec w \prec v_1$, and $\forall i > 1 : u_1 \prec u_i, v_i$} \label{path-check}
\State \Return $\{(u_1,v_1),\ldots,(u_k,v_k)\}\cup \{ (v_{k},w), (w,u_1) \}$
\EndIf
\State \Return\textbf{Fail}
\EndProcedure
\end{algorithmic}
\end{algorithm}
We describe our algorithm \textsc{SampleOddCycle} for sampling a uniformly random cycle of odd length $2k+1$. For any instance $C$ of $C_{2k+1}$ in the input graph, our goal is to guarantee that it will be sampled with probability $\frac{1}{(2m)^{k+1/2}}$. Let $e_1, \ldots, e_{2k+1}$ be a sequence of edges that represents a cycle of length $2k+1$. While we can use edge sampling to sample every second edge of the first $2k$ edges sequentially, i.e., $e_1, e_3, \ldots, e_{2k-1}$, and query the edges in between, i.e., $e_2, e_4, \ldots, e_{2k-2}$, by vertex pair queries, we use a different strategy to sample $e_{2k}$ and $e_{2k+1}$. Let $\{u,v\} = e_1$. If $u$ has low degree, i.e., $d_u \leq \sqrt{2m}$, we can afford to sample each neighbor of $u$ with probability $1 / \sqrt{2m}$ and fail if no neighbor is sampled. If $d_u > \sqrt{2m}$, we reduce the number of candidate neighbors to $\sqrt{2m}$ and sample from the remaining candidates uniformly at random. To this end, we define a unique embedding of $C$ such that $u$ is the smallest vertex according to the order ``$\prec$''. By \cref{lemma: tu-upper-bound}, it follows that the number of candidate neighbors is at most $\sqrt{2m}$. Another reason to accept exactly one embedding of $C$ is that there exists a linear number of automorphisms for every cycle. If we accepted every embedding, bounding the probability that every instance of $C_{2k+1}$ is sampled \emph{exactly} uniformly would be hard, as some instance might be sampled less likely because, e.g., its edges participate in many overlapping cycles.
In particular, we sample $k$ directed edges $(u_1, v_1), \ldots, (u_k, v_k)$ independently and uniformly at random and call \textsc{SampleWedge} on $u_1, v_k$. Then, we require that $u_1$ is the (unique) smallest vertex according to the order ``$\prec$'' among all $u_i, v_i, i \geq 1$ and $w$. This leaves only two orientations of the cycle that are distinguished by $w \prec v_1$ and $v_1 \prec w$. We (arbitrarily) choose $w \prec v_1$. If any of these requirements is not met, we have not sampled the uniquely defined embedding we are looking for, and the algorithm fails.
\begin{lemma}
\label{odd-cycle-sampler-lemma}
For any instance of an odd cycle $C_{2k+1}$ in $G$, the probability that it will be returned by \textsc{SampleOddCycle}($G,2k+1$) is $\frac{1}{(2m)^{k+1/2}}$.
\end{lemma}
\begin{proof}
Let $\mathcal{C}_{2k+1}$ be any instance of a cycle of odd length $2k+1$ in $G$. Let $x_0$ be the smallest vertex on $\mathcal{C}_{2k+1}$ according to the total order ``$\prec$''. Let $x_1, x_{2k}$ be the two neighbors of $x_0$ on $\mathcal{C}_{2k+1}$ such that $x_1\prec x_{2k}$. Then, we let $x_i$ denote the vertices on $\mathcal{C}_{2k+1}$ such that $(x_i, x_{i+1}) \in E(\mathcal{C}_{2k+1})$ for $0\leq i\leq 2k-1$ and $(x_{2k},x_0)\in E(\mathcal{C}_{2k+1})$. Note that for any $\mathcal{C}_{2k+1}$, there is a \emph{unique} way of mapping its vertices to $x_i$, for $0\leq i\leq 2k$.
Thus, $\textsc{SampleOddCycle}$ returns $\mathcal{C}_{2k+1}$ if and only if
\begin{enumerate}
\item $u_1 = x_0$ and $v_1 = x_{2k}$; \label{odd-mapping-a}
\item $u_i=x_{2k-2i+3}$ and $v_i=x_{2k-2i+2}$ for $2\leq i\leq k$; \label{odd-mapping-b}
\item \textsc{SampleWedge}($G,u_{1},v_k$) returns $x_1$. \label{odd-mapping-c}
\end{enumerate}
Event \ref{odd-mapping-a} occurs with probability $1/(2m)$, and event \ref{odd-mapping-b} occurs with probability $1/(2m)^{k-1}$, as each directed edge is sampled with probability $1/(2m)$.
Now we bound the probability of event \ref{odd-mapping-c}. In the call to \textsc{SampleWedge}, let $u := u_1$ and $v := v_k$, which satisfies that $u \prec v$. We first note that if $d_u \leq \sqrt{2m}$ in \textsc{Sample\-Wedge}($G,u_{1},v_k$), then the vertex $x_1$ will be sampled with probability $1 / \sqrt{2m}$. Now we consider the case that $d_u > \sqrt{2m}$. Since $u$ is the smallest vertex of the cycle with respect to ``$\prec$'', it follows that $d_{x_1} \geq d_u > \sqrt{2m}$. In this case, we sample $x_1$ with probability $\frac{d_{x_1}}{2m} \cdot \Pr\left[t \leq \frac{\sqrt{2m}}{d_{x_1}}\right] = \frac{d_{x_1}}{2m} \cdot \frac{\sqrt{2m}}{d_{x_1}} = \frac{1}{\sqrt{2m}}$. Thus in both cases, the probability that event \ref{odd-mapping-c} occurs is $\frac{1}{\sqrt{2m}}$.
Therefore, the probability that \textsc{SampleOddCycle} returns $\mathcal{C}_{2k+1}$ is $\frac{1}{\sqrt{2m}} \cdot \frac{1}{2m} \cdot (\frac{1}{2m})^{k-1}=\frac{1}{(2m)^{k+1/2}}$. \qedhere
\end{proof}
\subsection{Sampling a Star}
Similarly to odd cycles, we observe that every $k$-star admits an exponential number of automorphisms. Therefore, we enforce a unique embedding of every instance of a $k$-star in our sampling algorithm \textsc{SampleStar}. Let $e_1, \ldots, e_k$ be the petals of an instance of a $k$-star. We sample $e_1, \ldots, e_k$ sequentially. If these edges form a star, we output it only if the leaves were sampled in ascending order with respect to ``$\prec$''.
\begin{algorithm}[H]
\caption{Sampling a star with $k$ petals}
\label{star-sample}
\begin{algorithmic}[1]
\Procedure{SampleStar}{$G,k$}
\State Sequentially sample $k$ directed edges $ \{\diedge{u_1, v_1}, \ldots, \diedge{u_{k}, v_{k}}\}$ u.a.r. and i.i.d.
\If{$u_1 = u_2 = \ldots = u_k$ and $v_1 \prec v_2 \prec \ldots \prec v_k$
}
\State \Return $(u_1, v_1, \ldots, v_k)$
\EndIf
\State \Return \textbf{Fail}
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{lemma}
\label{star-sampler-lemma}
For any instance of a $k$-star $S_k$ in $G$, the probability that it will be returned by the algorithm \textsc{SampleStar}($G,k$) is $\frac{1}{(2m)^k}$.
\end{lemma}
\begin{proof}
Consider any instance of $S_k$ with root $x$ and petals $y_1, \ldots, y_k$ such that $y_1 \prec \ldots \prec y_k$.
Note that it will be returned by \textsc{SampleStar} if and only if all the directed edges $\diedge{x,y_1},\ldots,\diedge{x,y_k}$ are sequentially sampled, which occurs with probability $1 /(2m)^k$.
\end{proof}
\subsection{Sampling $H$}
Let $H$ be a subgraph. It can be decomposed
into collections of $o$ odd cycles $\overline{C_i}$ and $s$ stars $\overline{S_j}$ as given in \cref{decomposition-lemma}. We say that $H$ has a (decomposition) \emph{type} $\overline{T}=\{\overline{C_1},\ldots,\overline{C_o},\overline{S_1},\ldots,\overline{S_s}\}$.
\begin{definition}
Given a graph $G$, for each potential \emph{instance} $\mathcal{H}$ of $H$, we say that $\mathcal{H}$ can be decomposed into \emph{configurations} $\mathcal{T}=\{\mathcal{C}_1,\ldots,\mathcal{C}_o,\mathcal{S}_1,\ldots,\mathcal{S}_s\}$ with respect to type $\overline{T}=\{\overline{C_1},\ldots,\overline{C_o},\overline{S_1},\ldots,\overline{S_s}\}$, if
\begin{enumerate}
\item $\mathcal{C}_i \cong \overline{C_i}$ for any $1\leq i\leq o$, and $\mathcal{S}_j \cong \overline{S_j}$ for any $1\leq j\leq s$;
\item all the remaining edges of $H$ between the vertices specified in $\mathcal{T}$ are present in $G$.
\end{enumerate}
We let $f_{\overline{T}}(H)$ denote the number of all possible configurations $\mathcal{T}$ into which $H$ can be decomposed with respect to $\overline{T}$.
\end{definition}
\begin{algorithm}[H]
\caption{Sampling a copy of subgraph $H$}
\label{subgraph-sample}
\begin{algorithmic}[1]
\Procedure{SampleSubgraph}{$G, H$}
\State{Let $\overline{T}=\{\overline{C_1},\ldots,\overline{C_o},\overline{S_1},\ldots,\overline{S_s}\}$ denote a (decomposition) type of $H$. }
\ForAll{$i=1\ldots o$}
\If{ \textsc{SampleOddCycle($G, \lvert E(\overline{C}_i) \rvert$)} returns a cycle $\mathcal{C}$}
\State $\mathcal{C}_i \gets \mathcal{C}$\label{alg:cycle_H}
\Else
\State \Return \textbf{Fail}
\EndIf
\EndFor
\ForAll{$j =1\ldots s$}
\If{\textsc{SampleStar($G,\lvert V(\overline{S}_j) \rvert - 1$)} returns a star $\mathcal{S}$}
\State $\mathcal{S}_j \gets \mathcal{S}$\label{alg:star_H}
\Else
\State \Return \textbf{Fail}
\EndIf
\EndFor
\State Query all edges $(\bigcup_{i \in [o]} V(\mathcal{C}_i) \cup \bigcup_{j \in [s]} V(\mathcal{S}_j))^2$
\If{$S := (\mathcal{C}_1, \ldots, \mathcal{C}_o, \mathcal{S}_1, \ldots, \mathcal{S}_s)$ forms a copy of $H$}
\State flip a coin and with probability $\frac{1}{f_{\overline{T}}(H)}$: \Return $S$ \label{alg:occurrence_H}
\EndIf
\State \Return \textbf{Fail}
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{lemma}
\label{subgraph-sampler-lemma}
For any instance of a subgraph $H$ in $G$, the probability that it will be returned by the algorithm \textsc{SampleSubgraph}($G, H$) is $\frac{1}{(2m)^{\rho(H)}}$.
\end{lemma}
\begin{proof}
For any instance $\mathcal{H}$ of $H$ in $G$, and any configuration $\mathcal{T}=\{\mathcal{C}_1,\ldots,\mathcal{C}_o,\mathcal{S}_1,\ldots,\mathcal{S}_s\}$ of $\mathcal{H}$ with respect to $\overline{T}$, $\mathcal{H}$ will be returned by \textsc{SampleSubgraph}($G,H$) if and only if
\begin{enumerate}
\item $\mathcal{C}_i$ is returned in \cref{alg:cycle_H} for each $1\leq i\leq o$, and $\mathcal{S}_j$ is returned in \cref{alg:star_H} for any $1\leq j\leq s$;
\item the configuration is returned with probability $\frac{1}{f_{\overline{T}}(H)}$ in \cref{alg:occurrence_H}.
\end{enumerate}
By Lemma \ref{odd-cycle-sampler-lemma}, each $\mathcal{C}_i$ will be returned with probability $\frac{1}{(2m)^{|E(\overline{C_i})|/2}}=\frac{1}{(2m)^{\rho(\overline{C_i})}}$. By Lemma \ref{star-sampler-lemma} each $\mathcal{S}_j$ will be returned with probability $\frac{1}{(2m)^{|V(\overline{S_j})|-1}}=\frac{1}{(2m)^{\rho(\overline{S_j})}}$. Thus, $\mathcal{T}$ will be returned with probability
\begin{equation*}
\prod_{i=1}^o \frac{1}{(2m)^{\rho(\overline{C_i})}}\cdot \prod_{j=1}^s \frac{1}{(2m)^{\rho(\overline{S_j})}} \cdot \frac{1}{f_{\overline{T}}(H)} = \frac{1}{(2m)^{\rho(H)}}\cdot \frac{1}{f_{\overline{T}}(H)}.
\end{equation*}
Finally, since there are $f_{\overline{T}}(H)$ configurations of $\mathcal{H}$ with respect to $\overline{T}$, the instance will be returned with probability $f_{\overline{T}}(H)\cdot \frac{1}{(2m)^{\rho(H)}} \cdot \frac{1}{f_{\overline{T}}(H)} = \frac{1}{(2m)^{\rho(H)}}$. \qedhere
\end{proof}
\subsection{The Final Sampler}
Let $X_H$ be an estimate of $\# H$. Such an estimate can be obtained by, e.g., the subgraph counting algorithm of Assadi, Kapralov and Khanna~\cite{assadi2018simple} in expected time $\tilde{O}(m^{\rho(H)}/\# H)$. We show that by sufficiently many calls to \textsc{SampleSubgraph}, we can obtain a uniformly random sample of an instance of $H$ with constant probability.
\begin{algorithm}[H]
\caption{Sampling a copy of subgraph $H$ uniformly at random}
\label{subgraph-sample-uniformly}
\begin{algorithmic}[1]
\Procedure{SampleSubgraphUniformly}{$G, H, X_H$}
\ForAll{$j=1,\ldots, q=10 \cdot {(2m)}^{\rho(H)}/X_H$}
\State Invoke \textsc{SampleSubgraph}($G,H$)
\If{a subgraph $H$ is returned}
\Return $H$
\EndIf
\EndFor
\State \Return \textbf{Fail}
\EndProcedure
\end{algorithmic}
\end{algorithm}
\begin{lemma}
\label{correctness}
If $\# H \leq X_H\leq 2 \# H$, then Algorithm \textsc{SampleSubgraphUniformly}$(G,{H}, X_H)$ returns a copy $H$ with probability at least $2/3$. The distribution induced by the algorithm is (exactly) uniform over the set of all instances of $H$ in $G$.
\end{lemma}
\begin{proof}
Since $\# H \leq X_H\leq 2 \# H$, the probability that no instance of $H$ is returned in $q=10 \cdot {(2m)}^{\rho(H)}/X_H$ invocations is at most
\[
\left(1-\frac{\# H}{(2m)^{\rho(H)}}\right)^{q} \leq e^{-\frac{\# H}{(2m)^{\rho(H)}} \cdot q} < \frac13
\]
by \cref{subgraph-sampler-lemma}. Let $\mathcal{H}$ be an instance of $H$. By Lemma \ref{subgraph-sampler-lemma}, the probability that \textsc{SampleSubgraph}($H$) returns $\mathcal{H}$ is $\frac{1}{(2m)^{\rho(H)}}$. Thus, the probability that a single invocation of \textsc{SampleSubgraph}$(G,{H})$ successfully outputs an instance of $H$ is
\begin{align*}
\frac{\# H}{(2m)^{\rho(H)}}.
\end{align*}
Conditioned on the event that \textsc{SampleSubgraphUniformly}$(G,{H})$ succeeds, the probability that any specific instance $\mathcal{H}$ will be returned is
\begin{align*}
p_\mathcal{H} = \frac{\frac{1}{(2m)^{\rho(H)}}}{\frac{\# H}{(2m)^{\rho(H)}}} = \frac{1}{\# H}.
\end{align*}
That is, with probability at least $\frac23$, an instance $\mathcal{H}$ is sampled from the uniform distribution over all the instances of $H$ in $G$. \qedhere
\end{proof}
Finally, we prove the expected query and time complexity of \textsc{SampleSubgraphUniformly}.
\begin{lemma}
\label{complexity}
The expected query and time complexity of \textsc{SampleSubgraph}-\\ \textsc{Uniformly}$(G,H,X_H)$ is $O(m^{\rho(H)} / X_H)$.
\end{lemma}
\begin{proof}
The query complexity of \textsc{SampleOddCycle}$(G,2k+1)$ is $O(1)$, as the query complexity of \textsc{SampleWedge}$(G, u_1, v_k)$ is $O(1)$. The query complexity of \textsc{SampleStar}$(G,k)$ is bounded by $k \in O(1)$. It follows that the query complexity of \textsc{SampleSubgraph}$(G, H)$ is at most $(o + s + \lvert H \rvert^2) \cdot O(1) \subseteq \lvert H \rvert^2 \cdot O(1)$. The query complexity of \textsc{SampleSubgraphUniformly}$(G, H)$ is $O({(2m)}^{\rho(H)}/X_H \cdot \lvert H \rvert^2)=\tilde{O}({(2m)}^{\rho(H)}/X_H)$. To bound the running time, we observe that every loop in our algorithm issues at least one query, and we only perform isomorphism checks on subgraphs of constant size. Thus the running time is still $\tilde{O}({(2m)}^{\rho(H)}/X_H)$.
\end{proof}
The proof of \cref{thm:main} follows almost directly from \cref{correctness,complexity}.
\begin{proof}[Proof of \cref{thm:main}]
For the case that $m \geq m^{\rho(H)} / \# H$, the claim follows from \cref{correctness,complexity}. If $m < m^{\rho(H)} / \# H$, we can query the whole graph, which requires $O(m)$ degree and neighbor queries, store the graph and answer the queries of the algorithm from this internal memory.
\end{proof}
Finally, we remark that our algorithm assumes the knowledge of the number of edges $m$. This assumption can be lifted with a slight cost on the query complexity and approximation of our algorithm: for any constant $\varepsilon>0$, one can first find a $(1+\varepsilon)$-approximation of $m$ by making $\tilde{O}(\sqrt{n})$ neighbor queries \cite{edge_counting_0}. We can then guarantee that the returned copy of $H$ is sampled with probability $(1\pm \Theta(\varepsilon))\cdot \frac{1}{\# H}$. That is, we simply replace $m$ by a $(1+\varepsilon)$-approximation of $m$ in \cref{wedge-sample,subgraph-sample-uniformly} and then the performance guarantee of the resulting algorithm directly follows from the previous analysis.
\section{Proof of Theorem \ref{thm:lowerbound}}\label{sec:lowerbound}
In this section, we give the proof of Theorem~\ref{thm:lowerbound}, which follows by adapting the proofs for the lower bounds on the query complexity for approximate counting subgraphs given by Eden and Rosenbaum \cite{total-lower-bound}.
\begin{theorem}[see Theorems 4.7 and B.1 in \cite{total-lower-bound}]
\label{thm:lower-bound-families}
For any choices of $n,m,r,c_r > 0$, there exist families of graphs with $n$ vertices and $m$ edges, $\mathcal{F}_0$ and $\mathcal{F}_1$, such that
\begin{itemize}
\item all graphs in $\mathcal{F}_0$ are $K_r$-free,
\item all graphs in $\mathcal{F}_1$ contain at least $c_r$ copies of $K_r$,
\item and any algorithm in the augmented general graph model that distinguishes a graph $G \in \mathcal{F}_0$ from $G \in \mathcal{F}_1$ with probability $\Omega(1)$ requires $\Omega(\min\{m, m^{r/2} / c_r(cr)^r\})$ queries for some constant $c>0$.
\end{itemize}
\end{theorem}
Now we prove our Theorem \ref{thm:lowerbound}.
\begin{proof}[Proof of Theorem \ref{thm:lowerbound}]
Let $\mathcal{A}$ be an algorithm that for any graph $G=(V,E)$ on $n$ vertices and $m$ edges returns an arbitrary $r$-clique $K_r$, if one exists; and each $K_r$ is sampled according to $\mathcal{D}$, using $f(m, r, \# K_r) \in o(\min\{m, \frac{m^{r/2}}{\# K_r\cdot (cr)^r}\})$ neighbor, degree, pair and edge sampling queries.
Let $n, m, c_r > 0$ and let $\mathcal{F}_0, \mathcal{F}_1$ be the families from \cref{thm:lower-bound-families}. Consider the following algorithm $\mathcal{A}'$: run $\mathcal{A}$ on a graph from $\mathcal{F}_0 \cup \mathcal{F}_1$ and terminate $\mathcal{A}$ if it did not produce a $K_r$ after $f(m, r, c_r)$ queries. If it output a clique, $\mathcal{A}'$ claims that $G \in \mathcal{F}_1$, otherwise it claims that $G \in \mathcal{F}_0$. By the assumption, $\mathcal{A}$ returns a clique after at most $f(m, r, c_r)$ queries with probability $\Omega(1)$ if $G \in \mathcal{F}_1$ because then $G$ contains at least $c_r$ copies of $K_r$ and the probability mass of $\mathcal{D}$ on the set of all copies of $K_r$ is $\Omega(1)$. Otherwise, $G \in \mathcal{F}_0$, which implies that $G$ contains no copy of $K_r$. Therefore, $\mathcal{A}$ cannot output a copy of $K_r$ from $G$.
It follows that $\mathcal{A}'$ can distinguish $\mathcal{F}_0$ and $\mathcal{F}_1$, which is a contradiction to \cref{thm:lower-bound-families}.
\end{proof}
\section*{Acknowledgments}
We would like to thank the anonymous reviewers for their detailed comments. In particular, we would like to thank an anonymous reviewer for their suggestion to improve the presentation of the proof of \cref{thm:lowerbound} and their comment on applications, which we included as future work.
\end{document} |
\begin{document}
\title{Communicating Quantum Processes}
\begin{abstract}
We define a language CQP (Communicating Quantum Processes) for
modelling systems which combine quantum and classical communication
and computation. CQP combines the communication primitives of the
pi-calculus with primitives for measurement and transformation of
quantum state; in particular, quantum bits (qubits) can be transmitted
from process to process along communication channels. CQP has a static
type system which classifies channels, distinguishes between quantum
and classical data, and controls the use of quantum state. We formally
define the syntax, operational semantics and type system of CQP, prove
that the semantics preserves typing, and prove that typing guarantees
that each qubit is owned by a unique process within a system. We
illustrate CQP by defining models of several quantum communication
systems, and outline our plans for using CQP as the foundation for
formal analysis and verification of combined quantum and classical systems.
\end{abstract}
\toappear{An earlier version of this paper is in \emph{Proceedings of
the 2nd International Workshop on Quantum Programming Languages},
Turku Centre for Computer Science General Publication No.\ 33, June 2004.}
\section{Introduction}
\label{sec-intro}
Quantum computing and quantum communication have attracted growing
interest since their inception as research areas more than twenty years
ago, and there has been a surge of activity among computer scientists
during the last few years. While quantum computing offers the prospect
of vast improvements in algorithmic efficiency for certain problems,
quantum cryptography can provide communication systems which
will be secure even in the presence of hypothetical future quantum computers.
As a practical technology, quantum communication has progressed far
more rapidly than quantum computing. Secure communication involving
quantum cryptography has recently been demonstrated in a scenario
involving banking transactions in Vienna
\cite{PoppeA:praqkd}, systems are commercially available from
Id Quantique, MagiQ Technologies and NEC, and plans
have been reported to establish a nationwide quantum communication
network in Singapore. Secure quantum communication will undoubtedly become a
fundamental part of the technological infrastructure of society,
long before quantum computers can tackle computations of a useful size.
However, secure quantum communication is not a solved
problem. Although particular protocols have been mathematically proved
correct (for example, Mayers' analysis
\cite{MayersD:uncsqc} of the Bennett-Brassard protocol (BB84)
\cite{BennettCH:quacpd} for quantum key distribution), this does not
guarantee the security of systems which use them. Experience of
classical security analysis has shown that even if \emph{protocols}
are theoretically secure, it is difficult to achieve robust and
reliable implementations of secure \emph{systems}: security can be
compromised by flaws at the implementation level or at the boundaries
between systems. To address this problem, computer scientists have
developed an impressive armoury of techniques and tools for formal
modelling, analysis and verification of classical security protocols
and communication systems which use them
\cite{RyanP:modasp}. These techniques have been remarkably successful both in
establishing the security of new protocols and in demonstrating flaws
in protocols which had previously been believed to be secure. Their
strength lies in the ability to model \emph{systems} as well as idealized
protocols, and the flexibility to easily re-analyze variations in
design.
Our research programme is to develop techniques and tools for formal
modelling, analysis and verification of quantum communication and
cryptographic systems. More precisely we aim to handle systems which
combine quantum and classical communication and computation, for two
reasons: the first quantum communication systems will implement
communication between classical computers; and protocols such as BB84
typically contain classical communication and computation as well as
quantum cryptography. We cannot simply make use of existing techniques
for classical security analysis: for example, treating the security of
quantum cryptography axiomatically would not permit analysis of the
protocols which \emph{construct} quantum cryptographic
keys. Furthermore, the inherently probabilistic nature of quantum
systems means that not all verification consists of checking absolute
properties; we need a probabilistic modelling and analysis framework.
Any formal analysis which involves automated tools requires a
modelling language with a precisely-defined semantics. The purpose of
this paper is to define a language, CQP (Communicating Quantum
Processes), which will serve as the foundation for the programme
described above. CQP combines the communication primitives of the
pi-calculus \cite{MilnerR:calmpfull,SangiorgiD:pictm} with primitives
for transformation and measurement of quantum state. In particular,
qubits (quantum bits, the basic elements of quantum data) can be
transmitted along communication channels. In
Section~\ref{sec-examples} we introduce CQP through a series of
examples which cover a wide spectrum of quantum information processing
scenarios: a quantum coin-flipping game; a quantum communication
protocol known as teleportation; and a quantum
bit-commitment protocol. The latter will lead naturally to a model of the
BB84 quantum key-distribution protocol in future work. In
Section~\ref{sec-syntax} we formalize the syntax of CQP and define an
operational semantics which combines non-determinism (arising in the
same way as in pi-calculus) with the probabilistic results of quantum
measurements. In Section~\ref{sec-types} we define a static type
system which classifies data and communication channels, and crucially
treats qubits as physical resources: if process $P$ sends qubit $q$ to
process $Q$, then $P$ must not access $q$ subsequently, and this
restriction can be enforced by static typechecking. In
Section~\ref{sec-soundness} we prove that the invariants of the type
system are preserved by the operational semantics, guaranteeing in
particular that at every point during execution of a system, every qubit is
uniquely owned by a single parallel component. In
Section~\ref{sec-future} we outline our plans for further work,
focusing on the use of both standard (non-deterministic) and
probabilistic model-checking systems.
\subsection*{Related Work}
There has been a great deal of interest in quantum programming languages,
resulting in a number of proposals in different styles, for example
\cite{KnillE:conqp,OmerB:quapq,SandersJW:quap,SelingerP:towqpl,vanTonderA:lamcqc}.
Such languages can express arbitrary quantum state transformations and
could be used to model quantum protocols in those terms. However, our
view is that any model lacking an explicit treatment of communication
is essentially incomplete for the analysis of protocols; certainly in
the classical world, standard programming languages are not considered
adequate frameworks in which to analyze or verify
protocols. Nevertheless, Selinger's functional language QPL
\cite{SelingerP:towqpl} in particular has influenced our choice of
computational operators for CQP.
The closest work to our own, developed simultaneously but
independently, is Jorrand and Lalire's QPAlg \cite{JorrandP:towqpa}, which
also combines process-calculus-style communication with
transformation and measurement of quantum state. The most distinctive
features of our work are the type system and associated proofs, the
explicit formulation of an expression language which can easily be
extended, and our emphasis on a methodology for formal verification.
The work of Abramsky and Coecke
\cite{AbramskyS:catsqp} is also relevant. They define a
category-theoretic semantic foundation for quantum protocols, which
supports reasoning about systems and exposes deep connections between
quantum systems and programming language semantics, but they do not
define a formal syntax in which to specify models. It will be
interesting to investigate the relationship between CQP and the
semantic structures which they propose.
\section{Preliminaries}
\label{sec-prelim}
We briefly introduce the aspects of quantum theory which are needed
for the rest of the paper. For more detailed presentations we refer
the reader to the books by Gruska \cite{GruskaJ:quac} and Nielsen and
Chuang \cite{NielsenMA:quacqi}. Rieffel and Polak
\cite{RieffelEG:intqcn} give
an account aimed at computer scientists.
A \emph{quantum bit} or \emph{qubit} is a physical system which has
two states, conventionally written $\ket{0}$ and $\ket{1}$,
corresponding to one-bit classical values. These could be, for
example, spin states of a particle or polarization states of a photon,
but we do not consider physical details. According to quantum theory,
a general state of a quantum system is a \emph{superposition} or
linear combination of basis states. Concretely, a qubit has state
$\alpha\ket{0}+\beta\ket{1}$, where $\alpha$ and $\beta$ are complex
numbers such that $\ms{\alpha}+\ms{\beta}=1$; states which differ only
by a (complex) scalar factor with modulus $1$ are
indistinguishable. States can be represented by column vectors:
\[
\vect{\alpha}{\beta} = \alpha\ket{0}+\beta\ket{1}.
\]
Superpositions are illustrated by the quantum
coin-flipping game which we discuss in Section~\ref{sec-coinflip}.
Formally, a quantum state is a unit vector in a Hilbert space, i.e.\ a
complex vector space equipped with an inner product satisfying certain
axioms. In this paper we will restrict attention to collections of qubits.
The basis $\{\ket{0},\ket{1}\}$ is known as the \emph{standard}
basis. Other bases are sometimes of interest, especially the
\emph{diagonal} (or \emph{dual}, or \emph{Hadamard}) basis consisting
of the vectors $\ket{+} = \frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$ and
$\ket{-} = \frac{1}{\sqrt{2}}(\ket{0}-\ket{1})$. For example, with respect to
the diagonal basis, $\ket{0}$ is in a superposition of basis states:
\[
\ket{0} = \frac{1}{\sqrt{2}}\ket{+} +
\frac{1}{\sqrt{2}}\ket{-}.
\]
Evolution of a closed quantum system can be described by a
\emph{unitary transformation}. If the state of a qubit is represented
by a column vector then a unitary transformation $U$ can be
represented by a complex-valued matrix $(u_{ij})$ such that $UU^* = U^*U = I$,
where $U^*$ is the conjugate-transpose of $U$ (i.e.\ element $ij$ of
$U^*$ is $\bar{u}_{ji}$). $U$ acts by matrix multiplication:
\[
\vect{\alpha'}{\beta'} = \matr{u_{00}}{u_{01}}{u_{10}}{u_{11}}\vect{\alpha}{\beta}
\]
A unitary transformation can also be defined by its effect on basis
states, which is extended linearly to the whole space. For example,
the \emph{Hadamard} transformation is defined by
\[
\begin{array}{lcl}
\ket{0} & \mapsto & \frac{1}{\sqrt{2}}\ket{0}+\frac{1}{\sqrt{2}}\ket{1} \\
\ket{1} & \mapsto & \frac{1}{\sqrt{2}}\ket{0}-\frac{1}{\sqrt{2}}\ket{1}
\end{array}
\]
which corresponds to the matrix
\[
\qgate{H} = \frac{1}{\sqrt{2}}\matr{1}{1}{1}{-1}.
\]
The Hadamard transformation creates superpositions:
$\qgate{H}{\ket{0}} = \ket{+}$ and $\qgate{H}{\ket{1}} = \ket{-}$.
We will also make use of the \emph{Pauli} transformations, denoted by
either $I,\sigma_x,\sigma_y,\sigma_z$ or $\sigma_0,\sigma_1,\sigma_2,\sigma_3$:
\[
\begin{array}{@{\extracolsep{2mm}}cccc}
I / \sigma_0 & \sigma_x / \sigma_1 & \sigma_y / \sigma_2 & \sigma_z / \sigma_3 \\ \\
\matr{1}{0}{0}{1} & \matr{0}{1}{1}{0} &
\matr{0}{-i}{i}{0} & \matr{1}{0}{0}{-1}
\end{array}
\]
A key feature of quantum physics is the r\^{o}le of
\emph{measurement}. If a qubit is in the state
$\alpha\ket{0}+\beta\ket{1}$ then measuring its value gives the result
$0$ with probability $\ms{\alpha}$ (leaving it in state $\ket{0}$) and
the result $1$ with probability $\ms{\beta}$ (leaving it in state
$\ket{1}$). Protocols sometimes specify measurement with respect to a
different basis, such as the diagonal basis; this can be expressed as
a unitary change of basis followed by a measurement with respect to
the standard basis. Note that if a qubit is in state
$\ket{+}$ then a measurement with respect
to the standard basis gives result $0$ (and state $\ket{0}$) with
probability $\frac{1}{2}$, and result $1$ (and state $\ket{1}$) with
probability $\frac{1}{2}$. If a qubit is in state $\ket{0}$ then a
measurement with respect to the diagonal basis gives
result\footnote{Strictly speaking, the outcome of the measurement is
just the final state; the specific association of numerical results with final
states is a matter of convention.} $0$ (and
state $\ket{+}$) with probability
$\frac{1}{2}$, and result $1$ (and state
$\ket{-}$) with probability
$\frac{1}{2}$, because of the representation of $\ket{0}$ in the
diagonal basis noted above. If a classical bit is represented by a
qubit using either the standard or diagonal basis, then a measurement
with respect to the correct basis results in the original bit, but a
measurement with respect to the other basis results in $0$ or $1$ with equal
probability. This behaviour is used by the quantum bit-commitment
protocol which we discuss in Section~\ref{sec-bitcommitment}.
To go beyond single-qubit systems, we consider tensor products of
spaces (in contrast to the cartesian products used in classical
systems). If spaces $U$ and $V$ have bases $\{u_i\}$ and $\{v_j\}$
then $U\otimes V$ has basis $\{u_i\otimes v_j\}$. In particular, a
system consisting of $n$ qubits has a $2^n$-dimensional space whose
standard basis is $\ket{00\ldots 0},\ldots,\ket{11\ldots 1}$. We can now
consider measurements of single qubits or collective measurements of
multiple qubits. For example, a $2$-qubit system has basis
$\ket{00},\ket{01},\ket{10},\ket{11}$ and a general state is
$\alpha\ket{00}+\beta\ket{01}+\gamma\ket{10}+\delta\ket{11}$ with
$\ms{\alpha}+\ms{\beta}+\ms{\gamma}+\ms{\delta}=1$. Measuring the
first qubit gives result $0$ with probability $\ms{\alpha}+\ms{\beta}$
(leaving the system in state
$\frac{1}{\sqrt{\ms{\alpha}+\ms{\beta}}}(\alpha\ket{00}+\beta\ket{01})$) and
result $1$ with probability $\ms{\gamma}+\ms{\delta}$ (leaving the
system in state
$\frac{1}{\sqrt{\ms{\gamma}+\ms{\delta}}}(\gamma\ket{10}+\delta\ket{11})$).
Measuring both qubits simultaneously gives result $0$ with probability
$\ms{\alpha}$ (leaving the system in state $\ket{00}$), result $1$
with probability $\ms{\beta}$ (leaving the system in state $\ket{01}$)
and so on; note that the association of basis states
$\ket{00},\ket{01},\ket{10},\ket{11}$ with results $0,1,2,3$ is just a
conventional choice. The power of quantum computing, in an algorithmic
sense, results from calculating with superpositions of states; all the
states are transformed simultaneously (\emph{quantum parallelism}) and
the effect increases exponentially with the dimension of the state
space. The challenge in quantum algorithm design is to make
measurements which enable this parallelism to be exploited; in general
this is very difficult.
We will make use of the \emph{conditional not} ($\qgate{CNot}$)
transformation on pairs of qubits. Its action on basis states is
defined by
\[
\begin{array}{@{\extracolsep{1mm}}cccc}
\ket{00}\mapsto\ket{00} & \ket{01}\mapsto\ket{01} &
\ket{10}\mapsto\ket{11} & \ket{11}\mapsto\ket{10}
\end{array}
\]
which can be understood as inverting the second qubit if and only if
the first qubit is set, although in general we need to consider the
effect on non-basis states.
Systems of two or more qubits can exhibit the phenomenon of
\emph{entanglement}, meaning that the states of the qubits are
correlated. For example, consider a measurement of the first qubit of
the state $\frac{1}{\sqrt{2}}(\ket{00}+\ket{11})$. The result is $0$
(and resulting state $\ket{00}$) with probability $\frac{1}{2}$, or
$1$ (and resulting state $\ket{11}$) with probability
$\frac{1}{2}$. In either case a subsequent measurement of the second
qubit gives a definite (non-probabilistic) result which is always the
same as the result of the first measurement. This is true even if the
entangled qubits are physically separated. Entanglement illustrates
the key difference between the use of tensor product (in quantum
systems) and cartesian product (in classical systems): an entangled
state of two qubits is one which cannot be decomposed as a pair of
single-qubit states. Entanglement is used in an essential way in the
quantum teleportation protocol which we discuss in
Section~\ref{sec-teleportation}. That example uses the
$\qgate{CNot}$ transformation to create entanglement:
$\qgate{CNot}((\qgate{H}\otimes I)\ket{00}) =
\frac{1}{\sqrt{2}}(\ket{00}+\ket{11})$.
\section{Examples of Modelling in CQP}
\label{sec-examples}
\subsection{A Quantum Coin-Flipping Game}
\label{sec-coinflip}
Our first example is based on a scenario used by Meyer
\cite{MeyerDA:quas} to initiate the study of quantum game
theory. Players $P$ and $Q$ play the following game: $P$ places a
coin, head upwards, in a box, and then the players take turns ($Q$,
then $P$, then $Q$) to optionally turn the coin over, without being
able to see it. Finally the box is opened and $Q$ wins if the coin is
head upwards.
Clearly neither player has a winning strategy, but the situation
changes if the coin is a quantum system, represented by a qubit
($\ket{0}$ for head upwards, $\ket{1}$ for tail upwards). Turning the
coin over corresponds to the transformation $\sigma_1$, and this is
what $P$ can do. But suppose that $Q$ can apply $\qgate{H}$, which
corresponds to transforming from head upwards ($\ket{0}$) to a
superposition of head upwards and tail upwards
($\frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$), and does this on both
turns. Then we have two possible runs of the game, (a) and (b):
\begin{center}
\renewcommand{\arraystretch}{1.2}
\begin{tabular}{l|l}
\multicolumn{2}{c}{(a)}\\
Action & State \\ \hline
& $\ket{0}$ \\
$Q$: $\qgate{H}$ & $\frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$ \\
$P$: $\sigma_1$ & $\frac{1}{\sqrt{2}}(\ket{1}+\ket{0})$ \\
$Q$: $\qgate{H}$ & $\ket{0}$
\end{tabular}\hspace{5mm}
\begin{tabular}{l|l}
\multicolumn{2}{c}{(b)}\\
Action & State \\ \hline
\phantom{$P$: $\sigma_1$} & $\ket{0}$ \\
$Q$: $\qgate{H}$ & $\frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$ \\
$P$: $-$ & $\frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$ \\
$Q$: $\qgate{H}$ & $\ket{0}$
\end{tabular}
\renewcommand{\arraystretch}{1}
\end{center}
and in each case the coin finishes head upwards. To verify this we
calculate that the state $\frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$ is
invariant under $\sigma_1$:
\[
\matr{0}{1}{1}{0}\frac{1}{\sqrt{2}}\vect{1}{1} = \frac{1}{\sqrt{2}}\vect{1}{1}
\]
and that the Hadamard transformation $\qgate{H}$ is self-inverse:
\[
\frac{1}{\sqrt{2}}\matr{1}{1}{1}{-1}\frac{1}{\sqrt{2}}\matr{1}{1}{1}{-1} = \matr{1}{0}{0}{1}
\]
Meyer considers game-theoretic issues relating to the expected outcome
of repeated runs, but we just model a single run in CQP
(Figure~\ref{fig-coinflip}). Most of the syntax of CQP is based on
typed pi-calculus, using fairly common notation (for example, see
Pierce and Sangiorgi's presentation \cite{PierceBC:typsmpfull}). $P$
and $Q$ communicate by means of the typed channel
$\tid{s}{\Chant{\mktype{Qbit}}}$ which carries qubits. It is a parameter of
both $P$ and $Q$. At the top level, $\pname{System}$ creates $s$ with
$(\mathsf{new}\
\tid{s}{\Chant{\mktype{Qbit}}})$ and starts $P$ and $Q$ in parallel. $Q$ and
$\pname{System}$
are also parameterized by $x$, the qubit representing the initial
state of the coin.
$Q$ applies ($\trans{x}{\qgate{H}}$) the Hadamard transformation to
$x$; this syntax is based on Selinger's QPL
\cite{SelingerP:towqpl}. This expression is converted into an action
by $\action{\ldots}$. Using a standard pi-calculus programming style,
$Q$ creates a channel $t$ and sends ($\outp{s}{x,t}$) it to $P$ along
with the qubit $x$. $P$ will use $t$ to send the qubit back, and $Q$
receives it with $\inp{t}{\tid{z}{\mktype{Qbit}}}$, binding it to the name $z$
in the rest of the code. Finally $Q$ applies $\qgate{H}$ again, and
continues with some behaviour $C(z)$.
$P$ contains two branches of behaviour, corresponding to the
possibilities of applying (second branch) or not applying (first
branch) the transformation $\sigma_1$. Both branches terminate with
the null process $\mathbf{0}$. The branches are placed in
parallel\footnote{Simpler definitions can be obtained if we add
guarded sums to CQP; there is then no need for the channel $t$. This
is straightforward but we have chosen instead to simplify the
presentation of the semantics.} and the operational semantics means
that only one of them interacts with $Q$; the other is effectively
$\pname{Garbage}$ (different in each case).
Figure~\ref{fig-coinflip-execution} shows the execution (combining some
steps) of
$\pname{System}$ according to the operational semantics which we will
define formally in Section~\ref{sec-syntax}. Reduction takes place on
configurations $\cnfig{\sigma}{\phi}{P}$ where $\sigma$ is a list of
qubits and their collective state, $\phi$ lists the channels which
have been created, and $P$ is a process term. Note that the state of
the qubits \emph{must} be a global property in order to be physically
realistic. We record the channels globally in order to give the
semantics a uniform style; this is different from the usual approach
to pi-calculus semantics, but (modulo garbage collection) is
equivalent to expanding the scope of every \textsf{new} before
beginning execution.
The execution of $\pname{System}$ tracks the informal calculation
which we worked through above. Our CQP model makes the
manipulation of the qubit very explicit; there are other ways to
express the behaviour (including putting everything into a single
process with no communication), but the point is that we have a
framework in which to discuss such issues.
\input{fig-coinflip}
\input{fig-coinflip-execution}
\input{fig-teleportation}
\input{fig-teleportation-execution}
\input{fig-teleportation-source}
\input{fig-bitcommitment}
\input{fig-syntax}
\input{fig-internal-syntax}
\input{fig-structural}
\subsection{Quantum Teleportation}
\label{sec-teleportation}
The quantum teleportation protocol \cite{BennettCH:teluqs} is a
procedure for transmitting a quantum state via a non-quantum
medium. This protocol is particularly important: not only is it a
fundamental component of several more complex protocols, but it is
likely to be a key enabling technology for the development of the
\emph{quantum repeaters}
\cite{deRiedmattenH:londqt} which will be necessary in
large-scale quantum communication networks.
Figure~\ref{fig-teleportation} shows a simple model of the quantum
teleportation protocol. Alice and Bob each possess one qubit ($x$ for
Alice, $y$ for Bob) of an
entangled pair whose state is
$\frac{1}{\sqrt{2}}(\ket{00} + \ket{11})$. At this point
we are assuming that appropriate qubits will be supplied to Alice and
Bob as parameters of the system. Alice is also parameterized by a
qubit $z$, whose state is to be teleported. She applies
($\trans{z,x}{\qgate{CNot}}$) the conditional not
transformation to $z$ and $x$ and then applies
($\trans{z}{\qgate{H}}$) the
Hadamard transformation to $z$, finally measuring
$z$ and $x$ to yield a two-bit classical value which she sends
($\outp{c}{\mathsf{measure}\ z,x}$) to Bob on the typed
channel $\tid{c}{\Chant{\ranget{0}{3}}}$ and then terminates
($\mathbf{0}$). Bob receives ($\inp{c}{\tid{r}{\ranget{0}{3}}}$) this value
and uses it to select\footnote{We can easily extend the expression
language of CQP to allow explicit testing of $r$.} a \emph{Pauli} transformation
$\sigma_{0}\ldots\sigma_{3}$ to apply ($\trans{y}{\sigma_{r}}$) to
$y$. The result is that Bob's qubit $y$ takes on the state of $z$,
without a physical qubit having been transmitted from Alice to
Bob. Bob may then use $y$ in his continuation process
$\pname{Use}(y)$.
This example introduces measurement, with a syntax similar to that of
Selinger's QPL \cite{SelingerP:towqpl}. We treat measurement as an
expression, executed for its value as well as its side-effect on the
quantum state. Because the result of a measurement is probabilistic,
evaluation of a $\kw{measure}$ expression introduces a probability
distribution over configurations: $\boxplus_{0\leqslant i\leqslant
n}\,\prob{p_i}{\cnfig{\sigma_i}{\phi_i}{P_i}}$. The next step is a
probabilistic transition to one of the configurations; no reduction
takes place underneath a probability distribution. In general a
configuration reduces non-deterministically to
one of a collection of probability distributions over configurations
(in some cases this is trivial, with only one distribution or only one
configuration within a distribution). A non-trivial probability
distribution makes a probabilistic transition to a
single configuration; this step is omitted in the case of a trivial
distribution.
Figure~\ref{fig-teleportation-execution} shows the complete execution
of $\pname{System}$ in the particular case in which $z$, the qubit
being teleported, has state $\ket{1}$. The measurement produces a
probability distribution over four configurations, but in all cases
the final configuration (process $\pname{Use}(y)$) has a state
consisting of a single basis vector in which $y = \ket{1}$. To verify
the protocol for an arbitrary qubit, we can repeat the calculation
with initial state $x,y,z = \frac{\alpha}{\sqrt{2}}(\ket{000}+\ket{110})+\frac{\beta}{\sqrt{2}}(\ket{001}+\ket{111})$.
Alice and Bob are parameterized by their parts ($x,y$) of the
entangled pair (and by the channel $c$). We can be more explicit about
the origin of the entangled pair by introducing what is known in the
physics literature as an
\emph{EPR source}\footnote{EPR stands for
Einstein, Podolsky and Rosen.} (computer scientists might regard it as an
\emph{entanglement server}). This process constructs the entangled pair
(by using the Hadamard and conditional not transformations)
and sends its components to Alice and Bob on the typed channels
$\tid{s,t}{\Chant{\mktype{Qbit}}}$. Figure~\ref{fig-teleportation-source}
shows the revised model.
\subsection{Bit-Commitment}
\label{sec-bitcommitment}
The bit-commitment problem is to design a protocol such that Alice
chooses a one-bit value which Bob then attempts to guess. The key
issue is that Alice must evaluate Bob's guess with respect to her
original choice of bit, without changing her mind; she must be
committed to her choice. Similarly, Bob must not find out Alice's
choice before making his guess. Bit-commitment turns out to be an important
primitive in cryptographic protocols. Classical bit-commitment schemes
rely on assumptions on the computational complexity of certain
functions; it is natural to ask whether quantum techniques can remove
these assumptions.
We will discuss a quantum bit-commitment protocol due to Bennett and
Brassard \cite{BennettCH:quacpd} which is closely related to the
quantum key-distribution protocol proposed in the same paper and
known as BB84. The following description of the protocol is
based on Gruska's~\cite{GruskaJ:quac} presentation.
\begin{enumerate}
\item Alice randomly chooses a bit $x$ and a sequence
of bits $\mathit{xs}$. She encodes $\mathit{xs}$ as a sequence of
qubits and sends them to Bob. This encoding uses the standard basis
(representing $0$ by $\ket{0}$ and $1$ by $\ket{1}$) if $x=0$, and the
diagonal basis (representing $0$ by $\ket{+}$ and $1$ by $\ket{-}$) if $x=1$.
\item Upon receiving each qubit, Bob randomly chooses to measure it
with respect to either the standard basis or the diagonal basis. For each
measurement he stores the result and his choice of basis. If the basis
he chose matches Alice's $x$ then the result of the measurement is the
same as the corresponding bit from $\mathit{xs}$; if not, then the
result is $0$ or $1$ with equal probability. After receiving all of
the qubits, Bob tells Alice his guess at the value of $x$.
\item Alice tells Bob whether or not he guessed
correctly. To certify her claim she sends $\mathit{xs}$ to Bob.
\item Bob verifies Alice's claim by looking at the measurements in
which he used the basis corresponding to $x$, and checking that the
results are the same as the corresponding bits from $\mathit{xs}$. He
can also check that the results of the other measurements are
sufficiently random (i.e.\ not significantly correlated with the
corresponding bits from $\mathit{xs}$).
\end{enumerate}
Figure~\ref{fig-bitcommitment} shows our model of this protocol in
CQP. The complexity of the definitions reflects the fact that
we have elaborated much of the computation which is implicit in the
original description.
The definitions use the following features which are not present in
our formalization of CQP, but can easily be added.
\begin{itemize}
\item The type constructor $\kw{List}$ and associated functions and
constructors such
as $\kw{hd}$, $\kw{tl}$, $\kw{length}$, $[\,]$, $@$.
\item Product types ($*$) and functions such as $\kw{fst}$,
$\kw{snd}$.
\item $\kw{if-then-else}$ for expressions and processes.
\item Recursive process definitions.
\end{itemize}
$\pname{Alice}$ is parameterized by $x$ and $\mathit{xs}$; they could
be explicitly chosen at random if desired. $\pname{Bob}$ uses $m$ to
record the results of his measurements, and $n$ (received from
$\pname{Alice}$ initially) as a recursion parameter. $\pname{Bob}$
receives random bits, for his choices of basis, from the server
$\pname{Random}$; he also guesses $x$ randomly. The state
$\pname{BobVerify}$ carries out the first part of step (4) above, but
we have not included a check for non-correlation of the remaining
bits.
Communication between $\pname{Alice}$ and $\pname{Bob}$ uses four
separate channels, $c,\ldots,f$. This proliferation of channels is a
consequence of the fact that our type system associates a unique message
type with each channel. Introducing \emph{session types} \cite{HondaK:intblt}
would allow a single channel to be used for the entire protocol,
although it is worth noting that depending on the physical
implementation of qubits, separation of classical and quantum channels
might be the most accurate model.
We intend to use this CQP model as the basis for various kinds of
formal analysis of the bit-commitment protocol; we make some specific
suggestions in Section~\ref{sec-future}. We should point out, however,
that this bit-commitment protocol is insecure in that it allows Alice
to cheat: if each qubit which she sends to Bob is part of an entangled
pair, then Bob's measurements transmit information back to Alice which
she can use to change $x$ after receiving Bob's guess. The real value
of quantum bit-commitment is as a stepping-stone to the BB84 quantum
key-distribution protocol, which has a very similar structure and is
already being used in practical quantum communication systems.
\input{fig-reduction-exp}
\section{Syntax and Operational Semantics}
\label{sec-syntax}
We now formally define the syntax and operational semantics of the
core of CQP, excluding named process definitions and recursion, which
can easily be added.
\subsection{Syntax}
The syntax of CQP is defined by the grammar in
Figure~\ref{fig-syntax}. Types $T$ consist of data types such as
$\mktype{Int}$ and $\mktype{Unit}$ (others can easily be added), the type $\mktype{Qbit}$ of
qubits, channel types $\Chant{T_1,\ldots,T_n}$ (specifying that each
message is an $n$-tuple with component types $T_1,\ldots,T_n$) and
operator types $\Op{n}$ (the type of a unitary operator on $n$
qubits). The integer range type $\ranget{0}{3}$ used in the
teleportation example is purely for clarification and should be
replaced by $\mktype{Int}$; we do not expect to typecheck with range types.
We use the notation $\vec{T} = T_1,\ldots,T_n$ and $\vec{e} =
e_1,\ldots,e_n$ and write $\length{\vec{e}}$ for the length of a
tuple. Values $v$ consist of variables ($x$, $y$, $z$ etc.), literal
values of data types ($\num{0},\num{1},\ldots$ and $\mkterm{unit}$) and
unitary operators such as the Hadamard operator
$\qgate{H}$. Expressions $e$ consist of values, measurements
$\msure{e_1,\ldots,e_n}$, applications of unitary operators
$\trans{e_1,\ldots,e_n}{e}$, and expressions involving data operators
such as $e+e'$ (others can easily be added). Note that although the
syntax refers to measurements and transformation of expressions $e$,
the type system will require these expressions to refer to
qubits. Processes $P$ consist of the null (terminated) process $\mathbf{0}$,
parallel compositions $P\mathbin{\mid} Q$, inputs
$\inp{e}{\tid{\vec{x}}{\vec{T}}}\,.\, P$ (notation:
$\tid{\vec{x}}{\vec{T}} = \tid{x_1}{T_1},\ldots,\tid{x_n}{T_n}$,
declaring the types of all the input-bound variables), outputs
$\outp{e}{\vec{e}}\,.\, P$, actions $\action{e}\,.\, P$ (typically $e$
will be an application of a unitary operator), channel declarations
$(\mathsf{new}\ \tid{x}{T})P$ and qubit declarations $(\mathsf{qbit}\ x)P$. In inputs and
outputs, the expression $e$ will be constrained by the type system to
refer to a channel.
The grammar in Figure~\ref{fig-internal-syntax} defines the
\emph{internal} syntax of CQP, which is needed in order to define the
operational semantics. Values are extended by two new forms: qubit
names $q$, and channel names $c$. Evaluation contexts $\ctxt{E}{\,}$
(for expressions) and $\ctxt{F}{\,}$ (for processes) are used in the
definition of the operational semantics, in the style of Wright and
Felleisen \cite{WrightAK:synats}. The structure of $\ctxt{E}{\,}$
is used to define call-by-value evaluation of expressions; the hole
$\ctxt{\,}{\,}$ specifies the first part of the expression to be
evaluated. The structure of $\ctxt{F}{\,}$ is used to define reductions
of processes, specifying which expressions within a process must be evaluated.
Given a process $P$ we define its free variables $\fv{P}$, free qubit
names $\fq{P}$ and free channel names $\fc{P}$ in the usual way;
the binders (of $x$ or $\vec{x}$) are
$\inp{y}{\tid{\vec{x}}{\vec{T}}}$, $(\mathsf{qbit}\ x)$ and
$(\mathsf{new}\ \tid{x}{T})$.
\subsection{Operational Semantics}
The operational semantics of CQP is defined by reductions (small-step
evaluations of expressions, or inter-process communications)
and probabilistic transitions. The general form of a
reduction is $\cred{t}{\Prob{i}~\prob{p_{i}}{t_{i}}}$ where $t$ and
the $t_i$ are configurations consisting of expressions or processes
with state information. The notation $\Prob{i}~\prob{p_{i}}{t_{i}}$
denotes a probability distribution over configurations, in which
$\Sigma_{i}p_{i} = 1$; we may also write this distribution as
$\prob{p_{1}}{t_{1}} \boxplus \cdots \boxplus \prob{p_{n}}{t_{n}}$. If the
probability distribution contains a single configuration (with
probability $1$) then we simply write $\cred{t}{t'}$. Probability
distributions reduce probabilistically to single configurations:
$\ptrns{\Prob{i}~\prob{p_{i}}{t_{i}}}{p_{i}}{t_{i}}$ (with probability
$p_{i}$, the distribution $\Prob{i}~\prob{p_{i}}{t_{i}}$ reduces to
$t_{i}$).
The semantics of expressions is defined by the reduction relations
$\vred{}{}$ and $\ered{}{}$ (Figure~\ref{fig-reduction-exp}), both on
configurations of the form $\cnfig{\sigma}{\phi}{e}$. If $n$ qubits
have been declared then $\sigma$ has the form $q_0,\ldots,q_{n-1} =
\ket{\psi}$ where $\ket{\psi} =
\alpha_0\ket{\psi_0}+\cdots+\alpha_{2^n-1}\ket{\psi_{2^n-1}}$ is an
element of the $2^n$-dimensional vector space with basis $\ket{\psi_0}
= \ket{0\ldots 0},\ldots,\ket{\psi_{2^n-1}} = \ket{1\ldots 1}$. The
remaining part of the configuration, $\phi$, is a list of channel
names. Reductions $\vred{}{}$ are basic steps of evaluation, defined
by the rules $\mkRrule{Plus}$ (and similar rules for any other data
operators), $\mkRrule{Measure}$ and $\mkRrule{Trans}$. Rule $\mkRrule{Perm}$ allows qubits in
the state to be permuted, compensating for the way that $\mkRrule{Measure}$
and $\mkRrule{Trans}$ operate on qubits listed first in the state. Measurement
specifically measures the values of a collection of qubits; in the
future we should generalize to measuring \emph{observables} as allowed
by quantum physics.
Reductions
$\ered{}{}$ extend execution to evaluation contexts $\ctxt{E}{\,}$, as
defined by rule $\mkRrule{Context}$. Note that the probability distribution
remains at the top level.
Figure~\ref{fig-reduction-proc} defines the reduction relation
$\cred{}{}$ on configurations of the form
$\cnfig{\sigma}{\phi}{P}$. Rule $\mkRrule{Expr}$ lifts reductions of
expressions to $\ctxt{F}{\,}$ contexts, again keeping probability
distributions at the top level. Rule $\mkRrule{Com}$ defines communication in
the style of pi-calculus, making use of substitution, which is defined
in the usual way (we assume that bound identifiers are
renamed to avoid capture). Rule $\mkRrule{Act}$ trivially removes actions; in
general the reduction of the action expression to $v$ will have
involved side-effects such as measurement or transformation of quantum
state. Rules $\mkRrule{New}$ and $\mkRrule{Qbit}$ create new channels and qubits,
updating the state information in the configuration. Note that this
treatment of channel creation is different from standard presentations
of the pi-calculus; we treat both qubits and channels as elements of a
global store. Rule $\mkRrule{Par}$ allows reduction to take place in parallel
contexts, again lifting the probability distribution to the top level,
and rule $\mkRrule{Cong}$ allows the use of a structural congruence relation
as in the pi-calculus. Structural congruence is the smallest
congruence relation (closed under the process constructions)
containing $\alpha$-equivalence and closed under the rules in
Figure~\ref{fig-structural-congruence}.
\input{fig-reduction-proc}
\section{Type System}
\label{sec-types}
The typing rules defined in Figure~\ref{fig-typing} apply to the syntax
defined in Figure~\ref{fig-syntax}. Environments $\Gamma$ are mappings
from variables to types in the usual way. Typing judgements are of two kinds.
$\typed{\Gamma}{e}{T}$ means that expression $e$ has type $T$ in
environment $\Gamma$. $\ptyped{\Gamma}{P}$ means that process $P$ is
well-typed in environment $\Gamma$. The rules for expressions are
straightforward; note that in rule $\mkTrule{Trans}$, $x_1,\ldots,x_n$ must be
distinct variables of type $\mktype{Qbit}$.
In rule $\mkTrule{Par}$ the operation $+$ on environments
(Definition~\ref{def-addition-env}) is the key to
ensuring that each qubit is controlled by a unique part of a system.
An implicit hypothesis of $\mkTrule{Par}$ is that $\Gamma_1+\Gamma_2$ must be defined.
This is very similar to the linear type system for the pi-calculus,
defined by Kobayashi \emph{et al.} \cite{KobayashiN:linpcfull}.
\begin{definition}[Addition of Environments]\mbox{}\\
The partial operation of adding a typed variable to an environment,
$\Gamma + \tid{x}{T}$, is defined by
\[
\begin{array}{rcll}
\Gamma + \tid{x}{T} & = & \Gamma,\tid{x}{T} & \text{if $x\not\in\dom{\Gamma}$} \\
\Gamma + \tid{x}{T} & = & \Gamma & \text{if $T\not=\mktype{Qbit}$ and
$\tid{x}{T}\in\Gamma$} \\
\Gamma + \tid{x}{T} & = & \multicolumn{2}{l}{\text{undefined, otherwise}}
\end{array}
\]
This operation is extended inductively to a partial operation
$\Gamma+\Delta$ on environments.
\label{def-addition-env}
\end{definition}
Rule $\mkTrule{Out}$ allows output of classical values and qubits to be
combined, but the qubits must be distinct variables and they cannot be
used by the continuation of the outputting process (note the
hypothesis $\ptyped{\Gamma}{P}$). The remaining rules are
straightforward.
According to the operational semantics, execution of $(\mathsf{qbit}\ )$ and
$(\mathsf{new}\ )$ declarations introduces qubit
names and channel names. In order to be able to use the type system to
prove results about the behaviour of executing processes, we introduce
the internal type system (Figure~\ref{fig-typing-int}). This uses
judgements $\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$ and
$\iptyped{\Gamma}{\Sigma}{\Phi}{P}$ where $\Sigma$ is a set of qubit
names and $\Phi$ is a mapping from channel names to channel
types. Most of the typing rules are straightforward extensions of the
corresponding rules in Figure~\ref{fig-typing}. Because references to
qubits may now be either variables or explicit qubit names, the rules
represent them by general expressions $e$ and impose conditions that
$e$ is either a variable or a qubit name. This is seen in rules
$\mkITrule{Trans}$ and $\mkITrule{Out}$. Note that in $\mkITrule{Par}$, the operation
$\Sigma_1+\Sigma_2$ is disjoint union and an implicit hypothesis is
that $\Sigma_1$ and $\Sigma_2$ are disjoint.
By standard techniques for linear type systems, the typing rules in
Figure~\ref{fig-typing} can be converted into a typechecking algorithm
for CQP models.
As an illustration of the linear control of qubits, consider the
coin-flipping example (Figure~\ref{fig-coinflip}). In $\pname{P}$,
any non-trivial continuation replacing $\mathbf{0}$ would not be able to use
the qubit $y$, which has been sent on $t$. In $\pname{Q}$, after the
qubit $x$ has been sent on $s$, the continuation cannot use $x$. Of
course, at run-time, the qubit variable $z$ in
$\inp{t}{\tid{z}{\mktype{Qbit}}}$ is instantiated by $x$, but that is not a
problem because $\pname{P}$ does not use $x$ after sending it. In
$\pname{System}$, $x$ is used as an actual parameter of $\pname{Q}$
and therefore could not also be used as an actual parameter of
$\pname{P}$ (if $\pname{P}$ had a formal parameter of type $\mktype{Qbit}$).
\input{fig-typing}
\input{fig-typing-int}
\section{Soundness of the Type System}
\label{sec-soundness}
We prove a series of standard lemmas, following the approach of Wright
and Felleisen \cite{WrightAK:synats}, leading to a proof that typing
is preserved by execution of processes
(Theorem~\ref{theorem-type-preservation}). We then prove that in a
typable process, each qubit is used by at most one of any parallel
collection of sub-processes
(Theorem~\ref{theorem-unique-ownership-qubits}); because of type
preservation, this property holds at every step of the execution of a
typable process. This reflects the physical reality of the protocols
which we want to model.
We can also prove a standard runtime safety theorem, stating that a typable
process generates no communication errors or incorrectly-applied
operators, but we have not included it in the present paper.
\begin{lemma}[Typability of Subterms in $E$]
\label{lemma-typability-subterms-E}\mbox{}\\
If $\mathcal{D}$ is a typing derivation concluding
$\ityped{\Gamma}{\Sigma}{\Phi}{\ctxt{E}{e}}{T}$ then there exists $U$
such that $\mathcal{D}$ has a subderivation $\mathcal{D}'$ concluding
$\ityped{\Gamma}{\Sigma}{\Phi}{e}{U}$ and the position of
$\mathcal{D}'$ in $\mathcal{D}$ corresponds to the position of the
hole in $\ctxt{E}{\,}$.
\end{lemma}
\begin{proof}
By induction on the structure of $\ctxt{E}{\,}$.
$\Box$
\end{proof}
\begin{lemma}[Replacement in $E$]
\label{lemma-replacement-E}If
\begin{enumerate}
\item $\mathcal{D}$ is a derivation concluding
$\ityped{\Gamma}{\Sigma}{\Phi}{\ctxt{E}{e}}{T}$
\item $\mathcal{D}'$ is a subderiv.\ of $\mathcal{D}$ concluding
$\ityped{\Gamma}{\Sigma}{\Phi}{e}{U}$
\item the position of $\mathcal{D}'$ in $\mathcal{D}$ matches the hole in $\ctxt{E}{\,}$
\item $\ityped{\Gamma}{\Sigma}{\Phi}{e'}{U}$
\end{enumerate}
then $\ityped{\Gamma}{\Sigma}{\Phi}{\ctxt{E}{e'}}{T}$.
\end{lemma}
\begin{proof}
Replace $\mathcal{D}'$ in $\mathcal{D}$ by a deriv.\ of
$\ityped{\Gamma}{\Sigma}{\Phi}{e'}{U}$.
$\Box$
\end{proof}
\begin{lemma}[Type Preservation for $\vred{}{}$]
\label{lemma-type-preservation-v}\mbox{}\\
If $\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$ and
$\vred{\cnfig{\sigma}{\phi}{e}}{\Prob{i} \prob{p_i}{\cnfig{\sigma_i}{\phi_i}{e_i}}}$
and $\Sigma=\dom{\sigma}$ and $\phi=\dom{\Phi}$ then $\forall
i.(\sigma_i=\sigma)$ and $\forall i.(\phi_i=\phi)$ and $\forall i.(\ityped{\Gamma}{\Sigma}{\Phi}{e_i}{T})$.
\end{lemma}
\begin{proof}
Straightforward from the definition of $\vred{}{}$ by examining each
case.
$\Box$
\end{proof}
\begin{lemma}[Type Preservation for $\ered{}{}$]
\label{lemma-type-preservation-e}\mbox{}\\
If $\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$ and
$\ered{\cnfig{\sigma}{\phi}{e}}{\Prob{i} \prob{p_i}{\cnfig{\sigma_i}{\phi_i}{e_i}}}$
and $\Sigma=\dom{\sigma}$ and $\phi=\dom{\Phi}$ then $\forall
i.(\sigma_i=\sigma)$ and $\forall i.(\phi_i=\phi)$ and $\forall i.(\ityped{\Gamma}{\Sigma}{\Phi}{e_i}{T})$.
\end{lemma}
\begin{proof}
$\ered{\cnfig{\sigma}{\phi}{e}}{\Prob{i}
\prob{p_i}{\cnfig{\sigma_i}{\phi_i}{e_i}}}$ is derived by
$\mkRrule{Context}$, so for some $\ctxt{E}{\,}$ we have $e =
\ctxt{E}{f}$ and $\forall i.(e_i = \ctxt{E}{f_i})$ and
$\vred{\cnfig{\sigma}{\phi}{f}}{\Prob{i}
\prob{p_i}{\cnfig{\sigma_i}{\phi_i}{f_i}}}$. From
$\ityped{\Gamma}{\Sigma}{\Phi}{\ctxt{E}{f}}{T}$,
Lemma~\ref{lemma-typability-subterms-E} gives
$\ityped{\Gamma}{\Sigma}{\Phi}{f}{U}$ for some $U$,
Lemma~\ref{lemma-type-preservation-v} gives $\forall
i.(\ityped{\Gamma}{\Sigma}{\Phi}{f_i}{U})$ and $\forall
i.(\sigma_i=\sigma)$ and $\forall i.(\phi_i=\phi)$, and
Lemma~\ref{lemma-replacement-E} gives $\forall i.(\ityped{\Gamma}{\Sigma}{\Phi}{\ctxt{E}{f_i}}{T})$.
$\Box$
\end{proof}
\begin{lemma}[Typability of Subterms in $F$]
\label{lemma-typability-subterms-F}\mbox{}\\
If $\mathcal{D}$ is a typing derivation concluding
$\iptyped{\Gamma}{\Sigma}{\Phi}{\ctxt{F}{e}}$ then there exists $T$
such that $\mathcal{D}$ has a subderivation $\mathcal{D}'$ concluding
$\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$ and the position of
$\mathcal{D}'$ in $\mathcal{D}$ corresponds to the position of the
hole in $\ctxt{F}{\,}$.
\end{lemma}
\begin{proof}
By case-analysis on the structure of $\ctxt{F}{\,}$.
$\Box$
\end{proof}
\begin{lemma}[Replacement in $F$]
\label{lemma-replacement-F}If
\begin{enumerate}
\item $\mathcal{D}$ is a derivation concluding
$\iptyped{\Gamma}{\Sigma}{\Phi}{\ctxt{F}{e}}$
\item $\mathcal{D}'$ is a subderiv.\ of $\mathcal{D}$ concluding
$\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$
\item the position of $\mathcal{D}'$ in $\mathcal{D}$ matches the hole in $\ctxt{F}{\,}$
\item $\ityped{\Gamma}{\Sigma}{\Phi}{e'}{T}$
\end{enumerate}
then $\iptyped{\Gamma}{\Sigma}{\Phi}{\ctxt{F}{e'}}$.
\end{lemma}
\begin{proof}
Replace $\mathcal{D}'$ in $\mathcal{D}$ by a deriv.\ of
$\ityped{\Gamma}{\Sigma}{\Phi}{e'}{T}$.
$\Box$
\end{proof}
\begin{lemma}[Weakening for Expressions]
\label{lemma-weakening-expressions}\mbox{}\\
If $\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$ and $\Gamma\subseteq\Gamma'$
and $\Sigma\subseteq\Sigma'$ and $\Phi\subseteq\Phi'$ then
$\ityped{\Gamma'}{\Sigma'}{\Phi'}{e}{T}$.
\end{lemma}
\begin{proof}
Induction on the derivation of
$\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$.
$\Box$
\end{proof}
\begin{lemma}
\label{lemma-free-names-expressions}\mbox{}\\
If $\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$ then
$\fv{e}\subseteq\dom{\Gamma}$ and $\fq{e}\subseteq\Sigma$ and
$\fc{e}\subseteq\dom{\Phi}$.
\end{lemma}
\begin{proof}
Induction on the derivation of $\ityped{\Gamma}{\Sigma}{\Phi}{e}{T}$.
$\Box$
\end{proof}
\begin{lemma}
\label{lemma-free-names-processes}\mbox{}\\
If $\iptyped{\Gamma}{\Sigma}{\Phi}{P}$ then
$\fv{P}\subseteq\dom{\Gamma}$ and $\fq{P}\subseteq\Sigma$ and
$\fc{P}\subseteq\dom{\Phi}$.
\end{lemma}
\begin{proof}
Induction on the derivation of $\iptyped{\Gamma}{\Sigma}{\Phi}{P}$.
$\Box$
\end{proof}
\begin{lemma}[Substitution in Expressions]
\label{lemma-substitution-expressions}\mbox{}\\
Assume that $\ityped{\Gamma,\tid{\vec{x}}{\vec{T}}}{\Sigma}{\Phi}{e}{T}$
and let $\vec{v}$ be values such that, for each $i$:
\begin{enumerate}
\item if $T_i=\mktype{Qbit}$ then $v_i$ is a variable or a qubit name
\item if $T_i=\mktype{Qbit}$ and $v_i=y_i$ (a var) then
$y_i\not\in\Gamma,\tid{\vec{x}}{\vec{T}}$
\item if $T_i=\mktype{Qbit}$ and $v_i=q_i$ (a qubit name) then
$q_i\not\in\Sigma$
\item if $T_i\not=\mktype{Qbit}$ then $\ityped{\Gamma}{\Sigma}{\Phi}{v_i}{T_i}$.
\end{enumerate}
Let $\vec{y}$ be the variables of type $\mktype{Qbit}$ from $\vec{v}$
(corresponding to condition (2)) and assume that they are distinct;
let $\vec{q}$ be the
qubit names from $\vec{v}$ (corresponding to condition (3)) and assume
that they are distinct.
Then
$\ityped{\Gamma,\tid{\vec{y}}{\vec{\mktype{Qbit}}}}{\Sigma,\vec{q}}{\Phi}{\subst{e}{\vec{v}}{\vec{x}}}{T}$.
\end{lemma}
\begin{proof}
Induction on the deriv.\ of
$\ityped{\Gamma,\tid{\vec{x}}{\vec{T}}}{\Sigma}{\Phi}{e}{T}$.
$\Box$
\end{proof}
\begin{lemma}[Substitution in Processes]
\label{lemma-substitution-processes}\mbox{}\\
Assume that $\iptyped{\Gamma,\tid{\vec{x}}{\vec{T}}}{\Sigma}{\Phi}{P}$
and let $\vec{v}$ be values such that, for each $i$:
\begin{enumerate}
\item if $T_i=\mktype{Qbit}$ then $v_i$ is a variable or a qubit name
\item if $T_i=\mktype{Qbit}$ and $v_i=y_i$ (a var) then
$y_i\not\in\Gamma,\tid{\vec{x}}{\vec{T}}$
\item if $T_i=\mktype{Qbit}$ and $v_i=q_i$ (a qubit name) then
$q_i\not\in\Sigma$
\item if $T_i\not=\mktype{Qbit}$ then $\ityped{\Gamma}{\Sigma}{\Phi}{v_i}{T_i}$.
\end{enumerate}
Let $\vec{y}$ be the variables of type $\mktype{Qbit}$ from $\vec{v}$
(corresponding to condition (2)) and assume that they are distinct;
let $\vec{q}$ be the
qubit names from $\vec{v}$ (corresponding to condition (3)) and assume
that they are distinct.
Then
$\iptyped{\Gamma,\tid{\vec{y}}{\vec{\mktype{Qbit}}}}{\Sigma,\vec{q}}{\Phi}{\subst{P}{\vec{v}}{\vec{x}}}$.
\end{lemma}
\begin{proof}
By induction on the derivation of
$\iptyped{\Gamma,\tid{\vec{x}}{\vec{T}}}{\Sigma}{\Phi}{P}$. The key
cases are $\mkTrule{Par}$ and $\mkTrule{Out}$.
For $\mkTrule{Par}$ the final step in the typing derivation has the form
\[
\frac{
\iptyped{\Gamma_1}{\Sigma_1}{\Phi}{P}\quad
\iptyped{\Gamma_2}{\Sigma_2}{\Phi}{Q}
}
{
\iptyped{\Gamma,\tid{\vec{x}}{\vec{T}}}{\Sigma}{\Phi}{P\mathbin{\mid} Q}
}
\]
where $\Gamma_1+\Gamma_2=\Gamma,\tid{\vec{x}}{\vec{T}}$ and
$\Sigma_1+\Sigma_2=\Sigma$. Each variable of type $\mktype{Qbit}$ in
$\Gamma,\tid{\vec{x}}{\vec{T}}$ is in exactly one of $\Gamma_1$ and
$\Gamma_2$. Because the free variables of $P$ and $Q$ are contained in
$\Gamma_1$ and $\Gamma_2$ respectively, substitution into $P\mathbin{\mid}
Q$ splits into disjoint substitutions into $P$ and $Q$. The induction
hypothesis gives typings for $\subst{P}{\vec{v}}{\vec{x}}$ and
$\subst{Q}{\vec{v}}{\vec{x}}$, which combine (by $\mkTrule{Par}$) to give
$\iptyped{\Gamma,\tid{\vec{y}}{\vec{\mktype{Qbit}}}}{\Sigma,\vec{q}}{\Phi}{\subst{P\mathbin{\mid}
Q}{\vec{v}}{\vec{x}}}$.
$\Box$
\end{proof}
\begin{lemma}[Struct.\ Cong.\ Preserves Typing]
\label{lemma-structural-congruence-typing}\mbox{}\\
If $\iptyped{\Gamma}{\Sigma}{\Phi}{P}$ and $P\equiv Q$ then
$\iptyped{\Gamma}{\Sigma}{\Phi}{Q}$.
\end{lemma}
\begin{proof}
Induction on the derivation of $P\equiv Q$.
$\Box$
\end{proof}
\begin{lemma}[External/Internal Type System]\mbox{}\\
$\typed{\Gamma}{e}{T} \Rightarrow
\ityped{\Gamma}{\emptyset}{\emptyset}{e}{T}$ and $\ptyped{\Gamma}{P}
\Rightarrow \iptyped{\Gamma}{\emptyset}{\emptyset}{P}$.
\end{lemma}
\begin{proof}
Induction on the derivations.
$\Box$
\end{proof}
\begin{theorem}[Type Preservation for $\cred{}{}$]
\label{theorem-type-preservation}\mbox{}\\
If $\iptyped{\Gamma}{\Sigma}{\Phi}{P}$ and
$\cred{\cnfig{\sigma}{\phi}{P}}{\Prob{i} \prob{p_i}{\cnfig{\sigma_i}{\phi_i}{P_i}}}$
and $\Sigma=\dom{\sigma}$ and $\phi=\dom{\Phi}$ then $\forall
i.(\sigma_i=\sigma)$ and $\forall i.(\phi_i=\phi)$ and $\forall i.(\iptyped{\Gamma}{\Sigma}{\Phi}{P_i})$.
\end{theorem}
\begin{proof}
By induction on the derivation of
$\cred{\cnfig{\sigma}{\phi}{P}}{\Prob{i}
\prob{p_i}{\cnfig{\sigma_i}{\phi_i}{P_i}}}$, in each case examining
the final steps in the derivation of $\iptyped{\Gamma}{\Sigma}{\Phi}{P}$.
$\Box$
\end{proof}
\begin{theorem}[Unique Ownership of Qubits]
\label{theorem-unique-ownership-qubits}\mbox{}\\
If $\iptyped{\Gamma}{\Sigma}{\Phi}{P\mathbin{\mid} Q}$ then
$\fq{P}\cap\fq{Q}=\emptyset$.
\end{theorem}
\begin{proof}
The final step in the derivation of
$\iptyped{\Gamma}{\Sigma}{\Phi}{P\mathbin{\mid} Q}$ has the form
\[
\frac{
\iptyped{\Gamma_1}{\Sigma_1}{\Phi}{P}
\quad
\iptyped{\Gamma_2}{\Sigma_2}{\Phi}{Q}
}
{
\iptyped{\Gamma}{\Sigma}{\Phi}{P\mathbin{\mid} Q}
}
\]
where $\Gamma = \Gamma_1+\Gamma_2$ and $\Sigma =
\Sigma_1+\Sigma_2$. By Lemma~\ref{lemma-free-names-processes},
$\fq{P}\subseteq\Sigma_1$ and $\fq{Q}\subseteq\Sigma_2$. The implicit
hypothesis of the typing rule $\mkTrule{Par}$ is that $\Sigma_1+\Sigma_2$ is
defined, meaning that $\Sigma_1\cap\Sigma_2=\emptyset$. Hence
$\fq{P}\cap\fq{Q}=\emptyset$.
$\Box$
\end{proof}
\section{Future Work}
\label{sec-future}
Our aim is to develop techniques for formal verification of systems
modelled in CQP. In particular we are working towards an analysis of
the BB84 quantum key distribution protocol, including both the core
quantum steps and the classical authentication phase. Initially we
will use model-checking, in both standard (non-deterministic) and
probabilistic forms. Standard model-checking is appropriate for
absolute properties (for example, the quantum teleportation protocol
(Section~\ref{sec-teleportation}) claims that the final state of $y$
is always the same as the initial state of $z$). In general, however,
probabilistic model-checking is needed. For
example, the bit-commitment protocol (Section~\ref{sec-bitcommitment})
guarantees that, with some high probability which is
dependent on the number of bits used by Alice, Bob's verification step
is successful. We have obtained preliminary results
\cite{NagarajanR:forvqp,PapanikolaouN:msc} with the CWB-NC
\cite{CleavelandR:cwbnc} and PRISM \cite{KwiatkowskaMZ:pripsm}
systems, working directly with the modelling language of each
tool. The next step is to develop automated translations of CQP into
these lower-level modelling languages; note that our operational
semantics matches the semantic model used by PRISM.
Another major area for future work is to develop a theory of
equivalence for CQP processes, as a foundation for compositional
techniques for reasoning about the behaviour of systems.
We can also consider extending the language. It should be
straightforward to add purely classical features such as functions and
assignable variables. Extensions which combine quantum data with
enhanced classical control structures require more care. Valiron's
\cite{ValironB:quat} recent formulation of a typed quantum lambda
calculus seems very compatible with our approach, and it should fit
into CQP's expression language fairly easily.
\section{Conclusions}
\label{sec-conclusions}
We have defined a language, CQP, for modelling systems which combine
quantum and classical communication and computation. CQP has a formal
operational semantics, and a static type system which guarantees that
transmitting a qubit on a communication channel corresponds to a
physical transfer of ownership.
The syntax and semantics of CQP are based on a combination of the
pi-calculus and an expression language which includes measurement and
transformation of quantum state. The style of our definitions makes it
easy to enrich the language.
Our research programme is to use CQP as the basis for
analysis and verification of quantum protocols, and we have outlined
some possibilities for the use of both standard and probabilistic
model-checking.
\input{main.bbl}
\end{document} |
\begin{document}
\maketitle
{\it Dedicated to Franco Giannessi for his 85th birthday}
\begin{abstract}
We consider shape optimization problems involving functionals depending on perimeter, torsional rigidity and Lebesgue measure. The scaling free cost functionals are of the form $P(\Omega)T^q(\Omega)|\Omega|^{-2q-1/2}$ and the class of admissible domains consists of two-dimensional open sets $\Omega$ satisfying the topological constraints of having a prescribed number $k$ of bounded connected components of the complementary set. A relaxed procedure is needed to have a well-posed problem and we show that when $q<1/2$ an optimal relaxed domain exists. When $q>1/2$ the problem is ill-posed and for $q=1/2$ the explicit value of the infimum is provided in the cases $k=0$ and $k=1$.
\end{abstract}
\textbf{Keywords:} torsional rigidity, shape optimization, perimeter, planar sets, topological genus.
\textbf{2010 Mathematics Subject Classification:} 49Q10, 49J45, 49R05, 35P15, 35J25.
\section{Introduction\label{sintro}}
In the present paper we aim to study some particular shape optimization problems in classes of planar domains having a prescribed topology. The quantities we are going to consider for a general bounded open set $\Omega $ are the distributional perimeter $P(\Omega)$ and the torsional rigidity $T(\Omega)$. More precisely, we deal with a scaling free functional $F_q$ which is expressed as the product of the perimeter, and of a suitable powers of the torsional rigidity and of the Lebesgue measure of $\Omega$, depending on a positive parameter $q$.
The restriction to the planar case is essential and is not made here for the sake of simplicity; indeed, in higher dimension stronger topological constraints have to be imposed to make the problems well posed.
In a previous paper \cite{BBP20} we treated the problem above in every space dimension and, after discussing it for general open sets, we focused on the class of convex open sets.
In the following we consider the optimization problems for $F_q$ in the classes $\mathcal{A}_k$ of planar domains having at most $k$ ``holes".
While the maximization problems are always ill posed, even in the class of smooth open sets in $\mathcal{A}_k$, it turns out that the minimizing problems are interesting if $q\le 1/2$ and some regularity constraints are imposed to the sets $\Omega\in\mathcal{A}_k$.
In this case, we provide an explicit lower bound for $F_q$ in the class of Lipschitz sets in $\mathcal{A}_k$, which turns out to be sharp when $k=0,1$ and $q=1/2$ and coincides with the infimum of $F_q$ in the class of convex sets, as pointed out by Polya in \cite{polya60}.
When $q<1/2$ we study the existence of minimizers for $F_q$ and our approach is the one of direct methods of the calculus of variations which consists in the following steps:
\begin{itemize}
\item[-]defining the functional $F_q$ only for Lipschitz domains of the class $\mathcal{A}_k$;
\item[-]relaxing the functional $F_q$ on the whole class $\mathcal{A}_k$, with respect to a suitable topology;
\item[-]showing that the relaxed functional admits an optimal domain in $\mathcal{A}_k$;
\item[-]proving that such a domain is Lipschitz.
\end{itemize}
The relaxation procedure above is necessary to avoid trivial counterexamples due to the fact that the perimeter is Lebesgue measure sensitive, while the torsional rigidity is capacity sensitive.
As in most of the free boundary problems, the last regularity step presents strong difficulties and, even if the regularity of optimal domains could be expected, we are unable to give a full proof of this fact. It would be very interesting to establish if an optimal domain fulfills some kind of regularity, or at least if its perimeter coincides with the Hausdorff measure of the boundary, which amounts to exclude the presence of internal fractures.
This paper is organized as follows.
In Section \ref{spre}, after recalling the definitions of perimeter and torsional rigidity, we summarize the main results of this paper. In Section \ref{sapp} we describe the key tools necessary to apply the so-called method of \textit{interior parallels}, introduced by Makai in \cite{Ma},\cite{Ma59} and by Polya in \cite{polya60}, to our setting. Section \ref{shau} contains a review of some basic facts concerning the complementary Hausdorff convergence, with respect to which we perform the relaxation procedure. Although Sections \ref{sapp} and \ref{shau} may be seen as preliminary, we believe they contain some interesting results that, as far as we know, are new in literature. Finally, in Section \ref{sexis} we discuss the optimization problem: we extend a well known inequality due to Polya (Theorem \ref{theo.Polya} and Remark \ref{rem.polya}), and we prove the main results (Corollary \ref{coro.polya} and Theorem \ref{theo.exis}).
\section{Preliminaries}\label{spre}
The shape functionals we consider in this paper are of the form
\begin{equation}\label{Fq}F_q(\Omega)=\frac{P(\Omega)T^q(\Omega)}{|\Omega|^{2q+1/2}}
\end{equation}
where $q>0$, $\Omega\subset\mathbb{R}^2$ is a general bounded open set and, $|\Omega|$ denotes its Lebesgue measure.
For the reader's convenience, in the following we report the definitions and the basic properties of the perimeter and of the torsional rigidity. According to the De Giorgi formula, the perimeter is given by
$$P(\Omega)=\sup\left\{\int_\Omega\dive\phi\,dx\ :\ \phi\in C^1_c(\mathbb{R}^2;\mathbb{R}^2),\ \|\phi\|_{L^\infty(\mathbb{R}^2)}\le1\right\},$$
and satisfies:
\begin{itemize}
\item[-]the {\it scaling property}
$$P(t\Omega)=tP(\Omega)\qquad\text{for every }t>0;$$
\item[-] the lower semicontinuity with respect to the $L^1$-convergence, that is the convergence of characteristic functions.
\item[-]the {\it isoperimetric inequality}
\begin{equation}\label{isoper}
\frac{P(\Omega)}{|\Omega|^{1/2}}\ge\frac{P(B)}{|B|^{1/2}}
\end{equation}
where $B$ is any disc in $\mathbb{R}^2$. In addition the inequality above becomes an equality if and only if $\Omega$ is a disc (up to sets of Lebesgue measure zero).
\end{itemize}
The torsional rigidity $T(\Omega)$ is defined as
$$T(\Omega)=\int_\Omega u\,dx$$
where $u$ is the unique solution of the PDE
\begin{equation}\label{pdetorsion}\begin{cases}
-\Delta u=1&\text{in }\Omega,\\
u\in H^1_0(\Omega).
\end{cases}
\end{equation}
By means of an integration by parts we can equivalently express the torsional rigidity as
\begin{equation} \label{vartor}
T(\Omega)=\max\Big\{\Big[\int_\Omega u\,dx\Big]^2\Big[\int_\Omega|\nabla u|^2\,dx\Big]^{-1}\ :\ u\in H^1_0(\Omega)\setminus\{0\}\Big\}.
\end{equation}
The main properties we use for the torsional rigidity are:
\begin{itemize}
\item[-]the monotonicity with respect to the set inclusion
$$\Omega_1\subset\Omega_2\Longrightarrow T(\Omega_1)\le T(\Omega_2);$$
\item[-]the additivity on disjoint families of open sets
$$T\Big(\bigcup_n\Omega_n\Big)=\sum_n T(\Omega_n)\qquad\text{whenever $\Omega_n$ are pairwise disjoint;}$$
\item[-]the scaling property
$$T(t\Omega)=t^4T(\Omega),\qquad\text{for every }t>0;$$
\item[-]the relation between torsional rigidity and Lebesgue measure (known as {\it Saint-Venant inequality})
\begin{equation}\label{stven}
\frac{T(\Omega)}{|\Omega|^2}\le\frac{T(B)}{|B|^2}.
\end{equation}
In addition, the inequality above becomes an equality if and only if $\Omega$ is a disc (up to sets of capacity zero).
\end{itemize}
If we denote by $B_1$ the unitary disc of $\mathbb{R}^2$, then the solution of \eqref{pdetorsion}, with $\Omega=B_1$, is
$$u(x)=\frac{1-|x|^2}{4}$$
which provides
$$T(B_1)=\frac{\pi}{8}.$$
Thanks to the scaling properties of the perimeter and of the torsional rigidity, the functional $F_q$ defined by \eqref{Fq} is {\it scaling free} and optimizing it in a suitable class $\mathcal{A}$ is equivalent to optimizing the product $P(\Omega)T^q(\Omega)$ over $\mathcal{A}$ with the additional measure constraint $|\Omega|=m$, for a fixed $m>0$.
In a previous paper \cite{BBP20} we considered the minimum and the maximum problem for $F_q$ (in every space dimension) in the classes
\[\begin{split}
&\mathcal{A}_{all}:=\big\{\Omega\subset\mathbb{R}^d\ :\ \Omega\ne\emptyset\big\}\\
&\mathcal{A}_{convex}:=\big\{\Omega\subset\mathbb{R}^d\ :\ \Omega\ne\emptyset,\ \Omega\text{ convex}\big\}.
\end{split}\]
We summarize here below the results available in the case of dimension 2:
\begin{itemize}
\item[-]for every $q>0$
$$\inf\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_{all},\ \Omega\text{ smooth}\big\}=0;$$
\item[-]for every $q>0$
$$\sup\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_{all},\ \Omega\text{ smooth}\big\}=+\infty;$$
\item[-]for every $q>1/2$
$$\begin{cases}
\inf\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_{convex}\big\}=0\\
\max\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_{convex}\big\}\quad\text{is attained};
\end{cases}$$
\item[-]for every $q<1/2$
$$\begin{cases}
\sup\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_{convex}\big\}=+\infty\\
\min\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_{convex}\big\}\quad\text{is attained};\\
\end{cases}$$
\item[-]for $q=1/2$
$$\begin{cases}
\inf\big\{F_{1/2}(\Omega)\ :\ \Omega\in\mathcal{A}_{convex}\big\}=(1/3)^{1/2}\\
\sup\big\{F_{1/2}(\Omega)\ :\ \Omega\in\mathcal{A}_{convex}\big\}=(2/3)^{1/2},
\end{cases}$$
asymptotically attained, respectively, when $\Omega$ is a long thin rectangle and when $\Omega$ is a long thin triangle.
\end{itemize}
Here we discuss the optimization problems for $F_q$ on the classes of planar domains
$$\mathcal{A}_k:=\big\{\Omega\subset\mathbb{R}^2\ :\ \Omega\ne\emptyset,\ \Omega\text{ bounded, }\#\Omega^c\le k\big\},$$
where, for every set $E$, we denote by $\#E$ the number of bounded connected components of $E$ and $\Omega^c=\mathbb{R}^2\setminus\Omega$. In particular $\mathcal{A}_0$ denotes the class of simply connected domains (not necessarily connected).
From what seen above the only interesting cases to consider are:
$$\begin{cases}
\text{the maximum problem for $F_q$ on $\mathcal{A}_k$ when $q\ge1/2$;}\\
\text{the minimum problem for $F_q$ on $\mathcal{A}_k$ when $q\le1/2$.}
\end{cases}$$
We notice that the maximum problem is not well posed, since for every $q>0$ and every $k\ge0$
$$\sup\big\{F_q(\Omega)\ :\ \Omega\text{ smooth},\ \Omega\in\mathcal{A}_k\big\}=+\infty.$$
Indeed, it is enough to take as $\Omega_n$ a smooth perturbation of the unit disc $B_1$ such that
$$B_{1/2}\subset\Omega_n\subset B_2\qquad\text{and}\qquad P(\Omega_n)\to+\infty.$$
All the domains $\Omega_n$ are simply connected, so belong to $\mathcal{A}_k$ for every $k\ge0$, and
$$|\Omega_n|\le|B_2|,\qquad T(\Omega_n)\ge T(B_{1/2}),$$
where we used the monotonicity of the torsional rigidity. Therefore
$$F_q(\Omega_n)\ge\frac{P(\Omega_n)T^q(B_{1/2})}{|B_2|^{2q+1/2}}\to+\infty.$$
Moreover $$\inf\big\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_k\big\}=0,$$ as we can easily see by taking as $\Omega_n$ the unit disk of $\mathbb{R}^2$ where we remove the $n$ segments (in polar coordinates $r,\theta$)
$$S_i=\big\{\theta=2\pi i/n,\ r\in[1/n,1]\big\}\qquad i=1,\dots,n.$$
We have that all the $\Omega_n$ are simply connected, and
$$|\Omega_n|=\pi,\qquad P(\Omega_n)=2\pi,\qquad T(\Omega_n)\to0,$$
providing then $F_q(\Omega_n)\to0$.
Therefore, the problems we study in the sequel are
$$\inf\big\{F_q(\Omega)\ :\ \Omega\in \mathcal{A}_k,\text{ $\Omega$ Lipschitz}\},$$
when $q\le1/2$ and $k\in\mathbb{N}$. Denoting by $m_{q,k}$ the infimum above we summarize here below our main results.
\begin{itemize}
\item[-] For every $q\le1/2$ the values $m_{q,k}$ are decreasing with respect to $k$ and
$$\lim_{k\to\infty}m_{q,k}=0.$$
\item[-]When $k=0,1$ it holds
$$m_{1/2,0}=m_{1/2,1}=3^{-1/2}=\inf\big\{F_{1/2}(\Omega)\ :\ \Omega\text{ convex}\big\};$$
in particular, for $q=1/2$ there is no gap for $\inf F_{1/2}$ between the classes $\mathcal{A}_{convex}$, $\mathcal{A}_0$, $\mathcal{A}_1$, and the infimum is asymptotically reached by a sequence of long and thin rectangles.
\item[-]For every $q\le1/2$ and $k\in\mathbb{N}$, we have
$$m_{q,k}\ge\begin{cases}(8\pi)^{1/2-q}3^{-1/2}&\text{if }k=0,1, \\
(8\pi)^{1/2-q}(3^{1/2}k)^{-1}&\text{if }k>1.
\end{cases}$$
\item[-]For $q<1/2$, we define a relaxed functional $\mathcal{F}_{q,k}$, which coincides with $F_q$ in the class of the sets $\Omega\in\mathcal{A}_k$ satisfying $P(\Omega)=\mathcal{H}^1(\partial\Omega)$, being $\mathcal{H}^1$ the $1$-dimensional Hausdorff measure.
We also prove that $\mathcal{F}_{q,k}$ admits an optimal domain $\Omega^{\star}\in\mathcal{A}_k$ with $\mathcal{H}^{1}(\partial\Omega^\star)<\infty$.
\end{itemize}
\section{ Approximation by interior parallel sets} \label{sapp}
For a given bounded nonempty open set $\Omega$ we denote by $\rho(\Omega)$ its \textit{inradius}, defined as
$$\rho(\Omega):=\sup\big\{d(x,\partial\Omega)\ :\ x\in\Omega\big\},$$
where, as usual,
$d(x,E):=\inf\big\{d(x,y)\ :\ y\in E\big\}$.
For every $t\ge 0$, we denote by $\Omega(t)$ the \textit{interior parallel set} at distance $t$ from $\partial\Omega$, i.e.
$$\Omega(t):=\big\{x\in\Omega\ :\ d(x,\partial\Omega)>t\big\},$$
and by $A(t):=|\Omega(t)|$. Moreover we denote by $L(t)$ the length of the \textit{interior parallel}, that is the set of the points in $\Omega$ whose distance from $\partial\Omega$ is equal to $t$.
More precisely we set
$$L(t):=\mathcal{H}^1 (\{x\in\Omega\ :\ d(x,\partial\Omega)=t \}).$$
Notice that $\partial \Omega(t)\subseteq \{x\in\Omega\ :\ d(x,\partial\Omega)=t \}$.
Using coarea formula (see \cite{EvGa} Theorem 3.13) we can write the following identity:
\begin{equation}\label{eq.Evans}
A(t)=\int_t^{\rho(\Omega)}L(s)\,ds\qquad\forall t\in(0,\rho(\Omega)).
\end{equation} As a consequence it is easy to verify that for a.e. $t\in(0,\rho(\Omega))$ there exists the derivative $A'(t)$ and it coincides with $-L(t)$. The interior parallel sets $\Omega(t)$ belong to $\mathcal{A}_k$ as soon as $\Omega\in\mathcal{A}_k$, as next elementary argument shows.
\begin{lemm}\label{lemm.innerA_k} Let $\Omega\in\mathcal{A}_k$. Then $\Omega(t)\in \mathcal{A}_k$ for every $t\in [0,\rho(\Omega))$.
\end{lemm}
\begin{proof} Let $\alpha:=\#\Omega^c$ ($\le k$), and $C^1,C^2,\cdots C^\alpha$ be the (closed) bounded connected components of $\Omega^c$ and $C^0$ the unbounded one. Define
$$C^i(t):=\big\{x\in\mathbb{R}^2\ : \ d(x,C^i)\le t\big\}.$$
Since $C^i$ is connected, then $C^i(t)$ is connected and the set $\bigcup_{i=0}^\alpha C^i(t)$ has at most $\alpha+1$ connected components. Since we have
$\Omega^c(t)=\bigcup_{i=0}^\alpha C^i(t)$,
the lemma is proved.
\end{proof}
In the planar case, even without any regularity assumptions on $\partial\Omega$, the sets $\Omega(t)$ are a slightly smoothed version of $\Omega$. In particular the following result (see \cite{Fu85}), that we limit to report in the two dimensional case, proves that $\Omega(t)$ has a Lipschitz boundary for a.e. $t\in(0,\rho(\Omega))$.
\begin{theo}[Fu]\label{theo.Fu}
Let $K\subseteq \mathbb{R}^2$ be a compact set. There exists a compact set $C=C(K)\subseteq [0, 3^{-1/ 2} \operatorname{diam}(K)]$ such that $|C|=0$ and if $t\notin C$ then the boundary of $\{x\in\mathbb{R}^2\ :\ d(x,K)>t\}$ is a Lipschitz manifold.
\end{theo}
We recall now some general facts of geometric measure theory. Let $E\subset \mathbb{R}^2$, we denote by $E^{(t)}$ the set of the points where the density of $E$ is $t\in [0,1]$, that is
$$E^{(t)}:=\{ x\in\mathbb{R}^2: \lim_{r\to 0^+} (\pi r^2)^{-1}|E\cap B_r(x)|=t\}.$$
It is well known (see \cite{AFP} Theorem 3.61) that if $E$ is a set of finite perimeter, then $P(E)=\mathcal{H}^1(E^{(1/2)})$ and $E$ has density either $0$ or $1/2$ or $1$ at $\mathcal{H}^1$-a.e $x\in\mathbb{R}^2$. In particular it holds
\begin{equation}\label{eq.decomp}\mathcal{H}^1(\partial E)= \mathcal{H}^1(\partial E\cap E^{(0)})+ \mathcal{H}^1(\partial E\cap E^{(1)})+ P(E),
\end{equation}
which implies
\begin{equation} \label{eq.decomp2}
P(E)+2\mathcal{H}^1(\partial E\cap E^{(1)})\le 2 \mathcal{H}^1(\partial E)-P(E).
\end{equation}
The Minkowski content and the outer Minkowski content of $E$ are, respectively, defined as
$$\mathcal{M}(E):=\lim_{t\to 0}\frac {|\{x\in\mathbb{R}^2\ :\ d(x,E)\le t\}|}{2t},$$
and
$$\mathcal{SM}(E):=\lim_{t\to 0}\frac {|\{x\in\mathbb{R}^2\ :\ d(x,E)\le t\}\setminus E|}{t},$$
whenever the limits above exist.
We say that a compact set $E\subset\mathbb{R}^2$ is $1$-rectifiable if there exists a compact set $K\subset\mathbb{R}$ and a Lipschitz map $f:\mathbb{R}\to \mathbb{R}^2$ such that $f(K)=E$. Any compact connected set of $\mathbb{R}^2$, namely a \textit{continuum}, with finite $\mathcal{H}^1$-measure is $1$-rectifiable (see, for instance, Theorem 4.4 in \cite{AO}). Finally, if $E$ is $1$-rectifiable then
\begin{equation}\label{eq.Amb}
\mathcal{M}(E)=\mathcal{H}^1(E)
\end{equation} (see Theorem $2.106$ in \cite{AFP})
and by Proposition 4.1 of \cite{V}, if $E$ is a Borel set and $\partial E$ is $1$-rectifiable it holds
\begin{equation}\label{eq.Villa}
\mathcal{SM}(E)=P(E)+2\mathcal{H}^1(\partial E\cap E^{(0)}).
\end{equation}
Next two results are easy consequence of \eqref{eq.Amb} and \eqref{eq.Villa}.
\begin{theo}\label{theo.min}
Let $\Omega$ be a bounded open set with $\mathcal{H}^1(\partial \Omega)<\infty$ and $\#\partial\Omega<+\infty$. Then $
\mathcal{M}(\partial\Omega)=\mathcal{H}^{1}(\partial \Omega)$ and $ \mathcal{SM}(\Omega)=P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(0)}).$
\end{theo}
\begin{proof}
Since $\mathcal{H}^1(\partial\Omega)<\infty$, each connected component of $\partial\Omega$ is $1$-rectifiable. Being the connected components pairwise disjoint and compact, we easily prove that their finite union is $1$-rectifiable. Then, applying \eqref{eq.Amb} and \eqref{eq.Villa}, we get the thesis.
\end{proof}
\begin{coro}\label{coro.Mink}
Let $\Omega$ be an open set such that $\mathcal{H}^1(\partial\Omega)<\infty$ and $\#\partial\Omega<+\infty$. Then there exists
$$\lim_{r\to 0^+}\frac 1 r\int_{0}^r L(t)dt= P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(1)}).$$
\end{coro}
\begin{proof}
We denote by $L^c(t)$ the following quantity
$$L^c(t):=\mathcal{H}^1(\{x\in\Omega^c\ :\ d(x,\partial\Omega)=t\}).$$
By applying coarea formula and Theorem \ref{theo.min}, it holds
\begin{equation}\label{eq.g1}
\lim_{r\to 0^+} \frac{1}{r}\int_{0}^{r}L^c(t)dt=\mathcal{SM}(\Omega)=P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(0)}).
\end{equation}
and
\begin{equation}\label{eq.min}
\lim_{r\to 0^+}\frac{1}{r}\int_{0}^{r}\left[L(t)+L^c(t)\right]dt=2\mathcal{M}(\partial\Omega)=2\mathcal{H}^1(\partial \Omega).
\end{equation}
Combining \eqref{eq.decomp}, \eqref{eq.g1} and \eqref{eq.min} we get
$$
\lim_{r\to 0^+}\frac{1}{r}\int_{0}^{r}L(t) dt=
\lim_{r\to 0^+}\left(\frac{1}{r}\int_{0}^{r}L(t) dt+\frac{1}{r}\int_{0}^{r}L^c(t) dt-\frac{1}{r}\int_{0}^{r}L^c(t) dt\right)$$
$$= 2\mathcal{H}^1(\partial \Omega)-P(\Omega)-2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(0)})= P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(1)}) $$
and the thesis is achieved.
\end{proof}
Most of the results we present rely on a geometrical theorem proved by Sz. Nagy in \cite{Nagy59}, concerning the behavior of the function $t\to A(t)=|\Omega(t)|$ for a given set $\Omega\in\mathcal{A}_k$.
\begin{theo}[Sz. Nagy]\label{theo.Na}
Let $\Omega\in\mathcal{A}_k$ and let $\alpha:=\#\Omega^c$. Then the function
$$t\mapsto-A(t)-(\alpha-1)\pi t^{2}$$
is concave in $[0,\rho(\Omega))$.
\end{theo}
As a consequence of Corollary \ref{coro.Mink} and Theorem \ref{theo.Na} we have the following result.
\begin{theo}\label{theo.Nareg}
Let $\Omega\in\mathcal{A}_k$ with $\mathcal{H}^1(\partial \Omega)<\infty$ and $\#\Omega<+\infty$. Then, for a.e. $t\in(0,\rho(\Omega))$, it holds:
\begin{align}\label{eq.boundL}
&L(t)\le P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(1)}) +2\pi(k-1)t;\\
\label{eq.boundA}
&A(t)\le (P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(1)}))(\rho(\Omega)-t)+\pi(k-1)(\rho(\Omega)-t)^2.
\end{align}
In particular $A\in W^{1,\infty}(0,\rho(\Omega))$.
\end{theo}
\begin{proof}
We denote by $g(t)$ the right derivative of the function $t\mapsto -A(t)-(\alpha-1)\pi t^2$ where $\alpha:=\#\Omega^c$ $(\le k)$. By Theorem \ref{theo.Na}, $g$ is a decreasing function in $(0,\rho(\Omega))$ and an easy computation through \eqref{eq.Evans} shows that
\begin{equation}\label{Lg}
g(t)=L(t)-2\pi(\alpha-1)t\qquad\hbox {for a.e. }t\in(0,\rho(\Omega)).
\end{equation}
Thus,
$$\lim_{r\to 0^+}\frac 1 r\int_{0}^r L(t)dt=\lim_{r\to 0^+}\frac 1 r\int_{0}^rg(t)dt=\sup_{(0,\rho(\Omega))}g(t).$$
Since $\Omega\in\mathcal{A}_k$ and $\#\Omega<\infty$ we have also $\#\partial\Omega<\infty$. Hence we can apply Corollary \ref{coro.Mink} to get
\begin{equation}\label{eq.g}
P(\Omega)+2 \mathcal{H}^1(\partial \Omega\cap \Omega^{(1)})=\sup_{(0,\rho(\Omega))}g(t).
\end{equation}
By using \eqref{Lg} and \eqref{eq.g}, inequality \eqref{eq.boundL} easily follows. Finally, by applying \eqref{eq.Evans}, we get both $A\in W^{1,\infty}(0,\rho(\Omega))$ and formula \eqref{eq.boundA}.
\end{proof}
The following lemma can be easily proved by lower semicontinuity property of the perimeter.
\begin{lemm}\label{lem.top1}
Let $\Omega\subset\mathbb{R}^2$ be an open set. Let $(\Omega^i)$ be its connected components and $\Omega_n:=\bigcup_{i=1}^n\Omega^i$. Then we have:
\begin{enumerate}
\item[(i)]$\partial\Omega_n=\bigcup_{i=1}^n\partial\Omega^i\subseteq\partial\Omega$ and $\mathcal{H}^1(\partial\Omega_n)\le \mathcal{H}^1(\partial\Omega)$;
\item [(ii)]$\displaystyle P(\Omega)\le\liminf_{n\to\infty}P(\Omega_n)\le\limsup_{n\to\infty}P(\Omega_n)\le\limsup_{n\to\infty}\mathcal{H}^1(\partial\Omega_n)\le \mathcal{H}^1(\partial\Omega)$.
\end{enumerate}
\end{lemm}
We are now in a position to prove the main results of this section. In Theorem 1.1 of \cite{Sc15} it is shown that, given any set $\Omega$ of finite perimeter satisfying $\mathcal{H}^1(\partial\Omega)=P(\Omega)$, it is possible to approximate $P(\Omega)$ with the perimeters of smooth open sets compactly contained in $\Omega$. Here we show that, if we assume the further hypothesis $\Omega\in\mathcal{A}_k$, then we can construct an approximation sequence made up of Lipschitz sets in $\mathcal{A}_k$.
\begin{theo}\label{theo.approxim}
Let $\Omega\in\mathcal{A}_k$ be a set of finite perimeter. Then there exists an increasing sequence $(A_n)\subset \mathcal{A}_k$ such that:
\begin{enumerate}
\item [(i)] $\overline A_n\subset \Omega$;
\item[(ii)] $\bigcup_{n} A_n=\Omega$;
\item[(iii)] $A_n$ is a Lipschitz set;
\item[(iv)] $\displaystyle P(\Omega)\le\liminf_{n\to\infty}P(A_n)\le\limsup_{n\to\infty}P(A_n)\le2\mathcal{H}^1(\partial\Omega)-P(\Omega)$.
\end{enumerate}
In addition, if $\# \Omega<\infty$, then
$$\lim_{n\to\infty}P(A_n)=P(\Omega)+2 \mathcal{H}^1(\partial\Omega\cap\Omega^{(1)}).$$
\end{theo}
\begin{proof}
Let $\Omega_n$ be defined as in Lemma \ref{lem.top1}. Clearly $\Omega_n\in\mathcal{A}_k$.
Since $\Omega_n(t)$ converges to $\Omega_n$ in $L^1$ when $t\to0^+$, it follows that, for every $n$,
$$\liminf_{t\to 0^+} P(\Omega_n(t))\ge P(\Omega_n).$$
Then there exists $0<\delta_n<1/n\wedge \rho(\Omega_n)$ such that
\begin{equation}\label{primacond}P(\Omega_n(t))\ge P(\Omega_n)-\frac1n\qquad\forall t<\delta_n.\end{equation}
Since $\#\Omega_n\le n$, by applying Theorem \ref{theo.Fu}, Lemma \ref{lemm.innerA_k} and Theorem \ref{theo.Nareg} to the set $\Omega_n$, we can choose a decreasing sequence $(t_n)$ with $0<t_n< \delta_n$ such that the set $A_n:=\Omega_n(t_n)$ is in $\mathcal{A}_k$, has Lipschitz boundary, and
\begin{equation}\label{secondacond}
\mathcal{H}^1(\{x\in\Omega_n\ :\ d(x,\partial\Omega_n)=t_n \})\le P(\Omega_n)+2\mathcal{H}^1(\partial\Omega_n\cap\Omega_n^{(1)})+2\pi(k-1)t_n.
\end{equation}
It is easy to prove that the sequence $(A_n)$ is increasing and satisfies (i) and (ii).
By putting together \eqref{primacond} and \eqref{secondacond}, we get
\begin{equation*}
P(\Omega_n)-\frac1n\le P(A_n)\le P(\Omega_n)+2\mathcal{H}^1(\partial\Omega_n\cap\Omega_n^{(1)})+2\pi(k-1)t_n.
\end{equation*}
By Lemma \ref{lem.top1}, taking also into account \eqref{eq.decomp2}, the previous inequality implies
$$P(\Omega)\le\liminf_n P(A_n)\le\limsup_n P(A_n)\le2\mathcal{H}^1(\partial\Omega)-P(\Omega)$$
which proves $(iv)$.
To conclude consider the case $\#\Omega<+\infty$. We can choose $n$ big enough such that $\Omega_n=\Omega$, $A_n=\Omega(t_n)$ and $\alpha:=\# \Omega^c=\# A_n^c$. For simplicity we denote $\rho_n:=\rho(A_n)$ and $\rho:=\rho(\Omega)$. By applying equality \eqref{eq.g} to the Lipschitz set $A_n$, we get
\begin{equation}\label{terza}
P(A_n)=\sup_{ (0,\rho_n)}g_n(t)
\end{equation}
where $g_n$ is the right derivative of the function $t\mapsto -|A_n(t)|-(\alpha-1)\pi t^2$.
Now, exploiting the equality $A_n(t)=\Omega(t+t_n)$,
we obtain
$$g_n(t)= g(t+t_n)+2\pi(\alpha-1)t_n$$
for all $0<t<(\rho-t_n)\wedge \rho_n$. Thus, as $t\to 0^+$ and applying \eqref{terza}, we can conclude that, for every $n$, it holds
$$\lim_{t\to 0^+}g(t+t_n)+2\pi(\alpha-1)t_n=\sup_{(0,\rho_n)}g_n(t)=P(A_n).$$
Passing to the limit as $n\to\infty$ in the equality above and taking into account \eqref{eq.g} we achieve the thesis.
\end{proof}
\section{Continuity of volume for co-Hausdorff convergence}\label{shau}
The Hausdorff distance between closed sets $C_1, C_2$ of $\mathbb{R}^2$ is defined by
$$d_H(C_1,C_2):=\sup_{x\in C_1}d(x,C_2)\vee\sup_{x\in C_2}d(x,C_1).$$
Through $d_H$ we can define the so called co-Hausdorff distance $d_{H^c}$ between a pair of bounded open subsets $\Omega_1,\Omega_2$ of $\mathbb{R}^2$
$$d_{H^c}(\Omega_1,\Omega_2):=d_H(\Omega_1^c,\Omega_2^c).$$
We say that a sequence of compact sets $(K_n)$ converges in the sense of Hausdorff to some compact set $K$, if $ (d_H(K_n,K))$ converges to zero. In this case we write $K_n\overset{H}{\to}K$. Similarly we say that a sequence of open sets $(\Omega_n)$ converges in the sense of co-Hausdorff to some open set $\Omega$, if $(d_{H^c}(\Omega_n,\Omega))$ converges to zero, and we write $\Omega_n\overset{H^c}{\to}\Omega$. In the rest of the paper we use some elementary properties of Hausdorff distance and co-Hausdorff distance for which we refer to \cite{bubu05} and \cite{He}, (see, for instance, Proposition 4.6.1 of \cite{bubu05}). In particular we recall that if $(\Omega_n)$ is a sequence of equi-bounded sets in $\mathcal{A}_k$ and $\Omega_n\overset{H^c}{\to}\Omega$, then $\Omega$ still belongs to $\mathcal{A}_k$ (see Remark 2.2.20 of \cite{He}).
The introduction of co-Hausdorff convergence is motivated by Sver\'ak's Theorem (see \cite{sv93}) which ensures the continuity of the torsional rigidity in the class $\mathcal{A}_k$. Actually the result is stronger and gives the continuity with respect to the $\gamma$-convergence (we refer to \cite{bubu05} for its precise definition and the related details).
\begin{theo}[Sver\'ak]\label{theo.Sve}
Let $(\Omega_n)\subset\mathcal{A}_k$ be a sequence of equi-bounded open sets. If $\Omega_n\overset{H^c}{\to}\Omega$, then $\Omega_n\to\Omega$ in the $\gamma$-convergence. In particular $T(\Omega_n)\to T(\Omega)$.
\end{theo}
Combining Sver\'ak theorem and Theorem \ref{theo.approxim}, we prove that we can equivalently minimize the functional $F_q$ either in the class of Lipschitz set in $\mathcal{A}_k$ or in the larger class of those sets $\Omega\in\mathcal{A}_k$ satisfying $P(\Omega)=\mathcal{H}^1(\partial\Omega)$.
\begin{prop}
The following identity holds:
$$m_{q,k}=\inf\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_k,\ P(\Omega)=\mathcal{H}^1(\partial\Omega)\}$$
\end{prop}
\begin{proof}
By Theorem \ref{theo.approxim}, for every $\Omega\in\mathcal{A}_k$ such that $P(\Omega)=\mathcal{H}^1(\partial\Omega)<\infty$, there exists a sequence $(A_n)\subset\mathcal{A}_k$ of Lipschitz sets satisfying $\lim_n P(A_n)=P(\Omega)$. By construction $(A_n)$ is an equi-bounded sequence which converges both in the co-Hausdorff and in the $L^1$ sense. By Theorem \ref{theo.Sve} we have
$$\lim_{n\to\infty} F_q(A_n)=F_q(\Omega),$$
so that
$$m_{q,k}\le\inf\{F_q(\Omega)\ :\ \Omega\in\mathcal{A}_k,\ P(\Omega)=\mathcal{H}^1(\partial\Omega)\}.$$
The thesis is then achieved since the opposite inequality is trivial.
\end{proof}
In general the volume is only lower semicontinuous with respect to the $H^c$-convergence as simple counterexamples may show. In this section we prove that $L^1$-convergence is guaranteed in the class $\mathcal{A}_k$ under some further hypotheses, see Theorem \ref{theo.convmeas}. The proof of this result requires several lemma and relies on the classical Go\l ab's semicontinuity theorem, which deals with the lower semicontinuity of the Hausdorff measure $\mathcal{H}^1$ (see, for instance, \cite{AO}, \cite{amti04}).
\begin{theo}[Go\l ab]\label{theo.Golab}
Let $X$ be a complete metric space and, for $k\in\mathbb{N}$, let
$$\mathcal{C}_k:=\{K\ :\ K\subset X, \ K\text{ is closed},\ \#K\le k\}.$$
Then the function $K\mapsto\mathcal{H}^1(K)$ is lower semicontinuous on $\mathcal{C}_k$ endowed with the Hausdorff distance.
\end{theo}
\begin{lemm}\label{lem.inradcon}
Let $(\Omega_n)$ be a sequence of equi-bounded open sets. If $\Omega_n\overset{H^c}{\to}\Omega$ we have also $\rho(\Omega_n)\to\rho(\Omega)$.
\end{lemm}
\begin{proof}
For simplicity we denote $\rho:=\rho(\Omega)$, and $\rho_n:=\rho(\Omega_n)$. First we show that \begin{equation}\label{basso}\rho\le\liminf_n\rho_n.\end{equation} Indeed, without loss of generality let us assume $\rho>0$. Then for any $0<{\varepsilon}<\rho$, there exists a ball $B_{\varepsilon}$ whose radius is $\rho-{\varepsilon}$ and whose closure is contained in $\Omega$. By elementary properties of co-Hausdorff convergence, there exists $\nu$ such that $B_{\varepsilon}\subset\Omega_n$, for $n>\nu$, which implies $\rho_n\ge\rho-{\varepsilon}$. Since ${\varepsilon}>0$ is arbitrary, we get \eqref{basso}.
In order to prove the upper semicontinuity, assume by contradiction that there exist ${\varepsilon}>0$ and a subsequence $(n_k)$ such that $\rho_{n_k}>\rho+{\varepsilon}$ for every $k\in \mathbb{N}$. Then there exists a sequence of balls $B_{n_k}=B_{ \rho_{n_k} }(x_{n_k})\subseteq\Omega_{n_k}$. Passing, if necessary, to a subsequence, the sequence $(x_{n_k})$ converges to a point $x_{\infty}$ and the sequence of the translated open sets $\Omega_{n_k}-x_{n_k}$ converges to $\Omega-x_{\infty}$. Since $B_{r}(0)\subseteq \Omega_{n_k}-x_{n_k} $ for $r=\rho+{\varepsilon}$, it turns out that $B_{r}(0)\subseteq \Omega-x_{\infty}$, i.e. $B_{r}(x_{\infty})\subseteq\Omega$, which leads to a contradiction.
\end{proof}
\begin{lemm}\label{lem.top2}
Let $\Omega$ be a connected bounded open set of $\mathbb{R}^n$. There exists a sequence of connected bounded open sets $(\Omega_n)$ such that $\overline\Omega_n\subset\Omega_{n+1}$ and $\bigcup_n\Omega_n=\Omega$.
\end{lemm}
\begin{proof}
We construct the sequence by induction. First of all we notice that there exists an integer $\nu_1>0$ such that $\Omega(\nu_1^{-1})$ contains at least one connected component of $\Omega$ with Lebesgue measure greater than $\pi\nu_1^{-2}$. Indeed it suffices to choose
$$\nu_1^{-1}\le\min\{ d(y,\partial\Omega)\ :\ y\in\partial B_r(x)\}\wedge r$$
where $B_r(x)$ is any ball with closure contained in $\Omega$. Now let $M$ be the number of connected components of $\Omega(\nu_1^{-1})$ with Lebesgue measure greater than $\pi\nu_1^{-2}$. If $M=1$ we define $ \Omega_{1}:=\Omega(\nu_1^{-1})$. Otherwise, since $\Omega$ is pathwise connected, we can connect the closures of the $M$ connected components with finitely many arcs to define a connected compact set $K\subset\Omega$. Then, we choose $m$ such that $m>\nu_1$ and $m^{-1}<\inf\{d(x,\partial\Omega) : x\in K\}$ and we set
$$\Omega_{1}:=\{ x\in \Omega: \ d(x,K)< (2m)^{-1} \}.$$
In both cases $\Omega_1$ is a connected open set which contains all the connected components of $\Omega(\nu_1^{-1})$ having Lebesgue measure greater then $\pi\nu_1^{-2}$. Moreover by construction there exists $\nu_2>\nu_1$ such that $\overline \Omega_1\subseteq \Omega(\nu^{-1}_2)$.
Replacing $\nu_1$ with $\nu_2$ we can use the previous argument to define $\Omega_2$ such that $\overline\Omega_1\subset \Omega_2$.
Iterating this argument we eventually define an increasing sequence $\nu_n$ and a sequence of connected open sets $(\Omega_n)$ such that
$\overline \Omega_n\subset\Omega_{n+1}\subset\Omega$ and $\Omega_n$ contains all the connected components of $\Omega(\nu_n^{-1})$ of Lebesgue measure greater than $\pi\nu_{n}^{-2}$.
Since for any $x\in\Omega$ there exists $r>0$ such that $\overline{B}_r(x)\subset\Omega$, choosing $\nu_n^{-1}\le \min\{ d(y,\partial\Omega)\ : y\in\partial B_r(x)\}\wedge r$,
it is easy to show that $x\in\Omega_n$. Thus $\bigcup_{n} \Omega_{n}=\Omega$.
\end{proof}
In the following lemma we establish a Bonnesen-type inequality for sets $\Omega\in \mathcal{A}_k$ satisfying $\mathcal{H}^1(\partial \Omega)<\infty$ (see Theorem 2 in \cite{Oss79} when $\Omega$ is a simply connected plane domain bounded by a rectifiable Jordan curve).
\begin{lemm}\label{lem.coarea2}
Let $\Omega\in\mathcal{A}_k$ with $\mathcal{H}^1(\partial \Omega)<\infty$.
Then \begin{equation}\label{eq.coarea}
|\Omega| \le [2\mathcal{H}^1(\partial \Omega)-P(\Omega)+\pi ( k-1) \rho(\Omega)]\rho(\Omega).
\end{equation}
\end{lemm}
\begin{proof}
If $\#\Omega<\infty$, by Theorem \ref{theo.Nareg} and \eqref{eq.Evans},
$$|\Omega|\le \left(P(\Omega)+2\mathcal{H}^1(\partial\Omega \cap \Omega^{(1)})+\pi(k-1)\rho(\Omega)\right)\rho(\Omega),$$
and we conclude by \eqref{eq.decomp2}. To prove the general case we denote by $(\Omega^i)$ the connected components of $\Omega$ and we set $\Omega_n:=\bigcup_{i=1}^{n}\Omega^i$. By the previous step we have
$$|\Omega_n|\le \big(2\mathcal{H}^1(\partial \Omega_n)-P(\Omega_n)+\pi (k-1) \rho(\Omega_n)\big)\rho(\Omega_n).$$
Since $\Omega_n\overset{H^c}{\to}\Omega$ and $\Omega_n\to\Omega$ in the $L^1$-convergence, taking into account Lemma \ref{lem.inradcon} and Lemma \ref{lem.top1}, we can conclude that
\begin{align*}
|\Omega|=\lim_{n\to\infty}|\Omega_n|&\le\big(2\mathcal{H}^1(\partial\Omega)-\limsup_n P(\Omega_n)+\pi(k-1)\rho(\Omega)\big)\rho(\Omega)\\
&\le\big(2 \mathcal{H}^1(\partial\Omega)-P(\Omega)+\pi (k-1)\rho(\Omega)\big)\rho(\Omega),
\end{align*}
from which the thesis is achieved.
\end{proof}
\begin{theo}\label{theo.convmeas}
Let $(\Omega_n)\subset\mathcal{A}_k$ be a sequence of equi-bounded open sets with
$$\sup_n\mathcal{H}^1(\partial\Omega_n)<\infty.$$
If $\Omega_n\overset{H^c}{\to}\Omega$ then $\Omega\in \mathcal{A}_k$ and $\Omega_n\to \Omega$ in the $L^1$-convergence.
If, in addition, either $\sup_n\#\partial\Omega_n< \infty$ or $\#\Omega<\infty$ then
\begin{equation}
\label{eq.golab}
\mathcal{H}^1(\partial\Omega)\le \liminf_n \mathcal{H}^1(\partial\Omega_n).
\end{equation}
\end{theo}
\begin{proof}
We first deal with the case when $\sup_{n}\#\partial\Omega_n<\infty$, already considered in \cite{ChDo} and \cite{bubu05}. By compactness we can suppose that $\partial\Omega_n$ converges to some nonempty compact set $K$ which contains $\partial\Omega$. Then it is easy to show that $\bar\Omega_n\overset{H}{\to}\Omega\cup K$, which implies $\chi_{\Omega_n}\to\chi_\Omega$ pointwise in $\mathbb{R}^2\setminus K$, where $\chi_E$ denotes the characteristic function of a set $E$. By Theorem \ref{theo.Golab} we have also
\begin{equation}\label{eq.golabK}
\mathcal{H}^1(\partial \Omega)\le\mathcal{H}^1(K)\le\liminf_{n\to\infty}\mathcal{H}^1(\partial \Omega_n)<+\infty,
\end{equation}
which implies \eqref{eq.golab}. In particular, we have $|K|=0$, and $\Omega_n\to \Omega$ in the $L^1$ convergence.
We consider now the general case. Let $(\Omega^i)$ be the connected components of $\Omega$ and ${\varepsilon}>0$. There exists an integer $\nu({\varepsilon})$ such that
$$|\Omega|-{\varepsilon}<|\bigcup_{i=1}^{\nu({\varepsilon})}\Omega^i|\le |\Omega|$$
(when $\#\Omega<\infty$ we simply choose $\nu({\varepsilon})=\#\Omega$).
For each $i\le \nu({\varepsilon})$, and for each set $\Omega^i$, we consider the sequence $(\Omega^i_n)$ given by Lemma \ref{lem.top2}. By elementary properties of co-Hausdorff convergence there exists $l:=l(n)$ such that
$$\bigcup_{i=1}^{\nu({\varepsilon})}\overline{\Omega^i_n}\subset\Omega_{l}.$$
Let's denote by $\widetilde\Omega^i_{l}$ the connected component of $\Omega_{l}$ which contains $\overline{\Omega^i_n}$ (possibly $\widetilde{\Omega}^h_{l}=\widetilde{\Omega}^s_{l}$), and define
$$\widetilde\Omega_{l}:=\bigcup_{i=1}^{\nu({\varepsilon})} \widetilde\Omega^i_{l}.$$
By compactness, possibly passing to a subsequence, there exists $\widetilde{\Omega}\in\mathcal{A}_k$ such that $\widetilde\Omega_{l}\overset{H^c}{\to}\widetilde\Omega$.
Moreover, since $\widetilde\Omega_l\in\mathcal{A}_k$, $\sup_l\#\widetilde\Omega_l\le\nu({\varepsilon})$, and by Lemma \ref{lem.top1} we have
$$\sup_l \mathcal{H}^1( \partial \widetilde \Omega_{l})\le\sup_l \mathcal{H}^1(\partial\Omega_{l})<\infty,$$
we can apply the first part of the proof to conclude that $\widetilde\Omega_l\to \widetilde\Omega$ in the $L^1$-convergence. If $\#\Omega<\infty$ an easy argument shows that $\widetilde\Omega$ must be equal to $\Omega$ and that \eqref{eq.golabK} holds with $K$ the Hausdorff limit of $(\partial\widetilde\Omega_l)$.
In particular \eqref{eq.golab} holds.
Otherwise we consider the set $\Omega^R_l$ of those connected components of $\Omega_{l}$ that have been neglected in the definition of $\widetilde\Omega_l$, that is
$$\Omega^R_{l}:=\Omega_{l}\setminus\widetilde\Omega_{l}.$$
Passing to a subsequence we can suppose that $\Omega^R_l\overset{H^c}{\to}\Omega^R$, for some open set $\Omega^R\in\mathcal{A}_k$. Moreover since $|\widetilde\Omega|>|\Omega|-{\varepsilon}$, $\Omega^R\cap\tilde\Omega=\emptyset$ and $\Omega^R\subset\Omega$ we have also $|\Omega^R|\le{\varepsilon}$. This implies $\rho(\Omega^R)\le\sqrt{\pi^{-1}{\varepsilon}}$ and by Lemma \ref{lem.inradcon},
$$\lim_{l\to\infty}\rho(\Omega^R_{l})\le \sqrt{\pi^{-1}{\varepsilon}}.$$
Finally, by Lemma \ref{lem.coarea2}, we have
\begin{align*}
|\Omega|&\le\liminf_{n\to\infty}|\Omega_{n}|\le \limsup_{l\to\infty} ( |\widetilde\Omega_{l}|+|\Omega^R_{l}|)= |\widetilde \Omega|+\limsup_{l\to\infty}|\Omega^R_{l}|\le |\Omega|+o({\varepsilon}).
\end{align*}
Since ${\varepsilon}$ was arbitrary this shows that
$$
\liminf_{n\to\infty}|\Omega_n|=|\Omega|,
$$
and the thesis is easily achieved.
\end{proof}
As an application of the previous theorem we prove the following fact.
\begin{coro}
Let $\Omega\in\mathcal{A}_k$ with $\mathcal{H}^1(\partial\Omega)<\infty$ and $\#\Omega<\infty$. Then it holds
$$\mathcal{H}^1(\partial\Omega\cap\Omega^{(0)})\le\mathcal{H}^1(\partial\Omega\cap\Omega^{(1)}).$$
\end{coro}
\begin{proof}
By Theorem \ref{theo.approxim} we can consider a sequence $(A_n)\in\mathcal{A}_k$ of Lipschitz sets such that $A_n\overset{H^c}{\to}\Omega$ and $P(A_n)\to P(\Omega)+2\mathcal{H}^1(\partial\Omega\cap \Omega^{(1)})<\infty$. Then, by Theorem \ref{theo.convmeas}, we conclude
$$\mathcal{H}^1(\partial\Omega)\le \lim_{n\to\infty} P(A_n)\le P(\Omega)+2\mathcal{H}^1(\partial\Omega\cap \Omega^{(1)}),$$
which easily implies the thesis, using \eqref{eq.decomp}.
\end{proof}
\begin{rem} We remark that the inequality
$$\lim_{n\to\infty}P(A_n)\ge\mathcal{H}^1(\partial \Omega)$$
is not in general satisfied when $\#\Omega=\infty$, see also Remark \ref{ex.ce}.
\end{rem}
\section{Existence of relaxed solutions}\label{sexis}
Our next result generalizes the estimate
$F_{1/2}(\Omega)\ge3^{-1/2}$, proved in \cite{polya60} for the class $\mathcal{A}_{convex}$, to the class $\mathcal{A}_k$.
\begin{theo}\label{theo.Polya}
For every $\Omega\in\mathcal{A}_k$ set of finite perimeter we have
\begin{equation}\label{eq.polro}
\frac{T^{1/2}(\Omega)}{|\Omega|^{3/2}}\ge\frac{3^{-1/2}}{\left(2\mathcal{H}^1(\partial\Omega)-P(\Omega)+2\pi(k-1)\rho(\Omega)\right)}.
\end{equation}
\end{theo}
\begin{proof}
Without loss of generality we may assume that $\mathcal{H}^1(\partial\Omega)<\infty$ and we set $\rho:=\rho(\Omega)$. First we consider the case $\#\Omega<\infty$. We define
$$G(t):=\int_{0}^{t}\frac{A(s)}{L(s)}\,ds, \quad u(x):=G(d(x,\partial\Omega)).$$
Notice that, since for any $t\in (0,\rho)$ it holds $L(t)\ge\mathcal{H}^1(\partial \Omega(t))\ge P(\Omega(t))$,
by isoperimetric inequality \eqref{isoper} we have
$$\frac{A(t)}{L(t)}=\frac{|\Omega(t)|^{1/2}}{L(t)}A^{1/2}(t)\le\frac{|\Omega(t)|^{1/2}}{P(\Omega(t))}A^{1/2}(t)\le\frac{|B_1|^{1/2}}{P(B_1)}A^{1/2}(t).$$
In particular, since $A$ is bounded, we get that $L^{-1}A$ is summable on $(0,\rho)$ and $G$ is a Lipschitz function on the interval $(0,\rho)$. Thus $u\in H^1_{0}(\Omega)$.
Using \eqref{vartor} and \eqref{eq.boundL} we have
\begin{align*}
T(\Omega)&\ge \frac{\left(\int_{\Omega}udx\right)^{2}}{\int_{\Omega}|\nabla u|^{2}dx}\ge\frac{\left(\int_{0}^{\rho}G(t)L(t)dt\right)^2}{\int_0^\rho (G'(t))^{2}L(t)dt}\ge\int_0^\rho\frac{(A(t))^{2}}{L(t)}\,dt=\int_{0}^\rho \frac{A^2(t)L(t)}{L^{2}(t)}dt\\
&\ge\frac{1}{(P(\Omega)+2\mathcal{H}^1(\partial \Omega\cap\Omega^{(1)})+2\pi(k-1)\rho)^2}\int_0^\rho A^2(t)L(t)\,dt.
\end{align*}
Since $A\in W^{1,\infty}(0,\rho(\Omega))$ by Theorem \ref{theo.Nareg}, setting $\psi(s)=s^2$ we have that the function $\psi\circ A\in W^{1,\infty}(0,\rho(\Omega))$, so that
$$\int_0^\rho A^2(t)L(t)\,dt=-\int_0^{\rho}A^2(t)A'(t)\,dt=-\frac13\left[A^3(t)\right]_0^{\rho(\Omega)}=\frac13|\Omega|^3.$$
Thus
\begin{equation}\label{eq.polro1}
\frac{T(\Omega)}{|\Omega|^3}\ge\frac{1}{3(P(\Omega)+2\mathcal{H}^1(\partial \Omega\cap\Omega^{(1)})+2\pi(k-1)\rho)^2}.
\end{equation}
Taking into account \eqref{eq.decomp2} we get
$$
\frac{T(\Omega)}{|\Omega|^3}\ge\frac{1}{3(2\mathcal{H}^1(\partial\Omega)-P(\Omega)+2\pi(k-1)\rho)^2}.
$$
To prove the general case, let $\Omega_n$ be defined as in Lemma \ref{lem.top1}. Since $\#\Omega_n<\infty$ and $\Omega_n\in \mathcal{A}_k$, by the first part of this proof we have that
$$\frac{T(\Omega_n)}{|\Omega_n|^3}\left(2\mathcal{H}^1(\partial\Omega_n)-P(\Omega_n)+2\pi(k-1)\rho_n\right)^2\ge\frac1{3},$$
where $\rho_n:=\rho(\Omega_n)$.
When $n\to\infty$ we have $|\Omega_n|\to |\Omega|$, $\rho_n\to\rho$ by Lemma \ref{lem.inradcon} and $T(\Omega_n)\to T(\Omega)$ by Theorem \ref{theo.Sve}. Hence, passing to the $\limsup$ in the previous inequality and using Lemma \ref{lem.top1}, we get \eqref{eq.polro}.
\end{proof}
\begin{rem}\label{rem.polya}
Note that, in the special case of $\Omega\in\mathcal{A}_k$ and $\#\Omega<\infty$, we have the improved estimate \eqref{eq.polro1}.
Moreover, if $k=0,1$, \eqref{eq.polro} implies
\begin{equation}\label{eq.polyagen}
F_{1/2}(\Omega)\ge\frac{3^{-1/2}P(\Omega)}{2\mathcal{H}^1(\partial\Omega)-P(\Omega)}\, ,
\end{equation}
while, if $k>1$, we can use the inequality $2\pi\rho(\Omega)\le P(\Omega)$ (which can be easily derived from \eqref{isoper}), to obtain
\begin{equation}\label{eq.polyagenk}
F_{1/2}(\Omega)\ge\frac{3^{-1/2}P(\Omega)}{2\mathcal{H}^1(\partial\Omega)+(k-2)P(\Omega)}\;.
\end{equation}
\end{rem}
As a consequence of Theorem \ref{theo.Polya}, and using the well known fact that for a Lipschitz open set $\Omega$ it holds $P(\Omega)=\mathcal{H}^1(\partial\Omega)$, we have the following main results.
\begin{coro}\label{coro.polya} For every $q\le1/2$ we have
\begin{equation}\label{eq.m01}
m_{1/2,0}=m_{1/2,1}=3^{-1/2}
\end{equation}
and the value $3^{-1/2}$ is asymptotically reached by a sequence of long thin rectangles. More generally, for $k\ge 1$, it holds
\begin{equation}\label{eq.boundkq}
m_{q,k}\ge (8\pi)^{1/2-q}(3^{1/2}k)^{-1}
\end{equation}
and the sequence $(m_{q,k})$ decreases to zero as $k\to \infty$.
\end{coro}
\begin{proof}
By inequality \eqref{eq.polyagen} we have that $m_{1/2,0}, m_{1/2,1}\ge 3^{-1/2}$. Moreover the computations made in \cite{BBP20} show that the value $3^{-1/2}$ is asymptotically reached by a sequence of long thin rectangles, that are clearly in $\mathcal{A}_0$. Thus, being $\mathcal{A}_0\subset\mathcal{A}_1$, \eqref{eq.m01} holds. To prove \eqref{eq.boundkq} it is enough to notice that
$$F_q(\Omega)=F_{1/2}(\Omega)\left(\frac{T(\Omega)}{|\Omega|^{2}}\right)^{q-1/2}$$
and apply \eqref{eq.polyagenk} together with the Saint-Venant inequality \eqref{stven}.
Finally to prove that $m_{q,k}\to 0$ as $k\to \infty$, it is enough to consider the sequence $(\Omega_{1,n})$ defined in Theorem 2.1 of \cite{BBP20}, taking into account that $\Omega_{1,n}\in \mathcal{A}_k$ for $k$ big enough.
\end{proof}
We now introduce a relaxed functional $\mathcal{F}_{q,k}$.
More precisely, for $\Omega\in\mathcal{A}_k$ we denote by $\mathcal{O}_k(\Omega)$ the class of equi-bounded sequences of Lipschitz sets in $\mathcal{A}_k$ which converge to $\Omega$ in the sense of co-Hausdorff and we define $\mathcal{F}_{q,k}$ as follows:
$$\mathcal{F}_{q,k}(\Omega):=\inf\left\{\liminf_{n\to\infty} F_q(\Omega_{n}): \ (\Omega_n)\in\mathcal{O}_k(\Omega) \right\}.$$
It is straightforward to verify that $\mathcal{F}_{q,k}$ is translation invariant and scaling free.
As already mentioned in the introduction, when $q<1/2$, we prove the existence of a minimizer for $\mathcal{F}_{q,k}$. We notice this relaxation procedure can be made on the perimeter term only. More precisely, defining
$$\mathcal{P}_k(\Omega):=\inf \left\{\liminf_{n\to\infty} P(\Omega_{n})\ :\ (\Omega_n)\in\mathcal{O}_k(\Omega)\right\},$$
the following proposition holds.
\begin{prop}\label{prop.PPkF}
For every $\Omega\in\mathcal{A}_k$ we have
$$\mathcal{F}_{q,k}(\Omega)=\frac{\mathcal{P}_k(\Omega)T^{q}(\Omega)}{|\Omega|^{2q+1/2}}.$$
\end{prop}
\begin{proof}
Fix ${\varepsilon}>0$. Suppose that $\infty>\mathcal{P}_k(\Omega)+{\varepsilon}\ge\lim_n P(\Omega_n)$, for some $(\Omega_n)\in\mathcal{O}_{k}(\Omega)$. By Theorems \ref{theo.Sve} and \ref{theo.convmeas}, we have
$$
\frac{(\mathcal{P}_k(\Omega)+{\varepsilon})T^{q}(\Omega)}{|\Omega|^{2q+1/2}}\ge\lim_n\left(\frac{P(\Omega_n)T^q(\Omega_n)}{|\Omega_n|^{2q+1/2}}\right)\ge \mathcal{F}_{q,k}(\Omega),
$$
and since ${\varepsilon}$ is arbitrary we obtain the $\le$ inequality.
Similarly, to prove the opposite inequality assume $\lim_n F_q(\Omega_n)\le \mathcal{F}_{q,k}(\Omega)+{\varepsilon}<\infty$, for some sequence $(\Omega_n)\in \mathcal{O}_k(\Omega)$. Let $D$ be a compact set which contains each $\Omega_n$. Thanks to Theorem \ref{theo.Sve}, we have that $T(\Omega_n)\to T(\Omega)$ and, since $P(\Omega_n)=\mathcal{H}^1(\Omega_n)$, we have also
$$\sup_n\mathcal{H}^1(\partial\Omega_n)=\sup_n\left( \frac{F_q(\Omega_n)|\Omega_n|^{2q+1/2}}{\displaystyle{T^q}(\Omega_n)}\right)\le
\sup_n \left(\frac{F_q(\Omega_n)|D|^{2q+1/2}}{\displaystyle{T^q}(\Omega_n)}\right)<+\infty.$$
Applying again Theorem \ref{theo.convmeas} we have $|\Omega_n|\to|\Omega|$ and we can conclude
$$\frac{\mathcal{P}_k(\Omega)T^q(\Omega)}{|\Omega|^{2q+1/2}}\le\lim_n F_q(\Omega_n)\le\mathcal{F}_{q,k}(\Omega)+{\varepsilon},$$
which implies the $\ge$ inequality as ${\varepsilon}\to 0$.
\end{proof}
The perimeter $\mathcal{P}_k$ satisfies the following properties.
\begin{prop}\label{prop.Pk}
For every $\Omega\in\mathcal{A}_k$ of finite perimeter we have
\begin{equation}\label{eq.PPk}
P(\Omega)\le\mathcal{P}_k(\Omega)\le 2\mathcal{H}^1(\partial\Omega)-P(\Omega).
\end{equation}
Moreover if $\#\Omega<\infty$ and $\mathcal{H}^1(\partial\Omega)<+\infty$ it holds
\begin{equation}\label{eq.PPkH1}
\mathcal{H}^1(\partial\Omega)\le\mathcal{P}_k(\Omega)\le P(\Omega)+2\mathcal{H}^1(\partial \Omega \cap \Omega^{(1)})
\end{equation}
and $P(\Omega)=\mathcal{P}_k(\Omega)$ if and only if $P(\Omega)=\mathcal{H}^1(\partial\Omega)$.
\end{prop}
\begin{proof}
Taking into account Theorem \ref{theo.convmeas} and lower semicontinuity of the perimeter with respect to the $L^1$-convergence we have $\mathcal{P}_k(\Omega)\ge P(\Omega)$.
To prove the right-hand inequalities in \eqref{eq.PPk} and \eqref{eq.PPkH1} it is sufficient to take the sequence $(A_n)$ given by Theorem \ref{theo.approxim}. Finally, when $\#\Omega<\infty$, the inequality $\mathcal{H}^1(\partial\Omega)\le \mathcal{P}_k(\Omega)$ follows by Theorem \ref{theo.convmeas}.
\end{proof}
\begin{rem} \label{ex.ce}
If we remove the assumption $\#\Omega<\infty$, then \eqref{eq.PPkH1} is no longer true. For instance, we can slightly modify the Example $3.53$ in \cite{AFP} to define $\Omega\in\ \mathcal{A}_0$ such that $P(\Omega),\mathcal{P}_0(\Omega)<\infty$ while $\mathcal{H}^1(\partial\Omega)=\infty$. More precisely let $(q_n)$ be an enumeration of $\mathbb{Q}^2\cap B_1(0)$ and $(r_n)\subset(0,{\varepsilon})$ be a decreasing sequence such that
$\sum_n 2\pi r_n\le 1$. We recursively define the following sequence of open sets.
Let
$$\Omega_0:=B_{r_0}(q_0),\ \Omega_{n+1}:=\Omega_n\cup B_{s_n}(q_{h_n}),$$
where
$$h_n:=\inf\{k: q_k \in\overline\Omega_n^c\},\quad s_n:=r_{n+1}\wedge\sup\{r_k: B_{r_k}(q_{h_n})\cap \Omega_n=\emptyset\}.$$
Finally let $\Omega=\bigcup_n\Omega_n$. By construction $\Omega_n\overset{H^c}{\to}\Omega$ and since $\Omega_n\in\mathcal{A}_0$ for all $n$, we have also $\Omega\in\mathcal{A}_0$. Moreover we notice that $P(\Omega)\le 1$ and it is easy to verify that the two dimensional Lebesgue measure of $\partial\Omega$ is positive, which implies $\mathcal{H}^1(\partial\Omega)=\infty$. Finally, since the sequence $(\Omega_n)\in \mathcal{O}_0(\Omega)$, we have also $\mathcal{P}_0(\Omega)\le 1$.
\end{rem}
Next we prove that the relaxed functional $\mathcal{F}_{q,k}$ agrees with $F_q$ on the class of Lipschitz open sets in $\mathcal{A}_k$.
\begin{coro}\label{coro.Frel}
For every $\Omega\in\mathcal{A}_k$ we have
\begin{equation}\label{eq.FgF}
\mathcal{F}_{q,k}(\Omega)\ge F_q(\Omega).
\end{equation}
If, in addition, $P(\Omega)=\mathcal{H}^1(\partial\Omega)$ then we have
\begin{equation} \label{eq.FgF1}
F_q(\Omega)=\mathcal{F}_{q,k}(\Omega).
\end{equation}
In particular $\mathcal{F}_{q,k}$ and $F_q$ coincide on the class of Lipschitz sets and it holds
\begin{equation}\label{eq.infrel}
m_{q,k}=\inf\{\mathcal{F}_{q,k}(\Omega)\ :\ \Omega\in\mathcal{A}_k\}.
\end{equation}
\end{coro}
\begin{proof}
The inequalities \eqref{eq.FgF} and \eqref{eq.FgF1} follow by Proposition \ref{prop.PPkF} and \eqref{eq.PPk}. The last part of the theorem follows as a general property of relaxed functionals.
\end{proof}
\begin{lemm}\label{lem.coninf}
For every Lipschitz set $\Omega\in\mathcal{A}_k$, there exists a sequence of connected open sets $(\Omega_n)\subset\mathcal{A}_k$ such that
$$P(\Omega_n)=\mathcal{H}^1(\partial\Omega_n)\qquad\text{and}\qquad\lim_{n\to\infty}F_q(\Omega_n)=F_q(\Omega).$$
\end{lemm}
\begin{proof}
Since $\Omega$ is a bounded Lipschitz set we necessarily have $\#\Omega<\infty$. If $\Omega$ is connected we can take $\Omega_n$ to be constantly equal to $\Omega$. Suppose instead that $\#\Omega=2$ and let $\Omega^1$ and $\Omega^2$ be the connected components of $\Omega$. Since $\Omega$ is Lipschitz there exist $x_1\in\partial \Omega^1, x_2\in\partial\Omega^2$ such that
$$
0<d:=d(x_1,x_2)=\inf\{d(w,v): \ v\in\Omega^1,\ w\in\Omega^2\}.
$$
Define
$$\Omega^{2}_n:=\Omega^2-\left(1-\frac 1 n\right)(x_2-x_1).$$
Clearly we have $\overline{\Omega^2_{n}}\cap \overline{\Omega^1}=\emptyset$ for every $n\ge 1$ and $\Omega^{2}_1=\Omega^2$.
We set $$x_n=x_2-\left(1-\frac 1 n\right)(x_2-x_1).$$
Now we can join $x_1$ and $x_n$ through a segment $\Sigma_n$. Using the fact that both boundaries $\partial\Omega^1$ and $\partial\Omega^2_n$ are represented as graphs of Lipschitz functions in a neighborhood of $x_1$ and $x_n$ respectively, the thin open channel
$$C_{\varepsilon}:=\{ x\in\mathbb{R}^2\setminus(\overline\Omega^1\cup\overline\Omega^2_n)\ :\ d(x,\Sigma_n)<{\varepsilon}\}$$
of thickness ${\varepsilon}:={\varepsilon}(n)$ is such that the set
$$\Omega_n:=\Omega^1\cup \Omega^2_n\cup C_{{\varepsilon}}$$
belongs to $\mathcal{A}_k$, it is connected and $P(\Omega_n)=\mathcal{H}^1(\partial\Omega_n)$. The following identities are then verified
$$|\Omega_n|\to|\Omega|,\quad T(\Omega_n)\to T(\Omega),\quad P(\Omega_n)\approx P(\Omega^1)+P(\Omega^2)+\frac{2{\varepsilon}}{n},$$
so that $F_q(\Omega_n)\to F_q(\Omega)$ (notice that this does not imply $\Omega_n\to\Omega$). The general case is achieved by induction on $\#\Omega$. More precisely suppose $\#\Omega=N+1$. Let $(\Omega^i)$ be the connected components of $\Omega$. By induction we have
$$F_q(\Omega^1\cup\dots \cup\Omega^N)=\lim_{n\to\infty}F_q(\Omega'_n),$$
for a sequence $(\Omega'_n)\subset\mathcal{A}_k$ of connected open sets satisfying $P(\Omega'_n)=\mathcal{H}^1(\partial\Omega'_n)$.
Using the fact that, being $\Omega$ Lipschitz, the value of $F_q(\Omega)$ does not change if we translate (possibly in different directions and with different magnitudes) each connected component of $\Omega$, being careful to avoid intersections, we can suppose $\overline{\Omega}^{N+1}$ to have a positive distance from $\overline{\Omega}'_n$, as $n$ is large enough.
We then apply the previous step to define a sequence of connected open sets $\Omega_{n,m}\in\mathcal{A}_k$ such that $P(\Omega_{n,m})=\mathcal{H}^1(\partial\Omega_{n,m})$ and
$$F_q(\Omega_{n,m})\to F_q(\Omega'_n\cup \Omega^{N+1}),$$
as $m\to\infty$. Using a diagonal argument we achieve the thesis.
\end{proof}
We finally show the existence of a relaxed solution to the minimization problem of $\mathcal{F}_{q,k}$ in $\mathcal{A}_k$ when $q<1/2$.
\begin{theo}\label{theo.exis}
For $q<1/2$ there exists a nonempty bounded open set $\Omega^{\star}\in\mathcal{A}_k$ minimizing the functional $\mathcal{F}_{q,k}$ such that $\mathcal{H}^1(\partial\Omega^\star)<\infty$.
\end{theo}
\begin{proof}
Let $(\widetilde\Omega_n)\subset\mathcal{A}_k$ be a sequence of Lipschitz sets such that
$$
\lim_{n\to\infty} F_q(\widetilde\Omega_n)=m_{q,k}.
$$
Applying Lemma \ref{lem.coninf} and \eqref{eq.FgF1}, we can easily replace the sequence $(\widetilde\Omega_n)$ with a sequence $(\Omega_n)\subset\mathcal{A}_k$ of connected (not necessarily Lipschitz) open sets, satisfying $\mathcal{H}^1(\partial\Omega_n)=P(\Omega_n)$ and such that
$$\lim_{n\to\infty} F_q(\Omega_n)=\lim_{n\to\infty} F_q(\widetilde\Omega_n)=m_{q,k}.$$
Eventually using the translation invariance of $F_q$ and possibly rescaling the sequence $(\Omega_n)$, we can assume that $(\Omega_n)$ is equi-bounded and
\begin{equation}\label{ipinf}
\mathcal{H}^1(\partial\Omega_n)=P(\Omega_n)=1.
\end{equation}
By compactness, up to subsequences, there exists an open set $\Omega^\star\in\mathcal{A}_k$ such that $\Omega_n\overset{H^c}{\to}\Omega^\star$.
By \eqref{eq.infrel} we have
$$m_{q,k}\le \mathcal{F}_{q,k}(\Omega^\star).$$
Let us prove the opposite inequality. We notice that, by Theorem \ref{theo.approxim} and \eqref{ipinf}, for every $n$ there exists a sequence $(A_{n,m})_m\subset\mathcal{A}_k$ of Lipschitz sets, such that, as $m\to\infty$,
$$
P(A_{n,m})\to P(\Omega_n)\ \text{and}\ |A_{n,m}|\to |\Omega_n|.
$$
By Theorem \ref{theo.Sve}, we have also $T(A_{n,m})\to T(\Omega_n)$ as $m\to \infty$.
Thus
$$F_q(\Omega_n)=\lim_{m\to\infty}F_q(A_{n,m}).$$
A standard diagonal argument allows us to define a subsequence $A_{n,m_n}\in\mathcal{O}_k(\Omega^\star)$. Then we have
$$\mathcal{F}_{q,k}(\Omega^\star)\le \lim_n F_{q}(A_{n,m_n})=\lim_{n}F_{q}(\Omega_n)= m_{q,k}.
$$
Hence $\Omega^\star$ is a minimum for $\mathcal{F}_{q,k}$.
Moreover, notice that there exists a compact set $K$ containing $\partial\Omega^\star$ such that, up to a subsequence, $\partial\Omega_n\overset{H}{\to} K$.
So, being $\Omega_n$ connected, we have
$$
\sup_n\#\partial\Omega_n<\infty,
$$
and by Theorem \ref{theo.Golab},
$$
\mathcal{H}^1(\partial\Omega^\star)\le \mathcal{H}^1(K)\le \liminf_{n\to\infty}\mathcal{H}^1(\partial\Omega_n)\le 1.
$$
To conclude we have only to show that $\Omega^\star$ is nonempty. Notice that for $n$ big enough there exists $C>0$ such that $F_q(\Omega_n)< C$. Thus we have
\begin{equation} \label{eq.final1}
C>F_q(\Omega_n)=\frac{T^q(\Omega_n)}{|\Omega_n|^{2q+1/2}}=\left(\frac{T(\Omega_n)}{|\Omega_n|^{3}}\right)^{q}|\Omega_n|^{q-1/2}\ge \frac{1}{|\Omega_n|^{1/2-q}(\sqrt{3}k)^{2q}}\;,
\end{equation}
where the last inequality follows by \eqref{eq.polyagen}, using \eqref{ipinf}. By \eqref{eq.coarea} we have also
\begin{equation} \label{eq.final2}
|\Omega_n|\le (1+\pi(k-1)\rho(\Omega_n))\rho(\Omega_n).
\end{equation}
Combining \eqref{eq.final1}, \eqref{eq.final2} and the assumption $q<1/2$, we conclude that the sequence of inradius $(\rho(\Omega_n))$ must be bounded from below by some positive constant. By Lemma \ref{lem.inradcon}, $\Omega^{\star}$ is nonempty.
\end{proof}
\section{Conclusions}\label{sconc}
We have seen that in the planar case the topological constraint present in classes $\mathcal{A}_k$ is strong enough to ensure the existence of at least a relaxed optimizer. In higher dimensions this is no longer true and easy examples show that it is possible to construct sequences $(\Omega_n)$ in $\mathcal{A}_k$ with $P(\Omega_n)$ bounded and $T(\Omega_n)\to0$. This suggests that in higher dimensions stronger constraints need to be imposed in order to have well posed optimization problems.
Another interesting issue is the analysis of the same kind of questions when the exponent $2$ is replaced by a general $p>1$ in \eqref{vartor}; the torsional rigidity $T(\Omega)$ then becomes the $p$-torsion $T_p(\Omega)$ and it would be interesting to see how our results depend on the exponent $p$ and if in this case the analysis in dimensions higher than two is possible.
Finally, shape functionals $F(\Omega)$ involving quantities other than perimeter and torsional rigidity are interesting to be studied: we point out some recent results in \cite{bbp20},\cite{FtLa} and references therein. However, to our knowledge, the study of these shape functionals under topological constraints as the ones of classes $\mathcal{A}_k$ is still missing.
\noindent{\bf Acknowledgments.} The work of GB is part of the project 2017TEXA3H {\it``Gradient flows, Optimal Transport and Metric Measure Structures''} funded by the Italian Ministry of Research and University. The authors are member of the Gruppo Nazionale per l'Analisi Matematica, la Probabilit\`a e le loro Applicazioni (GNAMPA) of the Istituto Nazionale di Alta Matematica (INdAM).
\begin{thebibliography}{999}
\bib{BBP20}{L.~Briani, G.~Buttazzo, F.~Prinari}{Some inequalities involving perimeter and torsional rigidity}{Appl. Math. Optim., (to appear), preprint available at {\tt http://cvgmt.sns.it} and at {\tt http://www.arxiv.org}}
\bib{polya60}{G.~P\'olya}{Two more inequalities between physical and geometrical quantities}{J. Indian Math. Soc., {\bf24} (1960), 413--419}
\bib{Ma}{E.~Makai}{Bounds for the principal frequency of a membrane and the torsional rigidity of a beam}{Acta Sci. Math., (Szeged) {\bf20}, (1959) 33-35}
\bib{Ma59}{E.~Makai}{On the principal frequency of a convex membrane and related problems}{Czechoslowak Math. J., {\bf9} (1959), 66-70}
\bib{EvGa}{L.C.~Evans, R.F.~Gariepy}{Measure Theory and Fine Properties of Functions}{Textbooks in Mathematics, CRC Press, Boca Raton (2015)}
\bib{Fu85}{J.H.G.~Fu}{Tubular neighborhoods in Euclidean spaces}{Duke Math. J., {\bf52} (4) (1985), 1025--1046}
\bib{AFP}{L.~Ambrosio, N.~Fusco, D.~Pallara}{Functions of bounded variation and free discontinuity problems}{Oxford Mathematical Monographs, The Clarendon Press, Oxford University Press, New York (2000)}
\bib{AO}{G.~Alberti, M.~Ottolini}{On the structure of continua with finite length and Go\l ab's semicontinuity theorem}{Nonlinear Anal., {\bf153} (2017), 35--55}
\bib{V}{E.~Villa}{On the outer Minkowski content of sets}{Ann. Mat. Pura Appl., {\bf188} (4) (2009), 619-630}
\bib{Nagy59}{B.~Sz.-Nagy}{\"Uber Parallelmengen nichtkonvexer ebener Bereiche}{Acta Sci. Math. (Szeged), {\bf20} (1959), 36--47}
\bib{Sc15}{T.~Schmidt}{Strict interior approximation of sets of finite perimeter and functions of bounded variation}{Proc. Amer. Math. Soc., {\bf143} (2015), 2069--2084}
\bib{bubu05}{D.~Bucur, G.~Buttazzo}{Variational Methods in Shape Optimization Problems}{Progress in Nonlinear Differential Equations {\bf65}, Birkh\"auser Verlag, Basel (2005)}
\bib{He}{A.~Henrot, M.~Pierre}{Shape variation and optimization}{EMS Tracts in Mathematics {\bf28}, European Mathematical Society, Z\"urich (2018)}
\bib{sv93}{V.~Sver\'ak}{On optimal shape design}{J. Math. Pures Appl., {\bf72} (1993), 537--551}
\bib{amti04}{L.~Ambrosio, P.~Tilli}{Selected topics on analysis in metric spaces}{Oxford Lecture Series in Mathematics and its Applications {\bf25}, Oxford University Press, Oxford, 2004}
\bib{Oss79}{R.~Osserman}{Bonnesen-Style Isoperimetric Inequalities}{The American Mathematical Monthly, {\bf86} (1) (1979),1--29 }
\bib{ChDo}{A.~Chambolle, F.~Doveri}{Continuity of Neumann linear elliptic problems on
varying two-dimensional bounded open sets}{Comm. Partial Differential Equations, {\bf22} (1997), 811--840}
\bib{bbp20}{M.~van den Berg, G.~Buttazzo, A.~Pratelli}{On the relations
between principal eigenvalue and torsional rigidity}{Commun. Contemp.
Math., (to appear), preprint available at {\tt http://cvgmt.sns.it} and
at {\tt http://www.arxiv.org}}
\bib{FtLa}{I.~Ftouhi, J.~Lamboley}{
Blaschke-Santal\'o diagram for volume, perimeter and first Dirichlet
eigenvalue}{
Preprint available at {\tt https://hal.archives-ouvertes.fr/hal-02850711}}
\end{thebibliography}
{\small\noindent
Luca Briani:
Dipartimento di Matematica,
Universit\`a di Pisa\\
Largo B. Pontecorvo 5,
56127 Pisa - ITALY\\
{\tt [email protected]}
\noindent
Giuseppe Buttazzo:
Dipartimento di Matematica,
Universit\`a di Pisa\\
Largo B. Pontecorvo 5,
56127 Pisa - ITALY\\
{\tt [email protected]}\\
{\tt http://www.dm.unipi.it/pages/buttazzo/}
\noindent
Francesca Prinari:
Dipartimento di Matematica e Informatica,
Universit\`a di Ferrara\\
Via Machiavelli 30,
44121 Ferrara - ITALY\\
{\tt [email protected]}\\
{\tt http://docente.unife.it/francescaagnese.prinari/}}
\end{document} |
\begin{document}
\title{$3j$-symbols for representations of the Lie algebra $\mathfrak{gl}_3$}
\renewcommand{\abstractname}{}
\begin{abstract}
In the paper a simple explicit formula for an arbitrary $3j$-symbol for the Lie algebra $\mathfrak{gl}_3$ is given. It is expressed through a fraction of values of hypergeometric functions when one substitutes $\pm 1$ instead of all its arguments. The problem of calculation of an arbitrary $3j$-symbol is equivalent to the problem of calculation of an arbitrary Clebsh-Gordan coefficient for the algebra $\mathfrak{gl}_3$. These coefficients play an important role in quantum mechanics in the theory of quarks.
\end{abstract}
\section{Introduction}
Consider a tensor product of irreducible representation $V$ and $W$ of the algebra $\mathfrak{gl}_3$ and let us split it into a sum of irreducibles:
\begin{equation}
\label{rzl}
V\otimes W=\sum_{U,s} U^s, \end{equation}
where $U$ denotes possible types of irreducible representations that occur in this decomposition and the symbol $s$ is indexing irreducible representations $U^s$ of type $U$, occurring in the decomposition \footnote{ A precise definition of the index $s$ is the following. We are writing a decomposition as follows: $V\otimes W=\sum_{U} M_U\otimes U$, where $M_U$ is a linear space named the multiplicity space. Let $\{e_i\}$ be it's base, then $U^s:=e_s\otimes U$. }.
Let us choose in these representations bases $\{v_{\mu}\}$, $\{w_{\nu}\}$, $\{u^s_{\rho}\}$. The Clebsh-Gordan coefficients are numeric coefficients $C^{U,\rho,s}_{V,W;\mu,\nu}\in\mathbb{C}$, appearing in the decomposition
\begin{equation}
\label{kg1}
v_{\mu}\otimes w_{\nu}=\sum_{s,\rho} C^{U,\rho,s}_{V,W;\mu,\nu} u_{\rho}^s.
\end{equation}
Also we use the term the Clebsh-Gordan coefficients for coefficients $D^{U,\rho,s}_{V,W;\mu,\nu}\in\mathbb{C}$, occurring in the decomposition
\begin{equation}
\label{kg2}
u_{\rho}^s = \sum_{\mu,\nu} D^{U,\rho,s}_{V,W;\mu,\nu} v_{\mu}\otimes w_{\nu}.
\end{equation}
These coefficients in the case of algebras $\mathfrak{gl}_2$, $\mathfrak{gl}_3$ play an important role in the quantum mechanics. The Clebsh-Gordan coefficients for the algebra $\mathfrak{gl}_2$ are used in the spin theory (see \cite{blb}), and the Clebsh-Gordan coefficients for the algebra $\mathfrak{gl}_3$ are used in the theory of quarks (see \cite{GrM}). The problem of their calculation in the case $\mathfrak{gl}_2$ is quite simple. There are explicit formulas that were first obtained by Van der Varden \cite{Gkl0}. The Clebsh-Gordan coefficients for the algebra $\mathfrak{sl}_2$ allow to construct new realization of representations of real Lie groups associated with this Lie algebra ($SU(2)$ in \cite{blb}, \cite{Go}; \cite{GoGo}; $SO(3)$ in \cite{GM}, \cite{GoGo}, \cite{Go}). They appear in the elasticity theory (see \cite{Sel}, \cite{Sel2}, where actually the $SO(3)$ group is considered).
In the application it is especially important to find the Clebsh-Gordan coefficients in the case when in representations the Gelfand-Tsetlin base is taken.
The problem of calculation of Clebsh-Gordan coefficients for $\mathfrak{gl}_n$ when $n\geq 3$ is much more difficult than in the case $n=2$. In the case $n=3$ they for the first time were calculated in a series of papers by Biedenharn, Louck, Baird \cite{bb1963}, \cite{bl1968}, \cite{bl1970}, \cite{bl19731}, \cite{bl19732}. In these papers the general case $\mathfrak{gl}_n$ is considered, but only in the case $n=3$ their calculations allow to obtain in principle a formula for a general Clebsh-Gordan coefficient. The calculations are based on the following principle. Consider tensor operators between two representations of $\mathfrak{gl}_n$, i.e. collections $\{f_u\}$ of mappings
$$
f_{u}: V \rightarrow W
$$
between representations of $\mathfrak{gl}_n$, the mappings $\{f_u\}$ are in one-to-one correspondence with vectors of a representation $U$ of $\mathfrak{gl}_n$. Some condition relating the actions of $\mathfrak{gl}_n$ on these representations must hold. The matrix elements of tensor operators are closely related to the Clebsh-Gordan coefficients (the Wigner--Eckart theorem). In papers \cite{bb1963}-\cite{bl19732} some explicit realization of these operators is given (in different papers cases of different representations $U$ are considered). Such an explicit realization does not allow to obtain explicit formulas for matrix elements of a tensor operator corresponding to a given $U$ but it allows to express them through the matrix elements of tensor operators corresponding to $U$, considered in previous papers.
Unfortunately there is no explicit formula in their papers. It is just clear that it can be obtained.
Thus the problem of finding of an {\it explicit and simple
} formula for a general Clebsh-Gordan coefficient for the algebra $\mathfrak{gl}_3$ still remained unsolved. A review of proceeding papers can be found in \cite{a1}.
In this paper for the first time a really {\it explicit } formula for a general Clebsh-Gordan coefficient for $\mathfrak{gl}_3$ in the decomposition \eqref{kg2} was obtained. That is an explicit formula of type $D^{U,\gamma,s}_{V,W;\alpha,\beta}=...$ was derived. Unfortunately this formula is quite cumbersome, thus the problem of derivation of a {\it simple } formula remained unsolved.
For the purpose of calculation of Clebsh-Gordan coefficients it is necessary to choose an explicit realization of a representation of $\mathfrak{gl}_3$.
A realization which is very convenient for calculation was suggested in \cite{bb1963}. In this paper the following is proved. If one uses a realization of a representation in the space of functions on the Lie group $GL_3$, then functions corresponding to Gelfand-Tsetlin base vectors can be expressed through the Gauss' hypergeometric function (see a modern viewpoint in \cite{a2}). This idea was used in \cite{a1}.
In the present paper we change an approach of \cite{a1} and this allows to obtain a much simpler result. Instead of coefficients in the decomposition \eqref{kg1} we are calculating the $3j$-symbols (see their definition and their relation to Clebsh-Gordan coefficients in Section \ref{3jcg}). We
express the $3j$-symbols through the values of an hypergeometric function. As the result we manage to obtain {\it explicit and simple} formulas for a general Clebsh-Gordan coefficient for the algebras $\mathfrak{gl}_3$. The main result is formulated in Theorem \ref{ost}, where
a formula for a $3j$-symbol is given in this case (see \eqref{osntf2}).
In Appendix \ref{dop} we give the selection rules for Clebsh-Gordan coefficients and $3j$-symbols.
\section{ The basic notions}
\subsection{ $A$-hypergeometric functions}
One can find information about a $\Gamma$-series in \cite{GG}.
Let $B\subset \mathbb{Z}^N$ be a lattice, let $\mu\in \mathbb{Z}^N$ be a fixed vector. Define a {\it hypergeometric
$\Gamma$-series } in variables $z_1,...,z_N$ by formulas
\begin{equation}
\label{gmr}
\mathcal{F}_{\mu}(z,B)=\sum_{b\in
B}\frac{z^{b+\mu}}{\Gamma(b+\mu+1)},
\end{equation}
where $z=(z_1,...,z_N)$, and we use the multiindex notations
$$
z^{b+\mu}:=\prod_{i=1}^N
z_i^{b_i+\mu_i},\,\,\,\Gamma(b+\mu+1):=\prod_{i=1}^N\Gamma(b_i+\mu_i+1).
$$
Note that in the case when at least one of the components of the vector $b+\mu$ is non-positive integer then the corresponding summand in \eqref{gmr} vanishes. Thus in the considered below $\Gamma$-series there are only finitely many terms. Also we write below factorials instead of $\Gamma$-functions.
A $\Gamma$-series satisfies the Gelfand-Kapranov-Zelevinsky system. Let us write it in the case $z=(z_1,z_2,z_3,z_4)$, $B=\mathbb{Z}<(1,-1,-1,1)>$:
\begin{align}
\begin{split}
\label{gkzs} \Big(\frac{\partial^2}{\partial z_1\partial
z_4}-\frac{\partial^2}{\partial z_2\partial z_3}\Big)F_{\mu,B}&=0,
\\
z_1\frac{\partial}{\partial z_1}F_{\mu,B}+ z_2\frac{\partial}{\partial
z_2}F_{\mu,B}& =(\mu_1+\mu_2)F_{\mu,B},\quad
\\
z_1\frac{\partial}{\partial z_1}F_{\mu,B}+
z_3\frac{\partial}{\partial z_3}F_{\mu,B}&=(\mu_1+\mu_3)F_{\mu,B},
\\
z_1\frac{\partial}{\partial z_1}F_{\mu,B}-z_4\frac{\partial}{\partial
z_4}F_{\mu,B}&=(\mu_1-\mu_4)F_{\mu,B}.
\end{split}
\end{align}
\subsection{ A functional realization of a Gelfand-Tsetlin base}
In the paper Lie algebras and groups over $\mathbb{C}$ are considered.
Functions on $GL_3$ form a representation of the group $GL_3$. On a function $f(g)$, $g\in GL_3$, an element $X\in GL_{3}$ acts by right shifts
\begin{equation}
\label{xf} (Xf)(g)=f(gX).
\end{equation}
Passing to an infinitesimal action we obtain that on the space of all functions on $GL_3$ there exists an action of $\mathfrak{gl}_3$.
Every finite dimensional irreducible representation can be realized as a subrepresentation in the space of functions.
Let
$[m_{1},m_2,m_{3}]$ be a highest weight, then in the space of functions there is a highest vector with such a weight, which is written explicitly as follows.
Let $a_{i}^{j}$, $i,j=1,2,3$,
be the matrix element functions on the group $GL_{3}$. Here $j$ is a row index and $i$ is a column index.
Also put
\begin{equation}
\label{dete}
a_{i_1,...,i_k}:=det(a_i^j)_{i=i_1,...,i_k}^{j=1,...,k},
\end{equation}
where we take a determinant of a submatrix in a matrix $(a_i^j)$,
formed by rows with indices $1,...,k$ and columns with indices $i_1,...,i_k$.
The operator $E_{i,j}$ acts onto determinants by transforming their column indices
\begin{equation}
\label{edet1}
E_{i,j}a_{i_1,...,i_k}=a_{\{i_1,...,i_k\}\mid_{j\mapsto i}},
\end{equation}
where $.\mid_{j\mapsto i}$ denotes the operation of substituting $i$ for $j$; if $j$ does not occur among $\{i_1,...,i_k\}$, then we obtain zero.
One sees that a rising operator $E_{i,j}$, $i<j$ tries to change an index to a smaller one. Thus the function
\begin{equation}
\label{stv}
v_0=\frac{a_{1}^{m_{1}-m_{2}}}{(m_1-m_2)!}\frac{a_{1,2}^{m_{2}-m_{3}}}{(m_2-m_3)!}\frac{a_{1,2,3}^{m_{3}}}{m_{3}!}
\end{equation}
is a highest vector for the algebra $\mathfrak{gl}_{3}$ with the weight
$[m_{1},m_{2},m_3]$.
Let us write a formula for a function corresponding to a Gelfand-Tsetlin diagram for $\mathfrak{gl}_3$ (see definition of this base in \cite{zh}). A diagram is an integer table of the following type in which the betweenness conditions hold
\begin{align*}
\begin{pmatrix}
m_{1} && m_{2} &&0\\ &k_{1}&& k_{2}\\&&s
\end{pmatrix}
\end{align*}
A formula for a function corresponding to this diagram is given in the Theorem proved in \cite{bb1963}.
\begin{thm}\label{vec3} Put $B_{GC}=\mathbb{Z}<(0,1,-1,-1,1,0)>$, $\mu=(m_1-k_1,s-m_{2},k_{1}-s_{},m_{2}-k_{2},0,k_2)$, then to a diagram there corresponds a function $$
\mathcal{F}_{\mu}(a_3,a_{1},a_{2},a_{1,3},a_{2,3},a_{1,2},B_{GC})
$$
\end{thm}
In \cite{bb1963} an expression involving the Gauss' hypergeometric function is given. A modern form of this formula involving a $\Gamma$-series was presented in \cite{a2}.
To make notations shorter in the case when we use a $\Gamma$-series corresponding to the lattice $B_{GC}$ from Theorem \ref{vec3} we omit the notation $B_{GC} $ and we write just $\mathcal{F}_{\mu}(a)$. In the case when we use another lattice we do not omit it.
Note also that the first equation from the GKZ system looks as follows
\begin{equation}
\label{gkz}
\mathcal{O}\mathcal{F}=0,\,\,\,\mathcal{O}=\frac{\partial^2}{\partial a_1\partial a_{2,3}}-\frac{\partial^2}{\partial a_2\partial a_{1,3}}.
\end{equation}
\subsection{ A-GKZ system and its solutions. A-GKZ realization }
\subsubsection{ A-GKZ system. A base $F_{\mu}$ and a base $\tilde{F}_{\mu}$ in its solution space}
Instead of determinants $a_X$, $X\subset \{1,2,3\}$, that satisfy the Plucker relations let us introduce variables $A_X$, $X\subset \{1,2,3\}$, we suppose that $A_X$ are skew-symmetric functions of $X$.
Let us return to the GKZ-system \eqref{gkz} and let us change the differential operators that define it.
Consider functions $F(A)$ that satisfy the equations
\begin{equation}
\label{agkz}
\bar{\mathcal{O}}_{A}F=0, \,\,\, \bar{\mathcal{O}}_{A}=\frac{\partial^2}{\partial A_1\partial A_{2,3}}-\frac{\partial^2}{\partial A_2\partial A_{1,3}}+\frac{\partial^2}{\partial A_3\partial A_{1,2}}
\end{equation}
This system is called an antisymmetrized Gelfand-Kapranov-Zelevinsky system (or A-GKZ for short).
Let us find a base in the space of polynomial solutions of such a system.
Using the equality (65) from \cite{a1} one can show that the following function is a solution
\begin{equation}
\label{Fmu}
F_{\mu}(A):=\sum_{s\in\mathbb{Z}_{\geq 0}} q^{\mu}_s \zeta_A^{s}\mathcal{F}_{\mu-s(e_3+e_{1,2})}(A),
\end{equation}
where \begin{align}\begin{split}\label{cs} &t^{\mu}_0=1, \,\,\,\,\, t^{\mu}_s=\frac{1}{s(s+1)+s(\mu_1+\mu_2+\mu_{1,3}+\mu_{2,3})}, \text{ for }s>0\\& q^{\mu}_s=\frac{t_s^{\mu}}{\sum_{s'\in\mathbb{Z}_{\geq 0}} t_{s'}^{\mu}}, \,\,\,\,\,
\zeta_A=A_1A_{2,3}-A_{2}A_{1,3}.
\end{split}\end{align}
One has $F_{\mu}=\mathcal{F}_{\mu}\cdot const$ modulo the Plucker relations.
In the space of solutions of the system \eqref{agkz} one can construct another base. Put
$$
v=e_1+e_{2,3}-e_{2}-e_{1,3},\,\,\,\, r=e_3+e_{1,2}-e_1-e_{2,3},
$$
and for all $s\in\mathbb{Z}_{\geq 0}$ consider a function
\begin{equation}
\mathcal{F}^s_{\mu}(A):=\sum_{t\in \mathbb{Z}}\frac{(t+1)...(t+s-1)A^{\mu+tv}}{\Gamma(\mu+tv+1)}
\end{equation}
One proves directly that
\begin{equation}
\label{Ftildmu}
\tilde{F}_{\mu}(A)=\sum_{s\in\mathbb{Z}_{\geq 0}} \frac{(-1)^{s}}{s!} \mathcal{F}^s_{\mu-sr}(A)
\end{equation}
is also a solution of \eqref{agkz}. Let us introduce an order on shift vectors considered $mod B_{GC}$
\begin{equation}
\label{por}
\mu\preceq \nu \Leftrightarrow \mu=\nu-sr \,\, mod B_{GC}\,\,\, s\in\mathbb{Z}_{\geq 0}.
\end{equation}
Then considering supports of the functions $F_{\mu}(A)$, $\tilde{F}_{\mu}(A)$ one comes to the conclusion that the base $\tilde{F}_{\mu}$ is related to the collection of functions $F_{\mu}$ by a linear transformation which is lower-unitriangular with respect to the ordering \eqref{por}. That is
$$
\tilde{F}_{\mu}=\sum_{s\in \mathbb{Z}_{\geq 0}}d_s F_{\mu-sr},\,\,\, d_0=1
$$
Hence functions $\tilde{F}_{\mu}(A)$ form a base in the solution space of the system \eqref{agkz}.
\subsubsection{ A realization of a representation in the space of solutions of the A-GKZ system}
Define an action of the algebra $\mathfrak{gl}_3$ on the variables $A_X$ by the rule:
$$
E_{i,j}A_X=\begin{cases}A_{X\mid_{j\mapsto i}}, \text{ if }j\in X, \,\,\,\text{see } \eqref{edet1} \\0\text{ otherwise. }\end{cases}
$$
One easily checks that this is an action of the Lie algebra. One continues this action to the polynomials in $A_X$ by the Leibniz rule.
The operator $ \bar{\mathcal{O}}_{A}$ commutes with the action of $\mathfrak{gl}_3$. Hence the solution space of the system \eqref{agkz} is a representation of $\mathfrak{gl}_3$.
When one applies the Plucker relations (i.e. changes $A_X\mapsto a_X$) the considered realization is transformed to the functional realization.
Thus the functions $F_{\mu}$ for shift vectors $\mu$, corresponding to all possible Gelfand-Tsetlin diagrams of an irreducible representation with the highest weight $[m_1,m_2,0]$ (see Theorem \ref{vec3}) form a representation with this highest weight.
The obtained realization is called the A-GKZ realization.
One easily checks that if one subtracts from $\mu$ the vector $r$ then a diagram transforms as follows $k_1\mapsto k_1-1$, $k_2\mapsto k_2+1$. Thus $\tilde{F}_{\mu}$ also form a base in the A-GKZ realization.
\subsubsection{ An explicit form of an invariant scalar product in the A-GKZ realization}
In the A-GKZ realization one can write explicitly an invariant scalar product. Between two monomials the scalar product is defined as follows:
\begin{equation}
<A_{X_1}^{\alpha_1}...A_{X_n}^{\alpha_n},A_{Y_1}^{\beta_1}...A_{Y_m}^{\beta_m}>
\end{equation}
is nonzero if and only if $n=m$, and (maybe a permutation is needed) $X_1=Y_1$ and $\alpha_1=\beta_1$,..., $X_n=Y_n$ and $\alpha_n=\beta_n$. In this case
\begin{equation}
<A_{X_1}^{\alpha_1}...A_{X_n}^{\alpha_n},A_{X_1}^{\alpha_1}...A_{X_n}^{\alpha_n}>=\alpha_1!\cdot...\cdot\alpha_n!
\end{equation}
One easily proves that this product is invariant.
A function in variables $A_X$, $X\subset \{1,2,3\}$ we denote just as $f(A)$. Then the scalar product can be rewritten using the multi-index notations as follows.
Define an action
\begin{equation}
\label{dve}
f(A)\curvearrowright h(A):=f(\frac{d}{dA})h(A),
\end{equation}
then
\begin{equation}
\label{skd}
<f(A),h(A)>= f(A)\curvearrowright h(A)\mid_{A=0}.
\end{equation}
Due to the symmetry of the scalar product one can write $ <f(A),h(A)>= h(A)\curvearrowright f(A)\mid_{A=0}.$
\subsubsection{ A relation between a base $F_{\mu}$ and a base $\tilde{F}_{\mu}$ of the A-GKZ realization}
Using the constructed scalar product one can find a relation between two basis of the A-GKZ realization.
Note that $\mathcal{F}_{\mu}=const\, F_{\mu}+pl$, where $pl=0$ modulo the Plucker relations. Then for every function $h(A)$, which is a solution of the A-GKZ system, one has
$$
<h,\mathcal{F}_{\mu}>=const<h, F_{\mu}>.
$$
Thus to prove that
$$
\tilde{F}_{\mu}=\sum_{s\in \mathbb{Z}_{\geq 0}}d_s F_{\mu-sr}
$$
it is sufficient to show that
\begin{equation}
\label{ur}
<\tilde{F}_{\mu},\mathcal{F}_{\nu}>=\sum_{s\in \mathbb{Z}_{\geq 0}}d_s <F_{\mu-sr},\mathcal{F}_{\nu}>
\end{equation}
Using the formula \eqref{dve} and the definitions of $\tilde{F}_{\mu}$, $\mathcal{F}_{\nu}$, one gets that the scalar product of these functions is nonzero if and only if $\nu \preceq \mu$, that is $\nu=\mu-sr \bmod B_{GC}$. Under this condition one has
$$
<\tilde{F}_{\mu},\mathcal{F}_{\mu-sr}>=\frac{(-1)^{s}}{s!}\mathcal{F}_{\mu-sr}^s(1),
$$
where on the right hand side one writes a result of substitution of $1$ instead of all arguments of $\mathcal{F}_{\mu-sr}^s(A)$.
Now let us find a scalar product $<F_{\mu},\mathcal{F}_{\nu}>$. Note that $<\zeta^k h(A),\mathcal{F}_{\nu} >$, where $k>0$, is equal to $0$ (due to the formula \eqref{dve} and the fact that $\zeta$ acts as a GKZ operator which sends the function $\mathcal{F}_{\nu}$ to $0$). Thus the scalar product only with the first summand in \eqref{Fmu} is non-zero, thus
$$
<F_{\mu},\mathcal{F}_{\nu}>=<\mathcal{F}_{\mu},\mathcal{F}_{\nu}>
$$
Using the formula \eqref{dve}, one obtains that this expression is non-zero only if $\mu=\nu \bmod B_{GC}$, in this case it equals $\mathcal{F}_{\mu}(1)$.
Thus \eqref{ur} gives that
\begin{equation}
\label{ds}
\frac{(-1)^{s+1}}{s!}\mathcal{F}_{\mu-sr}^s(1)=d_s\mathcal{F}_{\mu-sr}(1) \Rightarrow d_s=\frac{(-1)^{s} \mathcal{F}_{\mu-sr}^s(1) }{\mathcal{F}_{\mu-sr}(1) }
\end{equation}
We also need an inversion of that expression.
\begin{equation}
\label{fs0}
F_{\mu}=\sum_{s\in \mathbb{Z}_{\geq 0}}f_s\tilde{F}_{\mu-sr}
\end{equation}
Thus we need to find an inverse matrix to the mentioned lower-unitriangular matrix. One has
$$
\begin{pmatrix}
1&0&0...\\
\frac{ - \mathcal{F}_{\mu-r}^1(1) }{\mathcal{F}_{\mu-r}(1) } &1 &0...\\
\frac{ \mathcal{F}_{\mu-2r}^2(1) }{\mathcal{F}_{\mu-2r}(1) } &... &1\\
...\\
\end{pmatrix}^{-1}=\begin{pmatrix}
1&0&0...\\
\frac{ \mathcal{F}_{\mu-r}^1(1) }{\mathcal{F}_{\mu-r}(1) } &1 &0...\\
\frac{ - \mathcal{F}_{\mu-2r}^2(1) }{\mathcal{F}_{\mu-2r}(1) } &... &1\\
...\\
\end{pmatrix}
$$
Thus in \eqref{fs0} one has
\begin{equation}
\label{kfs}
f^{\mu}_s=\frac{(-1)^{s+1} \mathcal{F}_{\mu-sr}^s(1) }{\mathcal{F}_{\mu-sr}(1) }
\end{equation}
\section{ A solution of the multiplicity problem for the Clebsh-Gordan coefficients }
\label{krtn}
In the case of the algebra $\mathfrak{gl}_2$ different representations $U^s$ occurring in the decomposition \eqref{rzl} have different highest weights. Thus one can use the highest weight as the index $s$. In the case of $\mathfrak{gl}_3$ the situation is much more difficult---there appears the multiplicity problem: in the decomposition \eqref{rzl} a representation $U$ of a given highest weight can occur with some multiplicity.
In the paper \cite{a1} the following solution of the problem of an explicit description of the representations $U^s$ occurring in \eqref{rzl} was given. One realizes $V\otimes W$ in the space of functions on a product of groups $GL_3\times GL_3$. The functions of matrix elements on the first factor are denoted as $a_i^j$, and on the second as $b_i^j$. Introduce functions on $GL_3\times GL_3$:
\begin{align}
\begin{split}
\label{aab}
&(ab)_{i_1,i_2}:=det\begin{pmatrix} a_i^1\\ b_i^1
\end{pmatrix}_{i=i_1,i_2},\,\,\,\, (aabb)_{i_1,i_2,i_3,i_4}:=a_{i_1,i_2}b_{i_3,i_4}-a_{i_3,i_4}b_{i_1,i_2},\\
&(aab)=det\begin{pmatrix} a_i^1\\ a_i^2\\b_i^1
\end{pmatrix}_{i=1,2,3}, (abb)=det\begin{pmatrix} a_i^1\\ b_i^1\\b_i^2
\end{pmatrix}_{i=1,2,3}
\end{split}\end{align}
Consider a tensor product $V\otimes W$ of representation with highest weights $[m_1,m_2,0]$ and $[m'_1,m'_2,0]$\footnote{Below we put $m_3=0$, $m'_3=0$}. Then a base in the space of $\mathfrak{gl}_3$-highest vectors is formed by the following functions. Put
\begin{equation}
\label{foo}
f(\omega,\varphi,\psi,\theta):=a_1^{\alpha}b_1^{\beta}a_{1,2}^{\gamma}b_{1,2}^{\delta}(ab)_{1,2}^{\omega}(abb)^{\varphi}(aab)^{\psi}(aabb)_{1,2,1,3}^{\theta},
\end{equation}
where
\begin{align}
\begin{split}
\label{usl0}
&\alpha+\omega+\varphi=m_1-m_2,\,\,\,\gamma+\theta+\psi=m_2,\\
&\beta+\omega+\psi=m'_1-m'_2,\,\,\,\delta+\varphi+\theta=m'_2.
\end{split}
\end{align}
The function \eqref{foo} is indexed not by all exponents. The reason is that the exponents $\alpha,\beta,\gamma,\delta$ can be obtained from \eqref{usl0}.
\begin{prop}
\label{mlt}
In the space of $\mathfrak{gl}_3$-highest vectors there is a base consisting of functions of type $f(0,\varphi,\psi,\theta)$ and $f(\omega,\varphi,\psi,0)$.
\end{prop}
Thus an index $s$ from \eqref{rzl} runs through the set of functions $f(0,\varphi,\psi,\theta)$ and $f(\omega,\varphi,\psi,0)$, where $f$ is defined in \eqref{foo}, and the exponents satisfy conditions \eqref{usl0}. One can identify a function with it's exponents $\alpha,...,\theta$.
\section{$3j$-symbols and Clebsh-Gordan coefficients}
\subsection{A relation to the Clebsh-Gordan coefficients}
\label{3jcg}
Let us be given representations $V$, $W$, $U$ of the Lie algebra $\mathfrak{gl}_3$. Choose in them bases $\{v_{\mu}\}$, $\{w_{\nu}\}$, $\{u_{\rho}\}$. Then a $3j$-symbol is a collection of numbers
\begin{equation}
\label{3j}
\begin{pmatrix}
V& W& U\\ v_{\mu} & w_{\nu} & u_{\rho}
\end{pmatrix}^s,
\end{equation}
such that the value
$$
\sum_{\mu,\nu,\rho}\begin{pmatrix}
V& W& U\\ v_{\mu} & w_{\nu} & u_{\rho}
\end{pmatrix}^s v_{\mu} \otimes w_{\nu} \otimes u_{\rho}
$$
is $\mathfrak{gl}_3$ semi-invariant. The $3j$-symbols with the same inner indices form a linear space. An index $s$ is indexing basic $3j$-symbols with the same inner indices.
These coefficients are closely related to the Clebsh-Gordan coefficients. Indeed let us be given a decomposition into a sum of irreducible representations:
\begin{equation}
\label{rzl9}
V\otimes W=\sum_s U^s. \end{equation}
Take basis $\{v_{\mu}\}$, $\{w_{\nu}\}$, $\{u^s_{\rho}\}$. The Clebsh-Gordan coefficients are coefficients in the decomposition
\begin{equation}
\label{ur2}
u_{\rho'}^s = \sum_{\mu,\nu} D^{U,\rho',s}_{V,W;\mu,\nu} v_{\mu}\otimes w_{\nu}.
\end{equation}
Consider a representation $\bar{U}$, contragradient to $U$, and take in $\bar{U}$ a base $\bar{u}_{\rho}$, dual to $u_{\rho}$.
There exists a mapping $U\otimes \bar{U} \rightarrow {\bf 1}$ into a trivial representation, such that $u_{\rho'}\otimes \bar{u}_{\rho}\mapsto \delta_{\rho,\rho'}$, where $\delta_{\rho,\rho'}$ is the Kronecker symbol.
Multiply \eqref{ur2} by $\bar{u}_{\rho}$, take a sum over $\rho$; one gets
$$
{\bf 1}=\sum_{\mu,\nu,\rho}D^{U,\rho,s}_{V,W;\mu,\nu} v_{\mu}\otimes w_{\nu}\otimes \bar{u}_{\rho}.
$$
Thus one has
$$
D^{U,\rho,s}_{V,W;\mu,\nu}=\begin{pmatrix}
V& W& \bar{U}\\ v_{\mu} & w_{\nu} & \bar{u}_{\rho}
\end{pmatrix}^s.
$$
This formula allows to identify the multiplicity spaces for the Clebsh-Gordan coefficients and for the $3j$-symbols.
Thus the problem of calculation of the Clebsh-Gordan coefficients and the $3j$-symbols are equivalent.
\subsection{$3j$-symbols in functional realization}
In the functional realization a $3j$-symbol for representations $V$, $W$, $U$ is described as follows. Let the representations be realized in the space of functions on $GL_3\times GL_3\times GL_3$. We can put $m_3=0$, $m'_3=0$. Functions of matrix elements on the factors $GL_3$ we denote as $a_i^j$, $b_i^j$, $c_i^j$. Analogous letters denote the determinants of matrices composed of matrix elements.
Decompose a tensor product $V\otimes W\otimes U$ into a sum of irreducible representations and take one of the occurring trivial representations
$$V\otimes W\otimes U={\bf 1}^s\oplus...,$$
where ${\bf 1}^s$ is one of the occurring trivial representations. More precisely, one can write $V\otimes W\otimes U=\Big ( {\bf 1}\otimes M \Big ) \oplus...,$ where $M$ is a linear space called the multiplicity space. Choose in $M$ a base $\{e_i\}$ and denote ${\bf 1}^s:={\bf 1}\otimes e_s$.
In this representation several trivial representations can occur and $s$ is their index \footnote{Let us mention papers \cite{kl}, \cite{tk}, \cite{tk1}, where analogous approach to the decomposition of a double tensor product is used}.
Let base vectors be indexed by diagrams:
\begin{align}
\begin{split}
\label{3d}
&v_{\mu}=\begin{pmatrix}
m_{1} && m_{2} &&0\\ &k_{1}&& k_{2}\\&&s
\end{pmatrix},\,\,\, w_{\nu}=\begin{pmatrix}
m'_{1} && m'_{2} &&0\\ &k'_{1}&& k'_{2}\\&& s'
\end{pmatrix},\\
&u_{\rho}=\begin{pmatrix}
M_1 &&M_2 &&0\\ &K_1 &&K_2\\&&S
\end{pmatrix}
\end{split}
\end{align}
One has
\begin{equation}
\label{3ddu}
\bar{ u}_{\rho}=\begin{pmatrix}
-M_3 &&-M_2 &&-M_1\\ &-K_2 &&-K_1\\&&-S
\end{pmatrix}
\end{equation}
\subsection{ An explicit form of a $\mathfrak{gl}_3$-invariant in $V\otimes W\otimes U$}
Let us prove that the highest vector of a representation ${\bf 1}^s$ must be of the following type (or a linear combination of such expressions)
\begin{equation}
\label{skob}
g=\prod_i (\underbrace{a\cdots a}_{k^i_1}\underbrace{b\cdots b}_{k^i_2}\underbrace{c\cdots c}_{k^i_3}),\end{equation}
where, by analogy with \eqref{aab} as determinants we introduce expressions $(abc)$, $(aac)$, $(acc)$, also put $(bcc)$, $(bbc)$.
\begin{align*}&(aabbcc):=(\tilde{a}\tilde{b}\tilde{c}),\,\,\,\tilde{a}_1^1:=a_{2,3},\,\,\,\tilde{a}_2^1:=-a_{1,3},\,\,\,\tilde{a}_3^1:=a_{1,2},\end{align*}
$\tilde{b}_i^1$, $\tilde{c}_i^1$ are defined analogously.
The following conditions must hold
\begin{align}
\begin{split}
\label{usl}
& m_1=\# \{ i : \,\,\, k_1^i=1 \} ,\,\,\,\, m_2=\# \{ i : \,\,\, k_1^i=2 \} ,\,\,\,\, 0=\# \{ i : \,\,\, k_1^i=3 \},\\
& m'_1=\# \{ i : \,\,\, k_2^i=1 \} ,\,\,\,\, m'_2=\# \{ i : \,\,\, k_2^i=2 \} ,\,\,\,\, 0=\# \{ i : \,\,\, k_2^i=3 \},\\
& M_1=\# \{ i : \,\,\, k_1^i=1 \} ,\,\,\,\, M_2=\# \{ i : \,\,\, k_1^i=2 \} ,\,\,\,\, M_3=\# \{ i : \,\,\, k_1^i=3 \}.\\
\end{split}
\end{align}
They provide that $g\in V\otimes W\otimes U$ (see \cite{zh}).
Indeed, on the one hand, the index $s$ indexing different $3j$-symbols \begin{equation}
\begin{pmatrix}
V& W& U\\ v_{\mu} & w_{\nu} & u_{\rho}
\end{pmatrix}^s
\end{equation}
with the same inner indices coincides with the index numerating different Clebsh-Gordan coefficients
$$ C^{\bar{U},\bar{\gamma},s}_{V,W;\alpha,\beta}.$$
On the other hand, to the index $s$ of a Clebsh-Gordan coefficient there corresponds a function \eqref{foo}, to which there corresponds an expression of type \eqref{skob}, that is constructed as follows. To factors of \eqref{foo} there correspond factors from \eqref{skob} by the rule:
\begin{align*}
&a_1 \mapsto (acc),\,\,\, a_{1,2} \mapsto (aac),\,\,\, b_1 \mapsto (bcc),\,\,\, b_{1,2} \mapsto (bbc),\,\,\, \\
& (ab)\mapsto (abc),\,\,\, (aabb)_{1,2,1,3} \mapsto (aabbcc),\,\,\,\\
&(aab)\mapsto (aab),\,\,\, (abb) \mapsto (abb).
\end{align*}
Thus starting from the expression \eqref{foo} we have constructed an expression \eqref{skob}, this construction is one-to-one. That is we have an isomorphism.
Hence we have described all $\mathfrak{gl}_3$-invariants of a triple tensor product.
\section{A formula for a $3j$-symbol}
Let us find an expression for a $3j$-symbol. A $3j$-symbol has a multiplicity index $s$, which is a function \eqref{foo}. To this function there corresponds a trivial representation in the triple tensor product with the highest vector
\begin{equation}
\label{goo}
g(\omega,\varphi,\psi,\theta):=\frac{(acc)^{\alpha}(bcc)^{\beta}(aac)^{\gamma}(bbc)^{\delta}(abc)^{\omega}(abb)^{\varphi}(aab)^{\psi}(aabbcc)^{\theta}}{\alpha!\beta!\gamma!\omega!\varphi!\psi!\theta!}.
\end{equation}
We have changed the expression \eqref{skob} by adding division by factorials of exponents.
\subsection{ Lattices $B'_1$ and $B''_1$}
Consider independent variables corresponding to summands in determinants $(caa),(acc),...,(aabbcc)$. Denote these variables as follows
\begin{align}
\begin{split}
\label{perem}
&Z=\{[[c_1a_{2,3}] ,[c_2a_{1,3}] ,[c_3a_{1,2}] , [a_1c_{2,3}] , [a_{2}c_{1,3}] ,[a_3c_{1,2}] ,
[c_1b_{2,3}] , [c_2b_{1,3}] , [c_3b_{1,2}] ,\\& [b_1c_{2,3}] ,[b_{2}c_{1,3}] ,[b_3c_{1,2}] ,
[b_1a_{2,3}] , [b_2a_{1,3}] , [b_3a_{1,2}], [a_1b_{2,3}] , [a_{2}b_{1,3}] ,[a_3b_{1,2}] ,\\&
[a_1b_2c_3],[a_2b_3c_1], [a_3b_1c_2], [a_2b_1c_3],[a_1b_3c_2], [a_3b_2c_1]
\\&
[a_{2,3}b_{1,3}c_{1,2}] , [a_{1,3}b_{1,2}c_{2,3}] , [a_{1,2}b_{2,3}c_{1,3}] ,
[a_{1,3}b_{2,3}c_{1,2}] , [a_{2,3}b_{1,2}c_{1,3}] , [a_{1,2}b_{1,3}c_{2,3}] \}.
\end{split}
\end{align}
These variables are coordinates in a $30$-dimensional space. When one opens brackets in determinants occurring in $g$, there appear monomials in variables \eqref{perem}. The vectors of exponents of these monomials are vectors in a $30$-dimensional space.
Consider a vector
$$
v_0=(\gamma,0,0,\alpha,0,0,\delta ,0,0,\beta,0,0,\psi,0,0,\varphi,0,0,\omega,0,0,0,0,0,\theta,0,0,0,0,0)
$$
Such a vector of exponents is obtained in the case when one takes the first summand in each determinant.
Note that either $\omega=0$, or $\theta=0$.
If one changes a choice of summands then to the vector of exponents $v_0$ one needs to add one of the following vectors
\begin{align*}
&p_1= e_{[c_1a_{2,3}]}-e_{ [c_2a_{1,3}] }, & p_2=e_{[c_1a_{2,3}]}-e_{ [c_3a_{1,2}] }, \\&p_3= e_{[a_1c_{2,3}] }-e_{[a_2c_{1,3}] }, & p_4= e_{[a_1c_{2,3}] }-e_{[a_3c_{1,2}] },\\
&p_5= e_{[c_1b_{2,3}]}-e_{ [c_2b_{1,3}] }, & p_6=e_{[c_1b_{2,3}]}-e_{ [c_3b_{1,2}] }, \\& p_7= e_{[b_1c_{2,3}] }-e_{[b_2c_{1,3}] }, & p_8= e_{[b_1c_{2,3}] }-e_{[b_3c_{1,2}] },\\
&p_9= e_{[a_1b_{2,3}]}-e_{ [a_2b_{1,3}] }, & p_{10}=e_{[a_1b_{2,3}]}-e_{ [a_3b_{1,2}] }, \\& p_{11}= e_{[b_1a_{2,3}] }-e_{[b_2a_{1,3}] }, & p_{12}= e_{[b_1a_{2,3}] }-e_{[b_3a_{1,2}] },\\&
p_{13}=e_{[a_1b_2c_3]}-e_{[a_2b_3c_1]} & p_{14}=e_{[a_1b_2c_3]}-e_{[a_3b_1c_2]} \\
& p_{15}=e_{[a_1b_2c_3]}-e_{[a_2b_1c_3]} & p_{16}=e_{[a_1b_2c_3]}-e_{[a_1b_3c_2]} \\& p_{17}=e_{[a_1b_2c_3]}-e_{[a_3b_2c_1]},
&p_{18}= e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{1,3}b_{1,2}c_{2,3}] }, \\& p_{19}= e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{1,2}b_{2,3}c_{1,3}] }, & p_{20}= e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{1,3}b_{2,3}c_{1,2}] }, \\& p_{21}= e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{1,2}b_{1,3}c_{2,3}] }, & p_{22}= e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{2,3}b_{1,2}c_{1,3}] },
\end{align*}
Define projectors
$$
pr_a,pr_b,pr_c:\mathbb{C}^{30}\rightarrow \mathbb{C}^6,
$$
which operate as follows. Given a vector of exponents for variables \eqref{perem} they construct vectors of exponents for variables $a_X$, $b_X$, $c_X$.
Consider a vector $v$ of exponents of a monomial in variables \eqref{perem}, which corresponds to an arbitrary choice of summands in determinants. Let us find which vectors $\tau\in\mathbb{Z}<p_1,...,p_{22}>$ have the following property: when one adds such a vector to $v$, then to the projections $pr_a$, $pr_b$, $pr_c$ vectors proportional to $(0,1,-1,-1,1,0)$ are added.
\begin{defn} {\it An elementary cycle} is the following object. Take two determinants from $(caa),...,(aabbcc)$ and draw two arrows from a symbol $x=a,b,c$ in one determinant to a symbol $xx=aa,bb,cc$ in another determinant. Or from a symbol $x$ in one determinant to $x$ in another determinant. Or from a symbol $xx$ in one determinant to a symbol $xx$ in another determinant.
The following two conditions must be satisfied. One arrow goes from the first determinant to the second and another goes from the second to the first. And one of the arrows goes from $x$ to $xx$.
\end{defn}
Here are examples of cycles:
\[
(a\tikzmark{0} bb\tikzmark{11} ) (\tikzmark{00} aa \tikzmark{1} b),\,\,\,
(a\tikzmark{5} b\tikzmark{66} c) (\tikzmark{55} aa \tikzmark{6} bb cc),\,\,\,(a\tikzmark{90} bb\tikzmark{911} ) (\tikzmark{900} a \tikzmark{91} bc),\,\,\,
(a\tikzmark{95} bb\tikzmark{966} ) (\tikzmark{955} aa \tikzmark{96} bb cc).
\]
\begin{tikzpicture}[remember picture, overlay, bend left=45, -latex, blue]
\draw ([yshift=2ex]pic cs:0) to ([yshift=2ex]pic cs:00);
\draw ([yshift=0ex]pic cs:1) to ([yshift=0ex]pic cs:11);
\draw ([yshift=2ex]pic cs:5) to ([yshift=2ex]pic cs:55);
\draw ([yshift=0ex]pic cs:6) to ([yshift=0ex]pic cs:66);
\draw ([yshift=2ex]pic cs:90) to ([yshift=2ex]pic cs:900);
\draw ([yshift=0ex]pic cs:91) to ([yshift=0ex]pic cs:911);
\draw ([yshift=2ex]pic cs:95) to ([yshift=2ex]pic cs:955);
\draw ([yshift=0ex]pic cs:96) to ([yshift=0ex]pic cs:966);
\end{tikzpicture}
All other examples of cycles are obtained from these by applying a permutation of symbols $a,b,c$.
To an elementary cycle there corresponds a vector $\tau\in\mathbb{Z}<p_1,...,p_{22}>$ by the following rule. To the elementary cycles written above there correspond vectors
\begin{align}
\begin{split}
\label{vec}
& u_1=-e_{[a_1b_{2,3}]}+e_{[a_2b_{1,3}]}-e_{[b_1a_{2,3}]}+e_{[b_2a_{1,3}]},\\
& u_2=-e_{[a_1b_{2}c_{3}]}+e_{[a_2b_{1}c_{3}]}-e_{[a_{2,3}b_{1,3}c_{1,2}]}+e_{[a_{1,3}b_{2,3}c_{1,2}]},\\
& u_3=-e_{[a_1b_{2,3}]}+e_{[a_2b_{1,3}]}-e_{[a_{2,3}b_{1,3}c_{1,2}]}+e_{[a_{1,3}b_{2,3}c_{1,2}]},\\
& u_4=-e_{[a_1b_{2,3}]}+e_{[a_2b_{1,3}]}-e_{[a_{2,3}b_{1,3}c_{1,2}]}+e_{[a_{1,3}b_{2,3}c_{1,2}]},\\
\end{split}
\end{align}
Let $B_1$ be an integer lattice spanned by the vectors corresponding to elementary cycles. One has
\begin{prop}
\label{prp}
The lattice $B_1$ is generated by vectors corresponding to elementary cycles of type
\[
(a\tikzmark{995} bb\tikzmark{9966} ) (\tikzmark{9955} aa \tikzmark{996} bb cc),\,\,\,
(a\tikzmark{85} bb\tikzmark{866} ) (\tikzmark{855} a \tikzmark{86} b c)
\]
\begin{tikzpicture}[remember picture, overlay, bend left=45, -latex, blue]
\draw ([yshift=2ex]pic cs:995) to ([yshift=2ex]pic cs:9955);
\draw ([yshift=0ex]pic cs:996) to ([yshift=0ex]pic cs:9966);
\draw ([yshift=2ex]pic cs:85) to ([yshift=2ex]pic cs:855);
\draw ([yshift=0ex]pic cs:86) to ([yshift=0ex]pic cs:866);
\end{tikzpicture}
\end{prop}
\proof
One proves directly that a vector corresponding to an elementary cycle can be expressed through these vectors.
\endproof
One has $B_1\subset B$.
In Proposition \ref{prp} two series of generators of the lattice $B_1$ are listed. Let $B'_1$ be a sublattice of $B_1$, generated by elementary cycles of the first type. Let $B''_1$ be a sublattice of $B_1$, generated by elementary cycles of the second type.
In both cases the generators form a base. The fact that the selected vectors are linearly independent follows from the fact that each vector contains coordinate vectors that are not involved in other vectors.
Thus in $B'_1$ there exists a base
\begin{align}
\begin{split}
\label{bb1}
&v_1^a=-e_{[a_1b_{2,3}]}+e_{[a_2b_{1,3}]}-e_{[a_{2,3}b_{1,3}c_{1,2}]}+e_{[a_{1,3}b_{2,3}c_{1,2}]},\,\,\, v_2^a=-e_{[a_1c_{2,3}]}+e_{[a_2c_{1,3}]}-e_{[a_{2,3}b_{1,2}c_{1,3}]}+e_{[a_{1,3}b_{1,2}c_{2,3}]}\\
&v_1^b=-e_{[b_1a_{2,3}]}+e_{[b_2a_{1,3}]}-e_{[a_{1,3}b_{2,3}c_{1,2}]}+e_{[a_{2,3}b_{1,3}c_{1,2}]},\,\,\, v_2^b=-e_{[b_1c_{2,3}]}+e_{[b_2c_{1,3}]}-e_{[a_{1,2}b_{2,3}c_{1,3}]}+e_{[a_{1,2}b_{1,3}c_{2,3}]}\\
&v_1^c=-e_{[c_1a_{2,3}]}+e_{[c_2a_{1,3}]}-e_{[a_{1,3}b_{1,2}c_{2,3}]}+e_{[a_{2,3}b_{1,2}c_{1,3}]},\,\,\, v_2^c =-e_{[c_1b_{2,3}]}+e_{[c_2b_{1,3}]}-e_{[a_{1,2}b_{1,3}c_{2,3}]}+e_{[a_{1,2}b_{2,3}c_{1,3}]}
\end{split}
\end{align}
In the case when \eqref{goo} contains $(aabbcc)$ but does not contain $(abc)$ one uses $B'_1$. In the case when \eqref{goo} contains $(abc)$ but does not contain $(aabbcc)$ one uses $B''_1$.
\begin{prop}
Let \eqref{goo} contain $(aabbcc)$ and does not contain $(abc)$. Take $30$-dimensional vectors $\varpi$ , $\varpi'$ of exponents of two monomials in variables $Z$ which one obtains when one opens brackets in \eqref{goo}. Let $[\mu,\nu,\rho]=[pr_a(\varpi),pr_b(\varpi),pr_c(\varpi)]$, $[\mu',\nu',\rho']=[pr_a(\varpi'),pr_b(\varpi'),pr_c(\varpi')]$.
Then $[\mu',\nu',\rho'] =[\mu,\nu,\rho]+b_1$, $b_1\in B'_1$ if and only if simultaneously
\begin{align*}& \mu=\mu' \bmod (0,-1,1,1,-1,0),\\
& \nu=\nu' \bmod (0,-1,1,1,-1,0),\\
& \rho=\rho' \bmod (0,-1,1,1,-1,0).\end{align*}
In the case when \eqref{goo} contains $(abc)$ but does not contain $(aabbcc)$ the same is true if one changes $B'_1$ to $B''_1$.
\end{prop}
\begin{proof}
Consider the case when \eqref{goo} contains $(aabbcc)$ but does not contain $(abc)$.
From $[\mu',\nu',\rho'] =[\mu,\nu,\rho] +b_1$, $b_1\in B'_1$ it follows that
$\mu=\mu' \bmod (0,-1,1,1,-1,0)$,
$ \nu=\nu' \bmod (0,-1,1,1,-1,0)$,
$\rho=\rho' \bmod (0,-1,1,1,-1,0)$. It can be proved by direct computations.
Let us prove the converse.
One changes the choice of summands in the determinants, such that $\mu=\mu' \bmod (0,-1,1,1,-1,0)$,
$ \nu=\nu' \bmod (0,-1,1,1,-1,0)$,
$\rho=\rho' \bmod (0,-1,1,1,-1,0)$. For example we change $a_1b_{2,3}$ from the determinant $(abb)$ to $a_2b_{1,3}$. The exponent of $b_{2,3}$ reduces by $1$ and the exponent of $b_{1,3}$ increases by $1$. The vector of exponents of determinants $b$ must belong to the set $\delta+ (0,-1,1,1,-1,0)$. Thus the exponent of $b_1$ must increase by $1$ and the exponent of $b_2$ must decrease by $1$. This can take place in the case when in another determinant, say in $(bcc)$, the summand $b_1c_{2,3}$ is changed to $b_2c_{1,3}$. Then we consider the change of exponents of $c_{2,3}$, $c_{1,3}$ and so on. Finally we must return to the determinant $(abb)$ and obtain the change of exponents of determinants $a_1$ and $a_2$ which takes place due to the change of a summand in the first determinant.
This construction corresponds to the shift of the vector of exponents by a vector from $B'_1$.
\end{proof}
\subsubsection{Scalar products}
Consider the case when \eqref{goo} contains $(aabbcc)$ and does not contain $(abc)$.
Let us calculate scalar products. For $s_1,s_2,s_3\in\mathbb{Z}_{\geq 0 }$ introduce a hypergeometric type series in variables $Z$:
\begin{align}
\begin{split}
\label{defz}
&\mathcal{F}_{\varpi}^{s_1,s_2,s_3}(Z,B'_1):=\sum_{t\in \mathbb{Z}^6} \binom{t_1^a+t_2^a+s_1}{s_1}
\binom{t_1^b+t_2^b+s_2}{s_2}
\binom{t_1^c+t_2^c+s_3}{s_3}
\frac{Z^{\varpi+tv}}{ (\varpi+tv)! },\\
&t=(t_1^a,t_2^a,t_1^b,t_2^b,t_1^c,t_2^c),\,\,\,\,tv=t^a_1v^a_1+t^a_2v^a_2+...,\,\,\, \varpi\in\mathbb{Z}^{30}\\
\end{split}
\end{align}
Introduce vectors
\begin{align*}
& f_a=-e_{[a_1c_{2,3}]}+e_{[a_3c_{1,2}]}-e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{1,2}b_{1,3}c_{2,3} ] },\\
&f_b=-e_{[b_1c_{2,3}]}+e_{[b_3c_{1,2}]}-e_{[a_{1,3}b_{2,3}c_{1,2}]}-e_{ [a_{1,3}b_{1,2}c_{2,3} ] },\\
&f_c=-e_{[a_1b_{2,3}]}+e_{[a_3b_{1,2}]}-e_{[a_{1,2}b_{2,3}c_{1,3}]}-e_{ [a_{2,3}b_{1,2}c_{1,3} ] }.
\end{align*}
One has (see \eqref{vr})
\begin{align}
\begin{split}
\label{prabc}
&pr_a(\tau+f_a)=pr(\tau)+r,\\
&pr_b(\tau+f_a)=pr(\tau),\\
&pr_c(\tau+f_a)=pr(\tau),
\end{split}
\end{align}
and one has analogous conditions for $f_b,f_c$.
Below we consider vectors $\varpi\in\mathbb{Z}^{30}$ and vectors $\mu,\nu,\rho$, that satisfy the following relations
\begin{align}
\begin{split}
\label{vrph}
&\varpi=v_0+tv^{abc}+s_1f_a+s_2f_b+s_3f_c,\,\,\, t\in\mathbb{Z}^{9},\,\,\, s_1,s_2,s_3\in\mathbb{Z},\\
&\mu=pr_a(\varpi),\,\,\, \nu=pr_b(\varpi),\,\,\, \rho=pr_c(\varpi).
\end{split}
\end{align}
\begin{prop}
\label{prp1}
Let $\varpi, \mu,\nu,\rho$ satisfy the relations \eqref{vrph}. Then
\begin{equation}
\label{p1}
<g,\mathcal{F}_{\mu}^{s_1}(A)\mathcal{F}_{\nu}^{s_2}(B)\mathcal{F}_{\rho}^{s_3}(C)>=
\mathcal{F}_{\varpi}^{s_1,s_2,s_3}(\pm 1,B'_1),
\end{equation}
Instead of those variables from $Z$ (see \eqref{perem}) which occur in a determinant with a sign $+$, one substitutes $+1$, and instead of those variables which occur in a determinant with a sign $-$, one substitutes $-1$.
\end{prop}
\proof
Consider a product $\frac{A^x}{x!}\frac{B^y}{y!}\frac{C^z}{z!}$, occurring in $\mathcal{F}_{\mu}^{s_1}(A)\mathcal{F}_{\nu}^{s_2}(B)\mathcal{F}_{\rho}^{s_3}(C)$. A coefficient at this product is defined as follows. If
$x=\mu+\tau_1v$, $y=\nu+\tau_2 v$, $z=\rho+\tau_3v$, then the coefficient equals
\begin{equation}
\label{kf1}
\binom{\tau_1+s_1}{s_1}
\binom{\tau_2+s_2}{s_2}
\binom{\tau_3+s_3}{s_3}
\end{equation}
Now consider a scalar product
\begin{equation}
\label{gabc}
<g,\frac{A^x}{x!}\frac{B^y}{y!}\frac{C^z}{z!}>.
\end{equation}
This scalar product can be calculated as follows. In \eqref{goo} the brackets are opened. One obtains an expression which is a $\Gamma$-series in variables $Z$, where the variables occurring in a determinant with a sign $-$ are taken with a sign $-$.
After this the variables $Z$ are replaced by products of variables $A_X,B_X,C_X$ in an obvious way.
Let $h$ be a vector of exponents of a monomial in variables $Z$, which appears when one opens brackets in \eqref{goo}. Such a monomial is divided by factorials of its powers and is multiplied by a sign $\pm$.
The scalar product \eqref{gabc} is non-zero if for some $h$ one has $[pr_a(h),pr_b(h),pr_c(h)]=[x,y,z]$. In this case \eqref{gabc} is obtained by a change in a suitable monomial of all variables $Z$ to $1$ and a summation over all suitable monomials.
Finally let us write $x=\mu+\tau_1v$, $y=\nu+\tau_2v$, $z=\rho+\tau_3v$. In the case $\tau_1=\tau_2=\tau_3=0$ one has $[\mu,\nu,\rho]=[pr_a(\varpi),pr_b(\varpi),pr_c(\varpi)]$. Thus
$[x,y,z]=[pr_a(\varpi'),pr_b(\varpi'),pr_c(\varpi')]$, where
\begin{align*}
& \varpi'=\varpi+t^a_1 v^a_1+t^a_2v^a_2+
t^b_1 v^b_1+t^b_2v^b_2+
t^c_1 v^c_1+t^c_2v^c_2,\\
&\tau_1=t^a_1+t^a_2,\,\,\, \tau_2=t^b_1+t^b_2,\,\,\, \tau_3=t^c_1+t^c_2,\,\,\, \end{align*}
Let us take into consideration the coefficient \eqref{kf1} at $\frac{A^x}{x!}\frac{B^y}{y!}\frac{C^z}{z!}$, occurring in $\mathcal{F}_{\mu}^{s_1}(A)\mathcal{F}_{\nu}^{s_2}(B)\mathcal{F}_{\rho}^{s_3}(C)$. As the result one gets the expression on the right hand side of \eqref{p1}.
\endproof
Introduce vectors
\begin{align*}
& f_a=-e_{[a_1c_{2,3}]}+e_{[a_3c_{1,2}]}-e_{[a_{2,3}b_{1,3}c_{1,2}]}-e_{ [a_{1,2}b_{1,3}c_{2,3} ] },\\
&f_b=-e_{[b_1c_{2,3}]}+e_{[b_3c_{1,2}]}-e_{[a_{1,3}b_{2,3}c_{1,2}]}-e_{ [a_{1,3}b_{1,2}c_{2,3} ] },\\
&f_c=-e_{[a_1b_{2,3}]}+e_{[a_3b_{1,2}]}-e_{[a_{1,2}b_{2,3}c_{1,3}]}-e_{ [a_{2,3}b_{1,2}c_{1,3} ] }.
\end{align*}
One has relations
\begin{align}
\begin{split}
\label{prabc}
&pr_a(\tau+f_a)=pr(\tau)+r,\\
&pr_b(\tau+f_a)=pr(\tau),\\
&pr_c(\tau+f_a)=pr(\tau),
\end{split}
\end{align}
and analogous relations for $f_b,f_c$.
\begin{prop}
\label{prp2}
Let $\varpi, \mu,\nu,\rho$ satisfy the relations \eqref{vrph}. Then
$$
<g,\mathcal{F}_{\mu-s_1r}^{s_1}(A)\mathcal{F}_{\nu-s_2r}^{s_2}(B)\mathcal{F}_{\rho-s_3r}^{s_3}(C)>=
\mathcal{F}_{\varpi-s_1f_a-s_2f_b-s_3f_c}^{s_1,s_2,s_3}(\pm 1,B_1),
$$
Instead of those variables from $Z$ (see \eqref{perem}) which occur in a determinant with a sign $+$, one substitutes $+1$, and instead of those variables which occur in a determinant with a sign $-$, one substitutes $-1$.
\end{prop}
\proof
This Proposition follows immediately from Proposition \ref{prp1} and the formula \eqref{prabc}.
\endproof
Introduce a function
\begin{equation}
\label{defz1}
\tilde{F}_{\varpi}(Z,B'_1):=\sum_{s_1,s_2,s_3\in\mathbb{Z}_{\geq 0}}(-1)^{s_1+s_2+s_3} \mathcal{F}_{\varpi-s_1f_a-s_2f_b-s_3f_c}^{s_1,s_2,s_3}(Z,B'_1).
\end{equation}
Note that the function $\mathcal{F}_{\varpi}^{0,0,0}(Z,B'_1)$ satisfies the GKZ equation and the function $\tilde{F}_{\varpi}(Z,B'_1)$ satisfies the A-GKZ equation which consists of 3 equations of type
$$
(\frac{\partial^2}{\partial [a_1c_{2,3}]\partial [a_{2,3}b_{1,2}c_{1,3}]}-\frac{\partial^2}{\partial [a_2c_{1,3}]\partial [a_{1,3}b_{1,2}c_{2,3}]}+\frac{\partial^2}{\partial [c_3a_{1,2}]\partial [a_{1,2}b_{1,3}c_{2,3}]})\tilde{F}_{\varpi}(Z,B'_1)=0.
$$
As a direct consequence of Proposition \ref{prp2} one gets
\begin{prop}
Let $\varpi, \mu,\nu,\rho$ satisfy the relations \eqref{vrph}. Then
$$<g,\tilde{F}_{\mu}(A)\tilde{F}_{\nu}(B)\tilde{F}_{\rho}(C)>=\tilde{F}_{\varpi}(\pm 1,B'_1).$$
Instead of those variables from $Z$ (see \eqref{perem}) which occur in a determinant with a sign $+$, one substitutes $+1$, and instead of those variables which occur in a determinant with a sign $-$, one substitutes $-1$.
\end{prop}
Introduce a function
\begin{equation}
\label{funs}
F_{\varpi}(Z,B'_1):=\sum_{s_1,s_2,s_3\in\mathbb{Z}_{\geq 0}}f^{pr_a(\varpi)}_{s_1}f^{pr_b(\varpi)}_{s_2}f^{pr_c(\varpi)}_{s_3}\tilde{F}_{\varpi-s_1f_a-s_2f_b-s_3f_c}(Z,B'_1),
\end{equation}
where coefficients $f$ are defined in \eqref{kfs}. Using the relation \eqref{fs0}, one gets the following statement
\begin{prop}
Let \eqref{goo} contain $(aabbcc)$ and does not contain $(abc)$, let $\varpi, \mu,\nu,\rho$ satisfy the relations \eqref{vrph}. Then
$$<g,F_{\mu}(A)F_{\nu}(B)F_{\rho}(C)>=F_{\varpi}(\pm 1,B'_1)$$
Let \eqref{goo} contain $(abc)$ and does not contain $(aabbcc)$, then
$$<g,F_{\mu}(A)F_{\nu}(B)F_{\rho}(C)>=F_{\varpi}(\pm 1,B''_1)$$
Instead of those variables from $Z$ (see \eqref{perem}) which occur in a determinant with a sign $+$, one substitutes $+1$, and instead of those variables which occur in a determinant with a sign $-$, one substitutes $-1$.
\end{prop}
\subsection{ $3j$-symbols}
\subsection{Selection rules for $3j$-symbols}
Let us answer the following question: which products $\mathcal{F}_{\mu}(a)\mathcal{F}_{\nu}(b)\mathcal{F}_{\rho}(c)$ can have a non-zero scalar product with a function $g$, of type \eqref{goo}.
Let us consider an A-GKZ realization. In this realization the vector $\mathcal{F}_{\mu}(a)\mathcal{F}_{\nu}(b)\mathcal{F}_{\rho}(c)$ is presented by an expression of type $F_{\mu}(A)F_{\nu}(B)F_{\rho}(C)$. The function $g$ is presented by an expression of type $g+pl$, where $pl$ is proportional to $A_1A_{2,3}-A_{2}A_{1,3}+A_{3}A_{1,2}$,.... Since the functions $F_{\mu}(A),F_{\nu}(B),F_{\rho}(C)$ are solutions of the A-GKZ system, one has
$$
<F_{\mu}(A)F_{\nu}(B)F_{\rho}(C), g+pl>=<F_{\mu}(A)F_{\nu}(B)F_{\rho}(C), g>.
$$
Thus we need to consider the scalar product $<F_{\mu}(A)F_{\nu}(B)F_{\rho}(C), g>$.
A support of a function presented as a power series is a set of vectors of exponents of it's monomials.
Note that the support of a function $g$, defined in \eqref{goo}, considered as a function of determinants
$a_X,b_X,c_X$
is the following
$$
supp(g)=(\kappa+B)\cap (\mathbb{Z}_{\geq 0}^{18})
$$
for some vector $\kappa\in\mathbb{Z}_{\geq 0}^{18}$ and some lattice $B$. The vector $\kappa$ and the generators of $B$ can be written in the following manner:
\begin{align*}
&\kappa=[pr_a(v_0),pr_b(v_0),pr_c(v_0)],
& B=\mathbb{Z}<\pi_1,...,\pi_{22}>,\,\,\,\,\, \pi_i=[pr_a(p_i),pr_b(p_i),pr_c(p_i)]
\end{align*}
The function $F_{\mu}(A)F_{\nu}(B)F_{\rho}(C)$ can have a non-zero scalar product with $g$ only if the following intersection is non-empty
$$
supp(F_{\mu}(A)F_{\nu}(B)F_{\rho}(C)) \cap supp(g)
$$
This condition is written explicitely as follows
\begin{align}
\begin{split}
\label{potb}
&(\bigcup_{s_1,s_2,s_3\in \mathbb{Z}_{\geq 0}} [\mu+B_{GC}-s_1r, \nu+B_{GC}-s_2r,\rho+B_{GC}-s_3r ]\cap (\mathbb{Z}_{\geq 0}^{18})) \cap supp(g)\neq \emptyset
\end{split}
\end{align}
A condition for a vector $\varpi$, that provides us that the vector $[\mu,\nu,\rho]=[pr_a(\varpi),pr_b(\varpi),pr_c(\varpi)]$ satisfies \eqref{potb}, is just the condition \eqref{vrph}.
\subsection{A formula for a $3j$-symbol}
Let us write
\begin{equation}
g=\sum_{\mu',\nu',\rho'} c_{\mu',\nu',\rho'} \mathcal{F}_{\mu'}(a)\mathcal{F}_{\nu'}( b)\mathcal{F}_{\rho'}(c),
\end{equation}
where $ c_{\mu',\nu',\rho'}$ is a $3j$-symbol \eqref{3j}. Change the determinants $a_X$, $b_X$, $c_X$ to independent variables $A_X$, $B_X$, $C_X$. Then the equality holds only modulo the Pl\"ucker relations.
Take a scalar product of both sides of this equality with $F_{\mu}(A)\mathcal{F}_{\nu}(B)\mathcal{F}_{\rho}(C)$. Since these functions are solutions of the A-GKZ system, one can ignore the fact that the previous equality holds only modulo the Pl\"ucker relations.
Using the previous calculations one obtains the following theorem.
\begin{thm}
\label{ost}
Let us be given representations with the highest weights $[m_1,m_2,0]$, $[m'_1,m'_2,0]$, $[M_1,M_2,M_3]$. Let there also be given Gelfand-Tsetlin base vectors; the $\Gamma$-series that correspond to them have shift vectors $\mu,\nu,\rho$ (for the formula for the shift vectors see Theorem \ref{vec3}). Fix a function of type \eqref{goo} with exponents satisfying the conditions of Proposition \ref{mlt}.
Take a vector $\varpi$ which is related by the equalities \eqref{vrph} to the vectors $\mu,\nu,\rho$ (if this is not possible, then the $3j$-symbol equals zero).
Then a $3j$-symbol \eqref{3j} equals
\begin{align}
\begin{split}
\label{osntf2}
&\frac{F_{\varpi}(\pm 1,B'_1)}{\mathcal{F}_{\mu}(1)\mathcal{F}_{\nu}(1)\mathcal{F}_{\rho}(1)}, \text{ if \eqref{goo} does not contain $(abc)$},\\
&\frac{F_{\varpi}(\pm 1,B''_1)}{\mathcal{F}_{\mu}(1)\mathcal{F}_{\nu}(1)\mathcal{F}_{\rho}(1)}, \text{ if \eqref{goo} does not contain $(aabbcc)$},
\end{split}
\end{align} where the function occurring in the numerator is defined in \eqref{funs} (see also \eqref{defz1}, \eqref{defz}).
In the function $F_{\varpi}$ in the numerator we substitute $+1$ for those $Z$ (see \eqref{perem}) that occur in determinants in \eqref{goo} with the sign $+$, and $-1$ for those $Z$ that occur in determinants in \eqref{goo} with the sign $-$.
In the denominator the $\Gamma$-series introduced in Theorem \ref{vec3} occur. We substitute $1$ for all their arguments.
\end{thm}
\section{Appendix: simpler selection rules}
\label{dop}
In this section we present conditions under which a Clebsch-Gordan coefficient or a $3j$-symbol can be non-zero.
\subsection{Selection rules for Clebsch-Gordan coefficients}
Let us find necessary conditions on the elements of the diagrams \eqref{3d} under which the Clebsch-Gordan coefficient $C^{U,\gamma,s}_{V,W;\alpha,\beta}$ can be non-zero.
Let us find a condition on the upper rows, which are $\mathfrak{gl}_3$-highest weights. Let the index $s$ correspond to the function \eqref{foo}. In terms of the exponents of \eqref{foo} these upper rows are written as follows
\begin{align}
\begin{split}
\label{s3d}
&[m_1,m_2,0]=[\alpha+\gamma+\omega+\varphi+\psi+\theta,\gamma+\psi+\theta,0],\\
&[m'_1,m'_2,0]=[\beta+\delta+\omega+\varphi+\psi+\theta, \delta+\varphi+\theta ,0]\\
&[M_1,M_2,M_3]=[\alpha+\beta+\gamma+\delta+\omega+\varphi+\psi+2\theta, \gamma+\delta+\omega+\varphi+\psi+\theta,\varphi+\psi+\theta]\\
\end{split}
\end{align}
One sees that between these rows one has a relation
\begin{align}
\begin{split}
\label{s3}
&[m_1,m_2,0]+[m'_1,m'_2,0]+\omega[-1,1,0]+(\varphi+\psi)[-1,0,1]+\theta[0,-1,1]=\\&=[M_1,M_2,M_3].
\end{split}
\end{align}
Moreover this relation is sufficient for existence of $\alpha,...,\omega$, for which the conditions \eqref{s3d} hold.
Let us find selection rules for the second rows of the diagrams \eqref{3d}. One can obtain them by considering the selection rules for the Clebsch-Gordan coefficients for the algebra $\mathfrak{gl}_2$. One has a tensor product of representations of the algebra $\mathfrak{gl}_2$ with highest weights $[k_1,k_2]$ and $[k'_1,k'_2]$. Which highest weights $[K_1,K_2]$ occur in a decomposition of this tensor product?
A base in the space of $\mathfrak{gl}_2$-highest vectors in the functional realization is formed by functions of type
\begin{equation}
\label{foo2}
f=a_1^{\alpha}a_{1,2}^{\beta}b_1^{\gamma}b_{1,2}^{\delta}(ab)^{\omega}.
\end{equation}
Then
\begin{align}
\begin{split}
\label{2d}
&[k_1,k_2]=[\alpha+\beta+\omega,\beta],\\
&[k'_1,k'_2]=[\gamma+\delta+\omega,\delta],\\
&[K_1,K_2]=[\alpha+\beta+\gamma+\delta+\omega,\beta+\delta+\omega].
\end{split}
\end{align}
One sees that the following condition holds
\begin{align}
\begin{split}
\label{s2}
[k_1,k_2]+[k'_1,k'_2]+\omega[-1,1]=[K_1,K_2].
\end{split}
\end{align}
The relations \eqref{s2} are sufficient for existence of $\alpha,...,\omega$, for which
the conditions \eqref{2d} hold.
Considering $E_{1,1}$-weights of diagrams one concludes that
for the third rows one has
\begin{equation}
\label{s1}
s+s'=S.
\end{equation}
Thus we have proved the following.
\begin{prop}
A Clebsch-Gordan coefficient can be non-zero only if conditions \eqref{s3}, \eqref{s2}, \eqref{s1} hold for some non-negative integers $\omega$, $\varphi$, $\psi$, $\theta$.
\end{prop}
\end{document} |
\begin{document}
\author{Matthew Robert Ballard}
\address{Department of Mathematics, University of Washington,
Seattle, WA 98195, USA}
\email{[email protected]}
\title{Sheaves on Local Calabi-Yau Varieties}
\maketitle
\begin{abstract}
We investigate sheaves supported on the zero section of the total space of a locally-free sheaf $E$ on a smooth, projective variety $X$ when $E$ satisfies $\bigwedge^{\operatorname{rank} E} E \cong \omega_X$. We rephrase this construction using the language of $A_{\infty}$-algebras and provide a simple characterisation of the case $E = \omega_X$.
\end{abstract}
\section{Introduction}
Calabi-Yau varieties formed by taking the total space of a locally-free sheaf $E$ on a variety $X$ with $\bigwedge^d E \cong \omega_X$ are an important testing ground for ideas in mathematics and physics. They are often viewed as an approximation to a proper Calabi-Yau variety. In this paper, we study these local Calabi-Yau varieties from a homological perspective. We are interested in the bounded derived category of coherent sheaves with support on the zero section of $V(E)$, $D^b_X(\Coh(V(E)))$, and the larger unbounded derived category of quasi-coherent sheaves $D_X(\Qcoh(V(E)))$. In the case that $X$ is smooth and proper, $D^b_X(\Coh(V(E)))$ possesses a trivial Serre functor, hence provides simple examples of Calabi-Yau triangulated categories. Then, we have two natural maps $i: X \hookrightarrow V(E)$ and $\pi: V(E) \rightarrow X$ satisfying $\pi \circ i = \operatorname{id}_X$. These furnish functors which descend to the derived categories $i_*,\pi^*: D(\Qcoh(X)) \rightarrow D(\Qcoh(V(E)))$ and $Li^*,\pi_*: D(\Qcoh(V(E))) \rightarrow D(\Qcoh(X))$. The image of $i_*$ lies in $D_X(\Qcoh(V(E)))$ and generates it as a triangulated category. The functor $i_*$ is not full but, as is well-known \cite{ST00},
\begin{displaymath}
\operatorname{Ext}^j_E(i_*F,i_*G) = \bigoplus_{r=0}^{\operatorname{rank} E} \operatorname{Ext}^{j+r}_X(F,G \otimes \bigwedge^r E)
\end{displaymath}
After a brief discussion of the choices of dg-models underlying $D^b(\Coh(X))$ and of sheaves with support on sub-schemes, we lift this isomorphism to a quasi-isomorphism on the chain level and give the proper algebra structure on the dg-endomorphisms of $i_*F$ for this quasi-isomorphism to be a morphism of dg-algebras. This allows us to describe $D_X(\Qcoh(V(E)))$ and $D_X^b(\Coh(V(E)))$ in simple terms of $X$ and $E$. We can cut down the data necessary by appealing to the triviality of Serre duality on $D^b_X(\Coh(V(E)))$. At the chain level, this manifests itself as a symmetric pairing on the Hom-spaces of the dg-category which is non-degenerate on cohomology. In the special case where $E = \omega_X$, one expects that we can describe $D_X(\Qcoh(\omega_X))$ solely in terms of $D(\Qcoh(X))$. Indeed, this is the case. The idea, while straightforward, requires some technical development to make manifest. This leads into $A_{\infty}$-algebras and formal symplectic geometry, which occupies the final two sections of the paper. We conclude the paper by making connections with homological mirror symmetry.
\section{DG-models for the bounded derived category of coherent sheaves}
Our main interest in this paper is not in the triangulated category
$D^b(\mathbb{C}oh(X))$ but in an enhanced version of it \cite{BK91}. For any
separated scheme $X$ there is a standard enhancement - namely the dg-category
of bounded below complexes of injective sheaves with bounded cohomology.
The utility is its universality. Its main drawback is its inaccessibility to
computation. We have in mind another dg-category $\check{C}(X)$.
\begin{defn}
Let $\check{C}(X)$ denote the dg-category whose objects are locally-free
coherent sheaves and whose morphism spaces are given by choosing some
affine cover $\mathcal{U}$ and setting
\begin{equation*}
\Hom_{\check{C}(X)}(E,F) = \check{C}(\mathcal{U},E^{\vee} \otimes F)
\end{equation*}
\end{defn}
If $X$ is a smooth projective variety over $\mathbb{C}$, there is another more
differential geometric construction. Although it will not be featured heavily
in this paper, it has one nice quality.
\begin{defn}
Let $\delbar(X)$ denote the dg-category with objects locally-free coherent sheaves
and morphisms given by
\begin{equation*}
\Hom_{\delbar(X)}(E,F) = \Gamma(V(F)^{\vee}\otimes V(E) \otimes \Omega^{0,*})
\end{equation*}
The differential is the Dolbeault differential and $V(E)$ is the total space of the geometric vector bundle corresponding to the locally-free sheaf $E$. $\Omega^{0,*}$ is the bundle of $d\bar{z}$-forms.
\end{defn}
\begin{rmk}
The main attractive quality of $\delbar(X)$ is the presentation of Serre
duality. The trace morphism $\operatorname{tr}: H^n(\omega_X) \rightarrow \mathbb{C}$ is naturally defined. $\Gamma(\omega_X \otimes \Omega^{0,n})$
equals $\Gamma(\Omega^{n,n})$ and, hence, any section gives a top-degree differential
form on $X$ which can be integrated. Applying the standard argument using
bump functions, one sees that the pairing is non-degenerate
at the chain level.
\end{rmk}
Given two locally-free sheaves $E$ and $F$, in each dg-model, the cohomology
of each morphism chain complex computes $\operatorname{Ext}_X^*(E,F)$. In nice situations, one therefore expects
that the derived categories are equivalent as triangulated categories.
Let us recall how to derive a dg-category. Let $\mathcal{C}$ be a
dg-category over $k$. Then, we can consider the category of dg-functors $M:
\mathcal{C}^{op} \rightarrow Ch(k)$ where $Ch(k)$ is the dg-category of chain complexes
over $k$. Such a functor is called a right $\mathcal{C}$-module. The dg-category
$\operatorname{Func}(\mathcal{C}^{op},Ch(k))$ is called the category of right $\mathcal{C}$-
modules and is denoted by $\operatorname{Mod} \mathcal{C}$. $\operatorname{Mod} \mathcal{C}$ naturally has
cones and a shift functor that provide it with a pre-triangulated structure. Therefore,
$H^0(\operatorname{Mod} \mathcal{C})$ is a triangulated category with triangles
induced by these cones. We consider the smallest pre-triangulated subcategory containing the image of $\mathcal{C}$ and closed under idempotent splittings. Localising this at quasi-isomorphisms yields the (perfect) derived category of $\mathcal{C}$, denoted by $D(\mathcal{C})$. (This is usually denoted $D_{\operatorname{perf}}(\mathcal{C})$.) For more information and references see \cite{Kel06}.
Let $I(X)$ denote the dg-category consisting of a choice of K-injective resolution of each (unbounded) complex of quasi-coherent sheaves, see \cite{Spa88}. Then, if $X$ is quasi-compact, $H^0(I(X)) \cong D(\Qcoh(X))$. Let us recall the following result from \cite{Sei03}.
\begin{prop}
$D(\check{C}(X))$ is equivalent to the smallest triangulated subcategory of $D(I(X))$ containing all locally-free coherent sheaves.
\end{prop}
\proof Given such a locally-free sheaf $E$, let $I_E$ denote a choice of injective resolution. We shall form a new category $T(X)$ whose objects are locally-free sheaves and whose morphisms are given by
\begin{equation*}
\Hom_{T(X)}(F,E) = \begin{pmatrix} \Hom_{\mathcal{I}(X)}(I_F,I_E) &
\check{C}(F^* \otimes I_E)[1] \\ 0 & \check{C}(F^* \otimes E) \end{pmatrix}
\end{equation*}
Consider the double complex formed by $\check{C}(F^* \otimes I_E)$. From
this we have natural homomorphisms
\begin{equation*}
\Hom_{I(X)}(I_F,I_E) \rightarrow \check{C}(F^* \otimes I_E) \leftarrow
\check{C}(F^* \otimes E)
\end{equation*}
The first map comes from composing with the map $F \rightarrow I_F$ and then restricting
to the open affines. The second map comes from composing with the map $E \rightarrow I_E$.
Using these maps we can appropriately define composition in $T(X)$
\begin{equation*}
\Hom_{\mathcal{I}(X)}(I_F,I_G) \otimes \check{C}(E^* \otimes I_F) \rightarrow \check{C}
(E^* \otimes I_G)
\end{equation*}
\begin{equation*}
\check{C}(F^* \otimes I_G) \otimes \check{C}(E^* \otimes F) \rightarrow \check{C}(E^*
\otimes I_G)
\end{equation*}
Consequently, using the two natural maps above, we obtain functors
$I(X) \leftarrow T(X) \rightarrow \check{C}(X)$. We wish to show that each of these is
a quasi-isomorphism. The complex $\check{C}(\mathcal{U},F^{\vee} \otimes I_E)$ is
naturally bi-graded with the decomposition of the differential into a \v{C}ech
piece and $d_{F^{\vee}} \otimes 1 + 1 \otimes d_{I_E}$. If we take cohomology
with respect to the \v{C}ech differential, we will only get the complex
$\Gamma(F^{\vee} \otimes I_E)$ since $F^{\vee} \otimes I_E$ is still a complex
of injectives. Thus, the cohomology of $\check{C}(\mathcal{U},F^{\vee} \otimes I_E)$
is simply $\operatorname{Ext}(F,E)$. Each map preserves the natural bi-gradings involved.
In the case of $\Hom(I_F,I_E)$ the bi-grading is trivial (i.e.\ it is a regular grading).
The computation above then shows that the first map is a quasi-isomorphism. For
the second, we note that $F^{\vee}|_U$ for any affine $U$ is a bounded complex of
projectives. Hence, $H(\mathbb{H}om(F|_U,(I_E)_U)) = H(\mathbb{H}om(F|_U,E_U)$ as $E \rightarrow I_E$ is
a quasi-isomorphism. Thus, taking the differential with respect to the other piece
produces an isomorphism on the $E^1$-page and hence induces a quasi-isomorphism.
Thus, the natural functors above induce a quasi-equivalence between $\check{C}(X)$ and the sub-category $I(X)$ consisting of resolutions of locally-free sheaves. The final statement is now clear. \qed
\begin{cor}
If $X$ is smooth and projective over $k$, then $D(\check{C}(X))$ is triangle equivalent to $D^b(\Coh(X))$.
\end{cor}
Next we essentially repeat the argument to show that $\check{C}(X)$ and $\delbar(X)$
are quasi-equivalent.
\begin{prop}
$D(\check{C}(X))$ and $D(\delbar(X))$ are equivalent.
\end{prop}
\proof We will combine \v{C}ech and Dolbeault cohomology into a double
complex $C^{s,t} = \check{C}^s(V(F) \otimes V(E)^{\vee} \otimes \Omega^{0,t})$ with the sum of the two
differentials. Now we proceed as in the previous proof by forming a new category $\mathcal{T}'$ whose objects are locally-free sheaves and whose morphism space between $E$ and $F$ is $\check{C}^s(V(F) \otimes V(E)^{\vee} \otimes \Omega^{0,t})$. We again have natural dg-functors from $\check{C}(X)$ and $\delbar(X)$ to $\mathcal{T}'$. These are quasi-isomorphisms, as again can be checked by looking at the spectral sequence associated to $C^{s,t}$. \qed
We shall use $I(X)$ for most of the formal results in the next section and $\check{C}(X)$ when considering local Calabi-Yau varieties.
\section{Preliminaries on coherent sheaves supported on subvarieties}
Our objects of interest are coherent sheaves on $V(E)$ supported on the zero
section. In this section, we recall the general notions of categories of sheaves with fixed support. Let $Y$ be a
projective scheme over $k$ and $Z$ a closed subscheme with ideal sheaf $I_Z$.
\begin{defn}
A coherent sheaf $F$ on $Y$ is supported on $Z$ if some power of $I_Z$ annihilates $F$. A quasi-coherent sheaf is supported on $Z$ if all its coherent subsheaves are supported on $Z$.
\end{defn}
The following is classical.
\begin{lem}
If $F$ is a quasi-coherent sheaf supported on $Z$, then there exists an injective quasi-coherent sheaf $I(F)$ supported on $Z$ and an injection $F \hookrightarrow I(F)$.
\end{lem}
\proof First, note that for any point $x \not \in Z$, we have $F_x = 0$. To get the desired
quasi-coherent sheaf, choose the injective envelopes $I(F_x)$ of $F_x$ for all $x \in Z$
and let $I(F) = \prod_{x\in Z} i_x^*(I(F_x))$ where $i_x$ is the inclusion of the point in
$Y$. Now, let $J = \{i \in I(F_x) \mid (I_Z)_x^k i = 0 \text{ for some } k\}$. Then, $J$ is
injective and $F_x \subset J$. Hence, $J = I(F_x)$. $I(F)$ is therefore
supported on $Z$ and clearly $F$ injects into $I(F)$. \qed
Let $D_Z(\Qcoh(Y))$ denote the subcategory of $D(\Qcoh(Y))$ consisting of complexes whose cohomology sheaves are supported on $Z$.
\begin{lem}
$D(\Qcoh_Z(Y))$ is triangle equivalent to $D_Z(\Qcoh(Y))$.
\end{lem}
\proof
It is enough to prove this result for bounded below complexes of injectives in $D_Z(\Qcoh(Y))$ since we use these to build K-injective resolutions. Let $I_*$ be a bounded below complex of injective sheaves whose cohomology sheaves are supported on $Z$. We shall construct a complex of injective sheaves $J_*$ supported on $Z$ quasi-isomorphic to $I_*$. Assume that the first non-zero term in $I_*$ is $I_0$. Then, $\operatorname{ker} d_0 \subset I_0$ is a coherent sheaf supported on $Z$. From the previous lemma, we can choose an injective sheaf $J_0$ and an injection $\operatorname{ker} d_0 \hookrightarrow J_0$. This extends to a morphism $\psi_0:I_0 \rightarrow J_0$. $\operatorname{cok} \psi_0$ is also supported on $Z$. Choose an injective
sheaf $J_1'$ supported on $Z$ and so that $\operatorname{cok} \psi_0$ injects into $J_1'$. The
induced map $I_0 \rightarrow J_1'$ factors through $\operatorname{im} d_0$ by construction, hence induces a map $\psi_1' : I_1 \rightarrow J_1'$. Choose an injective sheaf $J_1''$ supported on $Z$
so that $\operatorname{ker} d_1 / \operatorname{cok} d_0$ injects into $J_1''$. Set $J_1 = J_1' \oplus J_1''$ with the differential $d_0: J_0 \rightarrow J_1$ given by the map to $J_1'$. Then we have an induced map $\psi_1: I_1 \rightarrow J_1$, $\psi_0$ is an isomorphism on $H^0$, $\psi_1$ is an injection on $H^1$. Now iterate this procedure to give $J_*$. \qed
\begin{cor}
$D^b(\Coh_Z(Y))$ is equivalent to $D^b_Z(\Coh(Y))$.
\end{cor}
\proof Assume that $J_*$ is a bounded below complex of injectives with support on $Z$ and that $J_*$ has bounded coherent cohomology. We need to show that $J_*$ is quasi-isomorphic to a bounded complex of coherent sheaves with support on $Z$. To do this we let $K_*$ and $B_*$ be the kernels and images of the differentials. We proceed again by induction and assume that $J_*$ begins at zero. We take $E_0 = K_0$. To construct $E_1$ we note that since $H^1(J_*)$ is coherent we can find a coherent subsheaf $F_1 \subset K_1$ which surjects onto $H^1(J_*)$. The kernel of the map $F_1 \rightarrow H^1(J_*)$ lies in the image of $d_0$ and is coherent. Thus, we can redefine $E_0$ as the preimage of this kernel under $d_0$, which is coherent. Now we proceed by induction, noting that to finish the $i$-th stage we only need to redefine $E_{i-1}$. \qed
\begin{rmk}
This answers part of a seemingly unanswered question from \cite{Bri05}; see the paragraph preceding Lemma~4.4.
\end{rmk}
\begin{lem}
Let $i: Z \rightarrow Y$ denote the inclusion and $i_*: \Coh(Z) \rightarrow \Coh(Y)$ ($i_*: \Qcoh(Z) \rightarrow \Qcoh(Y)$) the induced functor. Then, $i_*$ is exact and the image of $i_*$ generates $\Coh_Z(Y)$ as an abelian category, i.e. the smallest abelian subcategory of $\Coh_Z(Y)$ containing the essential image of $i_*$ is $\Coh_Z(Y)$ itself. Similarly, the smallest abelian subcategory closed under arbitrary direct sums containing the image of $i_*$ in $\Qcoh_Z(Y)$ is $\Qcoh_Z(Y)$ itself.
\end{lem}
\proof $i_*$ is always left-exact and since it is an isomorphism onto its image
it is right exact. Any object $F$ in $\Coh_Z(Y)$ admits a filtration $0 \subset F_0
\subset F_1 \subset \cdots \subset F_n \subset F$ such that $F_i/F_{i-1}$ is
annihilated by $I_Z$. Thus, it is of the form $i_*E$ for some $E$. The resulting
short-exact sequences show that we can get any object of $\Coh_Z(Y)$ using a finite number (which depends on $F$ and is unbounded over $\Coh_Z(Y)$) of iterations of short exact sequences starting from objects in the image of $i_*$. The final statement results from the fact that any quasi-coherent sheaf is a union of its coherent subsheaves. \qed
We say that a subcategory $\mathcal{S}$ of a triangulated category $\mathcal{T}$ strongly generates if the smallest triangulated subcategory of $\mathcal{T}$ containing $\mathcal{S}$ is $\mathcal{T}$ itself. We say that it generates if the smallest triangulated subcategory of $\mathcal{T}$ closed under direct sums and containing $\mathcal{S}$ is $\mathcal{T}$ itself. The following is now immediate from the previous lemma.
\begin{cor}
The image of $D^b(\Coh(Z))$ under $i_*$ strongly generates $D^b(\Coh_Z(Y))$ and the image of $D(\Qcoh(Z))$ under $i_*$ generates $D(\Qcoh_Z(Y))$.
\label{cor:generationcor}
\end{cor}
\begin{rmk}
In \cite{Rou03}, a notion of dimension of a triangulated category is defined. It is noted that the dimension of $D^b_Z(\Coh(Y))$ is infinite. Indeed, any complex must be annihilated by some large power of $I_Z$. Thus, an infinite number of extensions is required to reach all sheaves supported on $Z$. Consequently, $D^b_Z(\Coh(Y))$ cannot be smooth in the sense of \cite{KS06} although it is compact.
\end{rmk}
Often one wants to view these local varieties as small neighborhoods of $Z$ in some
larger ambient variety. In the Zariski topology, this of course is problematic. To
make it precise we must pass to the formal completion of $Y$ along $Z$.
Let us first recall the local situation. Let $R$ be a Noetherian ring and $I$ an ideal. Then the formal completion $\hat{R}$ along $I$ is the inverse limit of the system $R/I^n \rightarrow R/I^{n-1}$. Similarly, given a module $M$ over $R$, we can complete $M$ using the inverse system $M/I^n \rightarrow M/I^{n-1}$ to get a module $\hat{M}$ over $\hat{R}$. The following lemma is standard.
\begin{lem}
If $M$ is finitely-generated, then $\hat{M} \cong M \otimes_R \hat{R}$.
\end{lem}
Analogous to the previous discussion, we can define the abelian category of finitely-generated $\hat{R}$-modules supported along $\hat{I}$, $\operatorname{mod}_{\hat{I}}(\hat{R})$, or
the abelian category of modules supported along $\hat{I}$, $\operatorname{Mod}_{\hat{I}}(\hat{R})$. (Recall that the completion of a Noetherian ring is
again Noetherian.)
\begin{lem}
$\operatorname{Mod}_I(R)$ is equivalent to $\operatorname{Mod}_{\hat{I}}(\hat{R})$ as an abelian category.
\end{lem}
\proof Recall that $\hat{R}/\hat{I}^n \cong R/I^n$. Now consider the completion
functor from $\operatorname{Mod}(R)$ to $\operatorname{Mod}(\hat{R})$. Restricted to $\operatorname{mod}(R)$, it is exact.
In addition, for $M \in \operatorname{mod}_I(R)$ we have $\hat{M} \cong M \otimes_R \hat{R} \cong
M \otimes_{R/I^n(M)} \hat{R}/\hat{I}^n(M) \cong M$. Thus, $\operatorname{mod}_I(R)$ is equivalent
to $\operatorname{mod}_{\hat{I}}(\hat{R})$. Any element of $\operatorname{Mod}_I(R)$ naturally has the structure of an $\hat{R}$-module. An element $\hat{r} = \lbrace r_l \rbrace$, $r_l \in R/I^l$, acts on $n \in N$ by $\hat{r} n = r_l n$ for $l$ such that $I^l n = 0$. Since $I^j n = 0$ for $j \geq l$, $r_j$ acts the same as $r_l$. Let $N$ be an element of $\operatorname{Mod}_{\hat{I}}(\hat{R})$. $N = \bigcup_{j \in J} F_j$ for finitely-generated $\hat{R}$-modules $F_j$. Thus, $F_j$ is annihilated by $\hat{I}^k$ for some $k$ and has the structure of an $R$-module. Consequently, $N$ is an $R$-module. \qed
This is simply the local case of completing a Noetherian scheme $X$ along a closed
subscheme $Y$. Consequently, we have
the following corollaries.
\begin{cor}
$\Qcoh_Y(X) \cong \Qcoh_{\hat{Y}}(\hat{X})$
\end{cor}
\begin{cor}
$D_Y(\Qcoh(X))$ and $D^b_Y(\Coh(X))$ only depend on the isomorphism class of $\hat{X}$.
\end{cor}
Let us now restrict our attention to the case where $X$ and $Y$ are smooth. As before let $I_Y$ denote the ideal sheaf of $Y$ on $X$. Then $I_Y/I^2_Y$ is the conormal sheaf of $Y$ in $X$. Recall the following fact.
\begin{lem}
With the hypotheses above, $I_Y^n/I_Y^{n+1} \cong \Sym^n(I_Y/I^2_Y)$.
\end{lem}
We now have extensions.
\begin{equation*}
0 \rightarrow I_Y^n/I_Y^{n+1} \rightarrow \mathcal{O}_X/I_Y^{n+1} \rightarrow \mathcal{O}_X/I_Y^n \rightarrow 0
\end{equation*}
This gives us a way to determine the isomorphism class from the infinite sequence of extensions lying in $\operatorname{Ext}_Y^1(\mathcal{O}_X/I_Y^n, I_Y^n/I_Y^{n+1})$. There are some situations where we only need to know the sub-variety and its conormal bundle.
\begin{lem}
If $Y$ satisfies $H^1(I_Y^n/I_Y^{n+1}\otimes(I_Y^r/I_Y^{r+1})^{\vee}) = 0$ for any
$n > r \geq 0$, then $\hat{X}$ only depends on the isomorphism class of $Y$ and its conormal bundle in $X$.
\label{cor:completionlemma}
\end{lem}
\proof We simply compute by induction. As mentioned, $\mathcal{O}_X/I_Y^2$ is an extension of $\mathcal{O}_Y$ by $I_Y/I_Y^2$ which must be trivial if $H^1(I_Y/I_Y^2) = 0$. Iterating, we see that the given condition on the cohomology guarantees that all the extensions will be trivial. \qed
In particular, we have the following useful case thanks to Kodaira vanishing.
\begin{cor}
Assume the characteristic of our field is zero. If the conormal bundle of $Y$ in $X$ is the dual of the canonical bundle and $Y$ is Fano, then the formal neighborhood of $Y$, and hence $D^b_Y(\Coh(X))$, is uniquely determined.
\end{cor}
In general, we shall say that the formal neighborhood is trivial if all the extensions mentioned above vanish. The trivial formal neighborhood is isomorphic to the formal neighborhood of $Y$ in the total space of its normal bundle.
\begin{cor}
Assuming the formal neighborhood is trivial, the Grothendieck groups $K(Y)$ and $K_Y(X) = K(D^b_Y(\Coh(X)))$ are isomorphic.
\end{cor}
\proof We have already seen that $i_*(D^b(\Coh(Y)))$ generates $D^b_Y(\Coh(X))$.
We can assume we are working on the total space of the normal bundle $N$. Here we have $R\pi_* \circ i_* = \pi_* \circ i_* = \operatorname{id}$. Thus, $i_*$ is injective. \qed
\begin{rmk}
While the Grothendieck groups may coincide, the Euler pairings definitely do not.
\end{rmk}
\section{Sheaves on local varieties}
Recall the construction of a geometric vector bundle $V(E)$ from a locally-free coherent sheaf $E$ on a scheme $X$. Choose an affine covering $U_i=\Spec A_i$ for which $E$ is free. Then $E|_{U_i} \cong A_i^n$ with a basis we shall denote by $x_i$. Consider the algebra $B_i = \Sym^*(E|_{U_i})$. Then, $V(E)$ is the scheme formed by gluing together the copies of $\Spec B_i$ using the restriction morphisms to $U_i \cap U_j$.
One common use of this construction is to make Calabi-Yau varieties. Assume that $X$ is smooth for the next lemma.
\begin{lem}
$V(\omega_X)$ has trivial canonical bundle.
\end{lem}
\proof We will first provide a local description for a non-vanishing section of the canonical
bundle of $V(\omega_X)$. Let $U$ be an affine chart on $X$ and take a non-vanishing section $\sigma$ of $\omega_X$ over $U$. Then, $V(\omega_X)(\pi^{-1}U) = \Spec \mathcal{O}(U)[\sigma]$. Identifying the
sheaf of sections with $\omega_X^{-1}$ we get an element $\sigma \wedge \sigma^{-1}$ of
$\omega_{V(\omega_X)}$ over $\pi^{-1}(U)$. Any other choice of section $\sigma'$ will differ
from $\sigma$ by a non-vanishing function that will be canceled out after wedging with
$\sigma^{-1}$. Thus, this construction is independent of the choice of $\sigma$ and gives
a trivialisation of $\omega_{V(\omega_X)}$. \qed
The proof of the lemma extends to the following case.
\betaegin{lem} Let $E$ be a locally-free coherent sheaf on $X$ of rank $r$ such that
$\betaigwedge^r E \cong \omega_X$. Denote by $V(E)$ the total space of the vector bundle
associated to $E$. Then, $V(E)$ is Calabi-Yau.
\varepsilonnd{lem}
\betaegin{rmk}
\betaegin{enumerate}
\item We can require less than smoothness of $X$, \cite{Kaw02}.
\item There is an obvious strong relation between $V(\omega_X)$ and divisors in the anti-canonical class
of $X$. Namely, sections of $V(\omega_X)$ are in bijection with said divisors. Similarly, if we
took $E = L_1 \oplus \cdots \oplus L_n$ such that $L_1 \otimes \cdots \otimes L_n = \omega_X$,
sections of $V(E)$ are in bijection with complete intersections determined by divisors
in the classes of the $L^{-1}_i$.
\varepsilonnd{enumerate}
\varepsilonnd{rmk}
In this section, we work with the \v{C}ech dg-model for the bounded derived category of coherent sheaves on a variety.
Consider the pullback of $E^{\vee}$ to $V(E)$. The maps $x^{\vee}_i \otimes x_j \mapsto \delta_{ij}$ give maps $\pi^*E^{\vee}|_{\pi^{-1}(U_i)} \rightarrow \mathcal{O}_{\pi^{-1}(U_i)}$ which glue to a map $\sigma: \pi^*E^{\vee} \rightarrow \mathcal{O}_{V(E)}$ whose zero locus is exactly the zero section $X$ in $V(E)$. From this we get a Koszul resolution of the zero section.
\betaegin{gather*}
0 \rightarrow \betaigwedge^{\operatorname{rank} E} \pi^*E^{\vee} \rightarrow \cdots \rightarrow \pi^*E^{\vee} \overset{\sigma}{\rightarrow} \mathcal{O}_{V(E)} \rightarrow \mathcal{O}_X \rightarrow 0
\label{eqn:koszulres}
\varepsilonnd{gather*}
The map $\pi: V(E) \rightarrow X$ is affine so it induces functors $\check{C}(X) \rightarrow \check{C}(V(E))$. Then given a complex of vector bundles $F$ on $X$ the resolution of the complex $i_* F$ is given tensoring $\pi^* F$ with the Koszul resolution and taking the total complex. We shall abuse notation and denote this complex of locally-free sheaves by $i_* F$.
Let $F$ and $D$ be complexes of locally-free sheaves on $X$. Then, there is a
bi-graded dg-algebra associated to $F$ and $D$. Namely,
\begin{equation*}
V(F,D) = \bigoplus_{l \in \mathbb{Z}} \mathbb{H}om_X(F,D \otimes \bigwedge^l E)[l]
\end{equation*}
Here the differential respects the grading by exterior powers of $E$. From $V(F,D)$ there is a natural map to the endomorphisms of the complex in equation \ref{eqn:koszulres}. We map $\phi \in \mathbb{H}om_X(F,D \otimes \bigwedge^l E)[l]$ to $\pi^*\phi$ in $\mathbb{H}om_{V(E)}(\pi^*F,\pi^*D[l])$. Let us name this map $\pi^*$.
\betaegin{prop}
$\pi^*$ is a quasi-isomorphism.
\label{prop:sscompute}
\varepsilonnd{prop}
\proof Note that $\pi^*$ preserves the bi-gradings and thus descends to a morphism
of the spectral sequences associated to the double complex. The $E^2$-pages for each
are given by $E^2_{pq} = H^p(\mathbb{H}om_X(F,D \otimes \bigwedge^q E))$ and the map induces
an isomorphism. \qed
Thus, we can replace $\mathbb{H}om_{V(E)}(i_*F,i_*D)$ by $V(F,D)$ and we will. There is one important subtlety - if we consider $V(F,F)$ with the algebra structure induced by $X$, we do not have a morphism of dg-algebras. Thus, we must pullback the algebra structure of $\mathbb{H}om_{V(E)}(i_*F,i_*F)$ to $V(F,F)$. This is described as follows. Each $\phi \in \mathbb{H}om_X(F,F \otimes \betaigwedge^l E)$ induces $\phi \otimes id \in \mathbb{H}om_X(F \otimes \betaigwedge^k E,F \otimes \betaigwedge^{k+l} E)$. This allows us to compose $\psi \circ \phi$ for $\psi \in \mathbb{H}om_X(F,F \otimes \betaigwedge^k E)$. With respect to this algebra structure we have a restatement of the previous proposition.
\betaegin{cor}
$\pi^*$ is a quasi-isomorphism of $V(F,F)$ and $\mathbb{H}om_V(i_*F,i_*F)$ as dg-algebras.
\varepsilonnd{cor}
\betaegin{rmk}
This was also observed in the case of line bundles in \cite{Seg07}.
\varepsilonnd{rmk}
We can restate this construction in purely categorical terms. Let $\mathcal{C}$ be a category with an auto-functor $L: \mathcal{C} \rightarrow \mathcal{C}$. Then, we can construct the trivial extension category of $\mathcal{C}$ by $L$, denoted by $\mathcal{C} \oplus L$. The objects of $\mathcal{C} \oplus L$ are the same as the objects of $\mathcal{C}$. The morphism space between objects $A,B$ is defined to be
\betaegin{gather*}
\mathbb{H}om_{\mathcal{C} \oplus L}(A,B) = \mathbb{H}om_{\mathcal{C}}(A,B) \oplus \mathbb{H}om_{\mathcal{C}}(A,L(B))
\varepsilonnd{gather*}
With respect to this decomposition, write a morphism $\phi$ as $\phi_1 \oplus \phi_2$. $\phi$ composed with $\psi: B \rightarrow C$ is defined to be $\psi_1 \circ \phi_1 \oplus L(\psi_1) \circ \phi_2 + \psi_2 \circ \phi_1$.
Trivial extensions preserve properties of $\mathcal{C}$ and $L$. By this, we mean that if $\mathcal{C}$ has extra structure and $L$ respects the extra structure, then $\mathcal{C} \oplus L$ also possesses this extra structure. For example, if $\mathcal{C}$ is a dg-category and $L$ is a dg-functor, then $\mathcal{C} \oplus L$ is also a dg-category where the differential is just the direct sum of the differentials on each component of the Hom space.
We have a quasi-equivalence between the image of $i_*$ in $\check{C}(V(E))$ and the trivial extension dg-category $\check{C}(X) \oplus \left(\otimes_{\mathcal{O}_X} \betaigwedge^l E [l] \right)$. To ease typographical stress, we shall set $T(E) = \left(\otimes_{\mathcal{O}_X} \betaigwedge^l E [l] \right)$. This extends to an equivalence of the appropriate derived categories.
\betaegin{lem}
$i_*$ induces an equivalence between $D(\check{C}(X) \oplus T(E))$ and $D^b_X(\mathbb{C}oh(V(E)))$.
\varepsilonnd{lem}
\proof From the calculation in proposition \ref{prop:sscompute} we see that $i_*$ is full and faithful. Using corollary \ref{cor:generationcor}, we know that the image of $i_*$ strongly generates $D_X^b(\mathbb{C}oh(V(E)))$. Since $X$ is smooth, this gives all bounded complexes of locally-free sheaves. \qed
Now let us simplify matters a little by recalling a result from \cite{BvB02}.
\betaegin{thm}
Let $Y$ be a quasi-compact and separated scheme with enough locally-free coherent sheaves. Then, there exists a bounded complex of locally-free coherent sheaves $G$ which generates $D(\mathbb{Q}coh(Y))$. In particular, if $Y$ is smooth, then $G$ strongly generates $D^b(\mathbb{C}oh(Y))$.
\varepsilonnd{thm}
This means that we only need to pay attention to the dg-algebra $\mathbb{H}om_{\check{C}(Y)}(G,G)$ (where we have extended the category $\check{C}(Y)$ to include bounded complexes of locally-free coherent sheaves in the obvious way). But, with our assumptions on $X$ (projective over a field $k$) we can assume that $G$ is actually a single locally-free coherent sheaf. The proof in this case does not require the same reduction argument as in \cite{BvB02} and \cite{Nee96}. We give a proof in the appendix.
We can now reduce to a single dg-algebra and modules over it. Let us denote the dg-algebra $V(G,G)$ by $V(X,G)$. From a result of Keller \cite{Kel94}, we have the following.
\betaegin{prop}
Since $X$ is smooth, $D^b_X(\mathbb{C}oh(V(E)))$ is triangle equivalent to $D(V(X,G))$.
\label{cor:maincorollary}
\varepsilonnd{prop}
Let us step back for a moment. Take $A$ to be a dg-algebra and $M$ a dg-module over $A$. We can rephrase the construction of the trivial extension categories in more down to earth terms.
\betaegin{defn}
Given such an $A$ and $M$ as above we can form the trivial extension dg-algebra of $A$ by $M$, denoted by $A(M)$. It is a dg-algebra over $k$. As a vector space it is just $A \oplus M$. The differential is $d_A \oplus d_M$ and composition is given by $(a,m)\cdot(a',m') = (a \cdot a', a \cdot m' + m \cdot a' + m \cdot m')$.
\varepsilonnd{defn}
\betaegin{eg}
A simple example is given by taking any dg-bi-module $M$ over $A$ and giving it the zero algebra structure, i.e. the composition $M \otimes_A M \rightarrow M$ is just the zero map. Then $A(M)$ is the trivial infinitesimal extension of $A$ by $M$. If $M$ naturally has an algebra structure over $A$, we get a little more structure.
\varepsilonnd{eg}
\betaegin{rmk}
It is obvious but worth remarking that we require a bi-module for this construction. Obviously one can promote any left or right dg-module to a bi-module by bestowing it with the trivial action on the right or left, respectively.
\varepsilonnd{rmk}
In general, to determine the structure of the algebra $A(M)$ one needs to identify the bi-module $M$. To explicitly give the algebra, one needs to know how $A$ acts on $M$. There is one special situation where one can determine up to quasi-isomorphism the dg-algebra $A(M)$ solely from the dg-algebra $A$. That is when $M = \mathbb{H}om_k(A,k) = A^*$. On $A(A^*)$ there is non-degenerate pairing given by $\left< (a_1,b_1^*), (a_2,b_2^*) \right> = \operatorname{tr}(a_1a_2,a_1b_2^*+b_1^*a_2)$ where $\operatorname{tr}: A(A^*) \rightarrow k$ takes $(a,b^*)$ to $(-1)^{|a||b|}b^*(a)$. The pairing satisfies
\betaegin{equation*}
\left< (a_1,b_1^*)\cdot(a_2,b^*_2), (a_3,b^*_3) \right> = (-1)^{n_1(n_2+n_3)}\left< (a_2,b_2^*)\cdot(a_3,b_3^*), (a_1,b_1^*) \right>
\varepsilonnd{equation*}
where $|a_i|=|b_i|=n_i$ and
\betaegin{equation*}
\left< d(a_1,b^*_1),(a_2,b_2^*) \right> = (-1)^{n_1n_2+1}\left< d(a_2,b_2^*),(a_1,b_1^*) \right>
\varepsilonnd{equation*}
\betaegin{lem}
If $A$ is finite-dimensional, then any dg-algebra $B$ with an injective map $A \hookrightarrow B$, a non-degenerate cyclically symmetric pairing as above, and a splitting $B \cong A \oplus I$ with $I^2 = 0$ is isomorphic to $A(A^*)$.
\varepsilonnd{lem}
\proof We have $A^* \cong I$ as chain complexes and we just need to know the values of $a\cdot i$ and $i \cdot a$ for $a \in A$ and $i \in I$. These can be found by using the non-degenerate cyclically symmetric pairing. \qed
Unfortunately, we cannot apply this lemma directly to the case of $E = \omega_X$ for two reasons. One, our dg-algebras are not finite dimensional, and two, with our choice of dg-model for $D^b_X(\mathbb{C}oh(V(E)))$ we do not have a non-degenerate pairing, only a pairing which is non-degenerate on cohomology. We can reduce to the cohomology which is finite dimensional over $k$ but we must pass from a dg-algebra to an $A_{\infty}$-algebra. Through this passage, we can show the following result.
\betaegin{prop}
Let $X$ be a smooth projective scheme over a field $k$. Let $G$ be a locally-free coherent sheaf strongly generating $D^b(\mathbb{C}oh(X))$ and $B_X$ the endomorphisms of $G$ in $\check{C}(X)$. Then, $V(X,G)$ is quasi-isomorphic to $B_X(B_X^*[-\partialtaim X - 1])$. \label{prop:intermediate}
\varepsilonnd{prop}
\betaegin{cor}
$D_X^b(\mathbb{C}oh(V(E)))$ is triangle equivalent to $D(B_X(B_X^*[-\dim X - 1]))$. \label{cor:mainresult}
\varepsilonnd{cor}
\betaegin{rmk}
These results were discussed in the case when the generator chosen for $X$ has no higher cohomology in \cite{Seg07}.
\varepsilonnd{rmk}
\section{A-infinity algebras}
Let $k$ be a field.
\betaegin{defn}
Let $V$ be a graded $k$-module. Then bar coalgebra $B(V)$ is the $k$-module $\oplus_{l \geq 0} (sV)^{\otimes l}$ where $(sV)^i = V^{i+1}$. An element $sv_1 \otimes \cdots \otimes sv_k$ will be denoted by $[v_1 | \cdots | v_k]$. $B(V)$ is equipped with the coalgebra structure $\mathbb{D}elta$.
\betaegin{displaymath}
\mathbb{D}elta [v_1 | \cdots | v_k] = \sum_{i=0}^k [v_1|\cdots|v_i] \betaigotimes [v_{i+1}|\cdots|v_k]
\varepsilonnd{displaymath}
\varepsilonnd{defn}
\betaegin{defn}
Given a coalgebra $(C,\mathbb{D}elta)$ a coderivation $d: C \rightarrow C$ is a degree one $k$-linear map for which the following diagram commutes
\betaegin{center}
\leavevmode
\betaegin{xy}
(-15,10)*+{C}="a"; (15,10)*+{C \otimes C}="b"; (-15,-10)*+{C}="c"; (15,-10)*+{C \otimes C}="d"; {\alphar@{->}^{\mathbb{D}elta} "a";"c"}; {\alphar@{->}^{d} "a";"b"}; {\alphar@{->}^{\mathbb{D}elta} "b";"d"}; {\alphar@{->}^{d \otimes 1 + 1 \otimes d} "c";"d"}
\varepsilonnd{xy}
\varepsilonnd{center}
$d$ is a codifferential if $d^2=0$.
\varepsilonnd{defn}
The following is well known.
\betaegin{lem}
There is a bijection between coderivations of $B(V)$ and $\bigoplus_l \mathbb{H}om_k((sV)^{\otimes l},sV)$ given by sending $d$ to $d_l: (sV)^{\otimes l} \rightarrow sV$.
\varepsilonnd{lem}
\betaegin{defn}
A curved $A_{\infty}$-algebra is a $k$-module $A$ with a codifferential $b$ on $B(A)$. If $b_0 = 0$, we shall simply call $A$ an $A_{\infty}$-algebra.
\varepsilonnd{defn}
\betaegin{cor}
The data of an $A_{\infty}$-algebra on $A$ is equivalent to a collection of degree one maps $b_k: (sA)^{\otimes k} \rightarrow (sA)$, $k > 0$, satisfying
\begin{displaymath}
\sum_{i+j+k=n,\ i,k\geq 0,\ j>0} b_{n-j+1}(\operatorname{id}^{\otimes i} \otimes b_j \otimes \operatorname{id}^{\otimes k}) = 0
\end{displaymath}
\varepsilonnd{cor}
\betaegin{rmk}
One can also translate this into degree $2-k$ maps $m_k: A^{\otimes k} \rightarrow A$ where $m_k = s^{-1} \circ b_k \circ s^k$. These satisfy
\begin{displaymath}
\sum_{i+j+k=n,\ i,k\geq 0,\ j>0} (-1)^{j+k(n-k-j)}m_{n-j+1}(\operatorname{id}^{\otimes i} \otimes m_j \otimes \operatorname{id}^{\otimes k}) = 0
\end{displaymath}
In particular, if we set $m_k = 0$ for $k>2$, we get
\betaegin{displaymath}
\betaegin{aligned}
m_1^2 & = 0 \\
m_2(m_1,\operatorname{id}) + m_2(\operatorname{id},m_1) & = 0 \\
m_2(m_2,\operatorname{id}) - m_2(\operatorname{id},m_2) & = 0
\varepsilonnd{aligned}
\varepsilonnd{displaymath}
These are just the requirements for a dg-algebra. In particular, we can convert all the examples from the previous section into this language.
\varepsilonnd{rmk}
\betaegin{defn}
A morphism $F: A \rightarrow A'$ of $A_{\infty}$-algebras is a degree zero coalgebra map $F: B(A) \rightarrow B(A')$ that commutes with the codifferentials. $F$ is called a quasi-isomorphism if $F_1$ is a quasi-isomorphism.
\varepsilonnd{defn}
As before, $F$ is determined by the restriction $F_k: (sA)^{\otimes k} \rightarrow sA'$. To recover $F$ we set $F = \sum F_{i_1} \otimes \cdots \otimes F_{i_n}$. For $F$ to commute with differential we need
\begin{displaymath}
\sum_{i+j+k=n,\ i,k\geq 0,\ j>0} F_{n-j+1}(\operatorname{id}_{sA}^{\otimes i} \otimes b^A_j \otimes \operatorname{id}_{sA}^{\otimes k}) = \sum_{i_1+\cdots+i_r=n} b^{A'}_r(F_{i_1} \otimes \cdots \otimes F_{i_r})
\end{displaymath}
\betaegin{defn}
An $A_{\infty}$-algebra $A$ is called minimal if $b_1=0$.
\varepsilonnd{defn}
$A_{\infty}$-algebras may seem a bit unwieldy, but there are advantages. One of which is the following result.
\betaegin{thm}
Given an $A_{\infty}$-algebra $A$ and a choice of decomposition $A = H(A) \oplus B \oplus D$ where $b_1:D \rightarrow B$ is an isomorphism, there is an $A_{\infty}$-algebra structure on $H(A)$ and $A_{\infty}$-quasi-isomorphisms $p: A \rightarrow H(A)$ and $i: H(A) \rightarrow A$ so that $p_1$ and $i_1$ are the projection and inclusion determined by the decomposition.
\varepsilonnd{thm}
This result is originally due to Kadeishvili \cite{Kad80}. The statement above is taken from \cite{Mar06}. The $A_{\infty}$-compositions on $H(A)$, the $p_j$, and the $i_j$ are all constructed from iterative applications of $(b_1|_D)^{-1}=h$, the $b_j$ on $A$, $p_1$, and $i_1$.
Let $(C,\mathbb{D}elta)$ be a coalgebra. Then a left comodule $(N,\mathbb{D}elta_N)$ for $C$ is given by $k$-module $N$ and a degree zero $k$-linear map $\mathbb{D}elta_N: N \rightarrow C \otimes N$ so that the following diagram commutes
\betaegin{center}
\leavevmode
\begin{xy}
(-15,10)*+{N}="a"; (15,10)*+{C \otimes N}="b"; (-15,-10)*+{C \otimes C}="c"; (15,-10)*+{C \otimes C \otimes N}="d"; {\ar@{->}^{\mathbb{D}elta} "a";"c"}; {\ar@{->}^{\mathbb{D}elta_N} "a";"b"}; {\ar@{->}^{1 \otimes \mathbb{D}elta_N} "b";"d"}; {\ar@{->}^{\mathbb{D}elta \otimes 1} "c";"d"}
\end{xy}
\varepsilonnd{center}
A right comodule is defined similarly. A bicomodule $N$ over $C$ is given by a $k$-module $N$ which is a left comodule under $\mathbb{D}elta_N$ and a right comodule under $\mathbb{D}elta^N$ so that the following diagram commutes
\betaegin{center}
\leavevmode
\betaegin{xy}
(-15,10)*+{N}="a"; (15,10)*+{C \otimes N}="b"; (-15,-10)*+{N \otimes C}="c"; (15,-10)*+{C \otimes N \otimes C}="d"; {\alphar@{->}^{\mathbb{D}elta^N} "a";"c"}; {\alphar@{->}^{\mathbb{D}elta_N} "a";"b"}; {\alphar@{->}^{1 \otimes \mathbb{D}elta^N} "b";"d"}; {\alphar@{->}^{\mathbb{D}elta_N \otimes 1} "c";"d"}
\varepsilonnd{xy}
\varepsilonnd{center}
Let $M$ be a $k$-module. We can form a left comodule over $B(A)$, $B(A,M) = B(A) \otimes sM$. The left coaction of $B(A)$ on $B(A,M)$ is given by
\betaegin{displaymath}
\mathbb{D}elta_M([a_1|\cdots|a_k|m]) = \sum_{i=0}^k [a_1|\cdots|a_i] \bigotimes [a_{i+1}|\cdots|a_k|m]
\varepsilonnd{displaymath}
One can define a right comodule $B(M,A)$ similarly. The coaction here will be denoted by $\mathbb{D}elta^M$.
\betaegin{defn}
A left $A_{\infty}$-module $M$ over an $A_{\infty}$-algebra $A$ is a $k$-module $M$ equipped with a degree one $k$-linear map $b_M: B(A,M) \rightarrow B(A,M)$ with $b_M^2 = 0$ and the following diagram commuting
\betaegin{center}
\leavevmode
\betaegin{xy}
(-20,10)*+{B(A,M)}="a"; (20,10)*+{B(A,M)}="b"; (-20,-10)*+{B(A) \otimes B(A,M)}="c"; (20,-10)*+{B(A) \otimes B(A,M)}="d"; {\alphar@{->}^{\mathbb{D}elta_M} "a";"c"}; {\alphar@{->}^{b_M} "a";"b"}; {\alphar@{->}^{\mathbb{D}elta_M} "b";"d"}; {\alphar@{->}^{b \otimes 1 + 1 \otimes b_M} "c";"d"}
\varepsilonnd{xy}
\varepsilonnd{center}
One can define a right $A_{\infty}$-module analogously. A morphism $F: M \rightarrow N$ of left $A_{\infty}$-modules is a $k$-linear degree zero map $F: B(A,M) \rightarrow B(A,N)$ so that $F \circ d_M = d_N \circ F$ and $\mathbb{D}elta^N \circ F = (1 \otimes F) \circ \mathbb{D}elta^M$. A morphism of right $A_{\infty}$-modules is defined similarly.
\varepsilonnd{defn}
We can also make a $B(A)$-bicomodule from a $k$-module $M$. Let $B(A,M,A) = B(A) \otimes sM \otimes B(A)$. We get a left coaction
\betaegin{displaymath}
\mathbb{D}elta_M [a_1|\cdots|a_k|m|a_{k+1}|\cdots|a_{k+l}] = \sum_{i=0}^k [a_1|\cdots|a_i] \bigotimes [a_{i+1}|\cdots|a_k|m|a_{k+1}|\cdots|a_{k+l}]
\varepsilonnd{displaymath}
and a right coaction
\betaegin{displaymath}
\mathbb{D}elta^M [a_1|\cdots|a_k|m|a_{k+1}|\cdots|a_{k+l}] = \sum_{i=0}^l [a_1|\cdots|a_k|m|a_{k+1}|\cdots|a_{k+i}] \betaigotimes [a_{k+i+1}|\cdots|a_{k+l}]
\varepsilonnd{displaymath}
\betaegin{defn}
An $A_{\infty}$-bimodule $M$ over an $A_{\infty}$-algebra $A$ consists of a $k$-module $M$ with a degree one $k$-linear map $b_M^M: B(A,M,A) \rightarrow B(A,M,A)$ so that $(b_M^M)^2 = 0$ and the following diagrams commute
\betaegin{center}
\leavevmode
\betaegin{xy}
(-25,10)*+{B(A,M,A)}="a"; (-25,-10)*+{B(A,M,A)}="c"; (25,10)*+{B(A)\otimes B(A,M,A)}="b"; (25,-10)*+{B(A)\otimes B(A,M,A)}="d"; {\alphar@{->}^{b_M^M} "a";"c"}; {\alphar@{->}^{\mathbb{D}elta_M} "a";"b"}; {\alphar@{->}^{(b\otimes 1 + 1 \otimes b_M^M)} "b";"d"}; {\alphar@{->}^{\mathbb{D}elta_M} "c";"d"}
\varepsilonnd{xy}
\varepsilonnd{center}
\betaegin{center}
\leavevmode
\betaegin{xy}
(-25,10)*+{B(A,M,A)}="a"; (-25,-10)*+{B(A,M,A)}="c"; (25,10)*+{B(A,M,A)\otimes B(A)}="b"; (25,-10)*+{B(A,M,A)\otimes B(A)}="d"; {\alphar@{->}^{b_M^M} "a";"c"}; {\alphar@{->}^{\mathbb{D}elta^M} "a";"b"}; {\alphar@{->}^{(b_M^M\otimes 1 + 1 \otimes b)} "b";"d"}; {\alphar@{->}^{\mathbb{D}elta^M} "c";"d"}
\varepsilonnd{xy}
\varepsilonnd{center}
\varepsilonnd{defn}
\betaegin{rmk}
Since $A_{\infty}$-bi-modules will be of interest, we shall labor this point for a moment longer. $b^M_M: B(A,M,A) \rightarrow B(A,M,A)$ is uniquely determined by the maps $b_{k,l}: (sA)^{\otimes k} \otimes (sM) \otimes (sA)^{\otimes l} \rightarrow sM$. These maps must satisfy
\betaegin{displaymath}
\sum_{i + j + k =n,i,j \geq 0, k>1} b_{n-k+1,m}(\operatorname{id}_{sA}^{\otimes i} \otimes b_k \otimes \operatorname{id}_{sA}^{\otimes j} \otimes \operatorname{id}_{sM} \otimes \operatorname{id}_{sA}^{\otimes n}) +
\varepsilonnd{displaymath}
\betaegin{displaymath}
\sum_{s+i=n,h+j=m,\ i,j,h,s\geq 0} b_{n-i,m-j}(\operatorname{id}_{sA}^{\otimes l} \otimes b_{i,j} \otimes \operatorname{id}_{sA}^{\otimes h}) +
\varepsilonnd{displaymath}
\betaegin{displaymath}
\sum_{s+r+t=m,s,t\geq 0,r>0} b_{n,m-t+1}(\operatorname{id}_{sA}^{\otimes n} \otimes \operatorname{id}_{sM} \otimes \operatorname{id}_{sA}^{\otimes r} \otimes b_t \otimes \operatorname{id}_{sA}^{\otimes s}) = 0
\label{rmk:relations}
\varepsilonnd{displaymath}
If we set $b_{i,j}=0$ for $i+j>1$ and $b_k=0$ for $k>1$, we get the following relations.
\betaegin{displaymath}
\betaegin{aligned}
b_{0,0}^2 & = 0 \\
b_{1,0}(b_1 \otimes \operatorname{id}) + b_1(b_{1,0}) & = 0 \\
b_{0,1}(\operatorname{id} \otimes b_1) + b_1(b_{0,1}) & = 0 \\
b_{0,1}(b_{1,0} \otimes \operatorname{id}) + b_{1,0}(\operatorname{id} \otimes b_{0,1}) & = 0 \\
b_{1,0}(\operatorname{id} \otimes b_{1,0}) + b_{1,0}(b_2 \otimes \operatorname{id}) & = 0 \\
b_{0,1}(b_{0,1} \otimes \operatorname{id}) + b_{0,1}(\operatorname{id} \otimes b_2) & = 0
\varepsilonnd{aligned}
\varepsilonnd{displaymath}
After shifting the gradings back, we get the relations satisfied by a dg-bi-module over a dg-algebra.
\varepsilonnd{rmk}
\betaegin{eg}
\betaegin{enumerate}
\item Given an $A_{\infty}$-algebra $A$, we can define an $A_{\infty}$-bimodule structure on $A$ by setting $b_{k,l} = b_{k+l+1}$.
\item From an $A_{\infty}$-bi-module $M$, we can construct an $A_{\infty}$-bimodule structure on $M^* = \mathbb{H}om_k(M,k)$ as follows. Consider $B(A,M^*,A) \otimes sM$ and let $\sigma$ denote the $k$-module isomorphism $sM^* \otimes B(A,M,A)$ given by shifting the first copy of $B(A)$ to the end. Let $\operatorname{tr}: M^* \otimes M \rightarrow k$ denote the natural pairing. We can define $b^{M^*}_{M^*}$ implicitly by setting $\operatorname{tr}(b^*_{k,l} \otimes \operatorname{id}) = \operatorname{tr}(\operatorname{id} \otimes b_{k,l} \circ \sigma)$. One can quickly check that $b^*_{k,l}$ satisfy the relations \ref{rmk:relations}. Explicitly, we have
\betaegin{displaymath}
b^*_{k,l}([a_1|\cdots|a_k|m^*|a_{k+1}|\cdots|a_{k+l}])(m') = (-1)^{\bowtie}m^*(b_{l,k}([a_{k+1}|\cdots|a_{k+l}|m'|a_1|\cdots|a_k]))
\end{displaymath}
where $\displaystyle{\bowtie = (\sum_{r=1}^k|a_r|)(|m^*|+\sum_{t=1}^l|a_{k+t}|)+|m^*|+\sum_{j=1}^k|a_j|(\sum_{i=j+1}^k|a_i|)}$
\varepsilonnd{enumerate}
\varepsilonnd{eg}
Let $M$ be an $k$-module. Let us set
\betaegin{displaymath}
\underbrace{B(A) \otimes M \otimes B(A) \otimes M \otimes \cdots \otimes M \otimes B(A)}_{n \text{ copies of } M} = B_n(A,M,A)
\varepsilonnd{displaymath}
Then,
\betaegin{displaymath}
B(A\oplus M) = \betaigoplus_{n \geq 0} B_n(A,M,A)
\varepsilonnd{displaymath}
Note that $B_1(A,M,A) = B(A,M,A)$.
\betaegin{defn}
An $A_{\infty}$-algebra $A'$ over an $A_{\infty}$-algebra $A$ is codifferential $b'$ on $B(A \oplus A')$ whose restriction to $B(A)$ is $b$.
\varepsilonnd{defn}
The following is clear.
\betaegin{lem}
Given an $A_{\infty}$-bimodule $M$ over an $A_{\infty}$-algebra, we can form the trivial extension $A_{\infty}$-algebra $A(M)$ which as $k$-module is $A \oplus M$ and which has as codifferential $b_{A(M)} = b + b^M_M$ on $B(A \oplus M)$. This is an $A_{\infty}$-algebra over $A$.
\varepsilonnd{lem}
Let $\operatorname{Mod}-A$ denote the dg-category of right $A_{\infty}$-modules over an $A_{\infty}$-algebra $A$. Morphisms from $M$ to $N$ are all $k$-linear maps from $B(A,M)$ to $B(A,N)$ commuting with comodule structure. The differential is given by commuting a given morphism with the differentials. This dg-category is pre-triangulated with triangles coming from cones in the dg-category. Let $D(A)$ denote the smallest triangulated subcategory of $H^0(\operatorname{Mod}-A)$ containing $A$ and closed under idempotent splittings. The following result is useful.
\betaegin{thm}
If $A$ and $B$ are quasi-isomorphic $A_{\infty}$-algebras, then $D(A) \cong D(B)$.
\varepsilonnd{thm}
For a proof, see \cite{SeiDR}.
Previously, we had defined the bounded derived category of special dg-categories, ones where all quasi-isomorphisms factor through homotopy equivalences. The category of right dg-modules sits inside $\operatorname{Mod}-A$.
\betaegin{prop}
Given a dg-algebra $A$ as above, then the two definitions of the derived category of $A$ coincide.
\varepsilonnd{prop}
\proof Both these categories are equivalent to $D(\operatorname{dgMod}-\mathcal{O}mega B A)$ where $\mathcal{O}mega B A$ is the bar-cobar construction of $A$, see \cite{LH03}. \qed
\betaegin{defn}
A cyclic $A_{\infty}$-algebra of dimension $d$ is an $A_{\infty}$-algebra
$A$ with a symmetric, cohomologically non-degenerate pairing $(\cdot,\cdot): A \otimes A
\rightarrow k$ of degree $-d$ such that
\betaegin{equation*}
\left(m_n(a_0,\cdots,a_{n-1}),a_n \right) = (-1)^{n+|a_0|(|a_1|+\cdots+|a_n|)}\left( m_n(a_1,\cdots,a_n),a_0 \right)
\varepsilonnd{equation*}
for all $n$.
\varepsilonnd{defn}
\betaegin{rmk}
\betaegin{enumerate}
\item If we use the gradings on $sA$ instead, the pairing becomes honestly cyclically symmetric.
\item We can turn the pairing into a A-bimodule map $f: A \rightarrow A^*[-d]$ by setting $f_1(a) = (a,-)$ and $f_n=0$. Nondegeneracy says this is a quasi-isomorphism.
\varepsilonnd{enumerate}
\varepsilonnd{rmk}
Now we prove our main technical lemma.
\betaegin{lem}
Let $A'$ be an $A_{\infty}$-algebra over $A$ both having finite dimensional cohomology. Assume that $A'$ is cyclic, that $b_{A'}$ respects the auxiliary grading $B(A\oplus A') = \oplus B_n(A,A',A)$, and the pairing $(\cdot,\cdot)$ has auxiliary degree zero with the auxiliary degree of the target field being one. Then, $A'$ is quasi-isomorphic to $A(A^*)$.
\label{lem:technicalresult}
\varepsilonnd{lem}
\proof Appealing to proposition \ref{prop:niceminimalmodel}, we see that it is enough to consider the situation where $A'$ and $A$ are minimal and finite dimensional. The pairing gives an isomorphism $A^* \cong A'$. Cyclicity of $(b_k,\operatorname{id})$ and isotropy of $(\cdot,\cdot)$ force $b_k$ to vanish on $B_n(A,A',A)$ for $n > 1$ and to map $B_i(A,A',A)$ into $B_i(A,A',A)$ for $i=0,1$. They also force $b_{k,l}: A^{\otimes k} \otimes A' \otimes A^{\otimes l} \rightarrow A'$ to coincide under the isomorphism $A^* \cong A'$ with the $A_{\infty}$-bimodule structure on $A^*$. Thus, we see that $A'$ is isomorphic to $A(A^*)$ in this case and, hence, the result holds in general. \qed
\betaegin{rmk}
One can view proposition \ref{lem:technicalresult} as a slightly weaker characterization of the ``dualizing bimodule'' for an $A_{\infty}$-algebra.
\varepsilonnd{rmk}
\betaegin{rmk}
Preservation by $b$ of the decomposition $B(A\oplus A') = \betaigoplus B_n(A,A',A)$ is equivalent to the existence of an action by $k^{\times}$ on $B(A \oplus A')$ which sends $b = \oplus b|_{B_n(A,A',A)}$ to $b' = \oplus \lambda^n b|_{B_n(A,A',A)}$. This action can be realised geometrically in the case of $V \rightarrow X$ as the standard action of $k^{\times}$ on a vector bundle. This was pointed out to me by Paul Seidel.
\varepsilonnd{rmk}
\section{Formal non-commutative symplectic geometry}
In this section, we prove Proposition \ref{prop:niceminimalmodel}. Before we dive in, we review the notions of formal non-commutative symplectic geometry and its relation with $A_{\infty}$-algebras. Better references for this are the original source \cite{Kon93} and \cite{Gin01,HL04}. I refer the reader to one of these for any unproven assertions.
Let $k$ be field for simplicity.
\betaegin{defn}
A formal $k$-module $V$ is a $\mathbb{Z}$ (or $\mathbb{Z}/2\mathbb{Z}$)-graded $k$-module arising as the inverse limit of finite-dimensional $k$-modules $V_i$ and equipped with the inverse limit topology.
\varepsilonnd{defn}
\betaegin{defn}
A topological basis for $V$ is a choice of basis of $V$ for which any element can be written uniquely as a convergent sum.
\varepsilonnd{defn}
The category of formal $k$-modules will be denoted $f-k-\operatorname{Mod}$. Morphisms in this category are continuous $k$-module morphisms. Given two formal $k$-module $V$ and $W$, we form the completed tensor product $V \hat{\otimes} W := \underset{\leftarrow}{\lim} \ V_i \otimes_k W_j$.
\betaegin{prop}
The functors $k-\operatorname{Mod} \rightarrow f-k-\operatorname{Mod}$, $V \mapsto V^*$ and $f-k-\operatorname{Mod} \rightarrow k-\operatorname{Mod}$, $W \mapsto
W^{\star}$ where $W^{\star}$ is the topological dual, are contravariant equivalences of symmetric monoidal categories.
\varepsilonnd{prop}
A formal $k$-algebra $A$ will be an associative, unital algebra object in this category. We wish to treat $A$ as the ring of functions on some un-named type of manifold. Any reader with an acquaintance with differential geometry should recognize the similarity. To wit, we employ the following notation.
\betaegin{defn}
A continuous derivation $X: A \rightarrow A$ is called a vector field. A continuous algebra homomorphism $\phi: A \rightarrow B$ is called a smooth map. If it is invertible, we call it a diffeomorphism.
\varepsilonnd{defn}
\betaegin{eg}
Let $V$ be a graded $k$-module and consider the tensor algebra on $V$, $\bigoplus_{i \geq 0} V^{\otimes i}$. Then, the dual considered as a formal $k$-module can be written as $\hat{T}V^{\vee} = \prod_{i \geq 0} V^{\hat{\otimes} i}$. Dualising the co-algebra structure gives the standard algebra structure on a tensor algebra. This will be our standard example of a formal $k$-algebra. Note that we can view it as formal non-commutative power series in the variables $x^s$ for $s \in S$ a basis for $V$.
\varepsilonnd{eg}
Assume our formal algebra $A$ is isomorphic to $\hat{T}V$ for some $V$. Then there are two gradings on $A$. The grading induced by $V$ where $x^1 \otimes \cdots \otimes x^n$ has degree $|x^1|+\cdots+|x^n|$ and the grading induced by the tensor algebra where $x^1 \otimes \cdots \otimes x^n$ has degree $n$. The second grading we shall call the order.
Any vector field $X$ on $\hat{T}V$ can be expanded as $X = \sum_{i \geq 0} X_i$ where $X_i: V \rightarrow V^{\hat{\otimes} i-1}$. We say a vector field vanishes at zero if $X_0 = 0$. A smooth map $\phi: \hat{T}V \rightarrow \hat{T}W$ can be also be expanded $\sum_{i \geq 1} \phi_i$ with $\phi_i: V \rightarrow W^{\hat{\otimes} i}$.
\betaegin{lem}
A smooth map $\phi: \hat{T}V \rightarrow \hat{T}W$ is a diffeomorphism if and only if $\phi_1$ is invertible.
\varepsilonnd{lem}
\betaegin{defn}
The module of one-forms $\mathcal{O}mega^1(A)$ is defined to be $A \hat{\otimes} A/k$. We write $x \hat{\otimes} y$ as $xdy$. Correspondingly, there is a natural map $d: A \rightarrow \mathcal{O}mega^1(A)$. $\mathcal{O}mega^1(A)$ possesses the structure of an $A$-bimodule with $a \cdot xdy = axdy$ and $xdy \cdot a = xd(ya)-xyda$.
\varepsilonnd{defn}
\betaegin{prop}
There is a bijection between derivations of $A$ with values in $M$ and continuous maps $\mathcal{O}mega^1(A)$ to $M$.
\varepsilonnd{prop}
\betaegin{defn}
The dg-algebra of tensor forms is $\mathcal{O}mega^*(A)$ the completed tensor algebra over $A$ on $s^{-1}\mathcal{O}mega^1(A)$ where $s$ is the parity shift, $(sW)^i = W^{i+1}$. It possesses the standard algebra structure. $d$ extends to a differential on $\mathcal{O}mega^*(A)$.
\varepsilonnd{defn}
Given a vector field $X$ on $A$, we can define some operations on $\mathcal{O}mega^*(A)$. Let $i_X$ be the derivation of degree $|X|$ of $\mathcal{O}mega^*(A)$ for which $i_X (a) = 0$ and $i_X (da) = X(a)$, and let $L_X$ be the derivation of degree $|X|$ of $\mathcal{O}mega^*(A)$ for which $L_X(a) = X(a)$ and $L_X(da) = d(L_X(a))$. These are the contraction and Lie derivative.
\betaegin{defn}
The de Rham complex of $A$ is $DR^*(A) = \mathcal{O}mega^*(A)/[\mathcal{O}mega^*(A),\mathcal{O}mega^*(A)]$. The differential $d$ and derivations $i_X$ and $L_X$ descend to $DR^*(A)$.
\varepsilonnd{defn}
The standard relations hold. Here are two.
\begin{lem}
$L_X = [i_X,d], [L_X,L_Y] = L_{[X,Y]}$
\end{lem}
Choose a topological basis $x^i$ of $A$. Then products of $x^i,dx^i$ form a topological basis for $DR^*(A)$. Let $(DR^*(A))^{[p]}$ denote the subcomplex of $DR^*(A)$ formed from forms with tensor order divisible by $p$.
\begin{lem}
(Poincar\'{e}) $H^*(DR^*(A),d) = H^*((DR^*(A))^{[p]},d)$ if the characteristic of $k$ is $p$. In particular, if $\operatorname{char} k = 0$, then $DR^*(A)$ is a resolution of $k$.
\end{lem}
\proof Consider the Euler vector field $E = \sum x^i \partial_{x^i}$. The action of $L_E$ on $DR^*(A)$ is diagonalizable with non-negative integral eigenvalues. If the eigenvalue associated to $\nu$ is non-zero and $\nu$ is closed, the Cartan formula for $L_E$ shows that $\nu$ is exact. The zero eigenvalues occur only on $DR^*(A)^{[p]}$. \qed
\begin{defn}
An $\omega \in DR^2(A)$ is called symplectic if the map $X \rightarrow i_X\omega$ is a bijection and $d\omega = 0$.
\end{defn}
\begin{lem}
Assume that $A \cong \hat{T}V$ with $V$ finite-dimensional. A two-form $\omega = \sum a_{ij} dx^idx^j$ on $A$ is symplectic if and only if the pairing $V^{\vee} \otimes V^{\vee} \rightarrow k$ given by $(a,b) \mapsto i_{\partial_a}i_{\partial_b}\omega$ is non-degenerate.
\end{lem}
\proof This is clear. \qed
\begin{cor}
With the hypotheses as in the previous lemma, we see that $\omega$ is symplectic if and only if $\omega_0$ is symplectic.
\end{cor}
\begin{lem}
Assume that $A \cong \hat{T}V$. Given a topological basis $x^a$ for $V$, then $x^a$ and $dx^a$ generate $DR^*(A)$.
\end{lem}
\proof Using the commutation relations $d(ab) = (da)b+(-1)^{|a|}a\,db$ we see that we can reduce the order of the one-form. \qed
If $A \cong \hat{T}V$ we can write any two-form $\omega = \sum_{i \geq 0} \omega_i$ where $\omega_i$ has order $i+2$.
\begin{prop}
(Darboux) Let $A \cong \hat{T}V$ be a formal $k$-algebra with a symplectic form $\omega$, which is exact. Assume that $V$ is finite dimensional. Then, there exists a diffeomorphism $\phi: A \rightarrow A$ so that $\phi^*\omega = \omega_0$.
\end{prop}
\proof Let us first attempt to solve $L_X \omega_0 = \omega_i$. Note that since $V$ is finite dimensional $\omega_0$ is symplectic if and only if $\omega$ is. Now since $\omega$ is exact so is $\omega_i$ for each $i$. We just need to solve $i_X \omega_0 = \alpha_i$, where $d\alpha_i = \omega_i$, for some $X$ which we can since $\omega_0$ is symplectic. Let $X^1$ denote the solution to $L_{X^1}\omega_0 = \omega_1$ and consider the diffeomorphism $\phi$ determined by sending $x^i \in V$ to $x^i - X^1(x^i)$. The order zero term in $\phi^*\omega$ is $\omega_0$ while the order one term is $\omega_1 - L_{X^1}\omega_0$ which is zero by construction. Iterating this procedure and noting that the composition of the resulting maps is convergent, we get the desired diffeomorphism $\phi: A \rightarrow A$. \qed
\begin{defn}
An $A_{\infty}$-algebra is a formal $k$-algebra $A \cong \hat{T}V$ for some $V$ with a degree one vector field $m: A \rightarrow A$ vanishing at zero and satisfying $[m,m] = 0$.
\end{defn}
By taking the dual of the description of an $A_{\infty}$-algebra involving the bar complex, we see that we get exactly the above definition.
\begin{defn}
A symplectic $A_{\infty}$-algebra is an $A_{\infty}$-algebra $A$ with a symplectic form $\omega$ so that $L_m \omega = 0$. A homologically-symplectic $A_{\infty}$-algebra is an $A_{\infty}$-algebra with closed $m$-constant two-form $\omega$ so that the form induced by $\omega_0$ on the cohomology of $A$ is symplectic.
\end{defn}
\begin{eg}
Let $B$ be a dg-algebra over a field with map $\operatorname{tr}: B \rightarrow k$ so that the pairing $\left< a,b \right> = \operatorname{tr}(ab)$ is cyclic and non-degenerate on cohomology. Then, the corresponding formal $k$-algebra is a homologically symplectic $A_{\infty}$-algebra with $\omega$ determined as follows. Express $\operatorname{tr}: B \rightarrow k$ in terms of a basis for $B$ as $\operatorname{tr}(x_i) = t_i$ and express composition as $x_ix_j = \sum_k m^k_{ij} x_k$. Then, $\omega = t_km^k_{ij}dx^idx^j$. That it is $L_m$-constant follows from the calculation in the lemma below. From the Poincar\'{e} lemma, we see that the assumption on the characteristic of $k$ forces $\omega$ to be exact.
\end{eg}
\begin{lem}
Given a two-form $\omega = \sum a_{ij} dx^i dx^j$ of order two, $L_m \omega = 0$ if and only if the corresponding pairing is cyclic.
\end{lem>
\proof Using the commutation relation, we can write $\omega=\sum_{i \leq j} a_{ij}dx^idx^j$ for some ordering of a topological basis of $V$ corresponding to a basis of the topological dual to $V$. Then the pairing
\begin{equation*}
\left< x_i,x_j \right> = \begin{cases} a_{ij} & \text{ if $i < j$} \\
2 a_{ii} & \text{ if $i = j$} \\
(-1)^{|x_i||x_j|}a_{ji} & \text{ if $i > j$}
\end{cases}
\end{equation*}
We can write $m x^k = \sum m^k_{i_1\cdots i_l} x^{i_1}\cdots x^{i_l}$. Then, we dualise and use the suspended degree
\begin{equation*}
\left< m_n(x_{i_0}\cdots x_{i_{n-1}}),x_{i_n} \right> = \left< m^k_{i_0\cdots i_{n-1}}x_k,x_{i_n} \right> =
\end{equation*}
\begin{equation*}
\sum_{k \leq i_n} m^k_{i_0\cdots i_{n-1}} a_{ki_n} + \sum_{k \geq i_n} (-1)^{(k+1)(i_n+1)}m^k_{i_0 \cdots i_{n-1}} a_{i_n k}
\end{equation*}
The order $n$ term in $L_m \omega$ comes from $L_{m_i}\omega = \sum_{i
\leq j}a_{ij}m^i_{l_1\cdots l_n}d(x^{l_1}\cdots x^{l_n})dx^j + (-1)^i a_{ij}m^j_{k_1\cdots k_i} dx^id(x^{k_1} \cdots x^{k_i})$.
The term corresponding to $x^{t_0}\cdots dx^{t_{a}} \cdots dx^{t_n}$ carries coefficients
\begin{equation*}
\sum_{i \leq t_n} (-1)^{|t_1|+\cdots+|t_{a-1}|} a_{it_n}m^i_{t_0\cdots t_{n-1}} +
\end{equation*}
\begin{equation*}
\sum_{i \leq t_a} (-1)^{|t_{a+1}|+\cdots+|t_n|+\sum_{s=a+1}^{n-1}|t_s|(|t_1|+\cdots+\widehat{|t_s|}+\cdots+|t_n|)} a_{it_a}m^i_{t_{a+1}\cdots t_{a-1}} +
\end{equation*}
\begin{equation*}
\sum_{i \geq t_n} (-1)^{i+|t_1|+\cdots+|t_{a-1}|+(|t_n|+1)(|t_0|+\cdots+|t_{n-1}|+1)} a_{t_n i} m^i_{t_0\cdots t_{n-1}} +
\end{equation*}
\begin{equation*}
\sum_{i \geq t_a} (-1)^{\circledS(i)}a_{t_a i} m^i_{t_{a+1}\cdots t_{a-1}}
\end{equation*}
where $\circledS(i) = i+|t_{a+1}|+\cdots+|t_{n-1}|+(|t_a|+1)(|t_1|+\cdots+\widehat{|t_a|}+\cdots+|t_n|+1)+(|t_n|+1)(|t_0|+\cdots+|t_{n-1}|+1)+\sum_{s=a+1}^{n-1}|t_s|(|t_1|+\cdots+\widehat{|t_s|}+\cdots+|t_n|)$. We see that this being zero for any $a$ is equivalent to
\begin{equation*}
\left< m_n(x_{t_0},\cdots,x_{t_{n-1}}),x_{t_n} \right> = (-1)^{|t_0|(|t_1|+\cdots+|t_n|)}\left< m_n(x_{t_1},\cdots,x_{t_n}), x_{t_0} \right>
\end{equation*}
using the suspended gradings. If we translate this into the grading on $V$, we get that
\begin{equation*}
\left< m_n(x_{t_0},\cdots,x_{t_{n-1}}),x_{t_n} \right> = (-1)^{n+|t_0|(|t_1|+\cdots+|t_n|)} \left< m_n(x_{t_1},\cdots,x_{t_n}), x_{t_0} \right>
\end{equation*} \qed
\begin{lem}
Let $A$ be an $A_{\infty}$-algebra with a homologically-nondegenerate, $L_m$-constant, exact two-form $\omega$. Write $\omega = \sum_{i \geq 0} \omega_i$. Then, there is a minimal symplectic $A_{\infty}$-algebra $A'$ quasi-isomorphic to $A$ with $\omega_0|_{H^*}$ as the symplectic form.
\end{lem}
\proof Let $A = TV^*$ and split $V = H \oplus B \oplus D$ and take the morphism coming from the minimal model algorithm, $p:A \rightarrow \hat{T}H^*$, then $p^*\omega$ is non-degenerate on $H^*$ since the constant piece is simply the restriction of $\omega$ to $\hat{T}H^*$ and $p^*\omega$ is $L_{m'}$-constant for the induced $A_{\infty}$-structure $m'$ on $H^*$ since $p^*L_m = L_{m'}p^*$. Now, apply the Darboux lemma to $A'$ with $p^*\omega$ to get a diffeomorphism $\phi: A' \rightarrow A'$ with $\phi^*p^*\omega = \omega_0|_{H^*}$ and take the vector field given by $\phi \circ m' \circ \phi^{-1}$ as the new $A_{\infty}$-structure which gives the result. \qed
\begin{rmk}
This result appears in \cite{Laz06}. Older results deal only with symplectic structures (or at least non-degenerate pairings at the chain level) which one rarely sees. One can turn this into an algorithm similar to \cite{Mar06} modulo the choices of $\alpha_i$ in the Darboux lemma.
\end{rmk}
\begin{cor}
Let $X$ be a smooth projective Calabi-Yau variety over a field $k$. Then, $D^b(\operatorname{Coh}(X))$ is equivalent to $D(A)$ for a cyclic $A_{\infty}$-algebra $A$.
\end{cor}
Let us now revisit the previous situation with a new assumption. Assume that we have an auxiliary $\mathbb{Z}$-grading on our $A_{\infty}$-algebra with $m$ degree zero for this new grading, and $\omega$ degree $l$ with respect to the new induced grading on $DR(A)$. The grading then descends to the cohomology $H(A)$.
\begin{lem}
There is a minimal $A_{\infty}$-structure on $H(A)$ which respects the auxiliary grading and for which $A$ is quasi-isomorphic to $H(A)$ via $A_{\infty}$-homomorphisms of degree zero.
\end{lem}
\proof Decompose $A$ with respect to the auxiliary grading $A = \bigoplus_{i \in \mathbb{Z}} A_i$ and choose a splitting of $A = H \oplus B \oplus D$ which respects this splitting, i.e. $A_i = H_i \oplus B_i \oplus D_i$ and $d: D_i \rightarrow B_i$ is an isomorphism. Then, the inverse to $d$ also preserves the auxiliary grading. Inspecting the algorithm for constructing the minimal $A_{\infty}$-structure $H(A)$ and the quasi-isomorphisms, we see that they are formed from iterated compositions of $h, m_i, \pi, i$ where $\pi: A \rightarrow H$ and $i: H \rightarrow A$ hence they are of degree zero. \qed
As a corollary of this, we see that the induced symplectic form, which shall also be denoted by $\omega$, is still of degree $l$. In particular, each term in the order expansion $\omega = \sum \omega_i$ is of degree $l$. Now, our minimal model is finite dimensional over $k$.
\begin{lem}
The vector fields constructed in the Darboux lemma can be chosen to have degree zero.
\end{lem}
\proof Assume we are in the situation of the induction step in the proof of the Darboux lemma. Namely, we have a degree $l$ symplectic form $\omega$ on $A$ with an expansion $\omega = \omega_0 + \omega_i + \omega_{i+1} + \cdots$ by order. We seek a vector field $X_i$ so that $L_{X_i} \omega_0 = \omega_i$. We reduced to solving $i_{X_i} \omega_0 = \alpha_i$ where $d\alpha_i = \omega_i$. Decomposing $\alpha_i$ with respect to the auxiliary grading and applying $d$ we see that components with degree not equal to $l$ do not contribute. Hence, we can assume that $\alpha_i$ has degree $l$. One can easily check, that given a vector field $Y$ of degree $t$ and order $i-1$, contracting with $\omega_0$ produces a one-form of degree $t$ and order $i$. Since contraction with $\omega_0$ furnishes an isomorphism of the space of vector fields and one-forms, our $X_i$ can be taken to be of degree zero. \qed
\begin{cor}
The diffeomorphism constructed in the Darboux lemma can be taken to have degree zero with respect to the auxiliary grading.
\end{cor}
Let us collect these results into a useful conclusion.
\begin{prop}
Let $A$ be a homologically-symplectic $A_{\infty}$-algebra with an auxiliary $\mathbb{Z}$-grading so that $m$ is of auxiliary degree zero and the homologically-symplectic form $\omega$ is degree $l$. Then, we can find a symplectic minimal model of $A$ with an auxiliary $\mathbb{Z}$-grading for which the homological vector field is of auxiliary degree zero and the symplectic form is constant and of degree $l$. Moreover, the quasi-isomorphisms are of auxiliary degree zero.
\label{prop:niceminimalmodel}
\end{prop}
Combining this result with our lemma \ref{lem:technicalresult} proves proposition \ref{prop:intermediate} and hence the main result \ref{cor:mainresult}.
\section{Closing Remarks}
A useful perspective to take on the results of this paper is furnished by the idea of homological mirror symmetry, \cite{Kon95}. Here we shall frame it rather loosely and optimistically. Given a category arising from algebraic geometry, there is another construction of this category in the realm of symplectic geometry. For example, take a smooth projective Calabi-Yau $d$-fold over $\mathbb{C}$. From this we can construct $D^b(\operatorname{Coh}(X))$. Then, for general enough $X$, homological mirror symmetry predicts that there is another smooth projective Calabi-Yau $d$-fold $Y$ and a category made from it using symplectic geometry, the Fukaya category $\operatorname{Fuk}(Y)$. It is in fact an $A_{\infty}$-category whose objects are well-mannered and properly groomed Lagrangian submanifolds and whose morphisms between such $L$ and $L'$, when these Lagrangians intersect transversely, are the intersection points. The $A_{\infty}$-compositions $m_n: \operatorname{Hom}(L_1,L_2) \otimes \cdots \otimes \operatorname{Hom}(L_n,L_{n+1}) \rightarrow \operatorname{Hom}(L_1,L_{n+1})$ come from counting pseudo-holomorphic polygons with appropriate boundary conditions. After we derive $\operatorname{Fuk}(Y)$, we should obtain a category triangle equivalent to $D^b(\operatorname{Coh}(X))$. And $D^b(\operatorname{Coh}(Y))$ should also be equivalent to $D(\operatorname{Fuk}(X))$.
The conjecture extends beyond Calabi-Yau varieties however we lose a bit of the symmetry. A case with non-trivial canonical bundle is that of projective space itself $\mathbb{P}^n_{\mathbb{C}}$. We have two categories of interest $D^b(\operatorname{Coh}(\mathbb{P}^n_{\mathbb{C}}))$ and $D(\operatorname{Fuk}(\mathbb{P}^n_{\mathbb{C}}))$. The mirror of this space is not another variety but a variety with a function on it, also called a Landau-Ginzburg model. For $\mathbb{P}^n_{\mathbb{C}}$, the mirror $W: (\mathbb{C}^{\times})^n \rightarrow \mathbb{C}$ is given by
\begin{displaymath}
W(z_1,\cdots,z_n) = z_1 + \cdots + z_n + \frac{1}{z_1\cdots z_n}
\end{displaymath}
Associated to a Landau-Ginzburg model $W: Y \rightarrow \mathbb{C}$, there are also two categories. The first is analogous to the Fukaya category. Let us assume that $0$ is a regular value, then we take as objects Lagrangians with boundary along $W^{-1}(0)$ whose image under $W$ near $0$ is a curve that ends at $0$. The morphisms are again intersection points, subject to some ordering prescriptions near $0$, and compositions arise in the same manner as before. This category symplectically measures the singularities of $W$. In the case of the mirror to $\mathbb{P}^n_{\mathbb{C}}$, we have the Lagrangian thimbles associated to the non-degenerate critical points of $W$ as the representative objects. For the second category, we measure the singularities of $W$ algebraically. We consider the fibers $W_{\lambda}$ for $\lambda \in \mathbb{C}$ and take the Verdier quotient of $D^b(\operatorname{Coh}(W_{\lambda}))$ by the smallest triangulated category containing all locally-free sheaves and denote it by $D_{Sing}(W_{\lambda})$. Then, the category of singularities for $W$, $D_{Sing}(W)$ is then
\begin{displaymath}
D_{Sing}(W) = \prod_{\lambda \in \mathbb{C}} D_{Sing}(W_{\lambda})
\end{displaymath}
Homological mirror symmetry predicts that we again exchange the algebro-geometric categories and symplecto-geometric categories under the duality. In each of these cases, one hopes that there are a few central geometrically-interesting objects on each side whose endomorphism $A_{\infty}$-algebras are quasi-isomorphic.
Now, the evidence for homological mirror symmetry is not overwhelming but it is quite convincing. In the case of Calabi-Yau varieties, little is proven, outside the case of abelian varieties and K3 surfaces. For the Fano or near Fano case, the correspondence between Lagrangians on the variety and the category of singularities of its Landau-Ginzburg mirror is still wide open, but a lot is known about the correspondence between coherent sheaves on the varieties and Lagrangians in Landau-Ginzburg models. Surfaces and toric varieties in general have been thoroughly studied. In this case, there are a few simplifying properties of the geometry of a Landau-Ginzburg model that allow one to bypass a lot of the technical machinery that is needed in the general case.
How does this paper fit in? We seek to build upon the solid ground of the known cases for surfaces and toric varieties. Given a variety $X$, we extract a Calabi-Yau in a few ways. The most common is to take a divisor in the anti-canonical class if $-\omega_X$ is effective. One could also take an affine open subset of $X$. The third construction is the one appearing in this paper - taking the total space of the canonical bundle. The results here characterize the passage on the level of $A_{\infty}$-algebra/triangulated categories from $X$ to $\omega_X$ purely in terms of ($A_{\infty}$-)algebra. One simply takes the trivial nilpotent extension by the dual bi-module. If homological mirror symmetry is true, we should hope that there is a symplecto-geometric method for achieving this algebraic transition. Moreover, believing homological mirror symmetry, we already know the final answer.
\appendix
\section{Generators for projective schemes over a field}
The results of this section are due to Kontsevich although the author learned
about them (as he learned most of his maths, adviser aside) from a paper of
Seidel \cite{Sei03}. For notions of generation see \cite{BvB02} or \cite{Rou03}.
Let $X$ be a projective scheme over a field $k$. Choose some embedding $i:X \rightarrow \mathbb{P}^N_k$. We consider the functor $Li^*: D(\operatorname{Qcoh}(\mathbb{P}^N)) \rightarrow D(\operatorname{Qcoh}(X))$. It commutes with coproducts because it has a right adjoint.
\begin{prop}
The collection $\{\mathcal{O}_X,\ldots,\mathcal{O}_X(N)\}$ generates $D(\operatorname{Qcoh}(X))$.
\end{prop}
\proof Using the Koszul resolution on $\mathbb{P}^N_k$, we can get any tensor power of the twisting sheaf from iterated cones over objects in the collection $\lbrace \mathcal{O}_{\mathbb{P}^N_k}, \ldots, \mathcal{O}_{\mathbb{P}^N_k}(N) \rbrace$. Pulling back via $Li^*$ shows that we can get any $\mathcal{O}_X(j)$. Using Serre's theorem, we can construct a locally-free quasi-coherent resolution of any quasi-coherent sheaf on $X$ from the set $\lbrace \mathcal{O}_X(j) \rbrace_{j \in \mathbb{Z}}$. Since $Li^*$ commutes with coproducts, the image of $Li^*$ is closed under direct sums. Using homotopy limits we can get any bounded above complex from its truncations. Therefore, we can get all quasi-coherent sheaves starting from $\{\mathcal{O}_X,\ldots,\mathcal{O}_X(N)\}$. The smallest triangulated subcategory containing all quasi-coherent sheaves and closed under direct sums and shifts is all of $D(\operatorname{Qcoh}(X))$. \qed
Let $G = \bigoplus_{j=0}^N \mathcal{O}_X(j)$. Then, $G$ generates $D(\operatorname{Qcoh}(X))$ since we can split idempotents, see \cite{BN93}.
\begin{cor}
If $X$ is a projective scheme over $k$, then there exists a vector bundle $G$ that generates $D(\operatorname{Qcoh}(X))$ up to idempotent splittings.
\end{cor}
\begin{cor}
If $G'$ is any generator (possibly up to idempotent splittings) for $D(\operatorname{Qcoh}(\mathbb{P}^N_k))$, then $Li^*G'$ generates $D(\operatorname{Qcoh}(X))$.
\end{cor}
\proof Since $G'$ generates, we can get $\oplus_{j=0}^N \mathcal{O}_{\mathbb{P}^N_k}(j)$ and hence $G$. \qed
Let us recall another notion of generation.
\begin{defn}
Given an idempotent-closed triangulated category $\mathcal{T}$. We say that a sub-category
$\mathcal{S}$ classically generates $\mathcal{T}$ if the smallest idempotent-closed triangulated subcategory containing $\mathcal{S}$ is $\mathcal{T}$ itself.
\end{defn}
\begin{cor}
If $X$ is smooth, any bounded complex of coherent sheaves that classically strongly generates $D^b(\operatorname{Coh}(\mathbb{P}^N_k))$ pulls back to a classical generator of $D^b(\operatorname{Coh}(X))$.
\end{cor}
\proof This follows immediately from the previous result and theorem $2.1$ of \cite{Nee92}, but we shall give a direct proof. Since $X$ is smooth, every coherent sheaf admits a finite locally-free resolution so it is enough to show that we can get all locally-free coherent sheaves. Let $F$ be locally-free and consider $G$ as above. Then, we get each $\mathcal{O}_X(j)$ from summands of $G$ and a finite number of triangles. By assumption, we can get $G$ from our generator and hence we have the twisting powers. Now resolve $F$
\begin{gather*}
\cdots \rightarrow \mathcal{O}_X(-r_l)^{n_l} \rightarrow \cdots \rightarrow \mathcal{O}_X(-r_1)^{n_1} \rightarrow F \rightarrow 0
\end{gather*}
Let $d$ be the dimension of $X$. Truncate the above sequences at the $d+1$-st step and let $D$ denote the cokernel. It is also locally-free and of finite rank. From Grothendieck's theorem, $\operatorname{Ext}^n(F,D) = H^n(F^{\vee} \otimes D) = 0$ for $n > d$. Thus, in the triangle,
\begin{center}
\leavevmode
\begin{xy}
(-10,-10)*+{D[d+1]}="a"; (0,5)*+{0\rightarrow \mathcal{O}_X(-r_d)^{n_d} \rightarrow \cdots \rightarrow \mathcal{O}_X(-r_1)^{n_1} \rightarrow 0}="b"; (10,-10)*+{F}="c"; {\ar@{->} "a";"b"}; {\ar@{->} "b";"c"}; {\ar@{->}^{\ \ \ \ \ \ a} "c";"a"}
\end{xy}
\end{center}
where $a$ is zero. The result then follows from the following lemma.
\begin{lem}
Given a triangle
\begin{center}
\leavevmode
\begin{xy}
(-10,10)*+{X}="a"; (10,10)*+{Y}="b"; (0,-5)*+{Z}="c"; {\ar@{->}^0 "a";"b"}; {\ar@{->} "b";"c"}; {\ar@{->} "c";"a"}
\end{xy}
\end{center}
then $Z \cong Y \oplus X[1]$.
\end{lem}
\proof Given any $X'$ we have a short exact sequence
\begin{gather*}
0 \rightarrow \operatorname{Hom}(X',Y) \rightarrow \operatorname{Hom}(X',Z) \rightarrow \operatorname{Hom}(X',X[1]) \rightarrow 0
\end{gather*}
Plugging in $X' = Y$ we see that there is a map $Z \rightarrow Y$ that is left inverse to $Y \rightarrow Z$. This splits the short exact sequence and gives the isomorphism. \qed
\bigskip
\bibliographystyle{nhamsplain}
\bibliography{refup}
\end{document}
\begin{document}
\def\spacingset#1{\renewcommand{\baselinestretch}
{#1}\small\normalsize} \spacingset{1}
\if00
{
\title{\bf Prediction properties of optimum response surface designs}
\author{Heloisa M. de Oliveira\thanks{
The authors gratefully acknowledge financial support from Coordena\c{c}\~ao de Aperfei\c{c}oamento de Pessoal de N\'ivel Superior (PNPD/CAPES, Brazil Government) and from FAPESP grant numbers 2013/09282-9 and 2014/01818-0.}\hspace{.2cm}\\
Universidade Federal de Santa Catarina, Curitibanos, SC, 89520-000, Brazil\\
C\'esar B. A. de Oliveira\\
Instituto Tecnol\'ogico de Aeron\'autica, S\~ao Jos\'e dos Campos, SP, 12228-900, Brazil\\
Steven G. Gilmour\\
King's College London, London, WC2R 2LS, UK\\
and \\
Luzia A. Trinca\\
Universidade Estadual Paulista, Botucatu, SP, 18618-689, Brazil}
\maketitle
} \fi
\if10
{
\begin{center}
{\LARGE\bf On prediction properties of optimum response surface designs}
\end{center}
} \fi
\begin{abstract}
Prediction capability is considered an important issue in response surface methodology. Following the line of argument that a design should have several desirable properties we have extended an existing compound design criterion to include prediction properties. Prediction of responses and of differences in response are considered. Point and interval predictions are allowed for. Extensions of existing graphical tools for inspecting prediction performances of the designs in the whole region of experimentation are also introduced. The methods are illustrated with two examples.
\end{abstract}
\noindent
{\it Keywords:} compound criteria, dispersion graphs, $(DP)$-optimality, FDS, $I$-optimality, pure error.
\spacingset{1.45}
\section{Introduction}
\label{sec:intro}
Experiments provide important information for discoveries in many research areas.
Careful planning of an experiment is very important in order to obtain informative answers to the questions of the research problem at hand. The planning
phase can be quite involved and methods for finding optimum designs are very useful when there are several quantitative factors related to the response variables of interest and when there are practical restrictions. Work in this area started by considering the optimization of single design-criterion functions aimed at maximizing the precision of the model parameter estimates or prediction of responses. Computational algorithms are well developed mainly
for $D$- and $I$-efficiency \citep{CookNachtsheim89, jonesgoos2012}. Designs obtained by such methods
are the best or very close to the best (as they are based on heuristics), given the assumed model, for the property being optimized. However, for practical purposes, an experiment should answer several research
questions and so requires a good design with respect to many properties as advocated by \cite{Box-Draper:1975}. Fortunately, in the last decade or so, design
methodologies seem to be moving in this direction through the application of compound criteria and multiple objective approaches \citep{goosetal2005, jonesnash2011, luetal11, smucker2012, gilmourtrinca2012, smucker2015, borrotti2016, daSilvaGilmourTrinca2017, trincagilmour2017}.
While the use of compound criteria or multiple objective procedures allow the consideration of a set of one-dimensional properties for constructing the design, graphical techniques add information to illustrate
the prediction properties of the designs. The study of design prediction capabilities through graphs advanced with \cite{g-j&m1989} and \cite{myers1992} when they introduced variance dispersion graphs. These graphs were followed by the quantile plots of
\cite{khuri96}, the difference variance dispersion graphs of \cite{trincagilmour1999} and the fraction of design space plots of \cite{zahran2003} and \cite{jang2012}. Such techniques are of great value for choosing a final design among many options.
In this paper we consider a flexible compound criterion for optimization of parameter estimation properties as well as
prediction. The paper introduces several new methods, namely: (i) difference fraction of design space plots, which show variances of differences in response; (ii) variance dispersion graphs and fraction of design space plots for interval predictions, for both responses and differences in response; (iii) the $I_D$ criterion, for point estimation of differences in response; (iv) the $(IP)$ and $(I_DP)$ criteria for interval estimation of responses and differences in response; (v) using standard errors, rather than variances in the plots; (vi) using relative volume in the plots. These methods can be considered as extensions for prediction criteria motivated by the difference variance dispersion graphs of \cite{trincagilmour1999} and the adjusted criteria of \cite{gilmourtrinca2012}. The designs constructed are further evaluated according to their performances with respect to prediction capabilities using the graphs described and extensions incorporating the new measures. In Section \ref{sec:DC} we
review the literature and propose extensions to the usual design criteria. In Section \ref{sec:PC} we discuss graphical methods for prediction evaluation and propose two
extensions, and in Section \ref{sec:Ex} we illustrate these methods and compare several designs for two examples. Motivated by these results, we note in Section \ref{sec:CCD} some situations in which central composite designs are optimal. Finally a discussion is presented in Section \ref{sec:disc}.
\section{Design criteria}\label{sec:DC}
Data from experiments with $q$ continuous quantitative factors are routinely analyzed by fitting low order polynomials. These are used as approximations to the
unknown true function relating the response variable $Y$ and the treatments. A treatment $\mathbf{x}$ is defined by a specific combination of levels of the $q$ factors
$X_1,~X_2,~\ldots,~X_q$. The full model for a completely randomized design with $n$ experimental units (runs) is
\begin{equation}
\mathbf{Y} = \mbox{\boldmath{$\mu$}}(\mathbf{x})+\mbox{\boldmath{$\varepsilon$}},
\label{eq:full}
\end{equation}
where $\mathbf{Y}$ is the column vector of random variables of dimension $n$, $\mbox{\boldmath{$\mu$}}(\mathbf{x})$ is the mean vector of $\mathbf{Y}$, depending on $\mathbf{x}$, and $\mbox{\boldmath{$\varepsilon$}}$ is the error term random vector satisfying $E(\mbox{\boldmath{$\varepsilon$}})=\mathbf{0}$ and $Var(\mbox{\boldmath{$\varepsilon$}})=\sigma^2\mathbf{I}$.
The full model may be further approximated by
\begin{equation}
\mbox{\boldmath{$\mu$}}(\mathbf{x})\approx\mathbf{X}\mbox{\boldmath{$\beta$}},
\label{eq:poly}
\end{equation}
where, using standard notation,
$\mbox{\boldmath{$\beta$}}$ is the $p$-dimensional vector of unknown parameters and $\mathbf{X}$ is the $\big(n \times p \big)$ model matrix whose rows, denoted by $\mathbf{f}(\mathbf{x})^\prime$,
are expansions of levels of the factors in order to accommodate the desired polynomial.
Since the matrix $\mathbf{X}$ is defined by the design and the model approximation, for notational simplicity we will refer to the design as $\mathbf{X}$.
As discussed in \cite{gilmourtrinca2012}, fitting the full model (\ref{eq:full}) allows unbiased estimation of $\sigma^2$ if degrees of freedom from treatment
replications are available while fitting model (\ref{eq:poly}) allows simplification and also lack of fit checking if there are spare treatment degrees
of freedom. In order to construct optimum designs that allow unbiased estimation of error variance, \cite{gilmourtrinca2012} proposed adjustments to the usual alphabetical design criteria,
based on the appropriate quantiles of the $F$ distribution, e.g.\ the $(DP)_S(\alpha)$ and $(AP)_S(\alpha)$ criteria. Following their logic, Goos, in the discussion of \cite{gilmourtrinca2012}, proposed the same type of adjustment
for the $I$-optimality criterion.
\subsection{Prediction of responses}
For any point $\mathbf{x} \in \mathcal{X}$, $\mathcal{X}$ being the region which the experimenter desires to explore, the variance of $\hat{y}(\mathbf{x})$, the estimated response from the fitted polynomial,
is $\text{var}(\hat{y}(\mathbf{x}))=\sigma^2 \mathbf{f}(\mathbf{x})^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{f}(\mathbf{x})$. An $I$-optimum design $\mathbf{X}$ is such
that the average variance of predictions over the whole experimental region $\mathcal{X}$ is minimized. Let $\Psi=\int_{\mathbf{x}\in\mathcal{X}}d\mathbf{x}$ be the volume of the region
$\mathcal{X}$. The average prediction variance is defined as
\begin{equation}
\text{average}~\text{variance}=\Psi^{-1}\int_{\mathbf{x}\in\mathcal{X}}\text{var}(\hat{y}(\mathbf{x}))d\mathbf{x}\propto\int_{\mathbf{x}\in\mathcal{X}}\mathbf{f}(\mathbf{x})^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{f}(\mathbf{x})d\mathbf{x}.\label{eq:vary}\end{equation} As the integrand in (\ref{eq:vary}) is a scalar, and using properties of the trace of matrix products, it is easily shown that
\begin{equation}
\text{average}~\text{variance}\propto\text{trace}\left[\mathbf{\mathcal{M}}(\mathbf{X}^\prime\mathbf{X})^{-1}\right],\label{eq:I}\end{equation}where $\mathbf{\mathcal{M}}=\int_{\mathbf{x}\in\mathcal{X}}\mathbf{f}(\mathbf{x})\mathbf{f}(\mathbf{x})^\prime d\mathbf{x}$
is the so called moment matrix of the region. For regular spherical and cubic regions and polynomial models, the matrix $\mathcal{M}$ obeys known patterns, given explicitly, for the full second order model,
in \cite{hardin1991sphere} and \cite{hardin1991cube} for example.
Considering that interest is in evaluating the performance of the design for interval predictions, the $I$ criterion may be modified to minimize the average, over the design region $\mathcal{X}$, of the width of pointwise confidence intervals for the mean response. This gives the criterion function
\begin{equation}
\text{trace}\left[\mathbf{\mathcal{M}}(\mathbf{X}^\prime\mathbf{X})^{-1}\right]F_{1,d;1-\alpha_3},\label{eq:IP}\end{equation} the $(IP)(\alpha_3)$ criterion, where $d$ is the number of
pure error degrees of freedom of the design $\mathbf{X}$, $1-\alpha_3$ is the confidence level for pointwise intervals for $E(y(\mathbf{x}))$ and $F_{1,d;1-\alpha_3}$ is the relevant quantile from the $F$ distribution. According to several researchers, prediction is a key point for planning
response surface experiments \citep{g-j&m1989, hardin1993, trincagilmour1999, zahran2003, goosjones2011book, jonesgoos2012, borrotti2016}.
\subsection{Prediction of differences in response}
In \cite{trincagilmour1999} it was argued that rather than the response level, prediction of differences in
responses would be more interesting. In particular, we are often interested in differences between the estimated response at the expected optimum or standard operating conditions and the estimated response at other
locations, i.e.\ $y(\mathbf{x})-y(\mathbf{x}_0)$, where $\mathbf{x}_0$ denotes standard conditions or the prior expected optimum combination. We code the factors, so that $\mathbf{x}_0=\mathbf{0}$, which implies that the focus should be on estimating $y(\mathbf{x})-\beta_0$. There are both theoretical and practical reasons why predicting differences in response makes more sense than predicting responses themselves.
First, the randomization of the experiment ensures that least squares estimators of the parameters are unbiased, except for the estimate of $\beta_0$, which requires the further assumption that the experimental units are a random sample from a population of possible units --- see for example \cite{coxreid}, pp.~32--36, or Chapter 5 of \cite{hinkelmann}. In response surface studies the runs are almost never a random sample and even treating them as a representative sample is usually implausible. Therefore predictions of responses made from the experiment cannot reasonably be applied to the process over time, but predictions of differences in response can.
Secondly, important aspects of the interpretation of fitted response surfaces, such as estimating the location of the stationary point and estimating the location of ridges, do not depend on the intercept. For example, the stationary point is located at $-\mathbf{B}^{-1}\mathbf{b}/2$, where $\mathbf{b}$ and $\mathbf{B}$ contain respectively the first and second order parameters. Similarly, canonical analysis depends on the same vector and matrix. Thus important aspects of response surface interpretation, which are difficult to build directly into design optimality criteria, should be better represented by optimizing the prediction of differences in response than by optimizing predictions of responses.
Finally, if $\mathbf{x}_0$ represents standard operating conditions of the process, we should already have a much better estimate of $E[y(\mathbf{x}_0)]$ from the historical running of the process than we can expect to get from a fairly small experiment. Using the factor coding, we can treat this historical estimate as being the true $\beta_0$. Then the best prediction from the experiment of the response at some $\mathbf{x}$ is not $\hat{y}(\mathbf{x})$, but
\begin{equation}
\label{eq:ytilde}
\tilde{y}(\mathbf{x}) = \beta_0 +\hat{y}(\mathbf{x}) -\hat{\beta}_0.
\end{equation}
Then the variance of a prediction using this method is
\[
\text{var}[\tilde{y}(\mathbf{x})] = \text{var}[\hat{y}(\mathbf{x}) -\hat{\beta}_0] = \text{var}[\hat{y}(\mathbf{x})-\hat{y}(\mathbf{x}_0)].
\]
Hence, even if predictions of responses are of interest, the design should be chosen to minimize variances of differences in response.
Based on this argument, we define the $I_D$ criterion which minimizes the average difference variance,
\begin{eqnarray}\text{average}~\text{difference}~\text{variance}&=&\Psi^{-1}\int_{\mathbf{x}\in\mathcal{X}}\text{var}[\hat{y}(\mathbf{x})-\hat{y}(\mathbf{x}_0)]d\mathbf{x} \nonumber\\
&\propto&\int_{\mathbf{x}\in\mathcal{X}}[\mathbf{f}(\mathbf{x})-\mathbf{f}(\mathbf{x}_0)]^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}[\mathbf{f}(\mathbf{x})-\mathbf{f}(\mathbf{x}_0)]d\mathbf{x}.\end{eqnarray}
For coded factors $\mathbf{x}_0=\mathbf{0}$ and analogously to (\ref{eq:I}) we have \begin{equation}\text{average}~\text{difference}~\text{variance}\propto\text{trace}\left[\mathbf{\mathcal{M}}_0(\mathbf{X}^\prime\mathbf{X})^{-1}\right],\end{equation}
where $\mathbf{\mathcal{M}}_0=\int_{\mathbf{x}\in\mathcal{X}}[\mathbf{f}(\mathbf{x})-\mathbf{f}(\mathbf{0})][\mathbf{f}(\mathbf{x})-\mathbf{f}(\mathbf{0})]^\prime d\mathbf{x}$ such that $\mathbf{\mathcal{M}}_0$ is the $\mathbf{\mathcal{M}}$ matrix with first row and first column set to zero. Similarly to the $(IP)(\alpha_3)$ criterion we may now define the $(I_DP)(\alpha_{4})$ criterion that searches for $\mathbf{X}$ which minimizes \begin{equation}
\text{trace}\left[\mathbf{\mathcal{M}}_0(\mathbf{X}^\prime\mathbf{X})^{-1}\right]F_{1,d;1-\alpha_{4}},\label{eq:I_DP}\end{equation}
where $(1-\alpha_{4})$ is the confidence level for pointwise intervals for expected response differences and $F_{1,d;1-\alpha_{4}}$ is the appropriate $F$ distribution quantile. This minimizes the average, over the design region $\mathcal{X}$, of the width of pointwise confidence intervals for the mean response if we use equation (\ref{eq:ytilde}) for the predictions.
\subsection{Compound criteria}
\cite{hardin1993} and \cite{jonesgoos2012} showed that $I$-optimum designs have smaller losses in efficiency for parameter estimates than $D$-optimum designs have in terms of prediction
efficiency. Whereas these authors preferred $I$-optimality on this basis, it is more desirable to build both parameter estimation and prediction into the optimality criterion. This, together with the commonly accepted view that a design should have several good properties, suggests investigating a compound criterion for prediction as well as
estimation. To that end we extend the compound criteria
of \cite{gilmourtrinca2012} in order to take into account predictions of the response as well as expected differences in the response with respect to the experimental region center. Thus we simply divide \cite{gilmourtrinca2012}'s equation (5) by
\begin{equation}
{F^{\kappa_6}_{1,d;1-\alpha_{3}}} {F^{\kappa_8}_{1,d;1-\alpha_{4}}}\text{tr}\left\{\mathbf{\mathcal{M}}(\mathbf{X}^\prime\mathbf{X})^{-1}\right\}^{\kappa_5+\kappa_6}\text{tr}\left\{\mathbf{\mathcal{M}}_0(\mathbf{X}^\prime\mathbf{X})^{-1}\right\}^{\kappa_7+\kappa_8},
\end{equation}
where $\kappa_5,~\kappa_6,~\kappa_7~\text{and}~\kappa_8$ are the priority weights for point response prediction, interval response prediction, point response difference prediction and interval response difference prediction, respectively, leading to the more general compound criteria, after ignoring constant terms, given by
\begin{equation}
\frac{F^{-\kappa_1}_{p-1,d;1-\alpha_1}{F^{-\kappa_2}_{1,d;1-\alpha_2}}|\mathbf{X}_0^\prime\mathbf{Q}\mathbf{X}_0|^\frac{\kappa_0+\kappa_1}{p-1}
(n-d)^{\kappa_4}{F^{-\kappa_6}_{1,d;1-\alpha_3}} {F^{-\kappa_8}_{1,d;1-\alpha_4}}}{
\text{tr}\{\mathbf{W}(\mathbf{X}^\prime\mathbf{X})^{-1}\}^{\kappa_2+\kappa_3}\text{tr}\left\{\mathbf{\mathcal{M}}(\mathbf{X}^\prime\mathbf{X})^{-1}\right\}^{\kappa_5+\kappa_6}\text{tr}\left\{\mathbf{\mathcal{M}}_0(\mathbf{X}^\prime\mathbf{X})^{-1}\right\}^{\kappa_7+\kappa_8}},
\label{eq:CP}
\end{equation}
where $\sum_{i=0}^8\kappa_i=1$ and $\mathbf{X}_0$ is the $n\times(p-1)$ matrix equal to the $\mathbf{X}$ matrix except that the column of 1's corresponding to the intercept is
removed and $\mathbf{Q}=\mathbf{I}-\mathbf{1}\mathbf{1}^\prime/n$ is of dimension $n\times n$. Note that we have included in the formula the $D_S$ criterion. By allowing $\kappa_0>0$ we can use the $D_S$ property to reflect parameter point estimation if desired. Note that the formula allows $L$ type criteria, the $A$ criterion being a particular case. For second-order polynomials we recommend the use of weights through the $\mathbf{W}$ matrix in order to adjust the scale for the different types of parameter in the polynomial, i.e.\ linear, quadratic and interaction parameters.
To find a compromise design by maximizing (\ref{eq:CP}) we can use any algorithm proposed in the literature for factorial designs, such as point- or coordinate-exchange type algorithms.
\section{Design prediction capability}\label{sec:PC}
Many of the measures proposed for design construction and evaluation, e.g.\ those of the type presented in Section \ref{sec:DC}, are global measures that try to convey in a single number all the information available in the design (see the discussion in \cite{anderson-cookmontg2009}). Depending on the objectives of the experiment, inspection of only these global measures may not suffice for design choice. This is particularly true for prediction since a design may show a reasonable performance globally by performing extremely well in one portion of the region but badly in another portion that could perhaps be of more interest. Thus, for inspection of design capabilities with respect to prediction, several valuable graphical approaches have been proposed. \cite{g-j&m1989} proposed the variance dispersion graphs (VDGs) that plot the maximum, mean and minimum variances for predictions of the response calculated over various spheres within the region of interest. For a scaled region so that the maximum point is at distance 1 from the center, the radius $r$ varies from 0 to 1. From \cite{g-j&m1989}, for the sphere $U_r$ $(U_r = \{\mathbf{x}: \sum_{i=1}^qx_i^2=r^2\}, r<1)$, the mean, or integrated, variance of predictions is the spherical variance defined by
\begin{equation}
V^r\propto\Psi^{-1}_r\int_{\mathbf{x}\in U_r}\mathbf{f}(\mathbf{x})^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{f}(\mathbf{x})d\mathbf{x}=\text{tr}\{\mathcal{M}_r(\mathbf{X}^\prime\mathbf{X})^{-1}\},
\end{equation}
where $\Psi_r = \int_{\mathbf{x}\in U_r}d\mathbf{x}$ and $\mathcal{M}_r$ is the matrix of moments for the region $U_r$. \cite{vining1993} gave Fortran code to calculate and plot the maximum, minimum and average variances, for given radius, against the distance from the center. VDGs allow visualization of prediction stability over the region and prediction performance of the design in a more informative way than single valued measures. For cuboidal regions, average variances are not calculated and the maximum and minimum variances are searched over restricted hyperspheres when their radii extrapolate the hypercube. The VDG methodology was extended for inspection of variances of response differences by the introduction of difference variance dispersion graphs (DVDGs) by \cite{trincagilmour1999}. For the sphere $U_r$, the mean or integrated variance of differences between predictions at two points, $\mathbf{x}\in \mathcal{X}$ and the design center, is defined by
\begin{equation}
DV^r\propto\Psi^{-1}_r\int_{\mathbf{x}\in U_r}(\mathbf{f}(\mathbf{x})-\mathbf{f}(\mathbf{0}))^\prime(\mathbf{X}^\prime\mathbf{X})^{-1}(\mathbf{f}(\mathbf{x})-\mathbf{f}(\mathbf{0}))d\mathbf{x}=\text{tr}\{\mathcal{M}_{0r}(\mathbf{X}^\prime\mathbf{X})^{-1}\},
\end{equation}
where $\mathcal{M}_{0r}$ is the matrix $\mathcal{M}_r$ with first row and first column set to zero.
Because for each design the VDG and DVDG present three (spherical region) or two (cuboidal region) lines it is difficult to compare more than a very few designs in the same plot. Another drawback of these graphs is that they ignore the relative volume associated with the sphere $U_r$ and may lead to misleading interpretations. The situation is more serious for $q\ge4$.
A more recently preferred display is the fraction of design space (FDS) plot proposed by \cite{zahran2003}. The FDS plot shows the variance against the relative volume of the region that has prediction variance at or below a given value.
The FDS plot can be easily extended to difference fraction of design space (DFDS) plots, that is the fraction of design space for variances of the estimated differences between $\hat{y}(\mathbf{x})$ and $\hat{y}(\mathbf{x}_0)$. The usual method to obtain the information for these graphs is the one outlined in \cite{goosjones2011book} and we use it to obtain FDS and DFDS plots. A very large sample, of size $N$ points, is taken randomly from $\mathcal{X}$ and $v_j=\mathbf{f}(\mathbf{x}_j)^\prime (\mathbf{X}^\prime\mathbf{X})^{-1}\mathbf{f}(\mathbf{x}_j)$ for FDS or $vd_j=(\mathbf{f}(\mathbf{x}_j)-\mathbf{f}(\mathbf{x}_0))^\prime (\mathbf{X}^\prime\mathbf{X})^{-1}(\mathbf{f}(\mathbf{x}_j)-\mathbf{f}(\mathbf{x}_0))$ for DFDS are calculated for $j=1,~2,~\ldots,~N$ ($\mathbf{x}_0$ is fixed at the desired treatment; here we use, as before, $\mathbf{x}_0=\mathbf{0}$). Then these values are sorted such that $v_{(j)}$ (or $vd_{(j)}$) is in the $j^{\text{th}}$ position. The graph is simply the plot of $v_{(j)}$ (or $vd_{(j)}$) against $j/(N+1)$.
We suggest and use an alternative for VDG and DVDG by replacing the radius or distance from the design center by the relative volume of the region inside the hypersphere formed by each distance, to the whole design region. This is particularly useful because we add information that the FDS does not show, that is in which parts of the region the design has which properties.
The calculation of the values for constructing VDG, DVDG, FDS and DFDS plots is available in the R package \verb"dispersion" \citep{oliveira_cesar2014}. Versions of these graphs to explore interval prediction properties are easily obtained by multiplying $v_{(j)}$ or $vd_{(j)}$ by $F_{1,d;1-\alpha}$ for some suitable choice of $\alpha$.
\section{Examples}\label{sec:Ex}
In this section we explore the potential of the proposed compound criteria for constructing designs for two experiments. We focus on $D_S$, $(DP)_S$ and prediction efficiencies for constructing the designs. For interval estimation criteria we used $\alpha=0.05$ throughout. The search procedure uses a point exchange algorithm. We further evaluate the prediction capabilities of the designs using several versions of the graphs described in Section \ref{sec:PC}. In the displays we use the standard error (s.e.) instead of the variance scale in order to discriminate better between designs, since most variances are less than 1. The new proposed plots are presented in the paper while slight variations of the old ones are included in the Supplementary Material.
\subsection{Example 1: Cassava bread recipe}
\cite{esc} performed experiments in order to gain knowledge for a gluten-free bread recipe using cassava flour for people with coeliac disease. One of the experiments used $n=26$ experimental units to study the effects of $q=3$ factors, the amount of powder albumen ($X_1$); the amount of yeast ($X_2$) and the amount of cassava flour ($X_3$). Other ingredients and factors associated with the mixing and baking process were kept constant. The experimental region was the cube defined by $10\le x_1\le30g$, $5\le x_2\le 15g$ and $45\le x_3\le65g$, and the experimenter decided to use a modified central composite design (CCD) with four center runs and the factorial part duplicated. One objective was to estimate optimum quantities of the ingredients based on some organoleptic characteristics and the primary model considered was the second-order polynomial with $p=10$ regression parameters. Note that the full three-level factorial would use 27 runs and would allow no pure error degrees of freedom. Alternative designs for this experiment were given by \cite{gilmourtrinca2012}, using the inference based and compound criteria, and in \cite{borrotti2016}, using the multi-objective algorithm, MS-TPLS, for both sets of properties, $D$, $A$ and $I$ and $D_S$, $A_S$ and $I_D$.
\begin{table}[H]
\caption{\label{tab:desEx1_1} Alternative designs for Example 1 ($n=26$, $q=3$, $p=10$ in cubic region)}
\centering
\renewcommand{\arraystretch}{.7}
\begin{tabular}{rrrcrrrcrrrcrrrcrrr}
\toprule
\multicolumn{19}{c}{Design}\\ \multicolumn{3}{c}{4}&&\multicolumn{3}{c}{5}&&\multicolumn{3}{c}{6}&&\multicolumn{3}{c}{7} &&\multicolumn{3}{c}{8} \\
\multicolumn{3}{c}{{$I$} }&&\multicolumn{3}{c}{{$(IP)$}}&&\multicolumn{3}{c}{$I_D$}&&\multicolumn{3}{c}{$(I_DP)$}&&
\multicolumn{3}{c}{$\kappa_1=\kappa_7=.5$} \\
\cmidrule(lr){1-3}\cmidrule(lr){5-7}\cmidrule(lr){9-11}\cmidrule(lr){13-15}\cmidrule(lr){17-19}
$X_1$&$X_2$&$X_3$ &&$X_1$&$X_2$&$X_3$&&$X_1$&$X_2$&$X_3$ &&$X_1$&$X_2$&$X_3$ && $X_1$&$X_2$&$X_3$ \\ \cmidrule(lr){1-3}\cmidrule(lr){5-7}\cmidrule(lr){9-11}\cmidrule(lr){13-15}\cmidrule(lr){17-19}
-1 & -1 & -1 && -1 & -1 & -1&&-1 & -1 & -1 && -1 & -1 & -1 && -1 & -1 & -1 \\
-1 & -1 & 1 && -1 & -1 & 1 &&-1 & -1 & -1 && -1 & -1 & -1 && -1 & -1 & -1 \\
-1 & 1 & -1 && -1 & 1 & -1&&-1 & -1 & 1 && -1 & -1 & 1 && -1 & -1 & 1 \\
-1 & 1 & 1 && -1 & 1 & 1 &&-1 & 1 & -1 && -1 & 1 & -1 && -1 & -1 & 1 \\
1 & -1 & -1 && 1 & -1 & -1&&-1 & 1 & 1 && -1 & 1 & 1 && -1 & 1 & -1 \\
1 & -1 & 1 && 1 & -1 & 1 && 1 & -1 & -1 && -1 & 1 & 1 && -1 & 1 & -1 \\
1 & 1 & -1 && 1 & 1 & -1&& 1 & -1 & 1 && 1 & -1 & -1 && -1 & 1 & 1 \\
1 & 1 & 1 && 1 & 1 & 1 && 1 & 1 & -1 && 1 & -1 & -1 && -1 & 1 & 1 \\
-1 & -1 & 0 && -1 & 0 & 0 && 1 & 1 & 1 && 1 & -1 & 1 && 1 & -1 & -1 \\
-1 & 1 & 0 && -1 & 0 & 0 &&-1 & -1 & 0 && 1 & -1 & 1 && 1 & -1 & -1 \\
1 & -1 & 0 && -1 & 0 & 0 &&-1 & 1 & 0 && 1 & 1 & -1 && 1 & -1 & 1 \\
1 & 1 & 0 && 1 & 0 & 0 && 1 & -1 & 0 & & 1 & 1 & -1 && 1 & -1 & 1 \\
-1 & 0 & -1 && 1 & 0 & 0 && 1 & 1 & 0 & & 1 & 1 & 1 && 1 & 1 & -1 \\
-1 & 0 & 1 && 1 & 0 & 0 &&-1 & 0 & -1 & & 1 & 1 & 1 && 1 & 1 & -1 \\
1 & 0 & -1 && 0 & -1 & 0 &&-1 & 0 & 1 & & -1 & 0 & 0 && 1 & 1 & 1 \\
1 & 0 & 1 && 0 & -1 & 0 && 1 & 0 & -1 && -1 & 0 & 0 && 1 & 1 & 1 \\
0 & -1 & -1 && 0 & -1 & 0 && 1 & 0 & 1 && 1 & 0 & 0 && -1 & 0 & 0 \\
0 & -1 & 1 && 0 & 1 & 0 && 0 & -1 & -1 && 1 & 0 & 0 && -1 & 0 & 0 \\
0 & 1 & -1 && 0 & 1 & 0 && 0 & -1 & 1 && 0 & -1 & 0 && 1 & 0 & 0 \\
0 & 1 & 1 && 0 & 1 & 0 && 0 & 1 & -1 && 0 & -1 & 0 && 0 & -1 & 0 \\
0 & 0 & 0 && 0 & 0 & -1&& 0 & 1 & 1 && 0 & 1 & 0 && 0 & -1 & 0 \\
0 & 0 & 0 && 0 & 0 & -1&& 0 & 0 & 0 && 0 & 1 & 0 && 0 & 1 & 0 \\
0 & 0 & 0 && 0 & 0 & -1&& 0 & 0 & 0 && 0 & 0 & -1 && 0 & 0 & -1 \\
0 & 0 & 0 && 0 & 0 & 1 && 0 & 0 & 0 && 0 & 0 & -1 && 0 & 0 & -1 \\
0 & 0 & 0 && 0 & 0 & 1 && 0 & 0 & 0 && 0 & 0 & 1 && 0 & 0 & 1 \\
0 & 0 & 0 && 0 & 0 & 1 && 0 & 0 & 0 && 0 & 0 & 1 && 0 & 0 & 1 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h]
\scalefont{.8}
\caption{\label{tab:effEx1}Efficiencies of alternative designs for Example 1 ($n=26$, $q=3$, $p=10$ in cubic region)}
\centering
\setlength{\tabcolsep}{0.1cm}
\begin{tabular}{cccrrrrrrrrrrrrrrrrrrrrrrrrrrrr}
\toprule
& & &\multicolumn{8}{c}{Efficiency}\\\cline{4-11}
Design&Criterion
&\multicolumn{1}{c}{df(PE,~ LoF)$^{\dagger}$}&\multicolumn{1}{c}{$D_S$}&\multicolumn{1}{c}{$(DP)_S$}
&\multicolumn{1}{c}{$A_S$}&\multicolumn{1}{c}{$(AP)_S$}&\multicolumn{1}{c}{$I$}
&\multicolumn{1}{c}{$(IP)$}&\multicolumn{1}{c}{$I_D$}&\multicolumn{1}{c}{$(I_DP)$} \\
\midrule
1&{{$D_S$, $A_S$}} &(~9,~~7)& 100.00& 86.77& 100.00& 95.50& 75.80& 72.32& 91.93& 87.00\\
2&{{$(DP)_S$}} & (15,~~1)& 93.81& 100.00& 87.12& 93.72& 69.62& 74.82& 83.47& 88.98\\
3&{{$(AP)_S$}} & (12,~~4)& 98.79& 97.45& 97.13& 100.00& 72.30& 74.36& 89.23& 91.02\\
4&{{$I$}} & (~5,~11)& 90.71& 52.42& 87.71& 64.87& 100.00& 73.88& 99.87& 73.19\\
5&{{$(IP)$}} & (12,~~4)& 79.79& 78.70& 72.80& 74.95& 97.23& 100.00& 87.47& 89.23\\
6&{{$I_D$}} & (~5,~11)& 93.36& 53.96& 90.67& 67.06& 97.22& 71.83& 100.00& 73.28\\
7&{{$(I_DP)$}} & (12,~~4)& 95.29& 93.99& 92.11& 94.82& 92.00& 94.63& 98.03& 100.00\\
8&$\kappa_1=\kappa_7=0.5$ &(12,~4) & 98.68& 97.34& 96.96& 99.82& 84.34& 86.74& 96.77& 98.71\\
9&{{$I_D,D_S,A_S$-$sym$}} & (~5,~11)& 98.13& 56.71& 96.83& 71.62& 85.89& 63.46& 97.01& 71.09\\
\bottomrule
\multicolumn{10}{l}{$ \dagger$df(PE,~LoF): degrees of freedom for pure error, degrees of freedom for lack of fit.} \\
\end{tabular}
\end{table}
Here we explore the prediction performances of some of the previously published designs and construct a few other alternatives based on estimation and prediction properties. The new designs are presented in Table \ref{tab:desEx1_1}. In Table \ref{tab:effEx1} we show the properties of the designs in terms of the usual single-valued criteria and the new criteria introduced in Section \ref{sec:DC}.
Designs 1 to 3 were presented in \cite{gilmourtrinca2012}, design 9 is the best design \cite{borrotti2016} found for the properties $D_S$, $A_S$ and $I_D$,
which they called the $I_D,D_S,A_S$-$symmetrical$ design. Designs from 4 to 8 are the new designs, the first four based on a single prediction property each ($I$, $(IP)$, $I_D$ and
$(I_DP)$) and design 8 constructed by using a compound criterion with
$\kappa_1=\kappa_7=0.5$ in equation (\ref{eq:CP}), that is, giving equal priority for $(DP)_S$ and point predictions of difference of response.
We note that, as the number of runs is not too small for the model specified, all designs allow for pure error degrees of freedom with designs 4, 6 and 9
($I$, $I_D$ and $I_D,D_S,A_S$-$sym$) being the least attractive in this respect. Comparisons between designs 1 and 4 confirm the observation of \cite{jonesgoos2012}
that the losses of $I$-optimum designs in terms of efficiencies for estimation, with respect to $D_S$ and $A_S$ criteria, are smaller than the
losses of efficiencies in terms of prediction of $D_S$- or $A_S$-optimum designs. Similar lessons can be drawn when we compare designs 2 and 5 ($(DP)_S$- and $(IP)$-optimum
designs) but now the differences are smaller. However, the results contradict the suggestion of Goos in the discussion of \cite{gilmourtrinca2012} that $I$-optimal designs usually have more replicates than $D$-optimal designs.
In general all designs based on a single property have low performance on at least one property except the
$(I_DP)$-optimum design which has a minimum efficiency of 92\%. However, in case we are interested in inferences for the parameters and predictions of differences in response,
design 8 (obtained by the compound criterion, considering equal weights for $(DP)_S$ and $I_D$) has very high efficiencies for all properties. Surprisingly,
design 8 outperforms design 9, the $I_D,D_S,A_S$-$sym$ multiple objective design from \cite{borrotti2016}, except for $I$ and $I_D$ properties, although the maximum
difference between them in these two properties is only about 1.5\%. For properties like $(DP)_S$, $(AP)_S$, $(I_DP)$ and $(IP)$ the advantage in using design
8 is overwhelming with efficiency gains of 40.63, 28.20, 27.62 and 23.28\%, respectively. It is interesting to note that design 8 is very close to the
$(AP)_S$-optimum design (design 3) in terms of pure error and parameter estimation properties but it is considerably superior in terms of overall predictions.
\begin{figure}
\caption{Standard error dispersion graphs (SEDG) of response predictions (interval), for designs in Example 1. Left: distance. Right: relative volume.}
\label{graph:vdgpeEx1}
\end{figure}
\begin{figure}
\caption{Standard error dispersion graphs of differences (DSEDG) in response predictions (interval), for designs for Example 1. Left: distance. Right: relative volume.}
\label{graph:dvdgpeEx1}
\end{figure}
\begin{figure}
\caption{FDS plots, in terms of s.e., for designs in Example 1. Left: response interval predictions. Right: difference interval predictions.}
\label{graph:fdspeEx1}
\end{figure}
Figures \ref{graph:vdgpeEx1}-\ref{graph:fdspeEx1} (and Figures A-C in the Suppl.) show the prediction performances of the designs over the unit cube using standard error dispersion graphs (SEDGs). For the dispersion graphs (Figure A, left), the usual pattern is observed, i.e.\ the $(AP)_S$-, $D_S$- and $(DP)_S$-optimum designs have the highest s.e.\ at the center in order to control the precision in the corners. Several designs show two spikes around the relative distances of points in the cube face ($\approx 0.58$) and of points in the edges ($\approx 0.82$) with those of the $(DP)_S$-optimum design being most prominent. Note, however, that this design has the smallest minimum s.e.\ further from the center. On the other hand, the $I$-, $(IP)$- and $I_D$-optimum designs have the smallest s.e.'s in the middle but the s.e.'s are high for the portion away from the center. Our compound criterion design ($\kappa_1=\kappa_7=0.5$) does compromise and has similar performances to the $I_D,D_S,A_S$-$sym$ design. Note however its superiority when interval prediction of responses is considered (Figure \ref{graph:vdgpeEx1}). The graph at the right-hand side of Figure \ref{graph:vdgpeEx1} presents the same information, but plotted against the relative volume contained within a radius, rather than its distance from the center. This variation of the plot seems more useful since it discriminates better between the designs.
The ordering of the designs in terms of response predictions is better summarized through the FDS graphs in Figures C (right) and \ref{graph:fdspeEx1} (right). It is interesting to note that the performance of the $(DP)_S$-optimum design is not as bad as suspected before. For interval predictions it outperforms design $I_D,D_S,A_S$-$sym$ in almost the whole region and outperforms the $D_S$-, $(AP)_S$- and $I_D$-optimum design in about $60\%$ of the region. Again, our compound criterion design compromises while the $(IP)$- and $(I_DP)$-optimum designs show the best performances overall.
The designs for Example 1 are quite homogeneous in terms of predictions of differences in the responses (see Figures \ref{graph:dvdgpeEx1}, \ref{graph:fdspeEx1} (right) and C (Supp) and the last two columns of Table \ref{tab:effEx1}). But we can still detect the superiority of our compound design and the $(I_DP)$-optimum design in the whole region.
\subsection{Example 2: $q=5$ factors in spherical region}
\begin{table}[hp]
\scalefont{0.8}
\caption{\label{tab:desEx2_1} Alternative designs for Example 2 ($n=30$, $q=5$, $p=21$ in spherical region)}
\centering
\renewcommand{\arraystretch}{.7}
\setlength{\tabcolsep}{0.1cm}
\begin{tabular}{rrrrrccrrrrrccrrrrr}
\toprule
\multicolumn{19}{c}{Design}\\ \multicolumn{5}{c}{1}&&&\multicolumn{5}{c}{2}&&&\multicolumn{5}{c}{3} \\
\multicolumn{5}{c}{$D_S/I $}&&&\multicolumn{5}{c}{$(DP)_S$} &&&\multicolumn{5}{c}{$A_S$} \\
\cmidrule(lr){1-5}\cmidrule(lr){8-12}\cmidrule(lr){15-19}
$X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ &&& $X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ &&& $X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ \\
\cmidrule(lr){1-5}\cmidrule(lr){8-12}\cmidrule(lr){15-19}
-1.12 & 1.12 & -1.12 & 0 & -1.12 &&& -1.29 & -1.29 & 0 & 1.29 & 0 &&& 0 & -1.12 & 1.12 & -1.12 & -1.12 \\
-1.12 & 1.12 & 1.12 & 0 & -1.12 &&& -1.29 & -1.29 & 0 & -1.29 & 0 &&& 0 & -1.12 & 1.12 & 1.12 & -1.12 \\
1.12 & 1.12 & -1.12 & 0 & -1.12 &&& -1.29 & 1.29 & 0 & -1.29 & 0 &&& 0 & 1.12 & 1.12 & -1.12 & -1.12 \\
1.12 & 1.12 & 1.12 & 0 & -1.12 &&& -1.29 & 1.29 & 0 & 1.29 & 0 &&& 0 & 1.12 & 1.12 & 1.12 & -1.12 \\
-1.29 & 1.29 & 0 & 0 & 1.29 &&& -1.29 & 1.29 & 0 & 1.29 & 0 &&& 1.29 & -1.29 & -1.29 & 0 & 0 \\
1.29 & 1.29 & 0 & 0 & 1.29 &&& 1.29 & -1.29 & -1.29 & 0 & 0 &&& 1.29 & -1.29 & 1.29 & 0 & 0 \\
-1.29 & 0 & -1.29 & 1.29 & 0 &&& 1.29 & 1.29 & -1.29 & 0 & 0 &&& 1.29 & 1.29 & -1.29 & 0 & 0 \\
-1.29 & 0 & 1.29 & 1.29 & 0 &&& 1.29 & 1.29 & -1.29 & 0 & 0 &&& 1.29 & 1.29 & 1.29 & 0 & 0 \\
1.29 & 0 & -1.29 & 1.29 & 0 &&& 1.29 & 0 & 1.29 & 1.29 & 0 &&& -1.29 & -1.29 & 0 & -1.29 & 0 \\
1.29 & 0 & 1.29 & 1.29 & 0 &&& 1.29 & 0 & 1.29 & -1.29 & 0 &&& -1.29 & -1.29 & 0 & 1.29 & 0 \\
-1.29 & 0 & 0 & -1.29 & -1.29 &&& 1.29 & 0 & 1.29 & 1.29 & 0 &&& -1.29 & 1.29 & 0 & -1.29 & 0 \\
-1.29 & 0 & 0 & -1.29 & 1.29 &&& 0 & -1.29 & 1.29 & 0 & -1.29 &&& -1.29 & 1.29 & 0 & 1.29 & 0 \\
1.29 & 0 & 0 & -1.29 & -1.29 &&& 0 & -1.29 & 1.29 & 0 & 1.29 &&& -1.29 & 0 & -1.29 & 0 & -1.29 \\
1.29 & 0 & 0 & -1.29 & 1.29 &&& 0 & -1.29 & 1.29 & 0 & 1.29 &&& -1.29 & 0 & -1.29 & 0 & 1.29 \\
0 & -1.29 & -1.29 & -1.29 & 0 &&& 0 & 1.29 & 1.29 & 0 & -1.29 &&& -1.29 & 0 & 1.29 & 0 & -1.29 \\
0 & -1.29 & 1.29 & -1.29 & 0 &&& 0 & 1.29 & 1.29 & 0 & 1.29 &&& -1.29 & 0 & 1.29 & 0 & 1.29 \\
0 & 1.29 & -1.29 & -1.29 & 0 &&& 0 & 1.29 & 1.29 & 0 & 1.29 &&& 1.29 & 0 & 0 & -1.29 & -1.29 \\
0 & 1.29 & 1.29 & -1.29 & 0 &&& 0 & 0 & -1.29 & -1.29 & -1.29 &&& 1.29 & 0 & 0 & -1.29 & 1.29 \\
0 & -1.29 & -1.29 & 0 & -1.29 &&& 0 & 0 & -1.29 & -1.29 & 1.29 &&& 1.29 & 0 & 0 & 1.29 & -1.29 \\
0 & -1.29 & 1.29 & 0 & -1.29 &&& 0 & 0 & -1.29 & -1.29 & -1.29 &&& 1.29 & 0 & 0 & 1.29 & 1.29 \\
-1.58 & -1.58 & 0 & 0 & 0 &&& 0 & 0 & -1.29 & 1.29 & 1.29 &&& 0 & -1.29 & -1.29 & 0 & -1.29 \\
1.58 & -1.58 & 0 & 0 & 0 &&& 0 & 0 & -1.29 & 1.29 & -1.29 &&& 0 & 1.29 & -1.29 & 0 & -1.29 \\
0 & -1.58 & 0 & 1.58 & 0 &&& 0 & 0 & -1.29 & -1.29 & 1.29 &&& 0 & 0 & 1.29 & -1.29 & 1.29 \\
0 & 1.58 & 0 & 1.58 & 0 &&& -1.58 & 0 & -1.58 & 0 & 0 &&& 0 & 0 & 1.29 & 1.29 & 1.29 \\
0 & -1.58 & 0 & 0 & 1.58 &&& -1.58 & 0 & 1.58 & 0 & 0 &&& 0 & -1.58 & 0 & 0 & 1.58 \\
0 & 0 & -1.58 & 0 & 1.58 &&& -1.58 & 0 & 1.58 & 0 & 0 &&& 0 & 1.58 & 0 & 0 & 1.58 \\
0 & 0 & 1.58 & 0 & 1.58 &&& 1.58 & 0 & 0 & 0 & -1.58 &&& 0 & 0 & -1.58 & -1.58 & 0 \\
0 & 0 & 0 & 1.58 & -1.58 &&& 1.58 & 0 & 0 & 0 & 1.58 &&& 0 & 0 & -1.58 & 1.58 & 0 \\
0 & 0 & 0 & 1.58 & 1.58 &&& 1.58 & 0 & 0 & 0 & 1.58 &&& 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 &&& 0 & 0 & 0 & 0 & 0 &&& 0 & 0 & 0 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[hp]
\scalefont{0.8}
\caption{\label{tab:desEx2_2} Alternative designs for Example 2 ($n=30$, $q=5$, $p=21$ in spherical region) }
\centering
\renewcommand{\arraystretch}{.7}
\setlength{\tabcolsep}{0.1cm}
\begin{tabular}{rrrrrccrrrrrccrrrrr}
\toprule
\multicolumn{19}{c}{Design}\\ \multicolumn{5}{c}{4}&&&\multicolumn{5}{c}{5}&&&\multicolumn{5}{c}{7} \\
\multicolumn{5}{c}{$(AP)_S$}&&&\multicolumn{5}{c}{$(IP)$} &&&\multicolumn{5}{c}{$(I_DP)$} \\
\cmidrule(lr){1-5}\cmidrule(lr){8-12}\cmidrule(lr){15-19}
$X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ &&& $X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ &&& $X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$\\
\cmidrule(lr){1-5}\cmidrule(lr){8-12}\cmidrule(lr){15-19}
-1&-1&-1&-1&-1&&&-1&-1&-1&-1&1&&&-1&-1&-1&-1&1\\
-1&-1&-1&1&1&&&-1&-1&-1&-1&1&&&-1&-1&-1&1&-1\\
-1&-1&1&-1&1&&&-1&-1&-1&1&-1&&&-1&-1&-1&1&-1\\
-1&-1&1&-1&1&&&-1&-1&1&-1&-1&&&-1&-1&1&-1&-1\\
-1&-1&1&1&-1&&&-1&-1&1&-1&-1&&&-1&-1&1&1&1\\
-1&1&-1&-1&1&&&-1&-1&1&1&1&&&-1&1&-1&-1&-1\\
-1&1&-1&1&-1&&&-1&1&-1&1&1&&&-1&1&-1&1&1\\
-1&1&1&-1&-1&&&-1&1&1&-1&1&&&-1&1&1&-1&1\\
-1&1&1&1&1&&&-1&1&1&1&-1&&&-1&1&1&1&-1\\
1&-1&-1&-1&1&&&1&-1&-1&1&1&&&-1&1&1&1&-1\\
1&-1&-1&1&-1&&&1&-1&1&-1&1&&&1&-1&-1&-1&-1\\
1&-1&-1&1&-1&&&1&-1&1&-1&1&&&1&-1&-1&1&1\\
1&-1&1&-1&-1&&&1&-1&1&1&-1&&&1&-1&1&-1&1\\
1&-1&1&1&1&&&1&1&-1&-1&1&&&1&-1&1&-1&1\\
1&1&-1&1&1&&&1&1&-1&1&-1&&&1&-1&1&1&-1\\
1&1&-1&1&1&&&1&1&-1&1&-1&&&1&1&-1&-1&1\\
1&1&1&-1&1&&&1&1&1&-1&-1&&&1&1&1&-1&-1\\
1&1&1&1&-1&&&1&1&1&-1&-1&&&1&1&1&1&1\\
1&1&1&1&-1&&&1&1&1&1&1&&&0&1.12&-1.12&1.12&-1.12\\
1.12&1.12&-1.12&0&-1.12&&&1&1&1&1&1&&&0&1.12&-1.12&1.12&-1.12\\
1.12&1.12&-1.12&0&-1.12&&&-1.12&1.12&-1.12&0&-1.12&&&2.24&0&0&0&0\\
2.24&0&0&0&0&&&-1.12&1.12&-1.12&0&-1.12&&&0&-2.24&0&0&0\\
0&-2.24&0&0&0&&&0&-1.12&-1.12&-1.12&-1.12&&&0&0&2.24&0&0\\
0&0&2.24&0&0&&&0&-1.12&-1.12&-1.12&-1.12&&&0&0&0&2.24&0\\
0&0&0&-2.24&0&&&2.24&0&0&0&0&&&0&0&0&0&2.24\\
0&0&0&0&2.24&&&0&2.24&0&0&0&&&0&0&0&0&0\\
0&0&0&0&0&&&0&0&-2.24&0&0&&&0&0&0&0&0\\
0&0&0&0&0&&&0&0&0&-2.24&0&&&0&0&0&0&0\\
0&0&0&0&0&&&0&0&0&0&2.24&&&0&0&0&0&0\\
0&0&0&0&0&&&0&0&0&0&0&&&0&0&0&0&0\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[hp]
\scalefont{.8}
\caption{\label{tab:desEx2_3} Alternative designs for Example 2 ($n=30$, $q=5$, $p=21$ in spherical region)}
\centering
\renewcommand{\arraystretch}{.7}
\setlength{\tabcolsep}{0.1cm}
\begin{tabular}{rrrrrccrrrrrccrrrrr}
\toprule
\multicolumn{19}{c}{Design}\\ \multicolumn{5}{c}{8}&&&\multicolumn{5}{c}{9}&&&\multicolumn{5}{c}{10} \\
\multicolumn{5}{c}{$\kappa_1=.3;~\kappa_7=.7$}&&&\multicolumn{5}{c}{$\kappa_1=.1;~\kappa_7=.9$} &&&\multicolumn{5}{c}{$\kappa_0=.9;~\kappa_8=.1$
} \\
\cmidrule(lr){1-5}\cmidrule(lr){8-12}\cmidrule(lr){15-19}
$X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ &&& $X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ &&& $X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ \\ \cmidrule(lr){1-5}\cmidrule(lr){8-12}\cmidrule(lr){15-19}
1.12&-1.12&1.12&1.12&0&&&-1&-1&-1&1&-1&&&-1.29&-1.29&-1.29&0&0\\
1.12&1.12&1.12&1.12&0&&&-1&-1&1&1&1&&&-1.29&-1.29&1.29&0&0\\
1.12&-1.12&-1.12&0&1.12&&&-1&1&-1&1&1&&&-1.29&1.29&-1.29&0&0\\
1.12&1.12&-1.12&0&1.12&&&-1&1&1&1&-1&&&-1.29&1.29&1.29&0&0\\
-1.29&-1.29&-1.29&0&0&&&-1&1&1&-1&1&&&1.29&-1.29&0&0&-1.29\\
-1.29&-1.29&1.29&0&0&&&1&-1&-1&-1&-1&&&1.29&-1.29&0&0&-1.29\\
-1.29&1.29&-1.29&0&0&&&1&-1&1&-1&1&&&1.29&-1.29&0&0&1.29\\
-1.29&1.29&1.29&0&0&&&1&1&-1&-1&1&&&1.29&-1.29&0&0&1.29\\
-1.29&0&0&-1.29&-1.29&&&1&1&1&1&1&&&1.29&1.29&0&0&-1.29\\
-1.29&0&0&-1.29&1.29&&&1&1&1&-1&-1&&&1.29&1.29&0&0&1.29\\
-1.29&0&0&1.29&-1.29&&&-1.29&-1.29&0&-1.29&0&&&-1.29&0&0&-1.29&1.29\\
-1.29&0&0&1.29&1.29&&&1.29&-1.29&0&1.29&0&&&-1.29&0&0&1.29&-1.29\\
-1.29&0&0&1.29&1.29&&&-1.29&0&-1.29&-1.29&0&&&-1.29&0&0&1.29&1.29\\
1.29&0&-1.29&0&-1.29&&&1.29&0&-1.29&1.29&0&&&-1.29&0&0&-1.29&-1.29\\
1.29&0&1.29&-1.29&0&&&-1.29&0&0&-1.29&-1.29&&&1.29&0&-1.29&-1.29&0\\
0&-1.58&0&-1.58&0&&&1.29&0&0&1.29&-1.29&&&1.29&0&-1.29&1.29&0\\
0&1.58&0&-1.58&0&&&0&-1.29&-1.29&0&1.29&&&1.29&0&1.29&-1.29&0\\
0&-1.58&0&0&-1.58&&&0&-1.29&1.29&0&-1.29&&&1.29&0&1.29&-1.29&0\\
0&1.58&0&0&-1.58&&&0&1.29&-1.29&0&-1.29&&&1.29&0&1.29&1.29&0\\
0&0&-1.58&-1.58&0&&&-2.24&0&0&0&0&&&0&-1.58&0&-1.58&0\\
0&0&-1.58&1.58&0&&&0&2.24&0&0&0&&&0&-1.58&0&1.58&0\\
0&0&-1.58&1.58&0&&&0&0&2.24&0&0&&&0&1.58&0&-1.58&0\\
0&0&1.58&0&-1.58&&&0&0&0&2.24&0&&&0&1.58&0&1.58&0\\
0&0&1.58&0&1.58&&&0&0&0&0&2.24&&&0&0&-1.58&0&-1.58\\
0&0&1.58&0&1.58&&&0&0&0&0&0&&&0&0&-1.58&0&1.58\\
0&0&0&0&0&&&0&0&0&0&0&&&0&0&1.58&0&-1.58\\
0&0&0&0&0&&&0&0&0&0&0&&&0&0&1.58&0&1.58\\
0&0&0&0&0&&&0&0&0&0&0&&&0&0&0&0&0\\
0&0&0&0&0&&&0&0&0&0&0&&&0&0&0&0&0\\
0&0&0&0&0&&&0&0&0&0&0&&&0&0&0&0&0\\
\bottomrule
\end{tabular}
\end{table}
\cite{jang2012} compared a few classical designs (CCD, Box-Behnken design) for five factors in a spherical region considering several run sizes. Here we constructed several optimum designs for $n=30$ and the second order model ($p=21$) and we compare them with the resolution-V half fraction CCD ($\alpha = \sqrt{5} \approx 2.236$) with four center runs. The designs are shown in Tables \ref{tab:desEx2_1}, \ref{tab:desEx2_2} and \ref{tab:desEx2_3}. Interestingly we found that the $I_D$-optimum design is the resolution-V CCD, which is very unusual for an optimum design chosen from such a large candidate set. We found other equivalences among designs, for example the $D_S$-optimum design is also $I$-optimum, although, since we are using heuristics, we have no absolute guarantee that the true optimum designs for these criteria are equivalent or unique. Design 11 is also similar to a CCD except that it includes four factorial points duplicated (see Table \ref{d11}), the center point is replicated four times and includes the axial pair for only one factor ($X_3$), while for the other factors it includes only one axial point.
\begin{table}
\scalefont{0.8}
\caption{\label{d11} Points from the $2^5$ that are duplicated in Design 11 (Table \ref{tab:effEx2})}
\centering
\renewcommand{\arraystretch}{.7}
\setlength{\tabcolsep}{0.1cm}
\begin{tabular}{rrrrr}
\toprule
$X_1$ & $X_2$ & $X_3$ & $X_4$ & $X_5$ \\
\midrule
-1 & -1 & 1 & 1 & 1\\
-1 & 1 & -1 & 1 & 1\\
-1 & 1 & 1 & 1 & -1\\
1 & -1 & 1 & -1 & 1\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht]
\scalefont{.75}
\caption{\label{tab:effEx2}Efficiencies of alternative designs for Example 2 ($n=30$, $q=5$, $p=21$ in spherical region)}
\centering
\renewcommand{\arraystretch}{.7}
\setlength{\tabcolsep}{0.03cm}
\begin{tabular}{cccrrrrrrrr}
\toprule
& & &\multicolumn{8}{c}{Efficiency}\\\cmidrule(lr){4-11}
Design&Criterion
&\multicolumn{1}{c}{df(PE,~ LoF)$^{\dagger}$}&\multicolumn{1}{c}{$D_S$}&\multicolumn{1}{c}{$(DP)_S$}
&\multicolumn{1}{c}{$A_S$}&\multicolumn{1}{c}{$(AP)_S$}&\multicolumn{1}{c}{$I$}
&\multicolumn{1}{c}{$(IP)$}&\multicolumn{1}{c}{$I_D$}&\multicolumn{1}{c}{$(I_DP)$} \\
\midrule
1&{{$D_S$, $I$}} & (0,~9)& 100.00& 0.00& 94.02& 0.00& 100.00& 0.00& 60.31& 0.00\\
2&{{$(DP)_S$}} & (9,~0)& 86.30 & 100.00& 74.33& 90.36& 74.73 & 97.81 & 52.80& 65.56\\
3&{{ $A_S$}} &(1,~8)& 98.16 & 1.35& 100.00& 3.85& 92.86& 3.85& 81.20& 3.10\\
4&{{$(AP)_S$}} &(8,~1) & 87.39 & 94.39 &85.48&100.00 &74.34&93.64&84.84 &98.28\\
5&{{$(IP)$}} & (8,~1)& 88.84& 95.95& 79.04& 92.47& 79.39& 100.00& 54.37& 62.99\\
6&{{CCD, $I_D$}} & (3,~6)& 96.96& 38.09& 95.25& 58.51& 91.82& 60.73& 100.00& 60.82\\
7&{$(I_DP)$}&(8,~1)&85.37&92.20&83.63&97.83&72.21&90.95&86.32&100.00\\
8&$\kappa_1=0.3;~\kappa_7=0.7$&(7,~2)&85.74&84.69&82.89&92.22&73.35&87.87&87.46&96.35\\
9&$\kappa_1=0.1;~\kappa_7=0.9$&(5,~4)&86.71&64.73&85.61&80.60&76.58&77.62&93.34&87.02\\
10&$\kappa_0=0.9;~\kappa_8=0.1$&(5,~4)&93.49& 69.79 & 91.88& 86.50& 84.56 & 85.72& 87.32& 81.40\\
11&$\kappa_0=\kappa_1=.2;~\kappa_3=\kappa_6=.3$&(7,~2)&90.35&89.25&88.94&98.95&78.50&94.03&88.57& 97.58\\
\bottomrule
\multicolumn{11}{l}{$\dagger$\,df(PE,~LoF): degrees of freedom for pure error, degrees of freedom for lack of fit.} \\
\end{tabular}
\end{table}
The efficiencies of several designs are shown in Table \ref{tab:effEx2}. The optimum designs from the usual criteria do not allow pure error estimation ($D_S/I$) or provide very few treatment replications ($A_S$ and CCD/$I_D$) and thus, efficiencies of these designs with respect to modified criteria are zero or small. We note that designs $(AP)_S$, $(I_DP)$ are similar and have reasonably high efficiencies generally, providing 8 degrees of freedom for error estimation but only one spare degree of freedom to add a higher order term in the model in case experimental results show lack of fit of the quadratic model. Design 11 behaves similarly but has the advantage of allowing two degrees of freedom for lack of fit. We tried many weight patterns for this example to obtain compromise designs but many returned designs equivalent to some of the single property criteria and so, we present results for only four of them, designs 8, 9, 10 and 11. From these we see that designs 9 and 10 balance better the degrees of freedom between pure error and lack of fit. Design 10, which focuses on parameter estimation through the $D_S$ criterion and interval estimation of differences in response, has reasonably high efficiencies overall.
\begin{figure}
\caption{SEDGs (interval) for designs in Example 2. Left: distance. Right: relative volume.}
\label{graph:vdgpeEx2}
\end{figure}
\begin{figure}
\caption{DSEDGs (interval) for designs in Example 2. Left: distance. Right: relative volume.}
\label{graph:dvdgpeEx2}
\end{figure}
In Figures \ref{graph:vdgpeEx2}-\ref{graph:fdspeEx2} (and Figures D-F in the Suppl.) we show the prediction performances of the designs over the unit hypersphere. The $D_S/I$- and $A_S$-optimum designs are not shown in the graphs referring to interval predictions because they have too few pure error degrees of freedom. Again we see that plotting the information against relative volume discriminates better between the designs. For response point prediction the $I_D$-, $(AP)_S$, $(I_DP)$- and compound optimum designs (8, 9, 10 and 11) have much smaller s.e.'s at the design center. However most of these designs become quite unstable away from the center. From these, the $I_D$-optimum design is the most stable followed by design 10 (left hand-side of Figure D). Similar behavior is observed for interval response prediction (left hand-side of Figure \ref{graph:vdgpeEx2}) although $I_D$ has poorer performances than before due to few pure error degrees of freedom. The $(DP)_S$- and $(IP)$-optimum designs have very similar behavior in both graphs with poor performances at the center of the region. Perhaps fairer comparisons are obtained from Figures \ref{graph:vdgpeEx2} and D, both right hand-side. In these graphs we can see that the advantages of designs $I_D$, 8, 9 and 11 are not so impressive since they are superior for only about $10\%$ of the region. Still, for point response predictions, their minimum values are smaller for about $30\%$ ($I_D$) and about $50\%$ (compound designs) but, because of their instability, we resort to Figure F (left hand-side) where we see lines crossing. The $D_S$/$I$-optimum design has the smallest slope but in order to achieve that, it has higher s.e.'s than other designs such as $A_S$, $I_D$ and 10 in about $50\%$ of the region. For interval response predictions (Figure \ref{graph:vdgpeEx2}) the $A_S$-optimum design (not shown in the graph) and the $I_D$-optimum design are clearly no longer competitive. 
The $(DP)_S$- and $(IP)$-optimum designs have the smallest slopes but have higher s.e.'s than several other designs in about $40\%$ of the region. The $(AP)_S$ and $(I_DP)$-optimum design performs quite well, followed by design 8 (Figure \ref{graph:fdspeEx2}, left).
\begin{figure}
\caption{FDS plots, in terms of s.e., for designs in Example 2.
Left: response interval prediction. Right: difference interval prediction.}
\label{graph:fdspeEx2}
\end{figure}
For point predictions of response differences (Figure E, left) we can identify designs $(DP)_S$, $(IP)$ and $D_S/I$ with even the minimum s.e.'s being high with the last being very stable. All other designs show smaller minimum s.e.'s. Again the $I_D$- and $A_S$-optimum designs are quite stable but perform badly for interval predictions (Figure \ref{graph:dvdgpeEx2}, left). The compound design 10 is perhaps attractive due to its smaller maximum s.e.'s. Once more the patterns are much clearer in Figures \ref{graph:dvdgpeEx2} and E, both right, which separates better the designs. The overall performances are summarized in Figures F and \ref{graph:fdspeEx2} (right). In Figure F (right) we clearly see two groups with the $(DP)_S$-, $(IP)$- and $D_S$-optimum designs having the worst performances for the whole region. The $I_D$-optimum design has the best performance throughout showing that the single criterion $I_D$ summarizes very well the point prediction capabilities in the whole region. We note, however, there are other designs with similar performances, mainly those obtained by compound criteria, although the $A_S$- and $(AP)_S$ and $(I_DP)$-optimum designs follow closely. Now, considering interval predictions of differences (Figure \ref{graph:fdspeEx2}, right), there are three designs with very close to the best performances, namely the $(AP)_S$-, $(I_DP)$- and the compound design 8 (with weights $\kappa_1=0.3$ and $\kappa_7=0.7$, compromising between $(DP)_S$ and $I_D$). The other three compound designs are also close to these.
\section{Central composite designs which are $I_D$-optimal}\label{sec:CCD}
The classical approach to designing response surface experiments, most commonly using CCDs, and the optimal design approach, most commonly using $D$-optimality, are often contrasted as having quite different philosophies. It is therefore intriguing that the CCD for five factors in 30 runs, based on a resolution-V half-replicate factorial portion, with four center points, in a spherical region, is optimal under the new $I_D$ criterion. It is natural to ask whether this is true for other run sizes and for other numbers of factors.
This was explored by running our exchange algorithm for various numbers of factors and run sizes in spherical regions. Subject to there being a very small chance that the algorithm has failed to find the true optimum, we found the following.
\begin{itemize}
\item For three factors, the CCD is $I_D$-optimal for $17 \leq n \leq 20$, i.e.\ 3 to 6 center points.
\item For four factors, the CCD is $I_D$-optimal for $28 \leq n \leq 32$, i.e.\ 4 to 8 center points.
\item For five factors, the CCD, with a half-replicate of the factorial points, is $I_D$-optimal for $30 \leq n \leq 33$, i.e.\ 4 to 7 center points.
\item For six factors, the CCD, with a half-replicate of the factorial points, is $I_D$-optimal for $50 \leq n \leq 55$, i.e.\ 6 to 11 center points.
\end{itemize}
We did not explore more than six factors. For other run sizes, the CCD is suboptimal. However, for run sizes just outside the range given, the optimal design is similar to a CCD, e.g.\ having one axial point replaced by a center point for small run sizes, or repeating one factorial point for larger run sizes.
Note that these CCDs are optimal only among designs chosen from the candidate set based on the full $3^q$ design, expanded to have points on the surface of the sphere. Nonetheless, we believe this is the first time CCDs have been shown to be optimal among such a large class of designs. The result nicely links the fields of classical and optimal design.
\section{Discussion}\label{sec:disc}
We have extended the compound criterion function of \cite{gilmourtrinca2012} to allow for efficient designs in terms of predictions. We focused on two properties, prediction of responses and prediction of differences in the response. Point and interval estimation were considered for both responses and differences.
We also proposed the use of several graphs for depicting the prediction performances of the designs. We have extended the usual graphs such as VDG, DVDG, FDS and DFDS to take into account interval estimation. We have illustrated the methods with two examples, one for a cuboidal and the other for a spherical experimental region of interest. The illustrations showed that the graphs add relevant information mainly if one is interested in predicting the response.
Along with many other authors, we argue that a design should have several good properties and it is important to compare several designs, under a wide range of properties, in order to choose the most appropriate one for the problem at hand. This is good practice even under a single objective optimization since usually there are many designs that are almost equivalent. Evaluating them for several other properties is of great help for discriminating between them.
The usefulness of compound criteria is that a design can be developed according to the objectives of the research. We have illustrated compound optimum designs by combining only two properties at time but of course many properties can be studied together.
Even though this was the case for our examples, still the resulting compound designs were quite competitive overall. We have compared a compound design with the one obtained by the multiple objective algorithm of \cite{borrotti2016}. The multiple objective design did not consider inference
and thus our compound design showed advantages. We believe that by using compound criteria we can handle many properties of interest more easily than the multiple objective approach. The graphs proposed are helpful to depict detailed pictures of prediction capabilities of the designs. We recommend the use of the proposed variations of VDG and DVDG plots that use the relative volume instead of distance for both point and interval predictions, since these graphs discriminate better between the different designs. All varieties of FDS and DFDS plots are good summaries that will always be useful for making a final choice of design.
\begin{center}
{\large\bf SUPPLEMENTARY MATERIAL}
\end{center}
\noindent
\textbf{SuppMatPrediction.pdf}: a pdf file containing additional graphs for the examples discussed in the paper and a small simulation study to evaluate the performances of the designs in Example 1 with respect to mean and difference response bias predictions.\\
\textbf{codePrediction.rar}: a zipped folder containing R code to obtain designs by optimizing the compound criteria proposed in the article.
\end{document} |
\begin{document}
\title{Distributionally Robust Optimisation \\ in Congestion Control}
\author{
Jakub Mare{\v c}ek$^{1}$\thanks{{\tt [email protected]}}, Robert Shorten$^{2}$, Jia Yuan Yu$^{3}$\\[3mm]
$^{1}$ IBM Research, Ireland \\
$^{2}$ University College Dublin, Ireland \\
$^{3}$ Concordia University, Canada
}
\maketitle
\abstract{
\neww{The effects of real-time provision of travel-time
information on the behaviour of drivers are considered.
The model of Marecek et al.
[Int. J. Control 88(10), 2015]
is extended to consider uncertainty in the response of a driver to an interval
provided per route.
Specifically, it is suggested that one can optimise
over all distributions of a random variable associated with the driver's response
with the first two moments fixed, and for each route, over the sub-intervals
within the minimum and maximum in a certain number of previous realisations of the travel time per the route.
}
}
\section{Introduction}
Congestion on the roads is often due to drivers using them in a
synchronized manner, ``a wrong road at a wrong time''.
Intuitively, the synchronisation is partly due to the reliance on
the same unequivocal information about past traffic conditions,
which the drivers mistake for a reliable forecast of future traffic conditions.
Perhaps, if the information about past traffic conditions were provided
in a different form, the synchronisation could be reduced.
This intuition led to a considerable interest in
advanced traveller information systems and models of dynamics of information provision
\cite{arnott1991does,BENAKIVA1991251,bonsall1992influence,arnott1993structural,emmerink1996information,bottom2000consistent,papageorgiou2007its,marecek2015signaling,marecek2016signaling}.
In this paper, we propose and study novel means of information provision.
With the increasing availability of satellite-positioning traces of individual cars,
it is becoming increasingly clear that there are many approaches to
aggregating the information and providing them to the public,
while it remains unclear what approach is the best.
Following \cite{marecek2015signaling,marecek2016signaling},
we model the relationship of information provision and road use as a partially known non-linear dynamical system.
In practice, our approach relies on a road network operator with
up-to-date knowledge of congestion across the road network,
who broadcasts travel-time information to drivers,
which is chosen so as to alleviate congestion,
based on an estimate of the driver's response function,
e.g., up to the first two moments of some random variables involved.
\neww{In terms of theory, we study non-linear dynamics,
which are not perfectly known.}
This poses a considerable methodological challenge.
We make first steps towards modelling the interactions among the road network operator and the drivers over time
as a stochastic control problem and the related delay-tolerant and risk-averse means of information provision.
In an earlier paper \cite{marecek2015signaling}, we have
studied the communication of a scalar per route at each time, specific to each driver.
In another recent paper \cite{marecek2016signaling},
we have studied the communication of two scalars (an interval) per route (or road segment) at each time,
with the same information broadcast to all drivers.
There, the intervals were based on the minimum and maximum travel time over the segment
within a time window.
In this paper, we propose an optimisation procedure, where one considers sub-intervals of
the interval.
Across all three papers, we show that congestion \neww{can be} reduced by withholding some information,
while ensuring that the information remains consistent with the true past
observations.
Let us consider the travel time over a route as a time series.
Broadcasting the most recent travel time,
an average over a time window,
or any other scalar function over a time window,
may lead to a suboptimal ``cyclical outcome,''
where drivers overwhelmingly pick the supposedly fastest route,
leading to congestion therein, and another route being announced as the fastest, \neww{only to become congested in turn}.
On the other hand, depriving the drivers of any information leads to a suboptimal outcome,
where each \neww{driver} acts more or less \neww{randomly}.
We illustrate our findings on an intentionally simple model.
\begin{table}[t]
\caption{An overview of the related work (top), our suggestion (middle), and suggestions for future work (bottom) within two-parameter route choice formulations and behaviour of the related models.}
\label{tab:int-signals}
\centering
\begin{tabularx}{\textwidth}{ X p{2.6cm} p{5.1cm} p{6.18cm} }
\toprule
Ref. & Name & $\underline{u}^m_t$ & $\overline{u}^m_t$ \\
\midrule
\cite{marecek2015signaling} & $(\delta, \gamma)$ & \small $c_m(n^m_{t-1}) + \nu_t^m - \delta^m /2$ & \small $c_m(n^m_{t-1}) + \nu_t + \delta^m /2$ \\
\cite{marecek2016signaling} & $r$-extreme & \small $\arg \min_{j=t-r,\ldots,t-1} \{ c_m(n_j^m) \}$ & \small $\arg \max_{j=t-r,\ldots,t-1} \{ c_m(n_j^m) \} $ \\
\cite{Epperlein2016} & smoothing & \small $q_1 \underline{u}^m_{t-1} + (1-q_1)c_m(n_{t-1}^m)$ & \small $q_2 \overline{u}_{t-1}^m + (1-q_2)\,\bigl|c_m(n_{t-1}^m)- \underline{u}^m_{t-1} \bigl|$ \\
\cite{Nikolova2014} & mean and STD & \small $\frac{1}{r} \sum_{j=t-r,\ldots,t-1} c_m(n_j^m)$ & \small $\frac{1}{r} \sum_{j=t-r,\ldots,t-1} (c_m(n_j^m) - \underline{u}^m_{t})^2$ \\[3mm]
\midrule
& $r$-supported & \small $\proj_{\underline{u}^m_t} \arg \min_{ (\underline{u}^m_{t}, \overline{u}^m_{t}) \in P(S_t, \Omega)} C(n_t)$ & \small $\proj_{\overline{u}^m_t} \arg \min_{ (\underline{u}^m_{t}, \overline{u}^m_{t}) \in P(S_t, \Omega)} C(n_t)$ \\[3mm]
\midrule
& mean, VaR & \small $\frac{1}{r} \sum_{j=t-r,\ldots,t-1} c_m(n_j^m)$ & \small VaR$_\alpha \coloneqq \inf\{l \in \mathbb{R}: \prob(L>l)\le 1-\alpha\}$ \\
& mean, CVaR & \small $\frac{1}{r} \sum_{j=t-r,\ldots,t-1} c_m(n_j^m)$ & \small CVaR$_\alpha \coloneqq \frac{1}{\alpha}\int_0^{\alpha} \mbox{VaR}_{\gamma} d\gamma $ \\
\bottomrule
\end{tabularx}\\[2mm]
\end{table}
\section{Related work}\label{sec:mod}
Recent studies \cite{marecek2015signaling,marecek2016signaling,Epperlein2016} have focussed on
a dynamic discrete-time model of congestion, where a
finite population of $N$ drivers is confronted with $M$ alternative routes
at every time step.
The time horizon is discretized into discrete periods $t=1,2,\ldots$.
At each time, each driver picks exactly one route, and is hence ``atomic''.
Let $a_t^i$ denote the choice of driver $i$ at time $t$ and $n^m_t = \sum_i 1_{[a_t^i = m]}$ be the
number of drivers choosing route $1 \le m \le M$ at time $t$.
Sometimes, we use $n_t$ to denote the vector of $n^m_t$ for $1 \le m \le M$.
The travel time $c_m(n^m_t)$ of route $m$ at time $t$ is a function of the
number $n^m_t$ of drivers that pick $m$ at time $t$,
$c_m: \mathbb N \to \mathbb R_+$.
The social cost $C(n_t)$ weights the travel times of the routes at
time $t$ with the proportions of drivers taking the routes, \emph{i.e.},
\begin{align}
\label{eqn:socialtravel time}
C(n_t) \triangleq \sum_{m = 1}^{M} \frac{n^m_t}{N} \cdot c_m(n^m_t).
\end{align}
Notice that in the case of two alternatives, $M = 2$, $C(n_t)$ becomes a function of $n^1_t$ only,
with $n^2_t$ being equal to $N - n^1_t$:
\begin{align}
C(n_t) = \frac{n^1_t}{N} \cdot c_1(n^1_t) + \frac{N - n^1_t}{N} \cdot c_2(N - n^1_t).
\end{align}
The social or system optimum at every
time step $t$ is $n^* \in \arg\min_{0 \le n \le N} C(n)$.
Notice that the travel time is, in effect, a time-series,
with a data point per passing driver.
Often, however, one \neww{may} want to aggregate the time series,
for instance in order to communicate travel times succinctly.
Essentially, \cite{marecek2015signaling,marecek2016signaling,Epperlein2016} discuss various means of aggregating the history of travel times $c_m(n^m_{t'})$ for all $1 \le m \le M$ and for all times $t' < t$ in past relative to present $t$.
Every driver $i$ takes route $a_t^i$
based on the history of $s_{t'}, t' \le t$ received up to time $t$.
In keeping with control-theoretic literature,
a mapping of such a history to a route is called a \emph{policy}.
$\Omega$ denotes the set of all possible types of drivers and
$\mu$ a probability measure over the set $\Omega$, which describes the
distribution of the population of drivers into types.
We refer to \cite{marecek2016signaling,Epperlein2016} for the measure-theoretic definitions.
\neww{Sending of the most recent travel time or any other single scalar value per route uniformly to all drivers is not socially optimal \cite{marecek2016signaling}.
One option for addressing this issue is to vary the scalar value sent to each user.}
\cite{marecek2015signaling} studied a scheme, where the network operator sends a distinct $s^i_t \triangleq (y^{m,i}_t, 1 \le m \le M) \in \mathbb R^{M}$ \neww{to each driver $i$ at time $t$}, where
\begin{align}
y^{m,i}_t \triangleq c_m(n^m_{t-1}) + w^{i,m}_t,
\end{align}
and the sequence of random noise vectors $\{ w^{i,m}_t : t=1,2,\ldots\}$ is i.\@i.\@d.\@ such that for all $t$, ${\mathbb E} w^{i,m}_t = 0$,
and $w^{i,m}_t - w^{i,{m'}}_t$ is normally distributed with mean $0$ and
variance $\sigma^2$ for $1 \le m \not = m' \le M$.
\neww{These properties of $w^{i,m}_t$ assure that no driver is being disadvantaged over the long run,
but the absolute value of $w^{i,m}_t$ may vary across drivers $i$ at a particular time $t$.}
Considering the introduction of \neww{such driver-specific randomisation} may not be desirable, \cite{marecek2015signaling} presented a scheme that broadcasts \emph{two} \neww{distict} scalar values \neww{per} route to all drivers, \neww{where the two distinct scalars for a particular route are the same for all the drivers at a particular time}.
\neww{For $M$ routes, one has $s_t \triangleq (\underline{u}^m_t,
\overline{u}^m_t, 1 \le m \le M) \in \mathbb R^{2M}$, where}
\neww{
\begin{align}
\underline{u}^m_t &\triangleq c_m(n^m_{t-1}) + \nu^m_t - \delta^m/2,\\
\overline{u}^m_t &\triangleq c_m(n^m_{t-1}) + \nu^m_t + \delta^m/2,
\quad m \in \{A,B\},
\end{align}
}
where $\nu_t^m$ are i.\@i.\@d.\@ uniform random variables with support:
\neww{
\begin{align*}
\mbox{Supp}(\nu^m_t) &= [-\delta^m/2,\delta^m/2].
\end{align*}
}
\neww{Notice that \cite{marecek2015signaling}} use $\delta$ and $\gamma$
to denote the non-negative constants $\delta^A$ and $\delta^B$
\neww{in the case of $M=2$, and hence use $(\delta,\gamma)$-interval to denote such $s_t$.}
Let $\Omega$ be a finite subset of $[0,1]$ and assume that
each driver $1 \le i \le N$ is of type $\omega \in
\Omega$ and follows the policy $\pi^\omega$:
\begin{align}
a_t^i \triangleq \pi^\omega(s_t) \triangleq \arg\min_{m=1}^{M} \; \omega \underline{u}^m_t + (1-\omega) \overline{u}^m_t.
\label{policylambda}
\end{align}
in response to $s_t$.
Observe that for $\omega = 0$, policy $\pi^0$ models a risk-averse
driver, who makes decisions based solely on $\overline{u}^m_t$.
Similarly, $\pi^1$ and $\pi^{1/2}$ model risk-seeking and risk-neutral drivers, \neww{respectively}.
Under certain assumptions \neww{ bounding the modulus of continuity of functions $c_A, c_B, \ldots$, \emph{cf.} \cite{marecek2015signaling},} one can show that this results in a stable behaviour of the system.
Considering that \neww{\emph{any}} randomisation may \neww{be} undesirable, \cite{marecek2016signaling} suggested broadcasting a deterministically chosen interval for each route.
In one such approach, called $r$-extreme \cite{marecek2016signaling}, one simply broadcasts the maximum and minimum travel time \neww{within} a time window of $r$ most recently observed travel times.
In another variant, called exponential smoothing \cite{Epperlein2016}, one broadcasts a weighted combination of the current travel-time \neww{and past travel times, alongside a weighted combination of the} current variance \neww{of the travel times} and the previously sent information \neww{about the variance}.
Under \neww{some additional} assumptions, one can analyse the resulting stochastic \neww{(delay)} difference equations\neww{:}
Using \neww{results} developed \neww{in the} theory of iterated random functions \cite{IFS}, \cite{marecek2016signaling} show that the $r$-extreme schema \neww{yields ergodic behaviour when the distribution
of types of drivers changes over time in a memory-less fashion}.
\cite{Epperlein2016} extended the result to populations, whose evolution is governed by a Markov chain, \neww{which allows, \emph{e.g.}, for different distributions
at different times of the day, such as at night, during the morning and afternoon peaks, and all other times.}
In Table~\ref{tab:int-signals}, we present an overview of these schemata.
We should like to stress that the above is not a comprehensive overview of related work.
We refer to \cite{arnott1991does,BENAKIVA1991251,bonsall1992influence,arnott1993structural,emmerink1996information}
for pioneering studies in the field as well as to \cite{bottom2000consistent,papageorgiou2007its} for extensive, book-length
overviews of further related work.
\section{Distributionally robust optimisation}
In this paper, we suggested broadcasting a deterministically chosen interval for each route,
where the deterministic choice is based on optimisation over subintervals of the
\neww{interval given by the minimum and maximum over a time window of a finite, fixed length $r$.}
For $1 < r < t$, we define $s_{t} =
(\underline u_{t}^1, \overline u_{t}^1, \underline u_{t}^2, \overline u_{t}^2, \ldots, \underline u_{t}^M, \overline u_{t}^M)$ to be \emph{$r$-supported}, whenever
\begin{align}
\label{eq:supported1}
\min_{j=t-r,\ldots,t} \{ c_m(n^m_j) \} \le \underline u_{t}^m < \overline u_{t}^m \le \max_{j=t-r,\ldots,t} \{ c_m(n^m_j) \}.
\end{align}
Notice that $r$-extreme $s_t$ is a special case of $r$-supported $s_t$.
To study the effects \neww{of} broadcasting $r$-supported $s_t$, we need to formalise the
\neww{model} of the population. Clearly, one can start with:
\begin{assumption}[Full Information]\label{as:fixed}
Let us assume that $\Omega$ is a finite set. Further, let us assume the number of drivers of type $\omega$ at time $t+1$ is $N \mu_{t+1}(\omega)$ and that $N \mu_{t+1}(\omega)$
is known to the network operator at time $t$.
\end{assumption}
Assumption~\ref{as:fixed} is very restrictive. Instead, we may want to assume \neww{that} $\mu_t$ \neww{are} independently identically distributed (i.i.d.) samples of \neww{a} random variable.%
\footnote{\neww{One could go further still and assume time-varying distributions of $\mu_t$, or more general structures, yet. We refer to \cite{Epperlein2016} for an example, but note that such assumptions do not allow for the efficient application of methods of computational optimisation, in general. In this paper, we hence consider the i.i.d. assumption.}}
In the tradition of robust optimisation \cite{Soyster1974}, one could \neww{assume that a support of the random variable is known and} optimise \neww{social cost} over \neww{all possible distributions of the random variable
with the given support}. That approach, however, tends to produce
overly conservative solutions, when it produces any feasible solutions at all.
In the tradition of distributionally robust optimisation \neww{(DRO)} \cite{bertsimas2010models,delage2010distributionally},
one could \neww{assume that a certain number of moments of the random variable are known and} optimise \neww{social cost} over \neww{all possible distributions of the random variable with the given moments}. \neww{We suggest to use DRO with the first} two moments:\\[1mm]
\begin{assumption}[Partial Information]\label{as:pop}
Let us assume that $\Omega$ is a finite set. Let us assume the number of drivers of type $\omega$ at time $t+1$ is $N \mu_{t+1}(\omega)$, but that the distribution of $\mu_{t+1}$ is unknown
at time $t$, except for the first two moments of the distribution of $\mu_{t+1}$, denoted $E, Q$:
\begin{align}
E &=
\begin{pmatrix}
E_1 \\
E_2 \\
\vdots \\
E_{|\Omega|} \\
\end{pmatrix}
= {\mathbb E} \begin{pmatrix}
\mu_{t+1}(1) \\
\vdots \\
\mu_{t+1}(\abs{\Omega}) \\
\end{pmatrix} \\
Q &=
\begin{pmatrix}
Q_{11} & \ldots & Q_{1|\Omega|} \\
\vdots & & \vdots \\
Q_{|\Omega|1} & \ldots & Q_{|\Omega||\Omega|}\\
\end{pmatrix}
=
{\mathbb E} \left[ \begin{pmatrix}
\mu_{t+1}(1) \\
\mu_{t+1}(2) \\
\vdots \\
\mu_{t+1}(|\Omega|) \\
\end{pmatrix}
\begin{pmatrix}
\mu_{t+1}(1) \\
\mu_{t+1}(2) \\
\vdots \\
\mu_{t+1}(|\Omega|) \\
\end{pmatrix}^T
\right]
\end{align}
and let us assume $E, Q$ are known to the network operator at time $t$.
\end{assumption}
Notice that Assumption~\ref{as:pop} is much more reasonable \neww{than Assumption~\ref{as:fixed}.
The authorities can compute an unbiased estimate of} the first two moments
using \neww{readily-available} statistical
estimation techniques \cite{toledo2004calibration,vaze2009calibration}.
\neww{In contrast, ascertaining the actual realisation of the random variable in
real time seems impossible, and
estimating more than two moments of a multi-variate random variable remains a challenge,
as the requisite number of samples grows exponentially with the order of the moment,
which in turn makes the computations prohibitively time consuming.
In short, we believe that Assumption~\ref{as:pop} presents a suitable trade-off between realism
and practicality.
}
Next, one needs to decide on the objective that should be optimised.
Clearly, even a finite-horizon approximation of the accumulated social cost is a challenge.
Beyond that, we can show a yet stronger negative result:\\[2mm]
\begin{proposition}[Undecidability]
\label{undecidable}
Under Assumption~\ref{as:fixed},
there exist $c_A, c_B$, and an initial $s_1$ broadcast, such that it is undecidable whether iterates $s_t \in {\mathbb R}^n$, $n \ge 2$ induced by
policies $\pi^\omega$ responding to intervals broadcast converge to a point
$s_T \in {\mathbb R}^n$ from $s_1$, such that $s_t$ for all $t > T$ is equal to $s_T$.
\end{proposition}
The proof is based on the results of \cite{blondel2001deciding,koiran1994computability}
that given a piecewise affine function $g : {\mathbb R}^2 \to {\mathbb R}^2$ and an initial point $x_0 \in {\mathbb R}^2$,
it is undecidable whether the iterated application $g(g(\cdots g(x_0)))$ reaches a fixed point, eventually, and the fact that we make no assumptions about the functions $c_m$.
\neww{Although Proposition~\ref{undecidable} does not rule out weak convergence guarantees in the measure-theoretic sense under Assumption~\ref{as:fixed}, for instance, some assumptions concerning the functions $c_m$ do simplify the matters considerably.}
\neww{To formulate such an assumption,} observe that the function $g$ corresponds to a composition of the social cost \eqref{eqn:socialtravel time} and the policy \eqref{policylambda}. In particular:
\begin{align}
\underline u^m_{t+1} & = \min \{\underline u^m_t, c_m(n^m_t)\},\notag \\
\overline u^m_{t+1} &= \max \{\overline u^m_t, c_m(n^m_t)\}, \notag
\end{align}
wherein one applies $c_m$ to values of $n^m_t$:
\neww{
\begin{align}
n^m_t &= \sum_i 1_{(a^i_t = A_m)} \\
&= \sum_i \sum_{\omega \in \Omega} 1_{(a^i_t = A_m \mid \textrm{ driver } i \textrm{ is of type }
\omega
)} \mu_t(\omega) \notag \\
&= N \sum_{\omega \in \Omega} 1_{ \bigwedge_{s \neq m} ( \omega
\underline u^m_t + (1-\omega) \overline u^m_t < \omega
\underline u^s_t + (1-\omega) \overline u^s_t )}
\mu_t(\omega)\label{eq:29},
\end{align}
whereby one obtains $\underline u^m_{t+1}, \overline u^m_{t+1}$ as a function of $\underline u^m_{t}, \overline u^m_{t}$.
We refer to the proof of Theorem 1 in \cite{marecek2016signaling} for a detailed discussion of this signal-to-signal mapping
and properties of $\mu_t$.
}
\neww{One may hence obtain a signal-to-signal mapping $g$ of more desirable properties} by restricting oneself to a particular class of $c_m$, and hence to a particular class of social costs \eqref{eqn:socialtravel time}.
\neww{In particular, we restrict ourselves to}:\\[2mm]
\begin{proposition}[$C$ is Difference of Convex]
\label{prop:DCA}
For any functions $c_m$ convex on $[0, 1]$,
there exist solvers for the minimisation of the unconstrained social cost $C$ (cf. Eq.~\ref{eqn:socialtravel time}),
with guaranteed convergence to a stationary point.
\end{proposition}
Using a wealth of results \cite{DC} on the optimisation of DC (``difference of convex'') functions,
we can show:
\begin{proposition}[The Full Information Optimum]
\label{thm:Computable}
Under Assumption~\ref{as:fixed},
a stationary point of:
\neww{
\begin{align}
\label{eq:Computable}
\min_{ s_t = (\underline{u}^m_t, \overline{u}^m_t, 1 \le m \le M) \in P(S_t, \Omega)} C(n_t)
\end{align}
\neww{can be computed up to any fixed precision} in finite time,
where $P \subseteq {\mathbb R}^{2M}$
is the set of $r$-supported signals \eqref{eq:supported1}
and functions $c_m, 1 \le m \le M$ are convex on $[0, 1]$.\\[2mm]
}
\end{proposition}
\begin{proof}
\label{proof3}
Let us introduce an auxiliary indicator variable and a non-negative
continuous variable:
\begin{align*}
x_{t,m}^\omega = & 1_{[\pi^{\omega}(s_t) = m]} = \begin{cases}
\; 1 & \; \text{ if } \omega \text{ selects action } m \text{ at time } t \\
\; 0 & \; \text{ otherwise }
\end{cases} \\
\underline y_{t,i,j}^\omega = &
\begin{cases}
\; - g^\omega_{i,j} & \text{ if } g^\omega_{i,j} < 0 \\
\; 0 & \text{otherwise}
\end{cases} \\
\overline y_{t,i,j}^\omega = &
\begin{cases}
\; g^\omega_{i,j} & \text{ if } g^\omega_{i,j} \ge 0 \\
\; 0 & \text{otherwise}
\end{cases}
\end{align*}
where \neww{$g^\omega_{i,j} \triangleq \omega \underline{u}_t^i + (1-\omega)
\overline{u}_t^i - \omega \underline{u}_t^j - (1-\omega)
\overline{u}_t^j$}. See that $n_t^m = \sum_{\omega \in \Omega}
x_{t,m}^\omega n_{t}(\omega)$.
Sometimes, we use $x_{t}$ to denote a matrix of $x_{t,m}^\omega$ for all $\omega \in \Omega$,
$1 \le m \le M$.
It is easy to show there exist a lifted
polytope $P'$ such that:
\begin{align}
\label{eq:Obj}
\min_{
s_t
\in {\mathbb R}^{2M}}
\sum_{m=1}^M \frac{n^m_t}{N} \cdot c_m(n^m_t), \notag \\ s \in P(S_t,\Omega) \\
= \min_{
s_t
\in {\mathbb R}^{2M},
\underline y_t, \overline y_t \in {\mathbb R}^{M(M-1)|\Omega|} x_t \in \{0, 1
\}^{M|\Omega|} }
\sum_{m=1}^M \frac{n^m_t}{N} \cdot c_m(n^m_t) \notag \\ (s_t, x_t, \underline y_t, \overline
y_t ) \in P'(S_t,\Omega),
\end{align}
The definition of the polytope $P'$ depends on the policies defined
by $\Omega$ and the history of signals $S_t$. Specifically:
\begin{align}
\omega \underline{u}^i_{t} + (1-\omega) \overline{u}^i_{t} -
\omega \underline{u}^j_{t} - (1-\omega) \overline{u}^j_{t}
\leqslant & \overline y_{t,i,j}^\omega & \neww{\forall i, j, t, \omega} \\
\omega \underline{u}^j_{t} + (1-\omega) \overline{u}^j_{t} -
\omega \underline{u}^i_{t} - (1-\omega) \overline{u}^i_{t}
\leqslant & \underline y_{t,i,j}^\omega & \neww{\forall i, j, t, \omega} \\
- Z (1 - x_{t,m}^\omega) \leqslant & \underline y_{t,m,i}^\omega \leqslant Z (1 - x_{t,m}^\omega) & \neww{\forall t, m, \omega} \\
- Z x_{t,m}^\omega \leqslant & \overline y_{t,m,i}^\omega \leqslant Z x_{t,m}^\omega & \neww{\forall t, m, \omega} \\
\overline u_{t}^m \geqslant & \underline u_{t}^m \geqslant \min_{j=t-r,\ldots,t} \{ c_m(n^m_j) \} & \neww{\forall m, r, t} \\
& \overline u_{t}^m \leqslant \max_{j=t-r,\ldots,t} \{ c_m(n^m_j) \} & \neww{\forall m, r, t} \\
\sum_{m = 1}^{M} x_{t,m}^{\omega} =& 1 & \neww{\forall t, \omega} \\
\underline y_\omega, \overline y_\omega \geqslant & 0 & \neww{\forall \omega}
\end{align}
where the $\max, \min$ operators are applied to
\neww{the revealed realisations of the random variable $n^m_j$, and hence
yield constants, rather than bi-level structures.} Further, $Z$ is a sufficiently large constant, \emph{e.g.},
$$
\max_{\substack{m = 1,2,\ldots,M, m'\in\{1,2, \ldots, M \}\setminus
\{m\}\\ \underline{u}^m_{t},\overline{u}^m_{t},\underline{u}^{m'}_{t},\overline{u}^{m'}_{t}}}
\{ |\omega \underline{u}^m_{t} + (1-\omega) \overline{u}^m_{t} -
\omega \underline{u}^{m'}_{t} - (1-\omega) \overline{u}^{m'}_{t}| \}
\notag \leqslant \max_x \{ C(x) \}. $$
The integer component can be solved by branching, whereby the
Lagrangian gives us an unconstrained relaxation of the original
problem. Hence, by Proposition~\ref{prop:DCA}, the \neww{stationary point can be computed}
up to any precision in finite time.
\end{proof}
\begin{proposition}[The Distributionally Robust Optimum]
Under Assumption~\ref{as:pop},
let us consider functions $c_m, 1 \le m \le M$ convex on $[0, 1]$ and
\neww{
\begin{align}
\label{eq:robust}
\min_{ (\underline{u}^m_t, \overline{u}^m_t, 1 \le m \le M) \in P(S_t, \Omega)} \sup_{D\sim (E, Q)}
C\left( {\mathbb E}_D n_t \right)
\end{align}
}
where $D\sim (E, Q)$ in the inner optimisation problem suggests optimisation over the infinitely many distribution functions
of $\Omega$ with the first two moments of Assumption~\ref{as:pop},
and $P \subseteq {\mathbb R}^{2M}$
is the set of $r$-supported signals \eqref{eq:supported1}.
A stationary point of the distributionally robust optimisation problem \eqref{eq:robust}
\neww{can be computed up} to any \neww{fixed} precision in finite time.\\[2mm]
\end{proposition}
\begin{proof}
\label{proof4}
Notice that we can reformulate the problem \eqref{eq:robust} as an
integer semidefinite program by the introduction of a new decision
variable $W$ in dimension $|\Omega| \times |\Omega|$, vector $w_{\omega}
\in {\mathbb R}^{|\Omega|}$, and scalar $q_{\omega}$, in addition to the variables
introduced in the proof of Proposition \ref{thm:Computable}:
\begin{align}
\min_{ (\underline{u}^m_t, \overline{u}^m_t, 1 \le m \le M) \in P(S_t, \Omega)} \sup_{D \sim (E, Q)} &
C\left( {\mathbb E}_D n_t \right) \\
= \min_{ \substack{
s_t
\in {\mathbb R}^{2M} \\
x_t \in \{0, 1
\}^{M|\Omega|}
} }
\sup_{ D \sim (E, Q)} &
\sum_{m=1}^M \left( \sum_{\omega \in \Omega} \left( x_{t,m}^\omega {\mathbb E}_D \mu_t(\omega) \right) \cdot c_m \left( \sum_{\omega \in \Omega} x_{t,m}^\omega N {\mathbb E}_D \mu_t(\omega) \right) \right) \\
\textrm{s.t. }
& {\mathbb E}_D \mu_t = E \notag \\
& {\mathbb E}_D \left[ \mu_t \mu_t^T \right] = Q \notag \\
& s \in P'(S_t, \Omega) \notag \\
= \min_{ \substack{
s_t
\in {\mathbb R}^{2M}, \\
\underline y_t, \overline y_t \in {\mathbb R}^{M(M-1)|\Omega|} \\
x_t \in \{0, 1 \}^{M|\Omega|} \\
W_{\omega} \in {\mathbb R}^{|\Omega| \times |\Omega|}\\ w_{\omega} \in {\mathbb R}^{|\Omega|},
q_{\omega} \in {\mathbb R} } }
&
\sum_{m=1}^M \frac{\sum_{\omega \in \Omega} \left( x_{t,m}^\omega e_\omega w_\omega \right)}{N} \cdot c_m \left( \sum_{\omega \in \Omega} \left( x_{t,m}^\omega e_\omega w_\omega \right) \right) \\
\textrm{s.t. }
& \sum_{\omega \in \Omega} \begin{pmatrix}
W_\omega & w_\omega \\
w_\omega^T & q_\omega
\end{pmatrix} = \begin{pmatrix}
Q & E \\
E^T & 1
\end{pmatrix} \notag \\
& \begin{pmatrix}
W_\omega & w_\omega \\
w_\omega^T & q_\omega
\end{pmatrix} \succeq 0 \quad \forall \omega \in \Omega \notag \\
& (s_t, x_t, \underline y_t, \overline y_t ) \in
P'(S_t,\Omega),\notag
\end{align}
where $e_{\omega}$ are vectors with only the $\omega^\text{th}$
entry of $1$ and others $0$. The first equality
follows from the definition of $C$ \eqref{eqn:socialtravel time}.
The second equality follows from the work of Bertsimas et al. \cite{bertsimas2010models}
on minimax problems, and specifically from Theorem 2.1 therein.
Although Theorem 2.1 does not consider integer variables explicitly,
it is easy to see that for each of the $2^{M|\Omega|}$ possible integer values of $x_t$, the equality holds, and hence it holds generically.
See also the lucid treatment of Mishra et al. \cite{Mishra2012}.
Computationally, one can apply branching to the integer variables $x_t$, as in the proof of Proposition \ref{thm:Computable}, which leaves one with a semidefinite
program with a non-convex objective.
There, one can formulate the
augmented Lagrangian, which is non-convex, but well-studied \cite{Stingl2009,kovcvara2003pennon,lanckriet2009convergence,yen2012convergence}.
For instance, it can be reformulated to a ``difference of convex'' form and Proposition~\ref{prop:DCA} can be applied.
Let us multiply $C(\cdot)$ by $N$ to study the $2 M$ terms one by one.
We want to show that the rest is a sum of convex and concave terms.
Let us see that for $i = 1, 2, \ldots, M - 1$,
we have the term $n_t^i c_i(n_t^i)$,
which is convex in $n_t^i$, considering that for convex and non-decreasing $g$ and convex $f$, we know $g(f(x))$ is convex.
For $i = M$, we have the terms
$c_i( 1-\sum_{i = 1}^{M-1} n_t^i)$
and $M - 1$ terms from
$- (\sum_{i = 1}^{M-1} n_t^i) c_i( 1-\sum_{i = 1}^{M-1} n_t^i)$.
Considering that convexity is preserved by affine substitutions of
the argument, the former term is convex for the affine subtraction and convex
$c_i$.
Considering the additive inverse of a
convex function is a concave function, we see $ - n_t^i n_t^M(\cdot)$ is
concave.
The proposition
follows from Proposition~\ref{prop:DCA}.
\end{proof}
Alternatively, one may consider
polynomial functions $c_m$, where the minimum of the social cost $C$ can be \neww{computed} up
to any \neww{fixed} precision in finite time by solving a number of instances of semidefinite
programming (SDP).
\section{A computational illustration}
For optimisation problems such as \eqref{eq:Computable} and \eqref{eq:robust}, there are solvers based on sequential convex
programming with known rates of convergence
\cite{lanckriet2009convergence,yen2012convergence}.
In our computational experiments, we have extended a
sequential convex programming solver of Stingl et
al. \cite{Stingl2009}, which handles polynomial semidefinite programming of \eqref{eq:robust},
to handle mixed-integer polynomial semidefinite programming.
Specifically, Stingl et al. \neww{replace} nonlinear objective functions
by block-separable convex models,
following the approach of Ben-Tal and Zibulevsky \cite{ben1997penalty} and Ko{\v
c}vara and Stingl \cite{kovcvara2003pennon}.
In our experiments, we have considered the same set-up as in \cite{marecek2016signaling},
where $M=2$ and
two Bureau of Public Roads (BPR) functions are used for the
costs, as presented in Figure~\ref{fig:ushaped5settings}.
The population is given by
$\Omega = \{ 0, 0.5, 1, \textrm{Uniform}(0, 1), \textrm{Uniform}(0, 1) \}$,
the initial signal is $s_1 = (0.5, 1, 0.6, 0.9)$, and
$\kappa = 0.15$,
$\mu_t(\omega) \sim \textrm{Uniform}(1/5 - \kappa, 1/5 + \kappa) \; \forall \omega \in \Omega, t > 1$,
and $N=30$.
\neww{
These settings have been chosen both for the simplicity of reproduction as well as to allow for comparison
with plots presented in \cite{marecek2015signaling,marecek2016signaling}.
}
\neww{
Figure~\ref{fig:ushaped5simulations2} illustrates the cost $\{C(n_t)\}$ over time $1 \le t \le 20$,
for three lengths $r$ of the look-back, $r = 1$ (top), $r = 3$ (middle), $r = 5$ (bottom),
with error bars at one standard deviation capturing the variability over the sample paths.
It seems clear that the $r$-supported scheme (in dark blue, Eq. \ref{eq:robust}) is only marginally worse than
the full-information optimum (in red, Eq. \ref{eq:Computable}), which is ``prescient'' and hence impossible to operate in the real world.
Also, it seems clear that for low values of $r$, there is not enough data to estimate the second moments,
and hence the use of the first moment (in green) behaves similarly to the use of the first two moments (in dark blue).
Both compared to the use of the first moment and to the previously proposed $r$-extreme scheme (in light blue),
the $r$-supported scheme yields costs with less prominent extremes,
even after averaging over the sample paths.
}
\neww{
Further, Figure~\ref{fig:ushaped5simulations1} illustrates the process $\{C(n_t)\}$ averaged over $1 \le t \le 20$ for varying $r$, again with error bars at one standard deviation.
It shows that employing
$r$-supported scheme (in dark blue, Eq. \ref{eq:robust})
allows for a reduction of the social cost,
when compared to $r$-extreme signalling (in light blue),
across a range
of the length $r$ of the look-back interval.
Again, it seems clear that the $r$-supported scheme (in dark blue) is only marginally worse than
the full-information optimum (in red, Eq. \ref{eq:Computable}).
}
\begin{figure}
\caption{ A trivial example with $M=2$. Left: Cost functions $c_1(x)
\triangleq 2 (1 + 3.6 x^4)$ and $c_2(y) \triangleq 5 (1 + 0.8 y^2)$.
Right: The corresponding social cost.
}
\label{fig:ushaped5settings}
\end{figure}
\begin{figure}
\caption{ $r$-supported scheme (in dark blue) compared to the
``prescient'' full information optimum (in red), the use of the first moment (in green), and the previously proposed $r$-extreme scheme (in light blue):
The process $\{C(n_t)\}$ over time $1 \le t \le 20$, for $r = 1$ (top), $r = 3$ (middle), and $r = 5$ (bottom).}
\label{fig:ushaped5simulations2}
\end{figure}
\begin{figure}
\caption{ $r$-supported scheme (in dark blue) compared to the
``prescient'' full information optimum (in red), the use of the first moment (in green), and the previously proposed $r$-extreme scheme (in light blue):
The process $\{C(n_t)\}$ averaged over $1 \le t \le 20$, for varying $r$.}
\label{fig:ushaped5simulations1}
\end{figure}
Finally, we note the run-time:
the stationary point \eqref{eq:Computable} can be \neww{computed up} to \neww{precision} $10^{-6}$ in about 15 seconds on a basic laptop with Intel i5-2520M,
although the run-time does increase with the number of routes.
\neww{This is much more than the run-time of the previously proposed $r$-extreme scheme. An efficient implementation of the $r$-supported scheme remains a major challenge for future work.}
\section{Conclusions}
In conclusion, \neww{there are multiple ways of introducing} ``uncertainty'' into \neww{the behaviour of the road user in terms of the route choice.}
Previously, the addition of zero-mean noise with a positive variance $\sigma$ \cite{marecek2015signaling}, broadcasting intervals such as $(\delta,\gamma)$ intervals \cite{marecek2015signaling} and
$r$-extreme intervals (minima and maxima over a time window of size $r$) \cite{marecek2016signaling}, and intervals based on exponential smoothing \cite{Epperlein2016},
have been shown to result in the distribution of drivers over the road network converging over time,
under a variety of assumptions about the evolution of the population over time.
This paper studied the optimisation of the social cost over sub-intervals within the minima
and maxima over a time window of size $r$, under a variety of assumptions.
This paper is among the first applications of distributionally robust optimisation (DRO)
in transportation research,
while other recent work considered its use in stochastic traffic assignment \cite{Ahipasaoglu2015},
where it presents a tractable alternative to multinomial probit \cite{Mishra2012},
and in traffic-light setting \cite{Liu2015536}.
We envision there will be a wide variety of further studies,
once the power of DRO is fully appreciated in the community.
This work opens a number of questions in cognitive science, multi-agent systems,
artificial intelligence, and urban economics.
How do humans react to intervals, actually?
How to invest in transportation infrastructure, knowing that information provision can be co-designed to suit the infrastructure?
Future technical work may include the study of variants of the proposed scheme, such as
broadcasting $s_t$ such that
\begin{align}
\avg_{j=t-r,\ldots,t} \{ c_m(n^m_j) \} \geqslant \underline u_{t}^m &\geqslant \min_{j=t-r,\ldots,t} \{ c_m(n^m_j) \},\\
\avg_{j=t-r,\ldots,t} \{ c_m(n^m_j) \} \leqslant \overline u_{t}^m &\leqslant \max_{j=t-r,\ldots,t} \{ c_m(n^m_j) \},
\end{align}
where $\avg$ denotes the average.
One could also employ risk measures such as value at risk (VaR) and conditional value at risk (CVaR)
for a given coefficient $\alpha$ and distribution function $L$ with support $\{ c_m(n_t^m) \}_t$,
as suggested in Table~\ref{tab:int-signals}.
Further studies of (weak) convergence properties \cite{marecek2016signaling,Epperlein2016}, including the rates of convergence,
and further developments of the population dynamics \cite{Epperlein2016} would also be most interesting.
Beyond transportation, one could plausibly employ similar techniques in related resource-sharing problems (e.g., ad keyword auctions,
dynamic pricing in power systems, announcements of emergency evacuation routes)
in order to improve the variants of social costs therein.
\end{document} |
\begin{document}
\title[operator multipliers]
{Bilinear operator multipliers into the trace class}
\author[C. Le Merdy]{Christian Le Merdy}
\email{[email protected]}
\address{Laboratoire de Math\'ematiques de Besan\c con, UMR 6623,
CNRS, Universit\'e Bourgogne Franche-Comt\'e,
25030 Besan\c{c}on Cedex, France}
\author[I. Todorov]{Ivan G. Todorov}
\email{[email protected]}
\address{Mathematical Sciences Research Center, Queen's University Belfast, Belfast BT7 1NN, United Kingdom}
\author[L. Turowska]{Lyudmila Turowska}
\email{[email protected]}
\address{Department of Mathematical Sciences, Chalmers University
of Technology and the University of Gothenburg, Gothenburg SE-412 96, Sweden}
\date{\today}
\maketitle
\begin{abstract}
Given Hilbert spaces $H_1,H_2,H_3$,
we consider bilinear maps defined on the cartesian product $S^2(H_2,H_3)\times S^2(H_1,H_2)$
of spaces of Hilbert-Schmidt operators and valued in either the space
$B(H_1,H_3)$ of bounded operators, or in
the space $S^1(H_1,H_3)$ of trace class operators. We introduce modular properties
of such maps with respect to the commutants of von Neumann algebras $M_i\subset B(H_i)$, $i=1,2,3$,
as well as an appropriate notion of complete boundedness for such maps. We characterize
completely bounded module maps $u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to B(H_1,H_3)$
by the membership of a natural symbol of $u$ to the
von Neumann algebra tensor product $M_1\overline{\otimes} M_2^{op}\overline{\otimes}
M_3$. In the case when $M_2$ is injective, we
characterize completely bounded module maps $u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to S^1(H_1,H_3)$
by a weak factorization property, which extends to the bilinear setting a famous
description of bimodule linear mappings
going back to Haagerup, Effros-Kishimoto, Smith and Blecher-Smith. We make crucial
use of a theorem of Sinclair-Smith
on completely bounded bilinear
maps valued in an injective von Neumann
algebra, and provide a new proof of it, based on Hilbert $C^*$-modules.
\end{abstract}
\vskip 1cm
\noindent
{\it 2000 Mathematics Subject Classification:} 46L07, 46B28, 47D25, 46L08.
\vskip 1cm
\section{Introduction}\label{1Intro} Factorization properties of completely bounded
maps have played a prominent role in the development of operator spaces \cite{BLM, ER, P2}
and in their
applications to Hilbertian operator theory, in particular for the study of
special classes of operators: Schur multipliers, Fourier multipliers on either
commutative or non commutative groups,
module maps, decomposable maps, etc. The main purpose of this paper is to establish new
such factorization properties for some classes of bilinear maps
defined on the cartesian product $S^2(H_2,H_3)\times S^2(H_1,H_2)$
of two spaces of Hilbert-Schmidt operators and valued in their
``product space", namely
the space $S^1(H_1,H_3)$ of trace class operators. This line
of investigation is motivated by the recent characterization of
bounded bilinear Schur multipliers $S^2\times S^2\to
S^1$ proved in \cite{CLPST, CLS}, by various advances on multidimensional
operator multipliers, see \cite{KS, JTT}, and by new developments
on multiple operator integrals,
see e.g. \cite{AP} and the references therein.
Let $H,K$ be Hilbert spaces and let $M,N$ be von Neumann
algebras acting on $H$ and $K$, respectively.
Let $CB_{(N',M')}(S^1(H,K))$ denote the
Banach space of all
$(N',M')$-bimodule completely bounded maps on
$S^1(H,K)$, equipped with the completely bounded norm $\cbnorm{\, \cdotp}$.
This space is characterized by the following factorization property.
\begin{theorem}\label{Haag}
A bounded map $u\colon S^1(H,K)\to S^1(H,K)$
belongs to $CB_{(N',M')}(S^1(H,K))$ and $\cbnorm{u}\leq 1$ if and only
if there exist an index set $I$, a family $(a_i)_{i\in I}$
of elements of $M$ belonging to the row space
$R_I^w(M)$ and a family $(b_i)_{i\in I}$
of elements of $N$ belonging to the column space
$C_I^w(N)$ such that
$$
u(z)= \sum_{i\in I} b_i z a_i,\qquad
z\in S^1(H,K),
$$
and $\norm{(a_i)_i}_{R_I^w}
\norm{(b_i)_i}_{C_I^w} \leq 1$.
\end{theorem}
We refer to Section \ref{5SS} for the precise definitions of the spaces $R_I^w(M)$
and $C_I^w(N)$. The above theorem is a reformulation of \cite[Theorem 2.2]{BS},
a fundamental factorization result going
back to \cite{EK, H} (see also \cite{Sm}). Indeed let $B(K,H)$
(resp. $S^\infty(K,H)$) denote the space of all bounded operators
(resp. all compact operators) from $K$ into $H$.
Then by standard operator space duality,
the adjoint mapping $u\mapsto u^*$ induces
an isometric isomorphism between
$CB_{(N',M')}(S^1(H,K))$ and the space
$CB_{(M',N')}(S^\infty(K,H), B(K,H))$ of all
$(M',N')$-bimodule completely bounded maps from $S^\infty(K,H)$
into $B(K,H)$. Consequently the description of such maps
provided by \cite[Theorem 2.2]{BS} yields Theorem \ref{Haag}.
Using the so-called
weak$^*$ Haagerup tensor product $\stackrel{w^*h}{\otimes}$
introduced in \cite{BS}, an equivalent formulation of Theorem \ref{Haag}
is that we have a natural isometric $w^*$-homeomorphic identification
\begin{equation}\label{Haag+}
M\stackrel{w^*h}{\otimes} N\,\simeq \,
CB_{(N',M')}(S^1(H,K)).
\end{equation}
In this paper we consider three Hilbert spaces $H_1,H_2,H_3$
as well as von Neumann algebras $M_1,M_2,M_3$ acting on
them. We study
bilinear $(M'_3,M'_2,M'_1)$-module maps
$$
u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\longrightarrow
S^1(H_1,H_3),
$$
in the sense that $u(Ty,SxR)=Tu(yS,x)R$
for any $x\in S^2(H_1,H_2)$, $y\in S^2(H_2,H_3)$,
$R\in M'_1$, $S\in M'_2$ and $T\in M'_3$.
In the case when $H_i=L^2(\Omega_i)$ for
some measure spaces $\Omega_i$, $i=1,2,3$,
and $M_i=L^\infty(\Omega_i)\subset B(L^2(\Omega_i))$ in the usual way, bilinear
$(M'_3,M'_2,M'_1)$-module maps coincide with the bilinear Schur multipliers
discussed in \cite{JTT, CLS}.
On the projective tensor product
$S^2(H_2,H_3)\widehat{\otimes}S^2(H_1,H_2)$,
we introduce a natural operator space structure,
denoted by $\Gamma(H_1,H_2,H_3)$, see (\ref{2OS-Gamma}).
Our main result, Theorem \ref{6Factorization}, is a characterization,
in the case when $M_2$ is injective, of completely bounded
$(M'_3,M'_2,M'_1)$-module maps $u$ as above
by a weak factorization property, which extends Theorem \ref{Haag}
(see Remark \ref{6Recover}).
This characterization is already new
in the non module case (that is, when
$M_i=B(H_i)$ for $i=1,2,3$).
The proof of this result has two steps.
First we establish an isometric and $w^*$-homeomorphic
identification
\begin{equation}\label{1Id}
M_2^{op}\overline{\otimes}\bigl(
M_1\stackrel{w^*h}{\otimes}M_3\bigr)\,\simeq \,
CB_{(M'_3,M'_2,M'_1)}\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)
\end{equation}
which extends (\ref{Haag+}), see Theorem \ref{3OM-S1}.
Second we make use of a remarkable factorization result
of Sinclair-Smith \cite{SS} on completely bounded bilinear
maps valued in an injective von Neumann
algebra (see Theorem \ref{4SS} for the precise statement), as well as
operator space results, to derive
Theorem \ref{6Factorization} from (\ref{1Id}).
The Sinclair-Smith theorem, which plays a key role in this
paper, was proved in \cite[Theorem 4.4]{SS} using tensor product computations, the
Effros-Lance characterization of semidiscrete von Neumann algebras \cite{EL}
and Connes's fundamental result (completed in \cite{W}) that any injective von Neumann
algebra is semidiscrete.
In Section \ref{5SS} below, we give a new, much shorter proof of Theorem \ref{4SS}
based on Hilbert $C^*$-modules.
The paper also contains a thorough study of completely bounded
bilinear $(M'_3,M'_2,M'_1)$-module maps
$$
u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\longrightarrow
B(H_1,H_3).
$$
In analogy with (\ref{1Id}) we show that the space of such maps can be identified
with the von Neumann algebra tensor product $M_1\overline{\otimes} M_2^{op}\overline{\otimes}
M_3$, see Corollary \ref{3Mod-B}.
\section{Operator space and duality preliminaries}\label{20S}
We start with some general principles and conventions
which will be used throughout this paper.
Let $E,F$ and $G$ be Banach spaces. We let $E\otimes F$
denote the algebraic tensor product of $E$ and $F$.
We let $B(E,G)$ denote the Banach space
of all bounded operators from $E$ into $G$. We let $B_2(F\times E,G)$
denote the Banach space of all bounded bilinear operators from
$F\times E$ into $G$.
Let $F\widehat{\otimes} E$ be the projective tensor product of $F$ and $E$.
To any $u\in B_2(F\times E,G)$, one can associate a unique
$\widetilde{u}\colon F\otimes E\to G$ satisfying
$$
\widetilde{u}(y\otimes x)=u(y,x),
\qquad x\in E,\ y\in F.
$$
Then $\widetilde{u}$ extends to a bounded operator (still denoted by)
$\widetilde{u}\colon F\widehat{\otimes} E\to G$ and we have equality
$\norm{\widetilde{u}}=\norm{u}$. Then the mapping $u\mapsto \widetilde{u}$
yields an isometric identification
\begin{equation}\label{1Duality1}
B_2(F\times E,G) \,\simeq\, B(F\widehat{\otimes} E,G).
\end{equation}
Consider the case $G=\ensuremath{\mathbb{C}}$. Then (\ref{1Duality1})
provides an isometric identification
$B_2(F\times E,\ensuremath{\mathbb{C}})
\,\simeq\, (F\widehat{\otimes} E)^*$.
Now to any bounded bilinear form
$u\colon F\times E\to \ensuremath{\mathbb{C}}\,$, one can associate two bounded maps
$$
u'\colon E\longrightarrow F^*
\qquad\hbox{and}\qquad
u''\colon F\longrightarrow E^*
$$
defined by $\langle u'(x),y\rangle = u(y,x) = \langle u''(y),x\rangle$
for any $x\in E$ and $y\in F$. Moreover the norms of $u'$ and
$u''$ are equal to the norm of $u$. Hence
the mappings $u\mapsto u'$ and $u\mapsto u''$ yield
isometric identifications
\begin{equation}\label{1Duality3}
(F\widehat{\otimes} E)^*
\,\simeq\, B(E,F^*)\,\simeq\,B(F,E^*).
\end{equation}
We refer to \cite[Chap. 8, Theorem 1 and Corollary 2]{DU}
for these classical facts.
We assume that the reader is familiar with the basics
of Operator Space Theory and completely bounded maps,
for which we refer to \cite{ER, P2} and \cite[Chap. 1]{BLM}.
However we need to review a few
important definitions and fundamental results
which will be used at length in this paper;
the remainder of this section is devoted to this task.
We will make crucial use of the dual operator
space $E^*$ of an operator space
$E$ as well as of the operator space $CB(E,F)$ of completely bounded
maps from $E$ into another operator space $F$ (see e.g. \cite[Section 3.2]{ER}).
Whenever $v\colon E\to F$ is a completely bounded map,
its completely bounded norm will be denoted by
$\cbnorm{v}$.
Let $E,F$ be operator spaces. We let $F\stackrel{\frown}{\otimes} E$
denote the operator space projective tensor product of
$F$ and $E$ (here we adopt the notation from \cite[1.5.11]{BLM}).
We will often use the fact that this tensor product is
commutative
and associative.
The identifications
(\ref{1Duality3}) have operator space analogs. Namely let
$u\colon F\times E\to\ensuremath{\mathbb{C}}\,$ be a bounded bilinear form. Then
$\widetilde{u}$ extends to a functional on $F\stackrel{\frown}{\otimes} E$
if and only if $u'\colon E\to F^*\,$ is completely bounded, if and
only if $u''\colon F\to E^*$ is completely bounded. In this case
$\cbnorm{u'}=\cbnorm{u''}=\norm{\widetilde{u}}_{(F\stackrel{\frown}{\otimes} E)^*}$.
Thus (\ref{1Duality3}) restricts to isometric identifications
\begin{equation}\label{1Duality4}
(F\stackrel{\frown}{\otimes}E)^*
\,\simeq\, CB(E,F^*)\,\simeq\,CB(F,E^*).
\end{equation}
It turns out that the latter are actually
completely isometric identifications
(see e.g. \cite[Section 7.1]{ER} or \cite[(1.51)]{BLM}).
Let $H,K$ be Hilbert spaces. We let $\overline{K}$ denote the complex conjugate
of $K$.
For any $\xi\in K$, the notation $\overline{\xi}$ stands for
$\xi$ regarded as an element of $\overline{K}$.
We recall the canonical identification $\overline{K}=K^*$. Thus
for any $\xi\in K$ and any $\eta\in H$, $\overline{\xi}\otimes \eta$ may be regarded
as the rank one operator $K\to H$ taking any $\zeta\in K$ to $\langle \zeta,\xi\rangle\eta$.
With this convention, the algebraic tensor product $\overline{K}\otimes H$
is identified with the space of all bounded finite rank operators from $K$
into $H$.
Let $S^1(K,H)$ be the space of trace class operators $v\colon K\to H$, equipped with its
usual norm $\norm{v}_1=tr(\vert v\vert)$. Then $\overline{K}\otimes H$ is a dense subspace
of $S^1(K,H)$ and $\norm{\,\cdotp}_1$ coincides with the Banach space projective norm on
$\overline{K}\otimes H$. Hence we have an isometric identification
\begin{equation}\label{1Proj}
S^1(K,H)\,\simeq\, \overline{K}\widehat{\otimes} H.
\end{equation}
Let
$S^2(K,H)$ be the space of Hilbert-Schmidt operators $v\colon K\to H$, equipped with its
usual Hilbertian norm $\norm{v}_2=\bigl(tr(v^*v)\bigr)^\frac12$.
Then $\overline{K}\otimes H$ is a dense subspace
of $S^2(K,H)$ and $\norm{\,\cdotp}_2$ coincides with the Hilbertian tensor norm on
$\overline{K}\otimes H$. Hence we have an isometric identification
\begin{equation}\label{1HS}
S^2(K,H)\,\simeq\, \overline{K}\stackrel{2}{\otimes} H,
\end{equation}
where the right hand side denotes the Hilbertian tensor product of
$\overline{K}$ and $H$.
Let $S^\infty(H,K)$ denote the space of all compact operators from
$H$ into $K$, equipped with its usual operator space structure. We recall that
through trace duality, we have isometric identifications
\begin{equation}\label{1S11}
S^\infty(H,K)^*
\,\simeq\,
S^1(K,H)
\qquad\hbox{and}\qquad
S^1(K,H)^*
\,\simeq\,
B(H,K).
\end{equation}
Throughout we assume that $S^1(K,H)$ is equipped with its canonical
operator space structure, so that (\ref{1S11}) holds completely isometrically
(see e.g. \cite[Theorem 3.2.3]{ER}).
Let $E,G$ be Banach spaces and let $j\colon E^*\to G^*$ be a
$w^*$-continuous isometry. Then its range $j(E^*)$ is
$w^*$-closed, hence $j(E^*)$ is a dual space.
Further $j$ induces a $w^*$-$w^*$-homeomorphism
between $E^*$ and $j(E^*)$ (see e.g. \cite[A.2.5]{BLM}).
Thus $j$ allows to identify $E^*$ and $j(E^*)$ as dual
Banach spaces.
In this case, we will say that $j$ induces a $w^*$-continuous
isometric identification between $E^*$ and $j(E^*)$. If
$E,G$ are operator spaces and $j$ is a complete isometry,
then $j(E^*)$ is a dual operator space and we will call $j$ a
$w^*$-continuous completely
isometric identification between $E^*$ and $j(E^*)$.
Let $E,F$ be operator spaces
and consider $w^*$-continuous completely isometric embeddings
\begin{equation}\label{1Rep}
E^*\subset B(H)
\qquad\hbox{and}\qquad
F^*\subset B(K),
\end{equation}
for some Hilbert spaces $H,K$ (see e.g. \cite[Prop. 3.2.4]{ER}).
The normal spatial tensor product of the dual operator
spaces $F^*$ and $E^*$ is defined as
the $w^*$-closure of $F^*\otimes E^*$ into the von Neumann algebra
$B(K)\overline{\otimes} B(H)$ and is denoted by
$$
F^*\overline{\otimes} E^*.
$$
This is a dual operator space.
It turns out that its definition
does not depend on the specific embeddings
(\ref{1Rep}), see e.g. \cite[p. 135]{ER}.
We note for further use that the natural embedding
$B(K)\otimes B(H)\subset B(K\stackrel{2}{\otimes} H)$
extends to a $w^*$-continuous
completely isometric identification
\begin{equation}\label{1Normal}
B(K)\overline{\otimes} B(H)
\,\simeq\,
B(K\stackrel{2}{\otimes} H).
\end{equation}
To deal with normal spatial tensor products, it is convenient to
use the so-called slice maps.
Take any $\lambda\in S^1(K)$ and consider it as
a $w^*$-continuous functional $\lambda\colon B(K)\to\ensuremath{\mathbb{C}}\,$.
Then the operator $\lambda\otimes I_{B(H)}$
extends to a (necessarily unique) $w^*$-continuous bounded map
$$
\ell_\lambda\colon B(K)\overline{\otimes} B(H)\longrightarrow B(H).
$$
Likewise, any $\mu\in S^1(H)$ can be considered as
a $w^*$-continuous functional $\mu\colon B(H)\to\ensuremath{\mathbb{C}}\,$ and
$I_{B(K)}\otimes \mu$
extends to a $w^*$-continuous bounded map
$$
r_\mu\colon B(K)\overline{\otimes} B(H)\longrightarrow B(K).
$$
Then we have the following properties (for which we refer to either
\cite[Lemma 7.2.2]{ER} and its proof, or \cite[1.5.2]{BLM}).
\begin{lemma}\label{1Slice}
Let $z\in B(K)\overline{\otimes} B(H)$. The linear mappings
$$
z'\colon S^1(H)\longrightarrow B(K)
\qquad\hbox{and}\qquad
z''\colon S^1(K)\longrightarrow B(H)
$$
defined by $z'(\mu)=r_\mu(z)$ and
$z''(\lambda)=\ell_\lambda(z)$
are completely bounded.
Further the mappings $z\mapsto z'$ and $z\mapsto z''$ are
$w^*$-continuous completely isometric isomorphisms
from $B(K)\overline{\otimes} B(H)$ onto $CB(S^1(H),B(K))$
and $CB(S^1(K),B(H))$, respectively.
\end{lemma}
According to (\ref{1Duality4}), an equivalent formulation of the above lemma is that
\begin{equation}\label{1S1S1}
\bigl(S^1(K)\stackrel{\frown}{\otimes} S^1(H)\bigr)^*\,\simeq\,
B(K)\overline{\otimes} B(H)
\end{equation}
$w^*$-continuously and completely isometrically.
Recall (\ref{1Rep}).
The space of all $z\in B(K)\overline{\otimes} B(H)$ such that
$z'$ is valued in $F^*$ and $z''$ is valued in $E^*$ is usually called
the normal Fubini tensor product of $F^*$ and $E^*$. This subspace is $w^*$-continuously completely isometric
to $CB(E,F^*)$ (equivalently to $CB(F,E^*)$, by (\ref{1Duality4})).
Indeed we may regard $CB(E,F^*)$ as the subspace of $CB(S^1(H),B(K))$ of
all $w\colon S^1(H)\to B(K)$ such that $w$ is valued in $F^*$ and $w$ vanishes on $E^{*}_{\perp}$.
Then it is not hard to see that $z$ belongs to the normal Fubini tensor product of $F^*$ and $E^*$
if and only if $z'$ belongs to $CB(E,F^*)$.
We refer to \cite[Theorem 7.2.3]{ER} for these facts.
It is easy to check
that the normal Fubini tensor product of $F^*$ and $E^*$
contains $F^*\overline{\otimes} E^*$. This yields a $w^*$-continuous
completely isometric embedding
$$
F^*\overline{\otimes} E^*\,\subset\, CB(E,F^*).
$$
However this inclusion may be strict. The next lemma provides a list of cases
when the inclusion is an equality. We refer the reader to \cite[Sections 7.2 and 11.2]{ER}
for the proofs.
Whenever $M$ is a von Neumann algebra, we let
$M_*$ denote its (unique) predual. We equip it with its
natural operator space structure, so that $M=(M_*)^*$
completely isometrically (see e.g. \cite[Section 2.5]{P2}
or \cite[Lemma 1.4.6]{BLM}).
\begin{lemma}\label{1Injective1}
\
\begin{itemize}
\item [(a)] For any von Neumann algebras $M,N$, we have
$$
N\overline{\otimes}M\,\simeq\, CB(M_*,N).
$$
\item [(b)] For any injective von Neumann algebra $M$ and for any
operator space $E$, we have
$$
M\overline{\otimes} E^*\,\simeq\, CB(E,M).
$$
\item [(c)] For any Hilbert spaces $H,K$ and for any
operator space $E$, we have
$$
B(H,K)\overline{\otimes} E^*\,\simeq\, CB(E,B(H,K)).
$$
\end{itemize}
\end{lemma}
Let $K$ be a Hilbert space. We let $\{K\}_c$ (resp. $\{K\}_r$)
denote the column operator space (resp. the row operator space) over $K$.
We recall that through the canonical identification $K^*=\overline{K}$, we have
$$
\{K\}_c^* = \{\overline{K}\}_r
\quad\hbox{and}\quad
\{K\}_r^* = \{\overline{K}\}_c
\qquad\hbox{completely isometrically}.
$$
(See e.g. \cite[Section 3.4]{ER}.)
We let $F\stackrel{h}{\otimes}E$ denote
the Haagerup tensor product of a couple $(F,E)$
of operator spaces. We will use the
fact that this is an associative tensor product.
Let
$\theta\colon F\times E\to\ensuremath{\mathbb{C}}\,$ be a bounded
bilinear form. Then $\theta$
extends to an element
of $(F\stackrel{h}{\otimes}E)^*$ if and only if there
exist a Hilbert space $\mbox{${\mathcal H}$}$ and two completely bounded
maps $\alpha\colon E\to \{\mbox{${\mathcal H}$}\}_c\,$ and $\beta\colon F\to
\{\overline{\mbox{${\mathcal H}$}}\}_r\,$
such that $\theta(y,x)= \langle \alpha(x),\beta(y)\rangle$
for any $x\in E$ and
any $y\in F$ (see e.g. \cite[Corollary 9.4.2]{ER}).
The Haagerup tensor product is projective. This means that if
$p\colon E\to E_1$ and $q\colon F\to F_1$ are complete quotient maps, then
$q\otimes p$ extends to a (necessarily unique) complete quotient map
$F\stackrel{h}{\otimes}E\to
F_1\stackrel{h}{\otimes}E_1$. Taking the adjoint of the latter, we
obtain a $w^*$-continuous
completely isometric embedding
\begin{equation}\label{1Embed}
(F_1\stackrel{h}{\otimes}E_1)^*
\,\subset\,
(F\stackrel{h}{\otimes}E)^*.
\end{equation}
\begin{lemma}\label{1Slice-H}
Let $E,F,E_1,F_1$ be operator spaces as above and let
$\theta\in (F\stackrel{h}{\otimes}E)^*$. Let
$$
\theta'\colon E\longrightarrow F^*
\qquad\hbox{and}\qquad
\theta''\colon F\longrightarrow E^*
$$
be the bounded linear maps associated to $\theta$.
Then $\theta\in (F_1\stackrel{h}{\otimes}E_1)^*$ (in the sense given by (\ref{1Embed}))
if and only if $\theta'$ is valued in $F_1^*$ and $\theta''$ is valued in $E_1^*$.
\end{lemma}
\begin{proof}
If $\theta\in (F_1\stackrel{h}{\otimes}E_1)^*$, then
$\theta(y,x)=0$ if either $x\in{\rm Ker}(p)$ or
$y\in{\rm Ker}(q)$. Hence $\langle \theta''(y),x\rangle=0$
for any $(y,x)\in F\times {\rm Ker}(p)$ and
$\langle \theta'(x),y\rangle=0$ for any $(y,x)\in {\rm Ker}(q)\times E$.
Hence $\theta''$ is valued in $E_1^*={\rm Ker}(p)^\perp$
and $\theta'$ is valued in $F_1^*={\rm Ker}(q)^\perp$.
Assume conversely that $\theta'$ is valued in $F_1^*$ and that
$\theta''$ is valued in $E_1^*$.
Let $\alpha\colon E\to \{\mbox{${\mathcal H}$}\}_c\,$ and $\beta\colon F\to
\{\overline{\mbox{${\mathcal H}$}}\}_r\,$
be completely bounded maps, for some Hilbert space $\mbox{${\mathcal H}$}$,
such that $\theta(y,x)= \langle \alpha(x),\beta(y)\rangle$
for any $x\in E$ and
any $y\in F$. Changing $\mbox{${\mathcal H}$}$ into the closure of the range of $\alpha$, we may assume that
$\alpha$ has dense range. Next changing $\mbox{${\mathcal H}$}$ into the closure of the
(conjugate of) the range of $\beta$, we may
actually assume that both $\alpha$ and $\beta$ have dense range.
The assumption on $\theta'$
means that $\langle \alpha(x),\beta(y)\rangle=0$
for any $x\in E$ and any $y\in {\rm Ker}(q)$. Since $\alpha$ has dense range
this means that $\beta$ vanishes on ${\rm Ker}(q)$. Likewise
the assumption on $\theta''$
means that $\alpha$ vanishes on ${\rm Ker}(p)$.
We may therefore consider $\alpha_1\colon E_1\to \{\mbox{${\mathcal H}$}\}_c$ and
$\beta_1\colon F_1\to \{\overline{\mbox{${\mathcal H}$}}\}_r$
induced by $\alpha$ and $\beta$, that is,
$\alpha=\alpha_1\circ p$ and $\beta=\beta_1\circ q$.
Further $\alpha_1$ and $\beta_1$ are completely
bounded, hence the bilinear mapping
$(y_1,x_1)\mapsto \langle \alpha_1(x_1),\beta_1(y_1)\rangle$
is an element of $(F_1\stackrel{h}{\otimes}E_1)^*$. By construction
it identifies with $\theta$ in the embedding (\ref{1Embed}),
hence $\theta$ belongs to $(F_1\stackrel{h}{\otimes}E_1)^*$.
\end{proof}
We will need the so-called weak$^*$ Haagerup tensor product
of two dual operator spaces \cite{BS}. It can be defined by
\begin{equation}\label{1w*h}
F^*\stackrel{w^*h}{\otimes}E^* \,=\, (F\stackrel{h}{\otimes}E)^*.
\end{equation}
The reason why this dual space can be considered as a tensor product over the
couple $(F^*,E^*)$ is discussed in
\cite[1.6.9]{BLM}.
We now recall a few tensor product identities involving the
operator space projective tensor product and the Haagerup
tensor product.
\begin{proposition}\label{1Recap} Let $E$ be an operator space
and let $H,K$ be two Hilbert spaces.
\begin{itemize}
\item [(a)] We have completely isometric identifications
$$
\{K\}_r\stackrel{\frown}{\otimes} E
\,\simeq\,
\{K\}_r\stackrel{h}{\otimes} E
\qquad\hbox{and}\qquad
E\stackrel{\frown}{\otimes} \{H\}_c
\,\simeq\,
E\stackrel{h}{\otimes} \{H\}_c.
$$
\item [(b)] We have completely isometric identifications
$$
\{K\}_r\stackrel{\frown}{\otimes} \{H\}_r
\,\simeq\,
\{K\stackrel{2}{\otimes} H\}_r
\qquad\hbox{and}\qquad
\{K\}_c\stackrel{\frown}{\otimes} \{H\}_c
\,\simeq\,
\{K\stackrel{2}{\otimes} H\}_c.
$$
\item [(c)] The embedding $\overline{K}\otimes H\subset S^1(K,H)$
extends to completely isometric identifications
\begin{equation}\label{1RC}
S^1(K,H)
\,\simeq\,
\{\overline{K}\}_r\stackrel{\frown}{\otimes}\{H\}_c
\end{equation}
and
\begin{equation}\label{1REC}
S^1(K,H)\stackrel{\frown}{\otimes} E
\,\simeq\,
\{\overline{K}\}_r\stackrel{\frown}{\otimes} E
\stackrel{\frown}{\otimes}\{H\}_c.
\end{equation}
\item [(d)] To any $u\colon E\to B(H,K)$, associate
$\theta_u\colon \overline{K}\otimes E\otimes H\to\ensuremath{\mathbb{C}}\,$ by
letting $\theta_u(\overline{\xi}\otimes x\otimes \eta )
=\langle u(x)\eta,\xi\rangle$, for any $x\in E,\eta\in H,\xi\in K$.
Then $u\mapsto\theta_u$ extends to a $w^*$-continuous completely
isometric identification
$$
\bigl(\{\overline{K}\}_r\stackrel{\frown}{\otimes} E
\stackrel{\frown}{\otimes}\{H\}_c\bigr)^*
\,\simeq\,
CB(E,B(H,K)).
$$
\end{itemize}
\end{proposition}
\begin{proof}
We refer to \cite[Proposition 9.3.2]{ER} for (a)
and to \cite[Proposition 9.3.5]{ER} for (b).
Formula (\ref{1RC}) follows from \cite[Proposition 9.3.4]{ER}
and (a), and formula (\ref{1REC}) follows by the commutativity
of the operator space projective tensor product.
Finally (d) is a consequence of (\ref{1REC}), (\ref{1S11}) and (\ref{1Duality4}).
\end{proof}
\begin{remark}\label{1Equal}
Comparing (\ref{1RC})
with (\ref{1Proj}), we note
that at the Banach space level,
the operator space projective tensor product of a row and a column
Hilbert space coincides with their Banach space projective tensor product.
\end{remark}
\begin{remark}\label{1RankOne} For any $\eta\in H$ and $\xi\in K$, let
$T_{\eta,\xi}\in B(H,K)$ be the rank one operator defined by
$$
T_{\eta,\xi}(\zeta)=\langle \zeta,\eta\rangle\, \xi,\qquad\zeta\in H.
$$
When we consider this operator as an element of $S^\infty(H,K)$
or $B(H,K)$, it is convenient to identify it with $\xi\otimes\overline{\eta}
\in K\otimes\overline{H}$, and hence to regard $K\otimes\overline{H}$
as a subspace of $S^\infty(H,K)$. This convention is different from the
one used so far when we had to represent rank one (more generally, finite
rank) operators as elements of the trace class or of the Hilbert-Schmidt class.
The rationale for this is that the
trace duality providing (\ref{1S11}) extends the natural duality
between $K\otimes\overline{H}$ and $\overline{K}\otimes H$.
Then the embedding $K\otimes\overline{H}\subset
S^\infty(H,K)$ extends to a completely isometric identification
\begin{equation}\label{1Compact}
S^\infty (H,K)\,\simeq\, \{K\}_c\stackrel{h}{\otimes} \{\overline{H}\}_r.
\end{equation}
(See e.g. \cite[Proposition 9.3.4]{ER}.)
\end{remark}
If $A$ is any $C^*$-algebra, the so-called opposite
$C^*$-algebra $A^{op}$ is the involutive Banach space
$A$ equipped with its reversed
multiplication $(a,b)\mapsto ba$. Note that as an operator
space, $A^{op}$ is not (in general) the same as $A$,
that is, the identity mapping $A\to A^{op}$ is
not a complete isometry. See e.g. \cite[Theorem 2.2]{Roy}
for more about this.
In the case when $A=B(H)$, we have the following
well-known description (see e.g. \cite[Sections 2.9 and 2.10]{P2}).
\begin{lemma}\label{1Opp}
Let $H$ be a Hilbert space. For any $S\in B(H)$, define
$$
\widehat{S}(\overline{h}) = \overline{S^*(h)},\qquad
h\in H.
$$
Then $S\mapsto \widehat{S}$ is a $*$-isomorphism from $B(H)^{op}$
onto $B(\overline{H})$.
\end{lemma}
In the sequel we will use the operator space $M_{*}^{op}$ for any von Neumann algebra $M$.
This is both the predual operator space of $M^{op}$ and the opposite operator space of
$M_*$, in the sense of \cite[Section 2.10]{P2}.
\section{Operator multipliers into the trace class}\label{3OM}
Let $H_1,H_2,H_3$ be three Hilbert spaces.
Using (\ref{1HS}), we let
$$
\Theta\colon H_1\stackrel{2}{\otimes}\overline{H_2}\stackrel{2}{\otimes} H_3\longrightarrow
S^2\bigl(S^2(H_1,H_2),H_3\bigr)
$$
be the unitary operator obtained by first identifying $H_1\stackrel{2}{\otimes}\overline{H_2}$
with $\overline{S^2(H_1,H_2)}$, and then identifying $\overline{S^2(H_1,H_2)}\stackrel{2}{\otimes}
H_3$ with $S^2\bigl(S^2(H_1,H_2),H_3\bigr)$.
For any $\varphi\in B\bigl(H_1\stackrel{2}{\otimes}\overline{H_2}
\stackrel{2}{\otimes} H_3\bigr)$, one may define
a bounded bilinear map
$$
\tau_\varphi\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\longrightarrow B(H_1,H_3)
$$
by
$$
\bigl[\tau_\varphi(y,x)\bigr](h) = \Theta\bigl[\varphi(h\otimes y)\bigr](x),
\qquad x\in S^2(H_1,H_2), \ y\in S^2(H_2,H_3),\ h\in H_1.
$$
On the right hand side of the above
equality, $y$ is regarded as an element of
$\overline{H_2}\stackrel{2}{\otimes}H_3$, and hence $h\otimes y$
is an element of $H_1\stackrel{2}{\otimes}\overline{H_2}
\stackrel{2}{\otimes} H_3$.
It is clear that
$$
\bignorm{\bigl[\tau_\varphi(y,x)\bigr](h)}
\leq \norm{\varphi}\norm{x}_2\norm{y}_2\norm{h}.
$$
Consequently, the above construction defines a contraction
\begin{equation}\label{2Sigma1}
\tau\colon
B\bigl(H_1\stackrel{2}{\otimes}\overline{H_2}\stackrel{2}{\otimes} H_3\bigr)
\longrightarrow
B_2\bigl(S^2(H_2,H_3)\times S^2(H_1,H_2), B(H_1,H_3)\bigr).
\end{equation}
The bilinear maps $\tau_\varphi$ were introduced in \cite{JTT}
(however the latter paper focuses on the case when
$\bignorm{\bigl[\tau_\varphi(y,x)\bigr](h)}
\leq D\norm{x}\norm{y}\norm{h}$ for some constant $D>0$).
We call $\tau_\varphi$ an operator multiplier and we say that
$\varphi$ is the symbol of $\tau_\varphi$. We refer to \cite{JTT} for
$m$-linear versions of such operators for arbitrary $m\geq 2$.
We note that by (\ref{1Normal}) and Lemma \ref{1Opp},
we have a von Neumann algebra identification
\begin{equation}\label{2VN}
B\bigl(H_1\stackrel{2}{\otimes}\overline{H_2}\stackrel{2}{\otimes} H_3\bigr)
\,\simeq\,
B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3).
\end{equation}
In the sequel we will make no difference between these two von Neumann algebras.
In particular, we will consider symbols $\varphi$ of operator multipliers as elements
of $B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$.
One can check (see \cite{JTT}) that for any $R\in B(H_1)$, $S\in B(H_2)$ and
$T\in B(H_3)$, we have
\begin{equation}\label{2Sigma2}
\tau_{R\otimes S\otimes T}(y,x) = TySxR,
\qquad x\in S^2(H_1,H_2), \ y\in S^2(H_2,H_3).
\end{equation}
Note that in this identity,
$S$ is regarded as an element of $B(H_2)^{op}$
at the left-hand side
and as an element of $B(H_2)$ at the right-hand side.
We now define the operator space
\begin{equation}\label{2OS-Gamma}
\Gamma(H_1,H_2,H_3)\, =\, \{S^2(H_2,H_3)\}_c
\stackrel{\frown}{\otimes} \{S^2(H_1,H_2)\}_r.
\end{equation}
According to Remark \ref{1Equal}, $\Gamma(H_1,H_2,H_3)$
coincides, at the Banach
space level, with the projective tensor
product of $S^2(H_2,H_3)$ and $S^2(H_1,H_2)$. Hence
\begin{equation}\label{2Equal}
B_2\bigl(S^2(H_2,H_3)\times S^2(H_1,H_2), B(H_1,H_3)\bigr)
\,\simeq\, B\bigl(\Gamma(H_1,H_2,H_3), B(H_1,H_3)\bigr)
\end{equation}
by (\ref{1Duality1}). In the sequel for any
$u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to B(H_1,H_3)$,
we let
$$
\widetilde{u}\colon
\Gamma(H_1,H_2,H_3)\longrightarrow B(H_1,H_3)
$$
denote its associated linear map.
The next proposition shows that under the identification (\ref{2Equal}),
the range of $\tau$
coincides with the space of completely bounded maps from
$\Gamma(H_1,H_2,H_3)$ into $B(H_1,H_3)$.
\begin{proposition}\label{2OM-B}
Let $u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to B(H_1,H_3)$
be a bounded bilinear map. Then $\widetilde{u}\colon
\Gamma(H_1,H_2,H_3)\to B(H_1,H_3)$ is completely bounded if and only if
there exists $\varphi$ in
$B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$
such that $u=\tau_\varphi$. Further
$\tau$ provides a $w^*$-continuous
completely isometric identification
\begin{equation}\label{2OM-B1}
B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)
\,\simeq\, CB\bigl(\Gamma(H_1,H_2,H_3), B(H_1,H_3)\bigr).
\end{equation}
\end{proposition}
\begin{proof}
For convenience we set
$$
\mbox{${\mathcal H}$}=H_1\stackrel{2}{\otimes}\overline{H_2}
\stackrel{2}{\otimes} H_3.
$$
By (\ref{1HS}) and Proposition \ref{1Recap} (b),
we have
$$
\{S^2(H_1,H_2)\}_r\,\simeq\,\{\overline{H_1}\}_r
\stackrel{\frown}{\otimes}\{H_2\}_r
\qquad\hbox{and}\qquad
\{S^2(H_2,H_3)\}_c\,\simeq\,\{\overline{H_2}\}_c
\stackrel{\frown}{\otimes}\{H_3\}_c
$$
completely isometrically.
Hence applying (\ref{2OS-Gamma}), we have
\begin{equation}\label{2Gamma}
\Gamma(H_1,H_2,H_3) \,\simeq\,
\{\overline{H_2}\}_c
\stackrel{\frown}{\otimes}\{H_3\}_c
\stackrel{\frown}{\otimes}
\{\overline{H_1}\}_r
\stackrel{\frown}{\otimes}\{H_2\}_r
\end{equation}
completely isometrically.
Using the commutativity of the operator space projective
tensor product, we deduce a
completely isometric identification
$$
\{\overline{H_3}\}_r\stackrel{\frown}{\otimes}
\Gamma(H_1,H_2,H_3)\stackrel{\frown}{\otimes}
\{H_1\}_c
\,\simeq\, \{\overline{H_1}\}_r\stackrel{\frown}{\otimes}
\{H_2\}_r\stackrel{\frown}{\otimes}\{\overline{H_3}\}_r
\stackrel{\frown}{\otimes}
\{H_1\}_c
\stackrel{\frown}{\otimes}\{\overline{H_2}\}_c\stackrel{\frown}{\otimes}\{H_3\}_c.
$$
Using Proposition \ref{1Recap} (b) again, we have
$$
\{H_1\}_c
\stackrel{\frown}{\otimes}\{\overline{H_2}\}_c\stackrel{\frown}{\otimes}\{H_3\}_c
\simeq\{\mbox{${\mathcal H}$}\}_c
\qquad\hbox{and}\qquad \{\overline{H_1}\}_r\stackrel{\frown}{\otimes}
\{H_2\}_r\stackrel{\frown}{\otimes}\{\overline{H_3}\}_r \simeq\{\overline{\mbox{${\mathcal H}$}}\}_r
$$
completely isometrically.
By Proposition \ref{1Recap} (c),
this yields a completely isometric identification
\begin{equation}\label{2Ident1}
\{\overline{H_3}\}_r\stackrel{\frown}{\otimes}
\Gamma(H_1,H_2,H_3)\stackrel{\frown}{\otimes}
\{H_1\}_c
\simeq S^1\bigl(\mbox{${\mathcal H}$}).
\end{equation}
Passing to the duals, using (\ref{1S11}) and Proposition \ref{1Recap} (d),
we deduce a $w^*$-continuous
completely isometric identification
$$
B(\mbox{${\mathcal H}$})\,\simeq\, CB\bigl(\Gamma(H_1,H_2,H_3), B(H_1,H_3)\bigr).
$$
Combining with (\ref{2VN}), we deduce a $w^*$-continuous,
completely isometric onto
mapping
\begin{equation}\label{2J}
J\colon B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)
\longrightarrow CB\bigl(\Gamma(H_1,H_2,H_3), B(H_1,H_3)\bigr).
\end{equation}
Now to establish the proposition it suffices to check that
\begin{equation}\label{2J=Tau}
J(\varphi)=\widetilde{\tau}_\varphi
\end{equation}
for any
$\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$.
We claim that it suffices to prove (\ref{2J=Tau}) in the
case when $\varphi$ belongs to the algebraic tensor
product $B(H_1)\otimes B(H_2)^{op}\otimes B(H_3)$.
Indeed let $\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$,
let $x\in S^2(H_1,H_2)$, $y\in S^2(H_2,H_3)$ and
$h\in H_1$. Assume that $(\varphi_t)_t$ is a net of
$B(H_1)\otimes B(H_2)^{op}\otimes B(H_3)$ converging
to $\varphi$ in the $w^*$-topology. Then
$\varphi_t(h\otimes y)\to \varphi(h\otimes y)$ in the weak topology
of $H_1\stackrel{2}{\otimes} \overline{H_2}\stackrel{2}{\otimes} H_3$. Hence
$\Theta[\varphi_t(h\otimes y)]\to \Theta[\varphi(h\otimes y)]$ in the weak topology
of $S^2(S^2(H_1,H_2),H_3)$, which implies that
$\Theta[\varphi_t(h\otimes y)](x)
\to \Theta[\varphi(h\otimes y)](x)$ in the weak topology
of $H_3$. Equivalently, $[\tau_{\varphi_t}(y,x)](h)\to [\tau_{\varphi}(y,x)](h)$
weakly. Since $J$ is $w^*$-continuous, we also have, by similar arguments, that
$[J(\varphi_t)(y\otimes x)](h)\to [J(\varphi)(y\otimes x)](h)$ weakly. Hence
if $J(\varphi_t)=\widetilde{\tau}_{\varphi_t}$ for any $t$, we have
$J(\varphi)=\widetilde{\tau}_\varphi$ as well.
Moreover by linearity, it suffices to prove (\ref{2J=Tau})
when $\varphi= R\otimes S \otimes T$ for some
$R\in B(H_1)$, $S\in B(H_2)$ and
$T\in B(H_3)$.
In view of (\ref{2Sigma2}), it therefore suffices to show that
\begin{equation}\label{2Check}
J(R\otimes S\otimes T)(y\otimes x ) = TySxR,
\end{equation}
for any $R\in B(H_1)$, $S\in B(H_2)$,
$T\in B(H_3)$,
$x\in S^2(H_1,H_2)$ and $y\in S^2(H_2,H_3)$.
Since $J$ is linear and $w^*$-continuous it actually suffices to
prove (\ref{2Check}) when $R$, $S$, $T$, $x$ and $y$ are rank one.
For $i=1,2,3$, let $\xi_i,\eta_i,h_i,k_i\in H_i$ and consider
$x=\overline{\xi_1}\otimes\eta_2$ and
$y= \overline{\xi_2}\otimes\eta_3$, as well as the operators
$R=h_1\otimes\overline{k_1}$, $S=h_2\otimes\overline{k_2}$ and
$T=h_3\otimes\overline{k_3}$ (see Remark \ref{1RankOne} for the
use of these tensor product notations).
Then let
$$
\alpha = \overline{\xi_1}\otimes\eta_2\otimes\overline{\xi_3}
\otimes \eta_1\otimes\overline{\xi_2}\otimes \eta_3\, \in
\, (\overline{H_1}\otimes
H_2\otimes \overline{H_3})\otimes (H_1\otimes \overline{H_2}\otimes H_3)
\,\subset\,
S^1(\mbox{${\mathcal H}$})
$$
and let
$$
\beta= h_1\otimes \overline{k_2}\otimes
h_3\otimes \overline{k_1}\otimes h_2\otimes \overline{k_3}
\, \in
\, (H_1\otimes \overline{H_2}\otimes H_3)\otimes (\overline{H_1}\otimes
H_2\otimes \overline{H_3})\,\subset\,
B(\mbox{${\mathcal H}$}).
$$
In the identification (\ref{2Ident1}), $\overline{\xi_3}\otimes y\otimes x
\otimes \eta_1$ corresponds to $\alpha$ whereas in the identification (\ref{2VN}),
$R\otimes S\otimes T$ corresponds to $\beta$. Hence
\begin{align*}
\bigl\langle\bigl[J(R\otimes S\otimes T)(y\otimes x)](\eta_1),\xi_3\bigr\rangle
& ={\rm tr}(\alpha\beta)\\
& =\langle h_1,\xi_1\rangle\langle \eta_2,k_2\rangle
\langle h_3,\xi_3\rangle\langle \eta_1,k_1\rangle
\langle h_2,\xi_2\rangle\langle \eta_3,k_3\rangle.
\end{align*}
On the other hand,
$$
TySxR = \langle h_1,\xi_1\rangle\langle \eta_2,k_2\rangle
\langle h_2,\xi_2\rangle\langle \eta_3,k_3\rangle
h_3\otimes\overline{k}_1,
$$
hence
\begin{equation}\label{2Trace}
\langle TySxR(\eta_1),\xi_3\rangle = \langle h_1,\xi_1\rangle\langle \eta_2,k_2\rangle
\langle h_3,\xi_3\rangle\langle \eta_1,k_1\rangle
\langle h_2,\xi_2\rangle\langle \eta_3,k_3\rangle.
\end{equation}
This proves the desired equality.
\end{proof}
\begin{remark}\label{2Rk} Using (\ref{1S1S1}) twice we have a
$w^*$-continuous completely
isometric identification
\begin{equation}\label{2Rk1}
B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)\,\simeq\,
\bigl(S^1(H_1)\stackrel{\frown}{\otimes}
S^1(H_2)^{op}\stackrel{\frown}{\otimes}
S^1(H_3)\bigr)^*.
\end{equation}
Let $\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$
and let $u=\tau_\varphi$. Let
$\xi_1,\eta_1\in H_1$, $\xi_2,\eta_2\in H_2$ and $\xi_3,\eta_3\in H_3$ and
regard $\overline{\xi_i}\otimes\eta_i$ as an element of $S^1(H_i)$ for $i=1,2,3$.
According to (\ref{2Rk1}) we may consider the action of $\varphi$
on $\overline{\xi_1}\otimes\eta_1\otimes\eta_2\otimes
\overline{\xi_2}\otimes\overline{\xi_3}\otimes\eta_3$.
Then we have
$$
\langle\varphi, \overline{\xi_1}\otimes\eta_1\otimes\eta_2\otimes
\overline{\xi_2}\otimes\overline{\xi_3}\otimes\eta_3\rangle\,=\,
\bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, \overline{\xi_1}\otimes\eta_2)\bigr]
(\eta_1),\xi_3\bigr\rangle.
$$
Indeed this follows from the arguments
in the proof of Proposition \ref{2OM-B}. Details are left to the reader.
\end{remark}
Let $\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$.
We will say that $\tau_\varphi$
is an {\bf $S^1$-operator multiplier} if it takes values into
the trace class
$S^1(H_1,H_3)$ and there exists a constant
$D\geq 0$ such that
$$
\norm{\tau_\varphi(y,x)}_1\leq D\norm{x}_2\norm{y}_2,\qquad
x\in S^2(H_1,H_2),\ y\in S^2(H_2,H_3).
$$
Note that, by (\ref{2Sigma2}), $\tau_\varphi$ is an
$S^1$-operator multiplier when $\varphi$ is of the form $R\otimes S \otimes T$.
Consequently, $\tau_\varphi$ is an
$S^1$-operator multiplier whenever $\varphi$ belongs to the algebraic
tensor product $B(H_1)\otimes B(H_2)^{op}\otimes B(H_3)$.
In this paper we will be mostly interested in {\bf completely bounded
$S^1$-operator multipliers}, that is, $S^1$-operator multipliers $\tau_\varphi$
such that $\widetilde{\tau_\varphi}$ is a completely bounded map from $\Gamma(H_1,H_2,H_3)$ into
$S^1(H_1,H_3)$. Note that the canonical inclusion $S^1(H_1,H_3)\subset B(H_1,H_3)$
is a complete contraction, hence
$$
CB(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3))\,\subset\,
CB(\Gamma(H_1,H_2,H_3), B(H_1,H_3))\qquad\hbox{contractively.}
$$
It therefore follows from Proposition
\ref{2OM-B} that the space of all completely bounded
$S^1$-operator multipliers coincides with the space
$CB(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3))$.
The following statement provides a characterization.
\begin{lemma}\label{2CB}
Let $u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to S^1(H_1,H_3)$
be a bounded bilinear map and let
$D>0$ be a constant.
Then $\widetilde{u}\in CB(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3))$ and
$\cbnorm{\widetilde{u}}\leq D$ if and only
if for any $n\geq 1$, for any $x_1,\ldots,x_n\in S^2(H_1,H_2)$ and for any
$y_1,\ldots,y_n\in S^2(H_2,H_3)$,
$$
\bignorm{\bigl[u(y_i,x_j)\bigr]_{1\leq i,j\leq n}}_{S^1(\ell^2_n(H_1),
\ell^2_n(H_3))}\,\leq D\,\Bigl(\sum_{j=1}^n
\norm{x_j}^2_2\Bigr)^{\frac12}\Bigl(\sum_{i=1}^n
\norm{y_i}^2_2\Bigr)^{\frac12}.
$$
\end{lemma}
\begin{proof}
For any $n\geq 1$, we use the classical notations
$R_n=\{\ell^2_n\}_r, C_n=\{\ell^2_n\}_c$ and
$S^1_n=S^1(\ell^2_n)$.
Consider $u$ as above and set
$$
d_n = \bignorm{I_{S^1_n}\otimes \widetilde{u} \colon
S_n^1\stackrel{\frown}{\otimes} \Gamma(H_1,H_2,H_3)
\longrightarrow S_n^1\stackrel{\frown}{\otimes}S^1(H_1,H_3)}
$$
for any $n\geq 1$.
By \cite[Lemma 1.7]{P1}, $\widetilde{u}\in
CB\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)$ if and only if
the sequence $(d_n)_{n\geq 1}$ is bounded and in this case,
$\cbnorm{\widetilde{u}}=\sup_n d_n$.
By Proposition \ref{1Recap} (c),
$$
S_n^1\stackrel{\frown}{\otimes} \Gamma(H_1,H_2,H_3)
\,\simeq\,
R_n\stackrel{\frown}{\otimes} \{S^2(H_1,H_2)\}_r
\stackrel{\frown}{\otimes}\{S^2(H_2,H_3)\}_c\stackrel{\frown}{\otimes} C_n
$$
completely isometrically. Using Proposition \ref{1Recap} (b), this yields
$$
S_n^1\stackrel{\frown}{\otimes} \Gamma(H_1,H_2,H_3)
\,\simeq\,
\bigl\{\ell^2_n\stackrel{2}{\otimes}S^2(H_1,H_2)\bigr\}_r\stackrel{\frown}{\otimes}
\bigl\{\ell^2_n\stackrel{2}{\otimes}S^2(H_2,H_3)\bigr\}_c.
$$
Applying Remark \ref{1Equal}, we derive that
$$
S_n^1\stackrel{\frown}{\otimes} \Gamma(H_1,H_2,H_3)
\,\simeq\,
\bigl(\ell^2_n\stackrel{2}{\otimes}S^2(H_1,H_2)\bigr)\,\widehat{\otimes}\,
\bigl(\ell^2_n\stackrel{2}{\otimes}S^2(H_2,H_3)\bigr)
$$
isometrically.
Similarly,
\begin{align*}
S_n^1 \stackrel{\frown}{\otimes} S^1(H_1,H_3)
& \,\simeq\,
R_n \stackrel{\frown}{\otimes} S^1(H_1,H_3)
\stackrel{\frown}{\otimes} C_n
\\
& \,\simeq\,
R_n\stackrel{\frown}{\otimes} \{\overline{H_1}\}_r
\stackrel{\frown}{\otimes} \{H_3\}_c\stackrel{\frown}{\otimes} C_n
\\
& \,\simeq\,
\bigl\{\ell^2_n\stackrel{2}{\otimes}\overline{H_1}\bigr\}_r
\stackrel{\frown}{\otimes}
\bigl\{\ell^2_n\stackrel{2}{\otimes} H_3\bigr\}_c
\\
& \,\simeq\,
S^1\bigl(\ell^2_n(H_1), \ell^2_n(H_3)\bigr)
\end{align*}
isometrically.
Hence a thorough look at these identifications shows that
$$
d_n = \sup\Bigl\{\bignorm{\bigl[u(y_i,x_j)\bigr]_{1\leq i,j\leq n}}_{S^1(\ell^2_n(H_1),
\ell^2_n(H_3))}\Bigr\},
$$
where the supremum runs over all
$$
(x_1,\ldots,x_n)\in \ell^2_n\stackrel{2}{\otimes}S^2(H_1,H_2)
\qquad\hbox{and}\qquad
(y_1,\ldots,y_n)\in\ell^2_n\stackrel{2}{\otimes}S^2(H_2,H_3)
$$
of norms less than
or equal to $1$. This yields the result.
\end{proof}
The next result, which should be compared to Proposition \ref{2OM-B},
provides a characterization of completely bounded
$S^1$-operator multipliers. Before stating it, we note
that we have $S^1(H_1)\stackrel{\frown}{\otimes}
S^1(H_3)\subset S^1(H_1)\stackrel{h}{\otimes}
S^1(H_3)$ completely contractively (see e.g. \cite[Theorem 9.2.1]{ER}).
Consequently
$$
CB\bigl(S^1(H_1)\stackrel{h}{\otimes}
S^1(H_3), B(H_2)^{op}\bigr)\,\subset\,
CB\bigl(S^1(H_1)\stackrel{\frown}{\otimes}
S^1(H_3), B(H_2)^{op}\bigr)
$$
contractively. Applying Lemma \ref{1Injective1} (c), and using
(\ref{1S1S1}) and (\ref{1w*h}), we deduce a
contractive embedding
$$
B(H_2)^{op}\overline{\otimes}\bigl(
B(H_1)\stackrel{w^*h}{\otimes}B(H_3)\bigr)\,\subset\,
B(H_1)\overline{\otimes} B(H_2)^{op}
\overline{\otimes} B(H_3).
$$
\begin{theorem}\label{2OM-S1}
Let $\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes} B(H_3)$.
Then $\tau_\varphi$ is a completely bounded $S^1$-operator
multiplier if and only if $\varphi$ belongs to
$B(H_2)^{op}\overline{\otimes}\bigl(
B(H_1)\stackrel{w^*h}{\otimes}B(H_3)\bigr)$. Further (\ref{2OM-B1})
restricts to a $w^*$-continuous
completely isometric identification
\begin{equation}\label{2Ident3}
B(H_2)^{op}\overline{\otimes}\bigl(
B(H_1)\stackrel{w^*h}{\otimes}B(H_3)\bigr)\,\simeq\,
CB\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr).
\end{equation}
\end{theorem}
\begin{proof} The scheme of proof is similar to the one of Proposition \ref{2OM-B}.
Recall (\ref{2Gamma}) from this proof.
On the one hand, using
commutativity of the operator space projective tensor product, we deduce a
completely isometric identification
$$
\Gamma(H_1,H_2,H_3)
\,\simeq\, \{H_2\}_r \stackrel{\frown}{\otimes} \{\overline{H}_2\}_c
\stackrel{\frown}{\otimes} \{\overline{H}_1\}_r \stackrel{\frown}{\otimes}
\{H_3\}_c,
$$
and then, by Proposition \ref{1Recap} (c),
\begin{equation}\label{2Ident2}
\Gamma(H_1,H_2,H_3) \,\simeq\, S^1(\overline{H}_2)
\stackrel{\frown}{\otimes} S^1(H_1,H_3).
\end{equation}
On the other hand, it follows from (\ref{1REC}) and Proposition \ref{1Recap} (a) that
$$
S^1(H_1,H_3)
\stackrel{\frown}{\otimes} S^\infty(H_3,H_1)
\,\simeq\,\{\overline{H_1}\}_r\stackrel{h}{\otimes} S^\infty(H_3,H_1)\stackrel{h}{\otimes}\{H_3\}_c.
$$
Then using (\ref{1Compact}), we deduce
that
$$
S^1(H_1,H_3)
\stackrel{\frown}{\otimes} S^\infty(H_3,H_1)
\,\simeq\, \bigl(\{\overline{H_1}\}_r\stackrel{h}{\otimes}
\{H_1\}_c\bigr)
\stackrel{h}{\otimes} \bigl(\{\overline{H_3}\}_r\stackrel{h}{\otimes}\{H_3\}_c\bigr).
$$
Applying Proposition \ref{1Recap} (a) again together with (\ref{1RC}), we obtain that
$$
S^1(H_1,H_3)
\stackrel{\frown}{\otimes} S^\infty(H_3,H_1)
\,\simeq\, S^1(H_1) \stackrel{h}{\otimes} S^1(H_3)
$$
completely isometrically.
Combining the last identification with (\ref{2Ident2}), we find
\begin{equation}\label{2Ident4}
\Gamma(H_1,H_2,H_3)\stackrel{\frown}{\otimes} S^\infty(H_3,H_1)\,\simeq\,
S^1(\overline{H}_2)
\stackrel{\frown}{\otimes}\bigl(S^1(H_1) \stackrel{h}{\otimes} S^1(H_3)\bigr).
\end{equation}
We now pass to duals. First by (\ref{1Duality4}) and (\ref{1S11}), we have
a $w^*$-continuous completely isometric identification
$$
\bigl(\Gamma(H_1,H_2,H_3)\stackrel{\frown}{\otimes} S^\infty(H_3,H_1)\bigr)^*
\,\simeq\,
CB\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr).
$$
Second by (\ref{1Duality4}) and Lemma \ref{1Opp}, we have
$w^*$-continuous completely isometric identifications
\begin{align*}
\bigl(S^1(\overline{H}_2)
\stackrel{\frown}{\otimes}\bigl(S^1(H_1) \stackrel{h}{\otimes} S^1(H_3)\bigr)\bigr)^*
\,&\simeq\,
CB\bigl(S^1(H_1) \stackrel{h}{\otimes} S^1(H_3),B(\overline{H_2})\bigr)\\
&\simeq\, CB\bigl(S^1(H_1) \stackrel{h}{\otimes} S^1(H_3),B(H_2)^{op}\bigr).
\end{align*}
Equivalently, by Lemma \ref{1Injective1} (c), we have
$$
\bigl(S^1(\overline{H}_2)
\stackrel{\frown}{\otimes}\bigl(S^1(H_1) \stackrel{h}{\otimes} S^1(H_3)\bigr)\bigr)^*
\,\simeq\,B(H_2)^{op}\overline{\otimes}\bigl(B(H_1)\stackrel{w^*h}{\otimes} B(H_3)\bigr).
$$
Thus (\ref{2Ident4}) yields a $w^*$-continuous, completely isometric onto mapping
$$
L\colon B(H_2)^{op}\overline{\otimes}\bigl(B(H_1)\stackrel{w^*h}{\otimes} B(H_3)\bigr)
\longrightarrow
CB\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr).
$$
Arguing as in the proof of Proposition \ref{2OM-B}, it now suffices to
show that
for any $R\in B(H_1)$, $S\in B(H_2)$ and
$T\in B(H_3)$,
$L(S\otimes R\otimes T)$ coincides with
$\widetilde{\tau}_{R\otimes S\otimes T}$. Next, it suffices to show that
\begin{equation}\label{2Check2}
L(S\otimes R\otimes T)(y\otimes x) = TySxR
\end{equation}
when $R,S,T$ are rank one and when $x\in S^2(H_1,H_2)$ and
$y\in S^2(H_2,H_3)$ are rank one.
We let $\xi_i,\eta_i,h_i,k_i\in H_i$ for $i=1,2,3$ and
consider
$R=h_1\otimes\overline{k_1}$, $S=h_2\otimes\overline{k_2}$,
$T=h_3\otimes\overline{k_3}$, $x=\overline{\xi_1}\otimes\eta_2$ and
$y= \overline{\xi_2}\otimes\eta_3$.
Then $y\otimes x\in \mbox{${\mathcal G}$}amma(H_1,H_2,H_3)$ corresponds
to $(\eta_2\otimes\overline{\xi_2})\otimes
(\overline{\xi_1}\otimes\eta_3)\in S^1(\overline{H}_2)\otimes
S^1(H_1,H_3)$ in the identification (\ref{2Ident2}). Hence
$y\otimes x\otimes(\eta_1\otimes\overline{\xi_3})$ regarded as an element
of
$\mbox{${\mathcal G}$}amma(H_1,H_2,H_3)\otimes S^\infty(H_3,H_1)$
corresponds
to
$$
(\eta_2\otimes\overline{\xi_2})\otimes
(\overline{\xi_1}\otimes\eta_1)\otimes(\overline{\xi_3}\otimes\eta_3)
\,\in\, S^1(\overline{H}_2)\otimes
S^1(H_1)\otimes S^1(H_3)
$$
in the identification (\ref{2Ident4}).
Since
$$
\widehat{S}\otimes R\otimes T=\overline{k_2}\otimes h_2\otimes
h_1\otimes\overline{k_1}\otimes h_3\otimes\overline{k_3}
\,\in\, B(\overline{H}_2)\otimes B(H_1)\otimes B(H_3),
$$
we then have
$$
\bigl\langle\bigl[
L(S\otimes R\otimes T)(y\otimes x )\bigr](\eta_1),\xi_3\bigr\rangle =
\langle \eta_2,k_2\rangle\langle h_2,\xi_2\rangle
\langle h_1,\xi_1\rangle\langle \eta_1,k_1\rangle
\langle h_3,\xi_3\rangle\langle \eta_3,k_3\rangle.
$$
By (\ref{2Trace}), the right hand side of this equality is equal
to $\langle TySxR(\eta_1),\xi_3\rangle$. This proves the identity (\ref{2Check2}),
and hence the result.
\end{proof}
\section{Module maps}\label{4MOD}
As in the previous section, we consider three Hilbert spaces $H_1,H_2,H_3$. We further
consider von Neumann subalgebras
$$
M_1\subset B(H_1),\qquad
M_2\subset B(H_2)\quad\hbox{and}\qquad
M_3\subset B(H_3)
$$
acting on these spaces. For $i=1,2,3$, we let $M_i'\subset B(H_i)$ be the commutant
of $M_i$.
Let $u\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to B(H_1,H_3)$ be a bounded bilinear operator.
We say that $u$ is an $(M'_3,M'_2,M'_1)$-module map (or is $(M'_3,M'_2,M'_1)$-modular)
provided that
$$
u(Ty,x)=Tu(y,x),\qquad u(y,xR)=u(y,x)R\quad\hbox{and}\quad
u(yS,x)=u(y,Sx)
$$
for any $x\in S^2(H_1,H_2)$, $y\in S^2(H_2,H_3)$, $R\in M'_1$, $S\in M'_2$ and $T\in M'_3$.
It will be convenient to associate to $u$ the following $4$-linear bounded operators.
We define
\begin{equation}\label{3U11}
U_1\colon \overline{H_2}\times H_2\times \overline{H_3}\times H_3\longrightarrow B(H_1)
\end{equation}
by
\begin{equation}\label{3U12}
\bigl\langle \bigl[U_1(\overline{\xi_2},\eta_2,\overline{\xi_3},\eta_3)\bigr]
(\eta_1),\xi_1\bigr\rangle
\,=\, \bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, \overline{\xi_1}\otimes\eta_2)\bigr]
(\eta_1),\xi_3\bigr\rangle
\end{equation}
for any $\xi_1,\eta_1\in H_1$, $\xi_2,\eta_2\in H_2$ and $\xi_3,\eta_3\in H_3$. Likewise we define
$$
U_2\colon\overline{H_1}\times H_1\times \overline{H_3}\times H_3\to B(H_2)
\qquad\hbox{and}\qquad
U_3\colon\overline{H_1}\times H_1\times \overline{H_2}\times H_2\to B(H_3)
$$
by
\begin{align*}
\bigl\langle \bigl[U_2(\overline{\xi_1},\eta_1,\overline{\xi_3},\eta_3)\bigr]
(\eta_2),\xi_2\bigr\rangle
\,&=\, \bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, \overline{\xi_1}\otimes\eta_2)\bigr]
(\eta_1),\xi_3\bigr\rangle\\
\bigl\langle \bigl[U_3(\overline{\xi_1},\eta_1,\overline{\xi_2},\eta_2)\bigr]
(\eta_3),\xi_3\bigr\rangle
\,&=\, \bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, \overline{\xi_1}\otimes\eta_2)\bigr]
(\eta_1),\xi_3\bigr\rangle.
\end{align*}
\begin{lemma}\label{3LemMod}
Let $u\in B_2\bigl(S^2(H_2,H_3)\times S^2(H_1,H_2), B(H_1,H_3)\bigr)$.
Then $u$ is an $(M'_3,M'_2,M'_1)$-module map if and only if
for any $i=1,2,3$, $U_i$ is valued in $M_i$.
\end{lemma}
\begin{proof}
Let $R\in B(H_1)$. For any $\eta_1,\xi_1\in H_1$,
$\eta_2,\xi_2\in H_2$ and $\eta_3,\xi_3\in H_3$, we have
$$
\bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, \overline{\xi_1}\otimes\eta_2)\bigr]
R(\eta_1),\xi_3\bigr\rangle\,
=\,
\bigl\langle \bigl[U_1(\overline{\xi_2},\eta_2,\overline{\xi_3},\eta_3)\bigr]
R(\eta_1),\xi_1\bigr\rangle.
$$
Further $(\overline{\xi_1}\otimes\eta_2)R= \overline{R^*(\xi_1)}\otimes\eta_2$, hence
\begin{align*}
\bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, (\overline{\xi_1}\otimes\eta_2)R)\bigr]
(\eta_1),\xi_3\bigr\rangle\,
& =\,\bigl\langle \bigl[U_1(\overline{\xi_2},\eta_2,\overline{\xi_3},\eta_3)\bigr]
(\eta_1),R^*(\xi_1)\bigr\rangle\\
& =\,\bigl\langle R\bigl[U_1(\overline{\xi_2},\eta_2,\overline{\xi_3},\eta_3)\bigr]
(\eta_1),\xi_1\bigr\rangle.
\end{align*}
Since $\overline{H_1}\otimes H_2$ and $\overline{H_2}\otimes H_3$
are dense in $S^2(H_1,H_2)$ and
$S^2(H_2,H_3)$, respectively,
we deduce that $u(y,xR)=u(y,x)R$ for any $x\in S^2(H_1,H_2)$ and
any $y\in S^2(H_2,H_3)$ if and only if $R$ commutes with
$U_1(\overline{\xi_2},\eta_2,\overline{\xi_3},\eta_3)$ for
any $\xi_2,\eta_2\in H_2$ and $\xi_3,\eta_3\in H_3$.
Consequently
$u$ is $(\ensuremath{\mathbb{C}},\ensuremath{\mathbb{C}},M_1')$-modular if and only if
the range of $U_1$ commutes with $M_1'$. By the
Bicommutant Theorem, this means that
$u$ is $(\ensuremath{\mathbb{C}},\ensuremath{\mathbb{C}},M_1')$-modular if and only if
$U_1$ is valued in $M_1$.
Likewise $u$ is $(\ensuremath{\mathbb{C}},M_2',\ensuremath{\mathbb{C}})$-modular (resp.
$(M_3',\ensuremath{\mathbb{C}},\ensuremath{\mathbb{C}})$-modular)
if and only if
$U_2$ is valued in $M_2$ (resp. $U_3$ is valued in $M_3$).
This proves the result.
\end{proof}
\begin{corollary}\label{3Mod-B}
Let $\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes}
B(H_3)$. Then $\tau_\varphi$ is $(M_3',M_2',M_1')$-modular if and only
if $\varphi\in M_1\overline{\otimes} M_2^{op}\overline{\otimes}
M_3$.
This provides (as a restriction of (\ref{2OM-B1}))
a $w^*$-continuous
completely isometric identification
$$
M_1\overline{\otimes} M_2^{op}\overline{\otimes} M_3
\,\simeq\, CB_{(M'_3,M'_2,M'_1)}
\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), B(H_1,H_3)\bigr),
$$
where the right-hand side denotes the subspace
of $CB\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), B^1(H_1,H_3)\bigr)$
of all completely bounded maps $\widetilde{u}$ such that
$u$ is an $(M'_3,M'_2,M'_1)$-module map.
\end{corollary}
\begin{proof}
Consider the duality relation
$$
B(H_2)^{op}\overline{\otimes} B(H_3) =
\bigl(S^1(H_2)^{op} \stackrel{\frown}{\otimes} S^1(H_3)\bigr)^*
$$
provided by (\ref{1S1S1}).
We claim that in the space $S^1(H_2)^{op} \stackrel{\frown}{\otimes} S^1(H_3)$, we have equality
\begin{equation}\label{3perp}
\bigl(M_{2}^{op}\overline{\otimes} M_3\bigr)_{\perp}
\,=\,\overline{(M_{2\perp}^{op}\otimes S^1(H_3) + S^1(H_2)^{op}\otimes M_{3\perp})}.
\end{equation}
Indeed let $z\in B(H_2)^{op}\overline{\otimes} B(H_3)$ and let
$z'\colon S^1(H_3)\to B(H_2)^{op}$ and
$z''\colon S^1(H_2)^{op} \to B(H_3)$
be associated with $z$ (see Lemma \ref{1Slice}).
Then $z\in \bigl(M_{2\perp}^{op}\otimes S^1(H_3)\bigr)^{\perp}$ if and only if
$z'$ is valued in $M_{2}^{op}$, whereas $z\in \bigl(S^1(H_2)^{op}\otimes M_{3\perp}\bigr)^{\perp}$ if and only if
$z''$ is valued in $M_{3}$. Consequently,
$z$ belongs to the orthogonal of $M_{2\perp}^{op}\otimes S^1(H_3) + S^1(H_2)^{op}\otimes M_{3\perp}$
if and only if $z'$ is valued in $M_{2}^{op}$
and $z''$ is valued in $M_{3}$. In turn this is equivalent to $z'\in CB(M_{3*}, M_{2}^{op})$.
Applying Lemma \ref{1Injective1} (a), we deduce that
the orthogonal of $M_{2\perp}^{op}\otimes S^1(H_3) + S^1(H_2)^{op}\otimes M_{3\perp}$
is equal to $M_{2}^{op}\overline{\otimes} M_3$. The claim (\ref{3perp})
follows at once.
Let $\varphi\in B(H_1)\overline{\otimes} B(H_2)^{op}\overline{\otimes}
B(H_3)$. Using Lemma \ref{1Slice}, we may associate 3 completely bounded operators
\begin{align*}
\varphi^1 &\colon S^1(H_2)^{op}\stackrel{\frown}{\otimes} S^1(H_3)\longrightarrow
B(H_1),\\
\varphi^2 &\colon S^1(H_1)\stackrel{\frown}{\otimes} S^1(H_3)\longrightarrow
B(H_2)^{op},\\
\varphi^3 &\colon S^1(H_1)\stackrel{\frown}{\otimes} S^1(H_2)^{op}\longrightarrow
B(H_3)
\end{align*}
to $\varphi$.
According to Lemma \ref{1Injective1} (a), $\varphi$ belongs
to $M_1\overline{\otimes} M_2^{op}\overline{\otimes}
M_3$ if and only if $\varphi^1$ is valued in $M_1$ and
$\varphi^1$ vanishes on $(M_2^{op}\overline{\otimes}
M_3)_{\perp}$. By (\ref{3perp}),
$\varphi^1$ vanishes on $(M_2^{op}\overline{\otimes}
M_3)_{\perp}$ if and only if it both vanishes on
$M_{2\perp}^{op}\otimes S^1(H_3)$ and $S^1(H_2)^{op}\otimes M_{3\perp}$.
A quick look at the definitions of $\varphi^1, \varphi^2,\varphi^3$
reveals that $\varphi^1$ vanishes on
$M_{2\perp}^{op}\otimes S^1(H_3)$ if and only if $\varphi^2$ is valued in
$M^{op}_{2}$ and that $\varphi^1$ vanishes on $S^1(H_2)^{op}\otimes M_{3\perp}$
if and only if $\varphi^3$ is valued in
$M_{3}$. Altogether we obtain that $\varphi$ belongs
to $M_1\overline{\otimes} M_2^{op}\overline{\otimes}
M_3$ if and only if $\varphi^1$ is valued in $M_1$,
$\varphi^2$ is valued in $M_2^{op}$ and
$\varphi^3$ is valued in $M_3$.
Let $u=\tau_\varphi$.
It follows from Remark \ref{2Rk}
that for any $\eta_2,\xi_2\in H_2$ and $\eta_3,\xi_3\in H_3$, we have
$$
\varphi^1(\eta_2\otimes \overline{\xi_2}\otimes \overline{\xi_3}\otimes
\eta_3) = U_1(\overline{\xi_2},\eta_2,\overline{\xi_3},\eta_3),
$$
where $U_1$ is defined by (\ref{3U11}) and (\ref{3U12}).
Thus $\varphi^1$ is valued in
$M_1$ if and only if $U_1$ is valued in $M_1$. Likewise
$\varphi^2$ is valued in
$M_2^{op}$ if and only if $U_2$ is valued in $M_2$ and
$\varphi^3$ is valued in
$M_3$ if and only if $U_3$ is valued in $M_3$.
By Lemma \ref{3LemMod} we deduce that
$u$ is $(M_1',M_2',M_3')$-modular if and only
if $\varphi\in M_1\overline{\otimes} M_2^{op}\overline{\otimes} M_3$.
\end{proof}
We now turn to the study of modular completely bounded $S^1$-multipliers.
We let
$$
CB_{(M'_3,M'_2,M'_1)}\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)
$$
denote the subspace of $CB\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)$
of all completely bounded maps $\widetilde{u}$ such that
$u$ is an $(M'_3,M'_2,M'_1)$-module map.
According to (\ref{1Embed}) and (\ref{1w*h}),
$M_1\stackrel{w^*h}{\otimes} M_3$ can be regarded as
a $w^*$-closed subspace of the dual operator space
$B(H_1)\stackrel{w^*h}{\otimes}B(H_3)$. Consequently,
$M_2^{op}\overline{\otimes}\bigl(M_1\stackrel{w^*h}{\otimes} M_3\bigr)$
can be regarded as
a $w^*$-closed subspace of the dual operator space
$B(H_2)^{op}\overline{\otimes}\bigl(B(H_1)\stackrel{w^*h}{\otimes}B(H_3)\bigr)$.
The next statement is a continuation of Theorem \ref{2OM-S1}.
\begin{theorem}\label{3OM-S1}
Assume that $M_2$ is injective.
\begin{itemize}
\item [(a)] Let $\varphi\in B(H_2)^{op}\overline{\otimes}\bigl(B(H_1)\stackrel{w^*h}{\otimes}B(H_3)\bigr)$.
Then $\varphi$ belongs to $M_2^{op}\overline{\otimes}\bigl(M_1\stackrel{w^*h}{\otimes} M_3\bigr)$
if and only if $\tau_\varphi$ is $(M'_3,M'_2,M'_1)$-modular.
\item [(b)]
The identification (\ref{2Ident3}) restricts to
\begin{equation}\label{3Ident1}
M_2^{op}\overline{\otimes}\bigl(
M_1\stackrel{w^*h}{\otimes}M_3\bigr)\,\simeq \,
CB_{(M'_3,M'_2,M'_1)}\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr).
\end{equation}
\end{itemize}
\end{theorem}
\begin{proof}
Clearly (b) is a consequence of (a) so we only treat this first item.
Let $\varphi\in B(H_2)^{op}\overline{\otimes}\bigl(B(H_1)\stackrel{w^*h}{\otimes}B(H_3)\bigr)$.
Let
$$
\sigma\colon S^1(H_1)\stackrel{h}{\otimes}S^1(H_3)\longrightarrow B(H_2)^{op}
$$
be corresponding to $\varphi$ in the identification provided by Lemma \ref{1Injective1} (c).
Then let
$$
\rho\colon S^1(H_2)^{op} \longrightarrow B(H_1)\stackrel{w^*h}{\otimes} B(H_3)
$$
be the restriction of the adjoint of $\sigma$
to $S^1(H_2)^{op}$.
We assumed that $M_2$ is injective. It therefore
follows from Lemma \ref{1Injective1} (b) that
$\varphi\in M_2^{op}\overline{\otimes}\bigl(M_1\stackrel{w^*h}{\otimes} M_3\bigr)$
if and only if
\begin{equation}\label{2Fubini1}
\sigma\bigl(S^1(H_1)\stackrel{h}{\otimes}S^1(H_3)\bigr)
\,\subset\, M_2^{op}
\end{equation}
and
\begin{equation}\label{2Fubini2}
\rho\bigl(S^1(H_2)^{op}\bigr)\,\subset\,M_1\stackrel{w^*h}{\otimes} M_3.
\end{equation}
Let $u=\tau_\varphi$.
We will now show that $u$ is an $(M'_3,M'_2,M'_1)$-module map if and only if
(\ref{2Fubini1}) and (\ref{2Fubini2}) hold true.
First we observe that for any $\xi_1,\eta_1\in H_1$ and $\xi_3,\eta_3\in H_3$,
$$
\sigma\bigl((\overline{\xi_1}\otimes\eta_1)\otimes(\overline{\xi_3}\otimes\eta_3)\bigr)
\,=\, U_2(\overline{\xi_1},\eta_1,\overline{\xi_3},\eta_3).
$$
Indeed, this follows from Remark \ref{2Rk} and the definition of $U_2$.
Since
$\overline{H_1}\otimes H_1$ and $\overline{H_3}\otimes H_3$
are dense in $S^1(H_1)$ and $S^1(H_3)$, respectively,
we deduce that (\ref{2Fubini1}) holds true if and only if
$U_2$ is valued in $M_2$.
For any $v\in S^1(H_2)^{op}$, we may regard $\rho(v)$ as
an element of $\bigl(S^1(H_1)\stackrel{h}{\otimes} S^1(H_3)\bigr)^*$.
Then following the notation in Lemma \ref{1Slice-H}, we let
$$
[\rho(v)]'\colon S^1(H_3)\longrightarrow B(H_1)
\qquad\hbox{and}\qquad
[\rho(v)]''\colon S^1(H_1)\longrightarrow B(H_3)
$$
be the bounded linear maps associated to $\rho(v)$.
For any $\xi_2,\eta_2\in H_2$ and $\xi_3,\eta_3\in H_3$, we have
$$
\bigl[\rho(\eta_2\otimes\overline{\xi_2})\bigr]'(\overline{\xi_3}\otimes\eta_3) =
U_1(\overline{\xi_2},\eta_2, \overline{\xi_3},\eta_3).
$$
Indeed this follows again from Remark \ref{2Rk}.
Since
$H_2\otimes \overline{H_2}$ and $\overline{H_3}\otimes H_3$
are dense in $S^1(H_2)^{op}$ and $S^1(H_3)$, respectively,
we deduce that $[\rho(v)]'$ maps
$S^1(H_3)$ into $M_1$ for any $v\in S^1(H_2)^{op}$ if and only
if $U_1$ is valued in $M_1$. Likewise, $[\rho(v)]''$ maps
$S^1(H_1)$ into $M_3$ for any $v\in S^1(H_2)^{op}$ if and only
if $U_3$ is valued in $M_3$. Applying Lemma \ref{1Slice-H}, we
deduce (\ref{2Fubini2}) holds true if and only if
$U_1$ is valued in $M_1$ and $U_3$ is valued in $M_3$.
Altogether we have that (\ref{2Fubini1}) and (\ref{2Fubini2}) both
hold true if and only if for any $i=1,2,3$,
$U_i$ is valued in $M_i$. According to Lemma \ref{3LemMod}, this is equivalent to
$u=\tau_\varphi$ being
$(M'_3,M'_2,M'_1)$-modular.
\end{proof}
\section{The Sinclair-Smith factorization theorem}\label{5SS}
Let $I$ be an index set, and consider the Hilbertian operator spaces
$$
C_I = \{\ell^2_I\}_c
\qquad\hbox{and}\qquad
R_I = \{\ell^2_I\}_r.
$$
For any operator space $G$, we set
$$
C_I^w(G^*) = C_I\overline{\otimes} G^*
\qquad\hbox{and}\qquad
R_I^w(G^*) = R_I\overline{\otimes} G^*.
$$
This notation is taken from \cite[1.2.26--1.2.29]{BLM}, to which we refer for more
information.
We recall that $C_I^w(G^*)$ can be equivalently defined as the space of all families
$(x_i)_{i\in I}$ of elements of $G^*$ such that
the sums $\sum_{i\in J} x^*_ix_i$, for finite $J\subset I$, are uniformly bounded.
Likewise, $R_I^w(G^*)$ is equal to the space of all families
$(y_i)_{i\in I}$ of elements of $G^*$ such that
the sums $\sum_{i\in J} y_iy_i^*$, for finite $J\subset I$, are uniformly bounded.
Assume that $G^*=M$ is a von Neumann algebra, and consider $(x_i)_{i\in I}\in
C_I^w(M)$ and $(y_i)_{i\in I}\in
R_I^w(M)$. Then the family $(y_i x_i)_{i\in I}$ is summable in the $w^*$-topology of
$M$ and we let
\begin{equation}\label{4Sum}
\sum_{i\in I} y_i x_i\ \in M
\end{equation}
denote its sum.
We note the obvious fact that for any $x_i\in M, i\in I$,
$(x_i)_{i\in I}$ belongs to
$R_I^w(M)$ if and only if $(x_i^*)_{i\in I}$ belongs to
$C_I^w(M)$. In this case we set
$$
\bigl[(x_i)_{i\in I}\bigr]^* \,=\, (x_i^*)_{i\in I}.
$$
\begin{lemma}\label{4CI-CB} Let $E,G$ be operator spaces and let $I$ be an index set.
For any $\alpha=(\alpha_i)_{i\in I}\in C_I^w\bigl(CB(E,G^*)\bigr)$,
the (well-defined) operator $\widehat{\alpha}\colon E\to C_I^w(G^*)$,
$\widehat{\alpha}(x) =(\alpha_i(x))_{i\in I}$,
is completely bounded
and the mapping $\alpha\mapsto \widehat{\alpha}$ induces a
$w^*$-continuous completely isometric identification
$$
C_I^w\bigl(CB(E,G^*)\bigr)\,\simeq\, CB\bigl(E, C_I^w(G^*)\bigr).
$$
Likewise we have
$$
R_I^w\bigl(CB(E,G^*)\bigr)\,\simeq\, CB\bigl(E, R_I^w(G^*)\bigr).
$$
\end{lemma}
\begin{proof}
According to Lemma \ref{1Injective1} (c) and (\ref{1Duality4}),
$C_I^w(Z^*)\simeq (R_I\stackrel{\frown}{\otimes} Z)^*$ for any
operator space $Z$. Applying this identification, first with $Z=E\stackrel{\frown}{\otimes} G$
and then with
$Z=G$, we obtain that
\begin{align*}
C_I^w\bigl(CB(E,G^*)\bigr)\,
&\simeq\,
C_I^w\bigl((E\stackrel{\frown}{\otimes} G)^*\bigr)\quad\hbox{by (\ref{1Duality4})}\\
&\simeq\,
(R_I\stackrel{\frown}{\otimes} E\stackrel{\frown}{\otimes} G)^*\\
&\simeq\, CB\bigl(E, (R_I\stackrel{\frown}{\otimes} G)^*\bigr)\quad\hbox{by (\ref{1Duality4})}\\
&\simeq\, CB\bigl(E, C_I^w(G^*)\bigr).
\end{align*}
A straightforward verification reveals that this identification is implemented
by $\widehat{\alpha}$.
This yields the first part of the lemma. The proof of the second part is identical.
\end{proof}
We can now state the Sinclair-Smith factorization theorem, which will be use
in the next section.
\begin{theorem}\label{4SS} (\cite{SS})
Let $E,F$ be operator spaces, let $M$ be an injective von Neumann algebra
and let $w\colon F\stackrel{h}{\otimes} E\to M$ be a completely bounded
map. Then there exist an index set $I$ and two families
$$
\alpha=(\alpha_i)_{i\in I}\in C_I^w\bigl(CB(E,M)\bigr)
\qquad\hbox{and}\qquad
\beta=(\beta_i)_{i\in I}\in R_I^w\bigl(CB(F,M)\bigr)
$$
such that $\cbnorm{\alpha}\cbnorm{\beta} = \cbnorm{w}$ and
$$
w(y\otimes x)\,=\,\sum_{i\in I} \beta_i(y)\alpha_i(x),
\qquad x\in E,\, y\in F.
$$
\end{theorem}
In the rest of this section, we give a
new (shorter) proof of Theorem \ref{4SS}
based on Hilbert $C^*$-modules.
In the following we give the necessary background on
Hilbert $C^*$-modules. Let $M$ be a $C^*$-algebra. Recall that a
pre-Hilbert $M$-module is a right $M$-module $\mbox{${\mathcal X}$}$ equipped with a map
$\langle\,\cdotp,\,\cdotp\rangle\colon\mbox{${\mathcal X}$}\times \mbox{${\mathcal X}$}\to M$ (called an $M$-valued
inner product) satisfying the following properties:
\begin{itemize}
\item $\langle s,s\rangle\geq 0$ for every $s\in \mbox{${\mathcal X}$}$;
\item $\langle s,s\rangle=0$ if and only if $s=0$;
\item $\langle s,t\rangle=\langle t,s\rangle^*$ for every $s,t\in \mbox{${\mathcal X}$}$;
\item $\langle s, t_1m_1+t_2m_2\rangle=\langle s,t_1\rangle m_1+\langle s,t_2\rangle m_2$
for every $s,\,t_1,\,t_2\in \mbox{${\mathcal X}$}$ and $m_1,\, m_2\in M$.
\end{itemize}
In this setting, the map
$\|\cdot\|\colon\mbox{${\mathcal X}$}\to\mathbb R^+$, defined by
$$
\|s\|=\|\langle s,s\rangle\|^{1/2}, \qquad s\in \mbox{${\mathcal X}$},
$$
is a norm on $\mbox{${\mathcal X}$}$. A pre-Hilbert $M$-module which is complete
with respect to its norm is said to be a Hilbert $M$-module.
By \cite{B} (see also \cite[8.2.1]{BLM}),
a Hilbert $M$-module $\mbox{${\mathcal X}$}$ has a canonical operator
space structure obtained by letting, for any $n\geq 1$,
$$
\bignorm{(s_{ij})_{i,j}} \, =\, \mbox{${\mathcal B}$}iggnorm{\mbox{${\mathcal B}$}iggl(\sum_{k=1}^n\langle s_{ki}, s_{kj}
\rangle\mbox{${\mathcal B}$}iggr)_{i,j}}_{M_n(M)}^{1/2}, \qquad (s_{ij})_{i,j}\in M_n(\mbox{${\mathcal X}$}).
$$
A morphism between two Hilbert $M$-modules $\mbox{${\mathcal X}$}_1$ and $\mbox{${\mathcal X}$}_2$
is a bounded $M$-module map $u\colon \mbox{${\mathcal X}$}_1\to \mbox{${\mathcal X}$}_2$. A unitary
isomorphism $u\colon \mbox{${\mathcal X}$}_1\to \mbox{${\mathcal X}$}_2$ is an isomorphism preserving
the $M$-valued inner products. Any such map is a complete isometry
(see e.g. \cite[Proposition 8.2.2]{BLM}).
Assume now that $M$ is a von Neumann algebra. As a basic
example, we recall that whenever $p\in M$ is a projection,
then the subspace $pM$ of $M$ is a Hilbert $M$-module, when equipped with
multiplication on the right as the $M$-module action, and
with the $M$-valued inner product $\langle x,y\rangle = x^*y$, for $x,y\in pM$.
We recall the construction of the ultraweak direct sum Hilbert $M$-module.
Let $I$ be an index set and let $\{\mbox{${\mathcal X}$}_i\, :\, i\in I\}$
be a collection of Hilbert $M$-modules indexed by $I$. We let
$\langle\,\cdotp,\,\cdotp\rangle_i$ denote the $M$-valued inner product of $\mbox{${\mathcal X}$}_i$,
for any $i\in I$. Let $\mbox{${\mathcal X}$}$ be the set of all families $s=(s_i)_{i\in I}$,
with $s_i\in\mbox{${\mathcal X}$}_i$, such that
the sums $\sum_{i\in J}\langle s_i,s_i\rangle_i$, for finite $J\subset I$, are uniformly bounded.
Since $\langle s_i,s_i\rangle_i\geq 0$ for each $i\in I$,
the family $(\langle s_i,s_i\rangle_i)_{i\in I}$ is
then summable in the $w^*$-topology of
$M$. Using polarization identity, it is easy to deduce that for any
$s=(s_i)_{i\in I}$ and any $t=(t_i)_{i\in I}$ in $\mbox{${\mathcal X}$}$,
the family $(\langle s_i,t_i\rangle_i)_{i\in I}$ is
summable in the $w^*$-topology of $M$. Then one defines
$$
\langle s,t\rangle\,=\, \sum_{i\in I} \langle s_i,t_i\rangle_i.
$$
It turns out that $\mbox{${\mathcal X}$}$ is a right $M$-module
for the action $(s_i)_{i\in I}\cdot m = (s_i m)_{i\in I}$, and that
equipped with $\langle \,\cdotp,\,\cdotp\rangle$,
$\mbox{${\mathcal X}$}$ is a Hilbert $M$-module. The latter is called the ultraweak direct sum
of $\{\mbox{${\mathcal X}$}_i\, :\, i\in I\}$ and it is denoted by
$$
\mbox{${\mathcal X}$}\,=\,\oplus_{i\in I} \mbox{${\mathcal X}$}_i.
$$
See e.g. \cite[8.5.26]{BLM} for more on this construction.
Let $I$ be an index set, consider $C_I^w(M)$ as a right $M$-module is the obvious way. For any
$(s_i)_{i\in I}$ and $(t_i)_{i\in I}$ in $C_I^w(M)$
set
$$
\langle (s_i)_{i\in I}, (t_i)_{i\in I}\rangle=\sum_{i\in I} s_i^* t_i\,,
$$
where this sum is defined by (\ref{4Sum}). This is an
$M$-valued inner product, which makes $C_I^w(M)$ a Hilbert $M$-module. Moreover the canonical
operator space structure of $C_I^w(M)$ as a Hilbert $M$-module coincides with
the one given by writing $C_I^w(M)=C_I\overline{\otimes} M$, see
\cite[8.2.3]{BLM}. Further we clearly have
$$
C_I^w(M)\,\simeq\,\oplus_{i\in I} M\qquad \hbox{as Hilbert } M\hbox{-modules}.
$$
\begin{proof}[Proof of Theorem \ref{4SS}]
Assume that $M\subset B(K)$ for some Hilbert space $K$.
Let $w\colon F\stackrel{h}{\otimes} E\to M$ be a completely bounded
map. By the Christensen-Sinclair factorization theorem
(see e.g. \cite[Theorem 9.4.4]{ER}),
there exist a Hilbert space $\mbox{${\mathcal H}$}$ and two completely bounded maps
$$
a\colon E\to B(K,\mbox{${\mathcal H}$})
\qquad\hbox{and}\qquad
b\colon F\to B(\mbox{${\mathcal H}$},K)
$$
such that $\cbnorm{a}\cbnorm{b}=\cbnorm{w}$
and $w(y\otimes x)=b(y)a(x)$ for any $x\in E$ and any $y\in F$.
Since $M$ is injective, there exists a unital completely positive projection
$$
\mbox{${\mathcal P}$}si \colon B(K)\longrightarrow M.
$$
As $\mbox{${\mathcal P}$}si$ is valued in $M$, we then
have
\begin{equation}\label{4Facto1}
w(y\otimes x)=\mbox{${\mathcal P}$}si\bigl(b(y)a(x)\bigr),
\qquad x\in E, \ y\in F.
\end{equation}
We introduce
$$
C\,=\,\bigl\{T\in B(K,\mbox{${\mathcal H}$})\,:\, \mbox{${\mathcal P}$}si(T^*T)=0\bigr\}.
$$
For any $k\in K$, $(T,S)\mapsto\langle\mbox{${\mathcal P}$}si(T^*S)k,k\rangle$
is a nonnegative sesquilinear form
on $B(K,\mbox{${\mathcal H}$})$, which vanishes on $\{(T,T)\, :\, T\in C\}$. This implies
(by the Cauchy-Schwarz inequality) that $\langle\mbox{${\mathcal P}$}si(T^*S )k,k\rangle=0$
for any $T\in C$ and any $S\in B(K,\mbox{${\mathcal H}$})$. Consequently,
$$
C\,=\, \bigl\{T\in B(K,\mbox{${\mathcal H}$})\, :\,\mbox{${\mathcal P}$}si(T^*S)=0 \text{ for any } S\in B(K,\mbox{${\mathcal H}$})\bigr\}.
$$
In particular $C$ is a subspace of $B(K,\mbox{${\mathcal H}$})$.
Moreover $\mbox{${\mathcal P}$}si$ is an $M$-bimodule map by \cite{To}, hence
$$
\mbox{${\mathcal P}$}si((Tm)^*(Tm))=\mbox{${\mathcal P}$}si(m^*T^*Tm)=m^*\mbox{${\mathcal P}$}si(T^*T)m,\qquad m\in M,\, T\in B(K,\mbox{${\mathcal H}$}).
$$
Consequently, $C$ is invariant under right multiplication by elements of $M$.
Let $N=B(K,\mbox{${\mathcal H}$})/C$ and let $q\colon B(K,\mbox{${\mathcal H}$})\to N$
be the quotient map. The $M$-invariance of $C$ allows to
define a right $M$-module action on $N$ by
$$
q(T)\cdot m = q(Tm),\qquad m\in M,\ T\in B(K,\mbox{${\mathcal H}$}).
$$
For any $S,\, T\in B(K,\mbox{${\mathcal H}$})$, set
$$
\langle q(T),q(S)\rangle_{N}=\mbox{${\mathcal P}$}si(T^*S).
$$
Then $\langle\,\cdotp,\,\cdotp\rangle_{N}$ is a well-defined, $M$-valued inner product on $N$,
and hence $N$ is a pre-Hilbert $M$-module.
For convenience, we keep the notation $N$ to denote its completion, which is a Hilbert $M$-module.
The factorization property
(\ref{4Facto1}) can now be rephrased as
\begin{equation}\label{4Facto2}
w(y\otimes x)=\bigl\langle q(b(y)^*),q(a(x))\bigr\rangle_N,
\qquad x\in E, \ y\in F.
\end{equation}
Recall from Paschke's fundamental paper \cite{Pa}
that the dual of $N$ (in the Hilbert $M$-module sense)
is the space
$$
N' = \bigl\{\phi \colon N \to M \, : \, \phi \mbox{ is a bounded } M\mbox{-module map}\bigr\}.
$$
Equip $N'$ with the linear structure obtained with usual addition of maps and scalar
multiplication given by $(\lambda\cdot\phi)(t) =\overline{\lambda}\phi(t)$ for any
$\phi\in N'$, $\lambda\in\ensuremath{\mathbb{C}}$, and $t\in N$.
Then $N'$ is a right $M$-module for the action
given by
$$
(\phi\cdot m)(t) = m^*\phi(t),
\qquad \phi\in N',\ m\in M,\ t\in N.
$$
Let $\kappa\colon N\to N'$ be defined by
$\kappa(s)\colon t\in N \mapsto \langle s,t\rangle\in M$.
Then $\kappa$ is a linear map. By \cite[Theorem 3.2]{Pa},
there exists
an $M$-valued inner product $\langle\,\cdotp ,
\,\cdotp\rangle_{N'}$ on $N'$ such
that
\begin{equation}\label{4Kappa1}
\langle \kappa(s),\kappa(t)\rangle_{N'}\, =\, \langle s,t\rangle_N,
\qquad s,t\in N,
\end{equation}
and such that $N'$ is selfdual (see \cite[Section 3]{Pa}
for the definition). Then
by \cite[Theorem 3.12]{Pa}, $N'$ is unitarily
isomorphic to an ultraweak direct sum $\displaystyle{\oplus_{i\in I}} p_i M$,
where $(p_i)_{i\in I}$ is a family of non-zero projections in $M$.
Summarizing, we then have
\begin{equation}\label{4Kappa2}
N\,\stackrel{\kappa}{\hookrightarrow}\,
N'\,\simeq\, \oplus_i p_i M \,\subset\, \oplus_i M \,\simeq C_I^w(M).
\end{equation}
Note that by (\ref{4Kappa1}), $\kappa$ is a complete isometry.
We claim that the quotient map
$q \colon B(K,\mbox{${\mathcal H}$})\to N$ is completely contractive, when $N$
is equipped with its Hilbert $M$-module operator space structure.
Indeed, $\mbox{${\mathcal P}$}si$ is completely contractive
hence, for any $(S_{ij})_{i,j}\in M_n(B(K,\mbox{${\mathcal H}$}))$, we have
\begin{align*}
\bignorm{\bigl(q(S_{ij})\bigr)_{i,j}}^2_{M_n(N)}\,
& =\,
\mbox{${\mathcal B}$}iggnorm{\left(\sum_{k=1}^n \mbox{${\mathcal P}$}si(S_{ki}^* S_{kj}) \right)_{i,j}}_{M_n(M)}\\
& \leq \,
\mbox{${\mathcal B}$}iggnorm{\left(\sum_{k=1}^n S_{ki}^* S_{kj}\right)_{i,j}}_{M_n(B(K))}\\
& = \,
\bignorm{\bigl((S_{ij})_{i,j}^*(S_{ij})_{i,j}\bigr)}_{M_n(B(K))}\\
&=\, \bignorm{(S_{ij})_{i,j}}^2_{M_n(B(K,\tiny{\mbox{${\mathcal H}$}}))}.
\end{align*}
Using (\ref{4Kappa2}), we define $\alpha\colon E\to C_I^w(M)$
by $\alpha(x)=\kappa\bigl(q(a(x))\bigr)$. It follows from above that
$\alpha$ is completely bounded, with $\cbnorm{\alpha}\leq\cbnorm{a}$.
Likewise we define $\beta\colon F\to R_I^w(M)$
by $\beta(y)=\bigl[\kappa\bigl(q(b(y)^*)\bigr)]^*$. Then
$\beta$ is completely bounded, with $\cbnorm{\beta}\leq\cbnorm{b}$.
Consequently, $\cbnorm{\alpha}\cbnorm{\beta} \leq \cbnorm{w}$.
In accordance with Lemma \ref{4CI-CB}, let
$(\alpha_i)_{i\in I}\in C^w_I\bigl(CB(E,M)\bigr)$
and $(\beta_i)_{i\in I}\in R^w_I\bigl(CB(F,M)\bigr)$ be corresponding
to $\alpha$ and $\beta$, respectively.
Then by (\ref{4Facto2}) and (\ref{4Kappa2}), we have
$$
w(y\otimes x)\,=\,\langle\beta(y)^*,\alpha(x)\rangle_{N'}
\,=\, \bigl\langle (\beta_i(y)^*)_{i\in I},(\alpha_i(x))_{i\in I}
\bigr\rangle_{C_I^w(M)}\,
=\,
\sum_{i\in I} \beta_i(y)\alpha_i(x)
$$
for any $x\in E$ and $y\in F$.
Once this identity is established, the inequality $\cbnorm{w}\leq
\cbnorm{\alpha}\cbnorm{\beta}$ is a classical fact.
\end{proof}
\section{Factorization of modular operators}\label{6Facto}
Consider $H_1,H_2,H_3$ and $M_1,M_2,M_3$, $M_i\subset B(H_i)$, as in Sections \ref{3OM} and \ref{4MOD}.
Using the Hilbert space identification
$S^2(H_1,H_2) \simeq\overline{H_1}\stackrel{2}{\otimes} H_2$, Lemma
\ref{1Opp} and (\ref{1Normal}), we have
von Neumann algebra identifications
$$
B(H_1)\overline{\otimes} B(H_2)^{op}\simeq B(H_1)\overline{\otimes} B(\overline{H_2})
\simeq B(H_1\stackrel{2}{\otimes}\overline{H_2})\simeq B(S^2(H_1,H_2))^{op}
$$
and hence a von Neumann algebra embedding
$$
\tau^1\colon M_1\overline{\otimes} M_2^{op}\,\hookrightarrow\,
B(S^2(H_1,H_2))^{op}.
$$
Unraveling the above identifications, we see that
\begin{equation}\label{6Tau1}
\bigl[\tau^1(R\otimes S)\bigr](x)\,=\, SxR,\qquad
x\in S^2(H_1,H_2),\, R\in M_1,\,S\in M_2.
\end{equation}
Further this property determines $\tau^1$.
Likewise we may consider
$$
\tau^3\colon M_2^{op}\overline{\otimes} M_3\hookrightarrow\,
B(S^2(H_2,H_3)),
$$
the (necessarily unique) von Neumann algebra embedding satisfying
\begin{equation}\label{6Tau3}
\bigl[\tau^3(S\otimes T)\bigr](y) = TyS,\qquad y\in S^2(H_2,H_3),\, T\in M_3,\,S\in M_2.
\end{equation}
For convenience, for any $a\in M_1\overline{\otimes} M_2^{op}$ and any
$b\in M_2^{op}\overline{\otimes} M_3$,
we write $\tau^1_a$ instead of $\tau^1(a)$ and $\tau^3_b$ instead of $\tau^3(b)$.
The main objective of this section is to prove the following description of
modular completely bounded $S^1$-multipliers.
\begin{theorem}\label{6Factorization} Assume that $M_2$ is injective.
\begin{itemize}
\item [(a)] Let $I$ be an index set and let
$$
A=(a_i)_{i\in I}\in R_{I}^{w}\bigl(M_1\overline{\otimes} M_2^{op}\bigr)
\qquad\hbox{and}\qquad
B=(b_i)_{i\in I}\in C_{I}^{w}\bigl(M_2^{op}\overline{\otimes} M_3\bigr).
$$
For any $x\in S^2(H_1,H_2)$ and any $y\in S^2(H_2,H_3)$,
$$
\sum_{i\in I} \,\bignorm{\tau^3_{b_i}(y)\tau^1_{a_i}(x)}_1\, <\,\infty.
$$
Let $u_{A,B}\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to S^1(H_1,H_3)$ be the resulting mapping
defined by
$$
u_{A,B}(y,x)\,=\, \sum_{i\in I} \tau^3_{b_i}(y)\tau^1_{a_i}(x),\qquad
x\in S^2(H_1,H_2),\, y\in S^2(H_2,H_3).
$$
Then $\widetilde{u}_{A,B}\in CB_{(M'_3,M'_2,M'_1)}\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)$ and
\begin{equation}\label{6cbn}
\cbnorm{\widetilde{u}_{A,B}}\,\leq\, \norm{A}_{R_{I}^{w}}\,\norm{B}_{C_{I}^{w}}.
\end{equation}
\item [(b)] Conversely, let $u\colon S^2(H_2,H_3)\times
S^2(H_1,H_2)\to S^1(H_1,H_3)$ be a bounded bilinear map
and assume that $\widetilde{u}$ belongs to
$CB_{(M'_3,M'_2,M'_1)}\bigl(\mbox{${\mathcal G}$}amma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)$. Then there
exist an index set $I$ and two families
$$
A=(a_i)_{i\in I}\in R_{I}^{w}\bigl(M_1\overline{\otimes} M_2^{op}\bigr)
\qquad\hbox{and}\qquad
B=(b_i)_{i\in I}\in C_{I}^{w}\bigl(M_2^{op}\overline{\otimes} M_3\bigr)
$$
such that $u=u_{A,B}$ and $\norm{A}_{R_{I}^{w}}\,\norm{B}_{C_{I}^{w}} = \cbnorm{u}$.
\end{itemize}
\end{theorem}
We will establish two intermediate lemmas before proceeding to the
proof. We recall the mapping $\tau$ from (\ref{2Sigma1}). In the sequel
we use the notation $1$
for the unit of either $B(H_1)$ or $B(H_3)$. Thus for any
$a\in M_1\overline{\otimes} M_2^{op}$, we may consider
$a\otimes 1\in M_1\overline{\otimes} M_2^{op}\overline{\otimes} M_3$.
Likewise, for any
$b\in M_2^{op}\overline{\otimes} M_3$,
we may consider $1\otimes b\in M_1\overline{\otimes} M_2^{op}\overline{\otimes} M_3$. The following is a generalization
of \cite[Lemma 20]{CLS}.
\begin{lemma}\label{6Magic1}
For any $a\in M_1\overline{\otimes} M_2^{op}$, for any $b\in M_2^{op}\overline{\otimes} M_3$,
and for any
$x\in S^2(H_1,H_2)$ and $y\in S^2(H_2,H_3)$, we have
\begin{equation}\label{6Magic11}
\tau_{(a\otimes 1)(1\otimes b)} (y,x)
\,=\,
\tau^3_{b}(y)\tau^1_{a}(x).
\end{equation}
\end{lemma}
\begin{proof}
We fix $x\in S^2(H_1,H_2)$, $y\in S^2(H_2,H_3)$, $\eta_1\in H_1$ and $\xi_3\in H_3$.
Let $R\in M_1, S,S'\in M_2^{op}, T\in M_3$.
Then $(R\otimes S\otimes 1)(1\otimes S'\otimes T)
= R\otimes S'S\otimes T$. Hence by (\ref{2Sigma2}), (\ref{6Tau1}) and (\ref{6Tau3}), we have
$$
\tau_{(R\otimes S\otimes 1)(1\otimes S'\otimes T)}(y,x) \,=\, TyS'SxR\,=\, \tau^3_{S'\otimes T}(y)
\tau^1_{R\otimes S}(x).
$$
Hence the result holds true when $a$ and $b$ are elementary tensors. By linearity,
this implies (\ref{6Magic11}) in the case when $a$ and $b$ belong
to the algebraic tensor products $M_1\otimes M_2^{op}$ and
$M_2^{op}\otimes M_3$, respectively.
We now use a limit process. Let $a\in M_1\overline{\otimes} M_2^{op}$
and $b\in M_2^{op}\overline{\otimes} M_3$ be arbitrary. Let
$(a_s)_s$ be a net in $M_1\otimes M_2^{op}$ converging to
$a$ in the $w^*$-topology of $M_1\overline{\otimes} M_2^{op}$
and let $(b_t)_t$ be a net in $M_2^{op}\otimes M_3$ converging to
$b$ in the $w^*$-topology of $M_2^{op}\overline{\otimes} M_3$.
For any $s,t$, we have
\begin{equation}\label{6st}
\tau_{(a_s\otimes 1)(1\otimes b_t)} (y,x)
=\tau^3_{b_t}(y)\tau^1_{a_s}(x)
\end{equation}
by the preceding paragraph.
On the one hand, since the
product is separately $w^*$-continuous on von Neumann algebras,
\begin{equation}\label{6asbt}
(a\otimes 1)(1\otimes b) \,=\,w^*\hbox{-}\lim_s\lim_t (a_s\otimes 1)(1\otimes b_t)
\end{equation}
in $M_1\overline{\otimes} M_2^{op}\overline{\otimes} M_3$.
Since $\tau$ is $w^*$-continuous, this implies that
$$
\bigl\langle \bigl[\tau_{(a\otimes 1)(1\otimes b)} (y,x)\bigr](\eta_1),\xi_3\bigr\rangle\,=\,
\lim_s\lim_t \bigl\langle \bigl[\tau_{(a_s\otimes 1)(1\otimes b_t)} (y,x)\bigr](\eta_1),\xi_3\bigr\rangle.
$$
On the other hand, by the $w^*$-continuity of $\tau^1$ and $\tau^3$,
$\tau^1_{a_s}\to \tau^1_{a}$ in the $w^*$-topology of $B(S^2(H_1,H_2))$
and $\tau^3_{b_t}\to \tau^3_{b}$ in the $w^*$-topology of $B(S^2(H_2,H_3))$.
Consequently, $\tau^1_{a_s}(x)\to \tau^1_{a}(x)$ in the weak topology of $S^2(H_1,H_2)$ whereas
$\tau^3_{b_t}(y)\to \tau^3_{b}(y)$ in the weak topology of $S^2(H_2,H_3)$. This readily implies that
$$
\bigl\langle \bigl[\tau^3_{b}(y)\tau^1_{a}(x)\bigr](\eta_1),\xi_3\bigr\rangle\,=\,
\lim_s\lim_t
\bigl\langle \bigl[\tau^3_{b_t}(y)\tau^1_{a_s}(x)\bigr](\eta_1),\xi_3\bigr\rangle.
$$
Combining these two limit results with (\ref{6st}), we deduce the formula
(\ref{6Magic11}).
\end{proof}
It follows from Lemma \ref{1Injective1} (a) that
we have $w^*$-continuous and completely isometric
identifications
\begin{equation}\label{6a-alpha}
M_1\overline{\otimes} M_2^{op}\,\simeq\, CB(M_{1*},M_2^{op})
\qquad\hbox{and}\qquad
M_2^{op}\overline{\otimes} M_3\,\simeq\, CB(M_{3*},M_2^{op}).
\end{equation}
Likewise,
$M_1\overline{\otimes} M_2^{op}\overline{\otimes}M_3
\,\simeq\,CB((M_1\overline{\otimes}M_3)_* , M_{2}^{op})$ hence by
\cite[Theorem 7.2.4]{ER},
we have a $w^*$-continuous and completely isometric
identification
\begin{equation}\label{6a-alpha-1}
M_1\overline{\otimes} M_2^{op}\overline{\otimes}M_3
\,\simeq\, CB\bigl(M_{1*}\stackrel{\frown}{\otimes} M_{3*}, M_{2}^{op}\bigr).
\end{equation}
\begin{lemma}\label{6Magic2}
Assume that $M_2$ is injective.
Let $a\in M_1\overline{\otimes} M_2^{op}$ and $b\in M_2^{op}\overline{\otimes} M_3$.
Let $\alpha\in CB(M_{1*}, M_2^{op})$ and $\beta\in CB(M_{3*}, M_2^{op})$
be corresponding to $a$ and $b$, respectively, through the identifications (\ref{6a-alpha}).
Let
$$
\sigma_{a,b}\colon M_{1*}\stackrel{\frown}{\otimes} M_{3*}\longrightarrow M_2^{op}
$$
be the completely bounded map
corresponding to $(a\otimes 1)(1\otimes b)$
through the identification (\ref{6a-alpha-1}). Then we have
\begin{equation}\label{6Magic22}
\sigma_{a,b}(v_1\otimes v_3)\,=\,\alpha(v_1)\beta(v_3)
\end{equation}
for any $v_1\in M_{1*}$ and any $v_3\in M_{3*}$.
\end{lemma}
\begin{proof}
We fix $v_1\in M_{1*}$ and $v_3\in M_{3*}$.
Let $R\in M_1$, $S,S'\in M_2^{op}$
and $T\in M_3$, and assume first that $a=R\otimes S$ and
$b=S'\otimes T$. Then $\alpha(v_1)= \langle R,v_1\rangle_{M_1,M_{1*}} S$ and $\beta(v_3)= \langle T, v_3
\rangle_{M_3,M_{3*}} S'$. Hence
$$
\alpha(v_1)\beta(v_3) = \langle R,v_1\rangle_{M_1,M_{1*}} \langle T, v_3
\rangle_{M_3,M_{3*}} S'S.
$$
Since $(a\otimes 1)(1\otimes b) = R\otimes S'S\otimes T$,
$\sigma_{a,b}(v_1\otimes v_3)$ is also equal to
$\langle R, v_1\rangle_{M_1,M_{1*}}
\langle T, v_3
\rangle_{M_3,M_{3*}} S'S$. This proves the result
in this special case. By linearity, we deduce that (\ref{6Magic22})
holds true when $a$ and $b$ belong
to the algebraic tensor products $M_1\otimes M_2^{op}$ and
$M_2^{op}\otimes M_3$.
As in the proof of the preceding lemma, we deduce the
general case by a limit process.
Let $a\in M_1\overline{\otimes} M_2^{op}$
and $b\in M_2^{op}\overline{\otimes} M_3$ be arbitrary. Let
$(a_s)_s$ be a net in $M_1\otimes M_2^{op}$ converging to
$a$ in the $w^*$-topology of $M_1\overline{\otimes} M_2^{op}$
and let $(b_t)_t$ be a net in $M_2^{op}\otimes M_3$ converging to
$b$ in the $w^*$-topology of $M_2^{op}\overline{\otimes} M_3$. Then
for any $s,t$, let $\alpha_s\in CB(M_{1*}, M_2^{op})$ and $\beta_t\in CB(M_{3*}, M_2^{op})$
be corresponding to $a_s$ and $b_t$, respectively.
By the
preceding paragraph,
$$
\sigma_{a_s,b_t}(v_1\otimes v_3)\,=\,\alpha_s(v_1)\beta_t(v_3)
$$
for any $s,t$.
Since the identifications (\ref{6a-alpha}) are $w^*$-continuous,
$\alpha_s(v_1)\to\alpha(v_1)$ and $\beta_t(v_3)\to\beta(v_3)$
in the $w^*$-topology of $M_2^{op}$. Since the
product is separately $w^*$-continuous on von Neumann algebras, this implies
that
$$
\alpha(v_1)\beta(v_3)\,=\, w^*\hbox{-}\lim_s\lim_t \alpha_s(v_1)\beta_t(v_3).
$$
Next since the identification (\ref{6a-alpha-1}) is $w^*$-continuous, it follows from
(\ref{6asbt}) that
$$
\sigma_{a,b}(v_1\otimes v_3)\,=\, w^*\hbox{-}\lim_s\lim_t
\sigma_{a_s,b_t}(v_1\otimes v_3).
$$
The identity (\ref{6Magic22}) follows at once.
\end{proof}
Note that if $M_2$ is injective, then by Lemma \ref{1Injective1} (b)
the identification (\ref{6a-alpha-1}) restricts to an identification
between $M_2^{op}\overline{\otimes}\bigl(M_1\stackrel{w^*h}{\otimes} M_3\bigr)$
and $CB\bigl(M_{1*}\stackrel{h}{\otimes} M_{3*}, M_{2}^{op}\bigr)$.
Combining with (\ref{3Ident1}), we deduce a $w^*$-continuous and completely isometric
identification
\begin{equation}\label{6a-alpha-2}
CB_{(M'_3,M'_2,M'_1)}\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)
\,\simeq\, CB\bigl(M_{1*}\stackrel{h}{\otimes} M_{3*}, M_{2}^{op}\bigr).
\end{equation}
This will be used in the proof below.
\begin{proof}[Proof of Theorem \ref{6Factorization}]
\
(a): Consider $x\in S^2(H_1,H_2)$ and $y\in S^2(H_2,H_3)$. We have
\begin{align*}
\sum_{i\in I} \bignorm{\tau^3_{b_i}(y)\tau^1_{a_i}(x)}_1\,
& \leq\, \sum_{i\in I} \bignorm{\tau^3_{b_i}(y)}_2 \bignorm{\tau^1_{a_i}(x)}_2\\
&\leq\,\Bigl(\sum_{i\in I} \bignorm{\tau^3_{b_i}(y)}_2^2\Bigr)^{\frac12} \Bigl(\sum_{i\in I}
\bignorm{\tau^1_{a_i}(x)}_2^2\Bigr)^{\frac12},
\end{align*}
by the Cauchy-Schwarz inequality.
Let $J\subset I$ be a finite subset.
Since $\tau^3$ is a $*$-homomorphism, we have
\begin{align*}
\sum_{i\in J} \bignorm{\tau^3_{b_i}(y)}_2^2\,
& =\, \sum_{i\in J} \bigl\langle
{\tau^3_{b_i}}^*\tau^3_{b_i}(y),y\bigr\rangle_{S^2}\\
& =\,\Bigl\langle \tau^3\Bigl(\sum_{i\in J} b_i^*b_i\Bigr) (y),y\Bigr\rangle_{S^2}\\
& \leq\, \Bignorm{\sum_{i\in J} b_i^*b_i}\,\norm{y}^2_2\\
&\leq\, \norm{B}_{C_{I}^{w}}^2\,\,\norm{y}^2_2.
\end{align*}
Since $J$ is arbitrary, this implies that
\begin{equation}\label{6Square1}
\sum_{i\in I} \bignorm{\tau^3_{b_i}(y)}_2^2\,
\leq\, \norm{B}_{C_{I}^{w}}^2\,\,\norm{y}^2_2.
\end{equation}
Likewise,
\begin{equation}\label{6Square2}
\sum_{i\in I} \bignorm{\tau^1_{a_i}(x)}_2^2
\,\leq\,
\norm{A}_{R_{I}^{w}}^2\,\,\norm{x}^2_2.
\end{equation}
This implies
$$
\sum_{i\in I} \,\bignorm{\tau^3_{b_i}(y)\tau^1_{a_i}(x)}_1\, \leq \,
\norm{A}_{R_{I}^{w}}
\norm{B}_{C_{I}^{w}}\norm{x}_2\norm{y}_2,
$$
which allows the definition of $u_{A,B}$.
Let $n\geq 1$ be an integer, let $x_1,\ldots, x_n \in
S^2(H_1,H_2)$ and let $y_1,\ldots,y_n \in S^2(H_2,H_3)$.
In the space $S^1(\ell^2_n(H_1),\ell^2_n(H_3))$, we have
the equality
$$
\bigl[u_{A,B}(y_k,x_l)\bigr]_{1\leq k,l\leq n}
= \sum_{i\in I} \bigl[\tau^3_{b_i}(y_k)\tau^1_{a_i}(x_l)\bigr]_{1\leq k,l\leq n}.
$$
Further for any $i\in I$, we have
$$
\bignorm{\bigl[\tau^3_{b_i}(y_k)\tau^1_{a_i}(x_l)\bigr]_{1\leq k,l\leq n}}_{
S^1(\ell^2_n(H_1),\ell^2_n(H_3))}\,\leq\,
\Bigl(\sum_{k=1}^n\norm{\tau^3_{b_i}(y_k)}_2^2\Bigr)^{\frac12}
\Bigl(\sum_{l=1}^n\norm{\tau^1_{a_i}(x_l)}_2^2\Bigr)^{\frac12}.
$$
Consequently, using Cauchy-Schwarz,
\begin{align*}
\bignorm{\bigl[u_{A,B}(y_k,x_l)\bigr]_{1\leq k,l\leq n}}_{S^1(\ell^2_n(H_1),\ell^2_n(H_3))}
\,& \leq\, \sum_{i\in I} \Bigl(\sum_{k=1}^n\norm{\tau^3_{b_i}(y_k)}_2^2\Bigr)^{\frac12}
\Bigl(\sum_{l=1}^n\norm{\tau^1_{a_i}(x_l)}_2^2\Bigr)^{\frac12}\\
&\leq\,\Bigl(\sum_{i\in I} \sum_{k=1}^n\norm{\tau^3_{b_i}(y_k)}_2^2\Bigr)^{\frac12}
\Bigl(\sum_{i\in I}\sum_{l=1}^n\norm{\tau^1_{a_i}(x_l)}_2^2\Bigr)^{\frac12}.
\end{align*}
It therefore follows from (\ref{6Square1}) and (\ref{6Square2}) that
$$
\bignorm{\bigl[u_{A,B}(y_k,x_l)\bigr]_{1\leq k,l\leq n}}_{S^1(\ell^2_n(H_1),\ell^2_n(H_3))}
\,\leq\,\norm{A}_{R_{I}^{w}}\norm{B}_{C_{I}^{w}}\Bigl(\sum_{k=1}^n\norm{y_k}_2^2\Bigr)^{\frac12}
\Bigl(\sum_{l=1}^n\norm{x_l}_2^2\Bigr)^{\frac12}.
$$
According to Lemma \ref{2CB}, this shows that $\widetilde{u}_{A,B} $ is completely bounded
and that (\ref{6cbn}) holds.
Again let $x\in S^2(H_1,H_2)$ and $y\in S^2(H_2,H_3)$.
Using a simple approximation process,
one can check that for any
$R\in M_1'$, $S\in M_2'$ and $T\in M_3'$, we have
$$
\tau^1_a(xR)=\tau^1_a(x)R,\quad
\tau^1_a(Sx)=S\tau^1_a(x),\quad
\tau^3_b(yS)= \tau^3_b(y)S\quad\hbox{and}\quad
\tau^3_b(Ty)=T\tau^3_b(y)
$$
whenever $a\in M_1\overline{\otimes} M_2^{op}$ and $b\in M_2^{op}\overline{\otimes} M_3$.
This implies that $(y,x)\mapsto
\tau^3_b(y)\tau^1_a(x)$ is an $(M_3',M_2',M_1')$-module map for any
$a\in M_1\overline{\otimes} M_2^{op}$ and $b\in M_2^{op}\overline{\otimes} M_3$.
This readily implies that $u_{A,B}$ is an $(M_3',M_2',M_1')$-module map.
(b): Assume that $\widetilde{u}\in CB_{(M'_3,M'_2,M'_1)}
\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)$. Let
$$
\sigma\colon M_{1*}\stackrel{h}{\otimes} M_{3*}\longrightarrow M_2^{op}
$$
be the completely bounded map corresponding to
$\widetilde{u}$ through the identification (\ref{6a-alpha-2}). Since $M_2$ is injective, we may apply
Theorem \ref{4SS} to $\sigma$. We obtain the existence of an index set $I$
and two families $(\alpha_i)_{i\in I}\in R_{I}^{w}\bigl(CB(M_{1*}, M_2^{op})\bigr)$
and $(\beta_i)_{i\in I}\in C_{I}^{w}\bigl(CB(M_{3*}, M_2^{op})\bigr)$ such that
$$
\sigma(v_1\otimes v_3)\,=\,\sum_{i\in I} \alpha_i(v_1)\beta_i(v_3),
\qquad v_1\in M_{1*},\ v_3\in M_{3*}.
$$
For any $i\in I$, we let $a_i\in M_1\overline{\otimes} M_2^{op}$ and
$b_i\in M^{op}_2\overline{\otimes} M_3$ be corresponding to $\alpha_i$ and
$\beta_i$, respectively, through the identifications (\ref{6a-alpha}). Then we set
$A=(a_i)_{i\in I}$ and $B=(b_i)_{i\in I}$. By Theorem \ref{4SS}, we may assume that
$\norm{A}_{R_{I}^{w}}\norm{B}_{C_{I}^{w}} = \cbnorm{u}$.
For any finite subset $J\subset I$, we may define
$$
u_J\colon S^2(H_2,H_3)\times S^2(H_1,H_2)\to S^1(H_1,H_3)
\qquad\hbox{and}\qquad
\sigma_J\colon M_{1*}\stackrel{h}{\otimes} M_{3*}\to M_2^{op}
$$
by
$$
u_J(y,x)\,=\, \sum_{i\in J} \tau^3_{b_i}(y)\tau^1_{a_i}(x),
\qquad x\in S^2(H_1,H_2),\ y\in S^2(H_2,H_3),
$$
and
$$
\sigma_J(v_1\otimes v_3)\,=\,\sum_{i\in J} \alpha_i(v_1)\beta_i(v_3),
\qquad v_1\in M_{1*},\ v_3\in M_{3*}.
$$
It follows from Lemmas \ref{6Magic1} and \ref{6Magic2} that for any $i$,
the mapping $(v_1\otimes v_3)\mapsto \alpha_i(v_1)\beta_i(v_3)$ corresponds to
the mapping $y\otimes x\mapsto \tau^3_{b_i}(y)\tau^1_{a_i}(x)$ through the identification
(\ref{6a-alpha-2}). By linearity we deduce that $\sigma_J$ corresponds to
$\widetilde{u}_J$ through (\ref{6a-alpha-2}).
We observe that by the easy (and well-known)
converse to Theorem \ref{4SS}, we have
$$
\cbnorm{\sigma_J}\leq \bignorm{(\alpha_i)_{i\in J}}_{R_{J}^{w}(CB(M_{1*}, M_2^{op}))}
\bignorm{(\beta_i)_{i\in J}}_{C_{J}^{w}(CB(M_{3*}, M_2^{op}))}.
$$
This implies the following uniform boundedness,
\begin{equation}\label{6Uniform}
\forall\, J\subset I\ \hbox{finite},\qquad
\cbnorm{\sigma_J}\,\leq\, \norm{A}_{R_{I}^{w}}\norm{B}_{C_{I}^{w}}.
\end{equation}
In the sequel we consider the set of finite subsets of $I$ as directed by inclusion.
We observe that for any $v_1\in M_{1*}$ and $v_3\in M_{3*}$, $\sigma_J(v_1
\otimes v_3)\to \sigma(v_1\otimes v_3)$ in
the $w^*$-topology of $M_2^{op}$. Using the uniform boundedness (\ref{6Uniform}), this implies
that $\sigma_J\to \sigma$ in the point-$w^*$-topology of $CB\bigl(
M_{1*}\stackrel{h}{\otimes} M_{3*},M_2^{op}\bigr)$. Applying (\ref{6Uniform}) again, we deduce
that $\sigma_J\to \sigma$ in the $w^*$-topology of $CB\bigl(
M_{1*}\stackrel{h}{\otimes} M_{3*},M_2^{op}\bigr)$. Since the identification (\ref{6a-alpha-2}) is
a $w^*$-continuous one, this implies that $\widetilde{u}_J\to \widetilde{u}$
in the $w^*$-topology of $CB\bigl(\Gamma(H_1,H_2,H_3), S^1(H_1,H_3)\bigr)$.
Let $x\in S^2(H_1,H_2)$ and $y\in S^2(H_2,H_3)$. The above implies
that $u_J(y,x)\to u(y,x)$ in the $w^*$-topology
of $S^1(H_1,H_3)$. However by part (a) of the theorem,
$$
u_J(y,x)\,\longrightarrow\, \sum_{i\in I} \tau^3_{b_i}(y)\tau^1_{a_i}(x)
$$
in the norm topology of $S^1(H_1,H_3)$. This shows that $u(y,x)$
is equal to this sum, and proves the result.
\end{proof}
\begin{remark} It is clear from its proof that
part (a) of Theorem \ref{6Factorization} is true without assuming
that $M_2$ is injective.
The injectivity assumption in Theorem \ref{4SS}
is necessary, see \cite[Theorem 5.3]{SS}, however
we do not know if it is necessary in part (b)
of Theorem \ref{6Factorization}.
\end{remark}
The next corollary follows from the above proof.
\begin{corollary}\label{6Facto-varphi}
Assume that $M_2$ is injective and let $\varphi\in M_1\overline{\otimes} M_2^{op}\overline{\otimes} M_3$.
Then $\tau_\varphi$ is a completely bounded $S^1$-multiplier if and only if there
exist an index set $I$ and families
$$
(a_i)_{i\in I}\in R_{I}^{w}\bigl(M_1\overline{\otimes} M_2^{op}\bigr)
\qquad\hbox{and}\qquad
(b_i)_{i\in I}\in C_{I}^{w}\bigl(M_2^{op}\overline{\otimes} M_3\bigr)
$$
such that
$$
\varphi\,=\, \sum_{i\in I} (a_i\otimes 1)(1\otimes b_i),
$$
where the convergence is taken in the $w^*$-topology.
Further
$$
\cbnorm{\tau_\varphi}\,=\,\inf\Bigl\{
\bignorm{(a_i)_{i\in I}}_{R_I^{w}}\bignorm{(b_i)_{i\in I}}_{C_I^{w}}\Bigr\},
$$
where the infimum runs over all possible families
$(a_i)_{i\in I}$ and $(b_i)_{i\in I}$ providing such a factorization of
$\varphi$.
\end{corollary}
\begin{remark}\label{6Recover}
\
(a)$\,$
Assume that $H_2=\ensuremath{\mathbb{C}}$ is trivial. Then
$$
\Gamma(H_1,\ensuremath{\mathbb{C}},H_3) = \{H_3\}_c\stackrel{\frown}{\otimes} \{\overline{H_1}\}_r
\simeq S^1(H_1,H_3),
$$
by (\ref{1RC}). Hence $CB\bigl(\Gamma(H_1,\ensuremath{\mathbb{C}},H_3),S^1(H_1,H_3)\bigr)\simeq CB(S^1(H_1,H_3))$
and in this identification,
$CB_{(M_3',\ensuremath{\mathbb{C}},M_1')}\bigl(\Gamma(H_1,\ensuremath{\mathbb{C}},H_3),S^1(H_1,H_3)\bigr)$ coincides
with $CB_{(M_3',M_1')}(S^1(H_1,H_3))$, the space of all $(M_3',M_1')$-bimodule
completely bounded maps from $S^1(H_1,H_3)$ into itself.
Further $\tau^1\colon M_1\hookrightarrow
B(\overline{H_1})^{op}\simeq B(H_1)$
and $\tau^3\colon M_3\hookrightarrow B(H_3)$
coincide with the canonical embeddings.
Hence in this case, Theorem \ref{6Factorization}
reduces to Theorem \ref{Haag} (see also (\ref{Haag+})).
(b)$\,$ A tensor product reformulation of Corollary \ref{6Facto-varphi}
is that the bilinear mapping $(a,b)\mapsto (a\otimes 1)(1\otimes b)$ extends to a
complete quotient map
$$
(M_1\overline{\otimes} M_2^{op})
\stackrel{w^*h}{\otimes}
(M_2^{op}\overline{\otimes} M_3)\longrightarrow
M_2^{op}\overline{\otimes}\bigl(
M_1 \stackrel{w^*h}{\otimes}
M_3\bigr).
$$
\end{remark}
We conclude this paper by considering the special case of Schur multipliers.
Our presentation follows \cite{CLS}. We let $(\Omega_1,\mu_1)$, $(\Omega_2,\mu_2)$
and $(\Omega_3,\mu_3)$ be three separable
measure spaces. (The separability assumption is not essential but avoids
technical measurability issues.) Recall the classical fact that
to any $f\in L^2(\Omega_1\times\Omega_2)$, one may associate
an operator $x_f\in S^2(L^2(\Omega_1),L^2(\Omega_2))$
given by
$$
x_f(\eta) =\int_{\Omega_1} f(t_1,\,\cdotp)\eta(t_1)\,d\mu_1(t_1),\qquad \eta\in L^2(\Omega_1),
$$
and the mapping $f\mapsto x_f$ is a unitary which yields a Hilbert space identification
$$
L^2(\Omega_1\times\Omega_2)
\,\simeq\,
S^2\bigl(L^2(\Omega_1),L^2(\Omega_2)\bigr).
$$
Of course the same holds with the pairs $(\Omega_2,\Omega_3)$ and $(\Omega_1,\Omega_3)$.
For any $g\in L^2(\Omega_2\times\Omega_3)$ (resp.
$h\in L^2(\Omega_1\times\Omega_3)$) we let $y_g\in
S^2\bigl(L^2(\Omega_2),L^2(\Omega_3)\bigr)$ (resp. $z_h\in S^2\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)$) be the
corresponding Hilbert-Schmidt operator.
To any $\varphi\in L^\infty(\Omega_1\times\Omega_2\times\Omega_3)$, one may associate a
bounded bilinear map
$$
\Lambda_\varphi\colon S^2\bigl(L^2(\Omega_2),L^2(\Omega_3)\bigr)\times
S^2\bigl(L^2(\Omega_1),L^2(\Omega_2)\bigr)
\longrightarrow S^2\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)
$$
given for any $f\in L^2(\Omega_1\times\Omega_2)$ and $g\in L^2(\Omega_2\times\Omega_3)$ by
$$
\Lambda_\varphi(y_g,x_f)=z_h
$$
where, for almost every $(t_1,t_3)\in \Omega_1\times\Omega_3$,
$$
h(t_1,t_3)\,=\,\int_{\Omega_2}\varphi(t_1,t_2,t_3)f(t_1,t_2)g(t_2,t_3)\,d\mu_2(t_2)\,.
$$
We refer to \cite[Theorem 3.1]{JTT} or \cite[Subsection 3.2]{CLS} for the proof, and also for the fact that
$$
\norm{\Lambda_\varphi\colon S^2\times S^2\longrightarrow S^2}\, =\, \norm{\varphi}_\infty.
$$
Bilinear maps of this form will be called {\bf bilinear Schur multipliers} in the sequel.
Since
$$
S^2\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)\,\subset \,
B\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)
$$
contractively, we may regard any bilinear Schur multiplier
as valued in $B\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)$. Then it follows from
the proof of \cite[Corollary 10]{CLS} that
\begin{equation}\label{5Norm}
\bignorm{\Lambda_\varphi\colon S^2\times S^2\longrightarrow B
\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)}\, =\, \norm{\varphi}_\infty.
\end{equation}
For any $i=1,2,3$, let us regard
\begin{equation}\label{5qi*}
L^\infty(\Omega_i)\subset B(L^2(\Omega_i))
\end{equation}
as a von Neumann algebra in the usual way, that is, any $r\in L^\infty(\Omega_i)$
is identified with the multiplication operator $f\mapsto rf,\, f\in L^2(\Omega_i)$.
In the sequel we use the notions considered so far in the case when
$H_i=L^2(\Omega_i)$ and $M_i= L^\infty(\Omega_i)$. We note that
$$
L^\infty(\Omega_i)'=L^\infty(\Omega_i)
\qquad\hbox{and}\qquad L^\infty(\Omega_i)^{op}=L^\infty(\Omega_i).
$$
Using the classical von Neumann algebra identification
$$
L^\infty(\Omega_1\times\Omega_2\times\Omega_3)= L^\infty(\Omega_1)\overline{\otimes}
L^\infty(\Omega_2)\overline{\otimes}
L^\infty(\Omega_3),
$$
we may
apply the construction from Sections 3 and 4 to any
$\varphi\in L^\infty(\Omega_1\times\Omega_2\times\Omega_3)$ and consider
the operator multiplier
$$
\tau_\varphi
\colon S^2\bigl(L^2(\Omega_2),L^2(\Omega_3)\bigr)\times
S^2\bigl(L^2(\Omega_1),L^2(\Omega_2)\bigr)
\longrightarrow B\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr).
$$
It turns out that
\begin{equation}\label{5tau=lambda}
\tau_\varphi=\Lambda_\varphi.
\end{equation}
The easy verification is left to the reader.
The next proposition should be compared with \cite[Theorem 3.1]{JTT}. In the
latter result, the authors established a similar characterization
of bilinear module maps, but under the assumption that they take values in
$S^2\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)$.
\begin{proposition}\label{6Schur1}
For any
$$
u\in B_2\bigl(S^2\bigl(L^2(\Omega_2),L^2(\Omega_3)\bigr)\times
S^2\bigl(L^2(\Omega_1),L^2(\Omega_2)\bigr),
B\bigl(L^2(\Omega_1),L^2(\Omega_3)\bigr)\bigr),
$$
the following are equivalent.
\begin{itemize}
\item [(i)] $u$ is a bilinear Schur multiplier.
\item [(ii)] $u$ is an $(L^\infty(\Omega_3), L^\infty(\Omega_2),L^\infty(\Omega_1))$-module map.
\end{itemize}
\end{proposition}
\begin{proof}
The implication ``(i)$\,\Rightarrow\,$(ii)" follows from (\ref{5tau=lambda}) and Corollary \ref{3Mod-B}.
(It is also possible to write a direct proof.)
To prove the converse,
assume that $u$ is $(L^\infty(\Omega_3), L^\infty(\Omega_2),L^\infty(\Omega_1))$-modular.
We let
$$
U\colon S^1(L^2(\Omega_1))\times S^1(L^2(\Omega_2))
\times S^1(L^2(\Omega_3))\longrightarrow \ensuremath{\mathbb{C}}
$$
be the unique trilinear form satisfying
$$
U(\overline{\xi_1}\otimes\eta_1, \overline{\xi_2}\otimes\eta_2,
\overline{\xi_3}\otimes\eta_3)
\,=\, \bigl\langle \bigl[u(\overline{\xi_2}\otimes\eta_3, \overline{\xi_1}\otimes\eta_2)\bigr]
(\eta_1),\xi_3\bigr\rangle
$$
for any $\xi_1,\eta_1\in L^2(\Omega_1)$, $\xi_2,\eta_2\in L^2(\Omega_2)$
and $\xi_3,\eta_3\in L^2(\Omega_3)$.
Then for $i=1,2,3$,
let
$$
q_i\colon S^1(L^2(\Omega_i))\longrightarrow L^1(\Omega_i)
$$
be the unique bounded operator satisfying $q_i(\overline{\xi_i}\otimes\eta_i)
=\overline{\xi_i}\eta_i$ for any $\xi_i,\eta_i\in L^2(\Omega_i)$. This is a quotient map,
whose adjoint coincides with the embedding (\ref{5qi*}).
Recall the operators $U_1,U_2,U_3$ defined at the beginning of Section \ref{4MOD}.
By Lemma \ref{3LemMod}, $U_i$ is valued in $L^\infty(\Omega_i)$ for any $i=1,2,3$. This implies
that $U$ vanishes on the union of ${\rm Ker}(q_1)\times S^1(L^2(\Omega_2))
\times S^1(L^2(\Omega_3))$, $S^1(L^2(\Omega_1))\times {\rm Ker}(q_2)
\times S^1(L^2(\Omega_3))$ and $S^1(L^2(\Omega_1))
\times S^1(L^2(\Omega_2))\times {\rm Ker}(q_3)$. Consequently, there exists
a trilinear form
$$
\widehat{u}\colon L^1(\Omega_1)\times L^1(\Omega_2)\times L^1(\Omega_3)
\longrightarrow\ensuremath{\mathbb{C}}
$$
factorizing $U$ in the sense that
$$
U(v_1,v_2,v_3) = \widehat{u}\bigl(q_1(v_1), q_2(v_2),q_3(v_3)\bigr),
\qquad v_i\in S^1(L^2(\Omega_i)).
$$
Since $L^1(\Omega_1)\widehat{\otimes} L^1(\Omega_2)\widehat{\otimes} L^1(\Omega_3) =
L^1(\Omega_1\times \Omega_2\times \Omega_3)$ (see e.g. \cite[Chap. VIII, Example 10]{DU}),
there exists $\varphi\in L^\infty(\Omega_1\times \Omega_2\times \Omega_3)$ such that
$$
\widehat{u}(\phi_1,\phi_2,\phi_3)\,=\,\int_{\Omega_1\times\Omega_2\times\Omega_3}
\varphi(t_1,t_2,t_3) \phi_1(t_1)\phi_2(t_2)
\phi_3(t_3)\,d\mu_1(t_1)d\mu_2(t_2)d\mu_3(t_3)
$$
for any $\phi_i\in L^1(\Omega_i)$. A thorough look at the definitions of
$U$ and $\Lambda_\varphi$ then reveals that
$u=\Lambda_\varphi$.
\end{proof}
Combining (\ref{5Norm}), (\ref{5tau=lambda}) and Proposition \ref{2OM-B}, we obtain
that any bilinear Schur multiplier $u$ induces a completely bounded
$$
\widetilde{u}\colon \Gamma\bigl(L^2(\Omega_1), L^2(\Omega_2),L^2(\Omega_3)\bigr)
\longrightarrow B(L^2(\Omega_1), L^2(\Omega_3))
$$
and that $\cbnorm{\widetilde{u}}=\norm{\widetilde{u}} (=\norm{u})$.
The next result, which essentially follows from \cite{CLS},
shows that similarly, $S^1$-Schur multipliers
are automatically completely bounded and that their norm
and completely bounded norm coincide.
\begin{theorem}\label{6Schur2}
Let $\varphi\in L^\infty(\Omega_1\times\Omega_2\times\Omega_3)$.
\begin{itemize}
\item [(a)] $\Lambda_\varphi$ is an $S^1$-operator multiplier if and only if
there exist a separable Hilbert space $H$ and two functions
$$
a\in L^{\infty}(\Omega_1 \times \Omega_2 ; H) \qquad
\text{and} \qquad
b\in L^{\infty}(\Omega_2\times \Omega_3 ; H)
$$
such that
\begin{equation}\label{6Facto1}
\varphi(t_1,t_2,t_3)= \left\langle a(t_1,t_2), b(t_2,t_3) \right\rangle
\end{equation}
for a.e. $(t_1,t_2,t_3) \in \Omega_1 \times \Omega_2 \times \Omega_3.$
In this case,
$$
\bignorm{\Lambda_\varphi \colon S^2 \times S^2 \rightarrow S^1}=
\inf\bigl\{\norm{a}_\infty\norm{b}_\infty\bigr\},
$$
where the infimum runs over all pairs $(a,b)$ verifying the above factorization property.
\item [(b)] If $\Lambda_\varphi$ is an $S^1$-operator multiplier, then
$$
\widetilde{\Lambda_\varphi}\colon
\Gamma\bigl(L^2(\Omega_1), L^2(\Omega_2),L^2(\Omega_3)\bigr)
\longrightarrow S^1\bigl(L^2(\Omega_1), L^2(\Omega_3)\bigr)
$$
is completely bounded, with $\cbnorm{\widetilde{\Lambda_\varphi}} =
\norm{\widetilde{\Lambda_\varphi}}$.
\end{itemize}
\end{theorem}
\begin{proof}
Part (a) is given by
\cite[Theorem 22]{CLS}.
Assume that $\Lambda_\varphi$ is an $S^1$-operator multiplier.
Let
$$
{\mathcal S}_{3,1}\subset B\bigl(S^\infty(L^2(\Omega_3), L^2(\Omega_1)),
B(L^2(\Omega_3), L^2(\Omega_1))\bigr)
$$
be the space of all measurable Schur multipliers
from $L^2(\Omega_3)$ into $L^2(\Omega_1)$, in the sense of
\cite[Subsection 2.4]{CLS}. Then using the
notation from the latter paper
(to which we refer for more explanations), part (a)
implies that $\varphi\in L^\infty_\sigma\bigl(\Omega_2;{\mathcal S}_{3,1}\bigr)$.
Indeed this follows from Peller's description of
measurable Schur multipliers given by \cite[Theorem 1]{Pe}
(see also \cite[Theorem 3.3]{Sp},
\cite[Theorem 23]{CLS} and \cite{H}).
Measurable Schur multipliers are
$(L^\infty(\mbox{${\mathcal O}$}mega_1),L^\infty(\mbox{${\mathcal O}$}mega_3))$-bimodule maps hence by
\cite[Theorem 2.1]{Sm}, any element of $\mbox{${\mathcal S}$}_{3,1}$ is a completely
bounded map, whose completely bounded norm coincides with its usual norm.
Thus we have
$$
\mbox{${\mathcal S}$}_{3,1}\,\subset\, CB
\bigl(S^\infty(L^2(\mbox{${\mathcal O}$}mega_3), L^2(\mbox{${\mathcal O}$}mega_1)),
B(L^2(\mbox{${\mathcal O}$}mega_3), L^2(\mbox{${\mathcal O}$}mega_1))\bigr)
\qquad\hbox{isometrically}.
$$
We deduce that
$$
\varphi\in
L^\infty_\sigma\bigl(\mbox{${\mathcal O}$}mega_2;CB
\bigl(S^\infty(L^2(\mbox{${\mathcal O}$}mega_3), L^2(\mbox{${\mathcal O}$}mega_1)),
B(L^2(\mbox{${\mathcal O}$}mega_3), L^2(\mbox{${\mathcal O}$}mega_1))\bigr)\bigr).
$$
Recall that by \cite[Theorem 2.2]{BS} (see also Theorem 4.2
in the latter paper), we have a $w^*$-continuous isometric identification
$$
CB
\bigl(S^\infty(L^2(\mbox{${\mathcal O}$}mega_3), L^2(\mbox{${\mathcal O}$}mega_1)),
B(L^2(\mbox{${\mathcal O}$}mega_3), L^2(\mbox{${\mathcal O}$}mega_1))\bigr)
\simeq B(L^2(\mbox{${\mathcal O}$}mega_1))\stackrel{w^*h}{\otimes}
B(L^2(\mbox{${\mathcal O}$}mega_3)).
$$
Hence we obtain that $\varphi$ belongs to
$L^\infty_\sigma\bigl(\mbox{${\mathcal O}$}mega_2;B(L^2(\mbox{${\mathcal O}$}mega_1))\stackrel{w^*h}{\otimes}
B(L^2(\mbox{${\mathcal O}$}mega_3))\bigr)$. Equivalently,
$\varphi$ belongs to $L^\infty(\mbox{${\mathcal O}$}mega_2)\overline{\otimes}
\bigl( B(L^2(\mbox{${\mathcal O}$}mega_1))\stackrel{w^*h}{\otimes}
B(L^2(\mbox{${\mathcal O}$}mega_3))\bigr)$. Moreover
the norm of
$\Lambda_\varphi \colon S^2 \times S^2 \rightarrow S^1$ is equal to
the norm of $\varphi$ in the latter space.
Now applying Theorem \ref{2OM-S1}, we deduce that
$\Lambda_\varphi \colon S^2 \times S^2 \rightarrow S^1$
is completely bounded, with
$\cbnorm{\widetilde{\Lambda_\varphi}} =
\norm{\widetilde{\Lambda_\varphi}}$.
\end{proof}
\begin{remark}
In Theorem \ref{6Schur2} above, (a) can be deduced from (b) as follows.
Assume that $\Lambda_\varphi$ is a completely bounded $S^1$-operator multiplier,
with completely bounded norm $<1$.
By Proposition \ref{6Schur1} and (\ref{5tau=lambda}), $\Lambda_\varphi=\tau_\varphi$
is $(L^\infty(\Omega_3),L^\infty(\Omega_2),L^\infty(\Omega_1))$-modular. Further
$L^\infty(\Omega_2)$ is injective. Hence by Corollary \ref{6Facto-varphi},
there exist
an index set $I$, a family $(a_i)_{i\in I}$ in $L^{\infty}(\Omega_1 \times \Omega_2)$
and a family $(b_i)_{i\in I}$ in $L^{\infty}(\Omega_2 \times \Omega_3)$ such that
$$
\sum_{i\in I} \vert a_i\vert^2\, < 1
\qquad\hbox{and}\qquad
\sum_{i\in I} \vert b_i\vert^2\, < 1
$$
almost everywhere on $\Omega_1 \times \Omega_2$ and on
$\Omega_2 \times \Omega_3$, respectively, and
$\varphi=\sum_{i\in I} (a_i\otimes 1)(1\otimes b_i)$ in
the $w^*$-topology of $L^\infty(\Omega_1\times\Omega_2\times\Omega_3)$.
Since we assumed that the three measure spaces $(\Omega_j,\mu_j)$ are
separable, it follows from the proof of Corollary \ref{6Facto-varphi}
that $I$ can be chosen to be a countable set.
Then we have
\begin{equation}\label{6Facto2}
\varphi(t_1,t_2,t_3)\, = \,\sum_{i\in I} a_i(t_1,t_2) b_i(t_2,t_3)
\end{equation}
for a.e. $(t_1,t_2,t_3) \in \Omega_1 \times \Omega_2 \times \Omega_3$.
Further we may define $a\in L^{\infty}(\Omega_1 \times \Omega_2 ; \ell^2_I)$
and $b\in L^{\infty}(\Omega_2\times \Omega_3 ; \ell^2_I)$
by $a(t_1,t_2) =(a_i(t_1,t_2))_{i\in I}$ and
$b(t_2,t_3) =(b_i(t_2,t_3))_{i\in I}$, respectively.
Then we both have $\norm{a}_\infty\leq 1$ and $\norm{b}_\infty\leq 1$,
and the identity (\ref{6Facto2}) yields (\ref{6Facto1}), with $H=\ell^2_I$.
Note however we do not know any direct proof of
Theorem \ref{6Schur2} (b), not using some of the
arguments from \cite{CLS}.
\end{remark}
\noindent
{\bf Acknowledgements.}
The first author was supported by the French
``Investissements d'Avenir" program,
project ISITE-BFC (contract ANR-15-IDEX-03).
We warmly thank the referee for the careful reading and
several valuable suggestions which improved the presentation
of the paper.
\end{document} |
\begin{document}
\title{Hamiltonian Learning and Certification Using Quantum Resources}
\author{Nathan Wiebe}
\affiliation{Quantum Architectures and Computation Group, Microsoft Research, Redmond, WA 98052, USA}
\affiliation{Department of Combinatorics \& Optimization, University of Waterloo, Ontario N2L 3G1, Canada}
\affiliation{Institute for Quantum Computing, University of Waterloo, Ontario N2L 3G1, Canada}
\author{Christopher Granade}
\affiliation{Department of Physics, University of Waterloo, Ontario N2L 3G1, Canada}
\affiliation{Institute for Quantum Computing, University of Waterloo, Ontario N2L 3G1, Canada}
\author{Christopher Ferrie}
\affiliation{
Center for Quantum Information and Control,
University of New Mexico,
Albuquerque, New Mexico, 87131-0001}
\author{D. G. Cory}
\affiliation{Department of Chemistry, University of Waterloo, Ontario N2L 3G1, Canada}
\affiliation{Institute for Quantum Computing, University of Waterloo, Ontario N2L 3G1, Canada}
\affiliation{Perimeter Institute, University of Waterloo, Ontario N2L 2Y5, Canada}
\begin{abstract}
In recent years quantum simulation has made great strides culminating in experiments that operate in a regime that existing supercomputers cannot easily simulate. Although this raises the possibility that special purpose analog quantum simulators may be able to perform computational tasks that existing computers cannot, it also introduces a major challenge: certifying that the quantum simulator is in fact simulating the correct quantum dynamics. We provide an algorithm that, under relatively weak assumptions, can be used to efficiently infer the Hamiltonian of a large but untrusted quantum simulator using a trusted quantum simulator.
We illustrate the power of this approach by showing numerically that it can inexpensively learn the Hamiltonians for large frustrated Ising models, demonstrating that quantum resources can make certifying analog quantum simulators tractable.
\end{abstract}
\maketitle
Quantum information processing promises to dramatically advance physics and chemistry by providing efficient simulators for the Schr\"odinger or Dirac equations~\cite{lloyd_universal_1996,alan_qchem_2005,gerritsma_diracsim_2010}.
This is important because conventional methods are inefficient, scaling exponentially in the number of interacting subsystems. Consequently, quantum simulations beyond a few tens of interacting particles are generally believed to be beyond the limitations of conventional supercomputers. This inability to simulate large quantum systems means that important questions in condensed matter, such as the shape of the phase diagram for the Fermi--Hubbard model, remain open.
Analog quantum simulation raises the possibility that
special purpose \emph{analog devices} may be able to address such problems using current or near--future hardware~\cite{simon_simulation_2011,britton_simulation_2012,kim_simulation_2010}.
A major objection to this avenue of inquiry is that analog simulators are not necessarily trustworthy~\cite{hauke_trust_2012,gogolin_boson_2013} and certification of them is not known to be efficient.
Without such certification, an analog simulator can at best only provide hints about the answer to a given computational question. A resolution to this problem is therefore essential if analog quantum simulators are to compete on an even footing with classical supercomputers.
An important first step towards a resolution is provided in~\cite{daSilva_practical_2011}, where it is shown that quantum systems with local time--independent Hamiltonians can be efficiently characterized given ensemble readout. However, the method is not generally applicable, can be expensive and is not known to be either error robust or stable in cases where single shot measurements are used.
A number of machine learning and statistical inference methods \cite{hentschel_machine_2010,hentschel_efficient_2011,sergeevich_characterization_2011,ferrie_how_2012,sergeevich_optimizing_2012,granade_robust_2012,lovett_differential_2013,svore_faster_2013} have been recently introduced to address similar problems in metrology or Hamiltonian learning. In the context of Hamiltonian learning, such ideas are known to be error--robust and lead to substantial reductions in the cost of high--precision Hamiltonian inference~\cite{granade_robust_2012}, albeit at the price of sacrificing the efficient scaling exhibited by~\cite{daSilva_practical_2011}.
We overcome these challenges by providing a robust method that can be used to characterize unknown Hamiltonians by unifying statistical inference with quantum simulation. The key insight behind this is that Bayesian inference reduces the problem of Hamiltonian estimation to a problem in Hamiltonian simulation that can be efficiently solved using a trusted quantum simulator. Our algorithm achieves this through the following steps. We begin by positing a Hamiltonian model for the system and a probability distribution over the parameters of the Hamiltonian model. We then use a novel guess heuristic for the optimal experiment that adaptively chooses experiments based on the current uncertainty in the Hamiltonian. The experiment is then performed and the trusted quantum simulator is used to efficiently compute the likelihood of the measurement outcome occurring if each hypothetical model were true. These likelihoods are then used by the algorithm to update its knowledge of the Hamiltonian parameter via Bayes rule, resulting in an updated probability distribution, called the posterior distribution. This process is then repeated until the uncertainty in the unknown Hamiltonian parameters (as measured by the posterior variance) becomes sufficiently small. This iterative process is depicted in \fig{flowchart}.
To make the problem concrete, we represent each hypothetical Hamiltonian $H_j$ by a vector of real numbers $\vec{x}_j \in \mathbb{R}^d$ such that $H_j = H(\vec{x}_j)$. The Hamiltonian model is therefore specified by $H(\vec{x})$.
\begin{figure}
\caption{Flowchart of the inference algorithm: an experiment is chosen via the particle guess heuristic, the likelihoods of the observed outcome are computed with the trusted simulator, and the distribution over Hamiltonian parameters is updated via Bayes' rule until the posterior variance is sufficiently small.} % NOTE(review): original caption text truncated in source; reconstructed from the surrounding text -- verify against the original figure.
\label{fig:flowchart}
\end{figure}
We consider three classes of experiments that can be performed to infer the Hamiltonian, $H$, given an initial state $\ket{\psi}$ (typically a pseudorandom state~\cite{emerson_pseudo-random_2003}): (a) Classical Likelihood Evaluation (CLE), (b) Quantum Likelihood Evaluation (QLE) and (c) Interactive Quantum Likelihood Evaluation (IQLE). CLE is the simplest of these experiments and is discussed in detail in~\cite{granade_robust_2012}. It involves simply picking an experimental time $t$, and computing the likelihood $\Pr(D|\vec{x}_i)=|\bra{D}e^{-iH(\vec{x}_i)t}\ket{\psi}|^2$ using a classical computer, where $\vec{x}_i$ is a given set of Hamiltonian parameters and $D$ is the experimental outcome. This function, known as the likelihood function, will not generally be efficiently computable on a classical computer because it involves quantum simulation.
In QLE experiments, a trusted quantum simulator is used to ameliorate these problems. It does so by estimating $\Pr(D|\vec{x}_i)$ to be the fraction of times outcome $D$ occurs in a sufficiently large set of simulated experiments, which is efficient if $\Pr(D|\vec{x}_i)$ is only polynomially small.
This approach allows a complex quantum simulator, such as a fault tolerant quantum computer, to act as a certifier for an analog quantum simulator. A trusted quantum simulator could also be constructed using a bootstrapping protocol wherein a smaller trusted analog simulator is the certifier. This is possible if a compressed simulation scheme~\cite{kraus_compressed_2011} for the dynamics exists.
The Loschmidt echo famously shows that, for complex quantum systems, two nearly identical Hamiltonians will typically generate evolutions that diverge exponentially after a short time, before saturating at an exponentially small overlap~\cite{Haa06}. This means that QLE will often be restricted to short evolution times to guarantee efficiency (which is undesirable~\cite{granade_robust_2012}). We resolve this by using IQLE experiments, which are described in~\fig{models}. These experiments are reminiscent of the Hahn echo experiments commonly used in magnetic resonance and experimental quantum information processing~\cite{hahn_spin_1950}.
An IQLE experiment swaps the state of the unknown quantum system with that of a trusted quantum simulator then inverts the evolution based on a guessed Hamiltonian $H_{-}$. The measurement in IQLE is always assumed to be in an orthonormal basis that has $\ket{\psi}$ as an element. This produces $\Pr(D|\vec{x}_i)=|\bra{D}e^{iH_{-} t}e^{-iH(\vec{x}_i)t}\ket{\psi}|^2$.
\begin{figure}
\caption{Schematic of the experiment models used for likelihood evaluation, including the interactive (IQLE) protocol in which the state is swapped into the trusted simulator and the evolution is inverted under a guessed Hamiltonian $H_{-}$.} % NOTE(review): original caption text truncated in source; reconstructed from the surrounding text -- verify against the original figure.
\label{fig:models}
\end{figure}
Although the Loschmidt echo may also seem to be problematic for IQLE experiments, we exploit it in our guess heuristic for $H_{-}$. We call this heuristic the ``particle guess heuristic'' (PGH), which chooses $H_{-}:= H(\vec{x}_{-})$ by sampling $\vec{x}_{-}$ from the prior probability $\Pr({\vec{x}})$, which describes our current knowledge of the Hamiltonian parameters. The set of parameters, $\vec{x}_{-}$, is called a \emph{particle} because it is described by a Dirac-delta distribution over parameter space. The time $t$ is chosen by drawing a second particle $\vec{x}_{-}' \ne \vec{x}_{-}$ and setting $t=1/\|\vec{x}_{-}'-\vec{x}_{-}\|_2$. As the uncertainty in the estimated parameter shrinks, the PGH adaptively picks longer times to ensure that informative experiments continue to be chosen as certainty about the unknown parameters increases. The PGH also causes $e^{-iH(\vec{x}_i)t}$ to result in substantially different likelihoods for $\vec{x}_i$ that are within one standard deviation of the prior's mean, which we show in the appendix is optimal for certain learning problems.
IQLE experiments with two outcomes also ensure that $\Pr(D|\vec{x})$ will not be exponentially small (with high probability) for $H$ an affine transformation acting on $\vec{x}$, since
\begin{align}
|\bra{\psi}e^{iH_{-} t}e^{-iHt}\ket{\psi}|&\ge 1-2\|H-H_{-}\|_2t \nonumber\\
&\ge 1- O(\|\vec{x}-\vec{x}_{-}\|_2t).
\end{align}
If the prior distribution has converged to a unimodal distribution centered near the correct Hamiltonian (this is typical for Bayesian inference of non-degenerate learning problems~\cite{granade_robust_2012}) then $\|\vec{x}-\vec{x}_{-}\|_2\in \Theta(1/t)$. This means that if we use a POVM with two elements: $\ket{\psi}\!\!\bra{\psi}$ and its orthogonal complement $\openone-\ket{\psi}\!\!\bra{\psi}$ then we expect (a) neither probability will be exponentially small if $\vec{x}_{-}$ and $\vec{x_j}$ are near the mean and (b) $\Pr(\psi|\vec{x_j})$ will typically be exponentially small for $H(\vec{x_j})$ that differ substantially from the correct Hamiltonian. The PGH therefore leads to IQLE experiments that rapidly eliminate incorrect hypotheses about the correct Hamiltonian.
The measurement outcomes yielded by the experiments are \emph{immediately processed} using Bayesian inference, as described in~\fig{flowchart}. This immediate processing allows our algorithm to adaptively choose experiments based on its current knowledge of the correct Hamiltonian.
The state of knowledge is represented by a distribution that is called, previous to the next update step, the \emph{prior}. In the cases we consider, the initial prior distribution before any data is observed is taken to be uniform. This encodes a state of maximum ignorance about the correct $\vec{x}$.
The prior distribution is updated as measurement outcomes are recorded using Bayes' rule, which gives the proper way of computing the probability of each $\vec{x}_j$ being correct given the observed data and the prior. It states that if datum $D$ is recorded then
\begin{equation}
\Pr(\vec{x}_j|D) \propto \Pr(D|\vec{x}_j)\Pr(\vec{x}_j),\label{eq:bayes}
\end{equation}
up to a normalization factor and
$\Pr(\vec{x}_j|D)$ is called the posterior distribution.
Eq.~\eq{bayes} can be efficiently computed (for a polynomial number of $H_j$) only if the likelihood function $\Pr(D|\vec{x}_j)$
is tractable. QLE and IQLE experiments allows $\Pr(D|\vec{x}_j)$ to be efficiently estimated, which removes the main obstacle to using Bayesian methods to learn the correct $\vec{x}$.
\begin{figure}
\caption{The quadratic loss plotted as a function of the number of inversion experiments for Ising models on the complete graph. The shaded areas show a $50\%$ confidence interval for the quadratic loss.}
\label{fig:complete_scale}
\end{figure}
A secondary problem is that \emph{exact} computations of the update rule are intractable in practice because an infinite number of Hamiltonians could potentially describe the system; hence, a probability
distribution over Hamiltonians cannot be exactly represented on either a classical or quantum computer. This problem can be addressed by using the sequential Monte Carlo (SMC) approximation \cite{huszar_adaptive_2011,granade_robust_2012,doucet_tutorial_2011}, which approximates the probability distribution using a weighted sum of particles (Dirac delta functions). Each particle corresponds to a particular $\vec{x}_j$ and is a hypothesis about the correct Hamiltonian parameters $\vec{x}$. SMC assigns a weight $w_j$ to each particle that represents the probability of that hypothesis. The weights are normalized such that $\sum_{j}w_j =1$. The update rule for the probability distribution under the SMC approximation then becomes
$w_j \mapsto \Pr(D|\vec{x}_j)w_j$, followed by normalization.
If necessary, a resampling step is used after updating to ensure that the inference procedure remains stable, as discussed in~\cite{granade_robust_2012} and in \app{bayes}.
The algorithm then iteratively updates the weights $w_i$ and positions $\vec{x}_i$ of the sequential Monte Carlo particles representing the distribution over Hamiltonians $\Pr(H | D)$, conditioned on the data recorded at each step. In this way, the full state of knowledge at each step is iteratively carried forward, and is used to heuristically design future experiments according to the PGH. Subsequent updates will then refine this estimate of the unknown Hamiltonian parameter until the uncertainty of the estimated Hamiltonian is sufficiently small, as measured by the trace of the posterior covariance matrix.
\begin{figure}
\caption{The median decay exponent for the quadratic loss as a function of the number of parameters in the Ising model, $d$.}
\label{fig:exp_scale}
\end{figure}
Now that we have discussed how our algorithm works, we will proceed to assess its cost. We will show that the cost of Hamiltonian inference on a fixed number of IQLE experiments is exponentially smaller than the cost of using CLE. This is significant because CLE gives the best known methods for some problems~\cite{granade_robust_2012}.
A natural measure of the cost is the number of quantum simulations needed to estimate the Hamiltonian parameters. The total cost is therefore,
\begin{equation}
{\rm{Cost}} = N_{\rm steps}(\delta) \times {\rm{Cost}(\rm update;\epsilon)}.\label{eq:costbasic}
\end{equation}
Here $N_{\rm steps}$ is the number of updates needed to make the uncertainty less than $\delta$ and ${\rm{Cost}(\rm update;\epsilon)}$ is the number of samples from the trusted simulator that are needed to update the particle weights using Eq.~\eq{bayes} within error $\epsilon$ in the $1$--norm.
We show in \app{stability} that, with high probability,
${\rm{Cost}(\rm update;\epsilon)}$ scales as
\begin{equation*}
\frac{|\{\vec{x}_i\}|}{\epsilon^2}\left(\mathbb{E}_{D|H}\left[\frac{\max_k \Pr(D|\vec{x}_k)(1-\Pr(D|\vec{x}_k))}{\left( {\sum_k \Pr(D|\vec{x}_k)\Pr(\vec{x}_k)}\right)^2}\right]\right).
\end{equation*}
This implies that the update process will be efficient if the number of particles required is small and the resultant probability distribution is not too flat. That is, $|\{\vec{x}_i\}|\in O({\rm poly}(n))$ and $\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))\in O(1/{\rm poly}(n))$, where $n$ is the number of interacting systems. It has been shown that SMC algorithms require a number of particles that scales sub--exponentially in $d$~\cite{beskos_stability_2011}, which itself may not be a function of $n$. This means that in practice, a small number of particles will typically be required. The robustness of the algorithm to sampling errors is discussed in~\cite{ferrie_likelihood-free_2013} as well as in \app{stability}, so relatively large $\epsilon$ can be tolerated.
If the posterior distribution has converged to a unimodal distribution such that $\vec{x}$ is within a fixed distance from the mean, then the PGH and~\eq{bayes} ensure that $\mathbb{E}_{H_{-}} [|\!\bra{\psi}e^{iH_{-} t}e^{-iHt}\ket{\psi}|^2] \in \Theta(1)$ since $t\in \Theta(|\vec{x}-\vec{x}_{-}|^{-1})$. If a two outcome measurement is used then Markov's inequality implies that $\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))\in \Theta(1)$ with high probability.
By a trivial generalization of this argument, it is clear that a super--polynomial reduction in the cost of performing~\eq{bayes} relative to CLE is obtained with high probability for IQLE experiments if $d\in O({\rm poly}(n))$ and the \emph{effective} number of outcomes, $\sum_j \Pr(j|\vec{x}_k)^{-2}$, is at most $O({\rm poly}(n))$ for each $\vec{x}_k$.
In contrast, QLE experiments may not lead to a super--polynomial separation in the cost estimates for generic Hamiltonians and large $t$ because $\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))\in 2^{-\Theta(n)}$ with high probability for complex quantum systems~\cite{ududec_equilibration_2013,Haa06}. This can be rectified by choosing small $t$ as per~\cite{daSilva_practical_2011}, but such QLE experiments will be much less informative~\cite{granade_robust_2012}.
If a fixed number of updates are required, then the previous discussion and~\eq{costbasic} suggest that IQLE will provide an exponential advantage over CLE. If inference within a fixed error tolerance, $\delta$, is required then the cost estimate is much more challenging.
Each two-outcome measurement yields at most one bit of information about $H$ per measurement hence $N_{\rm steps}(\delta) \in \Omega(d\log_2(1/\delta))$. For most models of interest, $d$ is polynomial (or even constant) in $n$ and hence a small number of updates should typically suffice. It is, however, unclear whether this lower bound is tight; hence, we turn to numerical evidence to show that our algorithm can efficiently learn Hamiltonians in certain cases.
Consider the problem of learning $H({\vec{x}})$ using IQLE experiments for an Ising model with no transverse field:
\begin{equation}\label{eq:Ising}
H(\vec{x}) = \sum_{(i,j)\in G} x_{i,j}\ \sigma_z^{(i)}\sigma_z^{(j)},
\end{equation}
where $G$ is the edge set of an interaction graph on $n$ qubits. Unless otherwise specified, we take $x_{i,j}\in [-1/2,1/2]$ uniformly at random. We take the initial state for the evolution to be $\ket{\psi}=\ket{+}^{\otimes n}$. We choose this Hamiltonian not only because it is physically relevant~\cite{richerme_trapped-ion_2013}, but also for numerical expediency, since the learning process require the algorithm to perform thousands of simulated evolutions of the initial state. All measurements are performed in the eigenbasis of $X^{\otimes n}$. Restricting the measurements to two outcomes is unnecessary for these experiments because IQLE and the PGH concentrates $\Pr(D|\vec{x}_i)$ over a small number of outcomes for this Hamiltonian.
\fig{complete_scale} shows that the quadratic loss (a generalization of the mean--squared error for multiple parameters) shrinks exponentially with the number of experiments performed; however, the rate at which the error decreases slows as the number of qubits $n$ increases. This is expected because $d=n(n-1)/2$ for the case of a complete interaction graph, which implies that the learning problem becomes more difficult as $n$ increases. The data for interactions on the line is similar and is presented in \app{line} and QLE data is given in \app{QLE}.
\begin{figure}
\caption{An approximate $1$ parameter Ising model on the complete graph. The thin lines give the best fits to the exponential decays, which scale as $e^{-0.07N}$.} % NOTE(review): caption appears truncated in source; closing math delimiter and brace restored -- verify the decay constant against the original figure.
\label{fig:corner}
\end{figure}
The rate at which the learning process slows as $n$ increases is investigated in
\fig{exp_scale}. We examine the slowing of the learning problem by fitting the quadratic loss, $\delta$, in each experiment to $Ae^{-\gamma N_{\rm steps}}$. The median decay exponent, which is the median of the values of $\gamma$ attained for a set of experiments with constant $n$, measures how rapidly the algorithm learns the unknown parameters. \fig{exp_scale} shows that these decay constants scale as $O(1/d)$ for the complete graph, and provides weaker evidence for the line. This implies that $N_{\rm steps}(\delta)=O(d\log(1/\delta))$ for this Hamiltonian, which implies that the inference is efficient. Similarly, the PGH implies that the total simulation time needed (for fixed $|\{\vec{x}_i\}|$) scales as $N_{\rm steps}\delta^{-1}\approx \delta^{-3}$, which is relevant in cases where the cost of a simulation is dominated by the evolution time.
Although $d=n(n-1)/2$ or $d=n-1$ in the examples considered above, $d$ can be approximately independent of $n$ in some cases.
An example of this behavior is given in \fig{corner}, where we consider the case where each of the $x_{i,j}$ is approximately the same value chosen uniformly on $[0,100]$, but with small normally distributed fluctuations with mean $0$ and variance $10^{-4}$. This causes the learning problem to be effectively one--dimensional initially, and then transition to $d=n$ when the small fluctuations need to be identified to learn the Hamiltonian parameters within a fixed accuracy. The transition from a single-- to a multi--parameter learning problem happens at $\delta \approx d\times 10^{-4} \approx 10^{-3}$, which coincides with the point when the slope in \fig{corner} changes. This emphasizes that the cost of Hamiltonian estimation using our method only implicitly depends on $n$ through $d$. In fact, the difference in the observed scaling of $\gamma$ is approximately a factor of $2.5$, which is what would be expected if $\gamma\propto 1/d$.
In conclusion, we have shown that Bayesian inference combined with the SMC approximation provides an ideal way to leverage a (potentially non--universal) quantum
simulator to characterize an unknown or unreliable quantum system. We provide theoretical evidence that shows that the update rule, which is at the heart of the learning algorithm, can be performed efficiently using quantum resources. We then illustrate the practicality of the algorithm and show that it is capable of learning unknown Ising couplings with surprisingly few experiments even in the presence of sampling errors. We will show elsewhere that the algorithm is highly resilient to depolarizing noise and other forms of noise that can be introduced via a noisy swap gate.
\begin{thebibliography}{32}
\expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi
\expandafter\ifx\csname bibnamefont\endcsname\relax
\def\bibnamefont#1{#1}\fi
\expandafter\ifx\csname bibfnamefont\endcsname\relax
\def\bibfnamefont#1{#1}\fi
\expandafter\ifx\csname citenamefont\endcsname\relax
\def\citenamefont#1{#1}\fi
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\eprint}[2][]{\url{#2}}
\bibitem[{\citenamefont{Lloyd et~al.}(1996)}]{lloyd_universal_1996}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Lloyd}} \bibnamefont{et~al.},
\emph{\bibinfo{title}{Universal quantum simulators}},
\bibinfo{journal}{SCIENCE-NEW YORK THEN WASHINGTON-} pp.
\bibinfo{pages}{1073--1077} (\bibinfo{year}{1996}).
\bibitem[{\citenamefont{Aspuru-Guzik et~al.}(2005)\citenamefont{Aspuru-Guzik,
Dutoi, Love, and Head-Gordon}}]{alan_qchem_2005}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Aspuru-Guzik}},
\bibinfo{author}{\bibfnamefont{A.~D.} \bibnamefont{Dutoi}},
\bibinfo{author}{\bibfnamefont{P.~J.} \bibnamefont{Love}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Head-Gordon}},
\emph{\bibinfo{title}{Simulated quantum computation of molecular energies}},
\bibinfo{journal}{Science} \textbf{\bibinfo{volume}{309}},
\bibinfo{pages}{1704} (\bibinfo{year}{2005}),
\eprint{http://www.sciencemag.org/content/309/5741/1704.full.pdf}.
\bibitem[{\citenamefont{{Gerritsma} et~al.}(2010)\citenamefont{{Gerritsma},
{Kirchmair}, {Z{\"a}hringer}, {Solano}, {Blatt}, and
{Roos}}}]{gerritsma_diracsim_2010}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{{Gerritsma}}},
\bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{{Kirchmair}}},
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{{Z{\"a}hringer}}},
\bibinfo{author}{\bibfnamefont{E.}~\bibnamefont{{Solano}}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{{Blatt}}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{C.~F.} \bibnamefont{{Roos}}},
\emph{\bibinfo{title}{{Quantum simulation of the Dirac equation}}},
\bibinfo{journal}{\nat} \textbf{\bibinfo{volume}{463}}, \bibinfo{pages}{68}
(\bibinfo{year}{2010}).
\bibitem[{\citenamefont{{Simon} et~al.}(2011)\citenamefont{{Simon}, {Bakr},
{Ma}, {Tai}, {Preiss}, and {Greiner}}}]{simon_simulation_2011}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{{Simon}}},
\bibinfo{author}{\bibfnamefont{W.~S.} \bibnamefont{{Bakr}}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{{Ma}}},
\bibinfo{author}{\bibfnamefont{M.~E.} \bibnamefont{{Tai}}},
\bibinfo{author}{\bibfnamefont{P.~M.} \bibnamefont{{Preiss}}},
\bibnamefont{and}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{{Greiner}}},
\emph{\bibinfo{title}{{Quantum simulation of antiferromagnetic spin chains in
an optical lattice}}}, \bibinfo{journal}{\nat}
\textbf{\bibinfo{volume}{472}}, \bibinfo{pages}{307} (\bibinfo{year}{2011}),
\eprint{1103.1372}.
\bibitem[{\citenamefont{{Britton} et~al.}(2012)\citenamefont{{Britton},
{Sawyer}, {Keith}, {Wang}, {Freericks}, {Uys}, {Biercuk}, and
{Bollinger}}}]{britton_simulation_2012}
\bibinfo{author}{\bibfnamefont{J.~W.} \bibnamefont{{Britton}}},
\bibinfo{author}{\bibfnamefont{B.~C.} \bibnamefont{{Sawyer}}},
\bibinfo{author}{\bibfnamefont{A.~C.} \bibnamefont{{Keith}}},
\bibinfo{author}{\bibfnamefont{C.-C.~J.} \bibnamefont{{Wang}}},
\bibinfo{author}{\bibfnamefont{J.~K.} \bibnamefont{{Freericks}}},
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{{Uys}}},
\bibinfo{author}{\bibfnamefont{M.~J.} \bibnamefont{{Biercuk}}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.~J.}
\bibnamefont{{Bollinger}}}, \emph{\bibinfo{title}{{Engineered two-dimensional
Ising interactions in a trapped-ion quantum simulator with hundreds of
spins}}}, \bibinfo{journal}{\nat} \textbf{\bibinfo{volume}{484}},
\bibinfo{pages}{489} (\bibinfo{year}{2012}), \eprint{1204.5789}.
\bibitem[{\citenamefont{{Kim} et~al.}(2010)\citenamefont{{Kim}, {Chang},
{Korenblit}, {Islam}, {Edwards}, {Freericks}, {Lin}, {Duan}, and
{Monroe}}}]{kim_simulation_2010}
\bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{{Kim}}},
\bibinfo{author}{\bibfnamefont{M.-S.} \bibnamefont{{Chang}}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{{Korenblit}}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{{Islam}}},
\bibinfo{author}{\bibfnamefont{E.~E.} \bibnamefont{{Edwards}}},
\bibinfo{author}{\bibfnamefont{J.~K.} \bibnamefont{{Freericks}}},
\bibinfo{author}{\bibfnamefont{G.-D.} \bibnamefont{{Lin}}},
\bibinfo{author}{\bibfnamefont{L.-M.} \bibnamefont{{Duan}}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{{Monroe}}},
\emph{\bibinfo{title}{{Quantum simulation of frustrated Ising spins with
trapped ions}}}, \bibinfo{journal}{\nat} \textbf{\bibinfo{volume}{465}},
\bibinfo{pages}{590} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Hauke et~al.}(2012)\citenamefont{Hauke, Cucchietti,
Tagliacozzo, Deutsch, and Lewenstein}}]{hauke_trust_2012}
\bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Hauke}},
\bibinfo{author}{\bibfnamefont{F.~M.} \bibnamefont{Cucchietti}},
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Tagliacozzo}},
\bibinfo{author}{\bibfnamefont{I.}~\bibnamefont{Deutsch}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Lewenstein}},
\emph{\bibinfo{title}{Can one trust quantum simulators?}},
\bibinfo{journal}{Reports on Progress in Physics}
\textbf{\bibinfo{volume}{75}}, \bibinfo{pages}{082401}
(\bibinfo{year}{2012}).
\bibitem[{\citenamefont{Gogolin et~al.}(2013)\citenamefont{Gogolin, Kliesch,
Aolita, and Eisert}}]{gogolin_boson_2013}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Gogolin}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Kliesch}},
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Aolita}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Eisert}},
\emph{\bibinfo{title}{Boson-sampling in the light of sample complexity}},
\bibinfo{journal}{arXiv preprint arXiv:1306.3995} (\bibinfo{year}{2013}).
\bibitem[{\citenamefont{da~Silva et~al.}(2011)\citenamefont{da~Silva,
Landon-Cardinal, and Poulin}}]{daSilva_practical_2011}
\bibinfo{author}{\bibfnamefont{M.~P.} \bibnamefont{da~Silva}},
\bibinfo{author}{\bibfnamefont{O.}~\bibnamefont{Landon-Cardinal}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Poulin}},
\emph{\bibinfo{title}{Practical characterization of quantum devices without
tomography}}, \bibinfo{journal}{Phys. Rev. Lett.}
\textbf{\bibinfo{volume}{107}}, \bibinfo{pages}{210404}
(\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Hentschel and Sanders}(2010)}]{hentschel_machine_2010}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Hentschel}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{B.~C.} \bibnamefont{Sanders}},
\emph{\bibinfo{title}{Machine learning for precise quantum measurement}},
\bibinfo{journal}{Physical Review Letters} \textbf{\bibinfo{volume}{104}},
\bibinfo{pages}{063603} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Hentschel and
Sanders}(2011)}]{hentschel_efficient_2011}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Hentschel}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{B.~C.} \bibnamefont{Sanders}},
\emph{\bibinfo{title}{Efficient algorithm for optimizing adaptive quantum
metrology processes}}, \bibinfo{journal}{Physical Review Letters}
\textbf{\bibinfo{volume}{107}}, \bibinfo{pages}{233601}
(\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Sergeevich et~al.}(2011)\citenamefont{Sergeevich,
Chandran, Combes, Bartlett, and Wiseman}}]{sergeevich_characterization_2011}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Sergeevich}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Chandran}},
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Combes}},
\bibinfo{author}{\bibfnamefont{S.~D.} \bibnamefont{Bartlett}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.~M.}
\bibnamefont{Wiseman}}, \emph{\bibinfo{title}{Characterization of a qubit
{H}amiltonian using adaptive measurements in a fixed basis}},
\bibinfo{journal}{1102.3700} (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Ferrie et~al.}(2012)\citenamefont{Ferrie, Granade, and
Cory}}]{ferrie_how_2012}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Ferrie}},
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Granade}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Cory}},
\emph{\bibinfo{title}{How to best sample a periodic probability distribution,
or on the accuracy of {H}amiltonian finding strategies}},
\bibinfo{journal}{Quantum Information Processing} pp. \bibinfo{pages}{1--13}
(\bibinfo{year}{2012}), ISSN \bibinfo{issn}{1570-0755}.
\bibitem[{\citenamefont{Sergeevich and
Bartlett}(2012)}]{sergeevich_optimizing_2012}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Sergeevich}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{S.~D.} \bibnamefont{Bartlett}},
\emph{\bibinfo{title}{Optimizing qubit hamiltonian parameter estimation
algorithms using {PSO}}}, \bibinfo{journal}{{arXiv:1206.3830}}
(\bibinfo{year}{2012}), \bibinfo{note}{proceedings of 2012 {IEEE} Conference
on Evolutionary Computation ({CEC)}, 10-15 June 2012}.
\bibitem[{\citenamefont{Granade et~al.}(2012)\citenamefont{Granade, Ferrie,
Wiebe, and Cory}}]{granade_robust_2012}
\bibinfo{author}{\bibfnamefont{C.~E.} \bibnamefont{Granade}},
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Ferrie}},
\bibinfo{author}{\bibfnamefont{N.}~\bibnamefont{Wiebe}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.~G.} \bibnamefont{Cory}},
\emph{\bibinfo{title}{Robust online {H}amiltonian learning}},
\bibinfo{journal}{New Journal of Physics} \textbf{\bibinfo{volume}{14}},
\bibinfo{pages}{103013} (\bibinfo{year}{2012}), ISSN
\bibinfo{issn}{1367-2630}.
\bibitem[{\citenamefont{Lovett et~al.}(2013)\citenamefont{Lovett, Crosnier,
Perarnau-Llobet, and Sanders}}]{lovett_differential_2013}
\bibinfo{author}{\bibfnamefont{N.~B.} \bibnamefont{Lovett}},
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Crosnier}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Perarnau-Llobet}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{B.~C.}
\bibnamefont{Sanders}}, \emph{\bibinfo{title}{Differential evolution for
many-particle adaptive quantum metrology}},
\bibinfo{journal}{{arXiv:1304.2246}} (\bibinfo{year}{2013}).
\bibitem[{\citenamefont{Svore et~al.}(2013)\citenamefont{Svore, Hastings, and
Freedman}}]{svore_faster_2013}
\bibinfo{author}{\bibfnamefont{K.~M.} \bibnamefont{Svore}},
\bibinfo{author}{\bibfnamefont{M.~B.} \bibnamefont{Hastings}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Freedman}},
\emph{\bibinfo{title}{Faster phase estimation}},
\bibinfo{journal}{{arXiv:1304.0741}} (\bibinfo{year}{2013}).
\bibitem[{\citenamefont{Emerson et~al.}(2003)\citenamefont{Emerson, Weinstein,
Saraceno, Lloyd, and Cory}}]{emerson_pseudo-random_2003}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Emerson}},
\bibinfo{author}{\bibfnamefont{Y.~S.} \bibnamefont{Weinstein}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Saraceno}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Lloyd}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.~G.} \bibnamefont{Cory}},
\emph{\bibinfo{title}{Pseudo-random unitary operators for quantum information
processing}}, \bibinfo{journal}{Science} \textbf{\bibinfo{volume}{302}},
\bibinfo{pages}{2098} (\bibinfo{year}{2003}), ISSN \bibinfo{issn}{0036-8075,
1095-9203}, \bibinfo{note}{{PMID:} 14684815}.
\bibitem[{\citenamefont{{Kraus}}(2011)}]{kraus_compressed_2011}
\bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{{Kraus}}},
\emph{\bibinfo{title}{{Compressed Quantum Simulation of the Ising Model}}},
\bibinfo{journal}{Physical Review Letters} \textbf{\bibinfo{volume}{107}},
\bibinfo{eid}{250503} (\bibinfo{year}{2011}), \eprint{1109.2455}.
\bibitem[{\citenamefont{Haake}(2004)}]{Haa06}
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Haake}},
\emph{\bibinfo{title}{Quantum Signatures of Chaos $2^{\rm nd}$ Edition}}
(\bibinfo{publisher}{Springer-Verlag New York}, \bibinfo{year}{2004}).
\bibitem[{\citenamefont{Hahn}(1950)}]{hahn_spin_1950}
\bibinfo{author}{\bibfnamefont{E.~L.} \bibnamefont{Hahn}},
\emph{\bibinfo{title}{Spin echoes}}, \bibinfo{journal}{Physical Review}
\textbf{\bibinfo{volume}{80}}, \bibinfo{pages}{580} (\bibinfo{year}{1950}).
\bibitem[{\citenamefont{Huszár and Houlsby}(2011)}]{huszar_adaptive_2011}
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Huszár}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{N.~M.~T.} \bibnamefont{Houlsby}},
\emph{\bibinfo{title}{Adaptive bayesian quantum tomography}},
\bibinfo{journal}{1107.0895} (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Doucet and Johansen}(2011)}]{doucet_tutorial_2011}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Doucet}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.~M.} \bibnamefont{Johansen}},
\emph{\bibinfo{title}{A tutorial on particle filtering and smoothing: fifteen
years later}} (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Beskos et~al.}(2011)\citenamefont{Beskos, Crisan, and
Jasra}}]{beskos_stability_2011}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Beskos}},
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Crisan}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Jasra}},
\bibinfo{type}{{arXiv} e-print} \bibinfo{number}{1103.3965}
(\bibinfo{year}{2011}), URL \url{http://arxiv.org/abs/1103.3965}.
\bibitem[{\citenamefont{Ferrie and
Granade}(2013)}]{ferrie_likelihood-free_2013}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Ferrie}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{C.~E.} \bibnamefont{Granade}},
\emph{\bibinfo{title}{Likelihood-free quantum inference: tomography without
the {B}orn rule}}, \bibinfo{journal}{{arXiv:1304.5828}}
(\bibinfo{year}{2013}).
\bibitem[{\citenamefont{{Ududec} et~al.}(2012)\citenamefont{{Ududec}, {Wiebe},
and {Emerson}}}]{ududec_equilibration_2013}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{{Ududec}}},
\bibinfo{author}{\bibfnamefont{N.}~\bibnamefont{{Wiebe}}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{{Emerson}}},
\emph{\bibinfo{title}{{Equilibration of Measurement Statistics Under Complex
Dynamics}}}, \bibinfo{journal}{ArXiv e-prints} (\bibinfo{year}{2012}),
\eprint{1208.3419}.
\bibitem[{\citenamefont{Richerme et~al.}(2013)\citenamefont{Richerme, Senko,
Korenblit, Smith, Lee, Campbell, and Monroe}}]{richerme_trapped-ion_2013}
\bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Richerme}},
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Senko}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Korenblit}},
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Smith}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Lee}},
\bibinfo{author}{\bibfnamefont{W.~C.} \bibnamefont{Campbell}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Monroe}},
\emph{\bibinfo{title}{Trapped-ion quantum simulation of an {I}sing model with
transverse and longitudinal fields}}, \bibinfo{journal}{{arXiv:1303.6983}}
(\bibinfo{year}{2013}).
\bibitem[{\citenamefont{Jones et~al.}(2001--)\citenamefont{Jones, Oliphant,
Peterson et~al.}}]{SciPy2001}
\bibinfo{author}{\bibfnamefont{E.}~\bibnamefont{Jones}},
\bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Oliphant}},
\bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Peterson}},
\bibnamefont{et~al.}, \emph{\bibinfo{title}{{SciPy}: Open source scientific
tools for {Python}}} (\bibinfo{year}{2001--}),
URL \url{http://www.scipy.org/}.
\bibitem[{\citenamefont{Peterson}(2009)}]{peterson_f2py:_2009}
\bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Peterson}},
\emph{\bibinfo{title}{{F2PY:} a tool for connecting {F}ortran and {P}ython
programs}}, \bibinfo{journal}{International Journal of Computational Science
and Engineering} \textbf{\bibinfo{volume}{4}}, \bibinfo{pages}{296 }
(\bibinfo{year}{2009}).
\bibitem[{\citenamefont{Ferrie et~al.}(2012--)\citenamefont{Ferrie, Granade
et~al.}}]{ferrie_qinfer_2012}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Ferrie}},
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Granade}},
\bibnamefont{et~al.}, \emph{\bibinfo{title}{{QInfer}: Library for statistical
inference in quantum information}} (\bibinfo{year}{2012--}),
URL \url{https://github.com/csferrie/python-qinfer}.
\bibitem[{\citenamefont{Liu and West}(2000)}]{Liu2000Combined}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Liu}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{West}},
\emph{\bibinfo{title}{{Combined parameter and state estimation in
simulation-based filtering}}} (\bibinfo{publisher}{Springer-Verlag},
\bibinfo{year}{2000}).
\bibitem[{\citenamefont{Braunstein and
Caves}(1994)}]{Braunstein1994Statistical}
\bibinfo{author}{\bibfnamefont{S.~L.} \bibnamefont{Braunstein}}
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{C.~M.} \bibnamefont{Caves}},
\emph{\bibinfo{title}{{Statistical distance and the geometry of quantum
states}}}, \bibinfo{journal}{Physical Review Letters}
\textbf{\bibinfo{volume}{72}}, \bibinfo{pages}{3439} (\bibinfo{year}{1994}).
\end{thebibliography}
\onecolumngrid
\appendix
\section{Supplemental Data}\label{app:supp}
\subsection{Error Scaling for Linear Interaction Graph}\label{app:line}
In the main body of the text, we showed that our algorithm learns information about the Hamiltonian at a rate that scales exponentially with the number of experiments taken for both the complete graph and the line, but only presented an example of the raw data for the case where the interaction graph is complete. For completeness, we provide here analogous data for the case where the interaction graph is a line.
\begin{figure}
\caption{This figure shows the quadratic loss plotted as a function of the number of IQLE experiments for $4,8,12$ qubits (from bottom to top) interacting on the line. The dashed lines show a $50\%$ confidence interval for the quadratic loss. $10~000$, $10~000$ and $20~000$ particles were used in the $n=4$, $n=8$ and $n=12$ cases respectively.}
\label{fig:line_scale}
\end{figure}
This data clearly shows that IQLE experiments are similarly effective in the case of linear interaction graphs in that the data follows an exponential scaling. The fits of the median quadratic loss to an exponential of the form $Ae^{-\gamma N}$, where $N$ is the experiment number, are given in \fig{exp_scale}.
\subsection{Error Scaling for QLE}\label{app:QLE}
A major problem facing the use of QLE experiments is efficiently estimating the likelihood function using quantum simulation. Despite this problem, if we grant the algorithm the ability to do perfect likelihood evaluations at unit cost then QLE experiments can be highly informative. For example, the typical variation of the likelihood function for QLE experiments with large $t$ on random Hamiltonians acting on $n$ qubits drawn from the Gaussian Unitary Ensemble (such random Hamiltonians model complex quantum systems with time-reversal symmetry~\cite{Haa06}) is on the order of $2^{-n}$~\cite{ududec_equilibration_2013} which is on the same order as the typical values of the likelihood. This means that, if \emph{we do not consider} sampling errors, then late time QLE experiments will allow learning to occur even for complex Hamiltonians.
In spite of this, Hamiltonian learning using QLE experiments is expected to be much less stable in this regime. This is because $\Pr(D|\vec{x}_j)$ can be approximately the same as (or larger than) $\Pr(D|\vec{x})$ even if $\|\vec{x}_j -\vec{x}\|_2$ is large (here $\vec{x}$ is the correct Hamiltonian parameter). This can cause the learning algorithm to get confused and move particles to near $\vec{x}_j$ during the resampling step. This makes it harder for the algorithm to recover from the bad inference and continue to learn. Thus even if we grant QLE experiments the ability to perform \emph{exact} likelihood evaluations at unit cost, then we still do not expect such experiments to be as robust to bad inferences as IQLE.
\fig{QLEline_scale} confirms these expectations. It shows that the $25^{\rm th}$ percentile of the quadratic loss for QLE experiments for the Ising model on the line is similar to that of IQLE experiments. The most notable difference between the data sets is that the $50\%$ confidence intervals overlap. This is because, in each case, the learning algorithm is more likely to get confused in IQLE experiments versus QLE experiments. Additionally, the $75^{\rm th}$ percentile of the quadratic loss is \emph{much worse} for $n=12$ and suggests that the learning algorithm eventually fails in such cases. Similar problems are observed in the median and $75^{\rm th}$ percentile of the $n=4$ data. These problems are not fatal: they can be addressed by repeating the learning algorithm several times and using a majority vote scheme to reduce the impact of instances where the learning algorithm becomes confused. As mentioned previously, we expect real problems to emerge for QLE experiments when inexact likelihood calls are considered.
\begin{figure}
\caption{The quadratic loss plotted as a function of the number of QLE experiments for $4,8,12$ qubits (from bottom to top) interacting on the line. The dashed lines show a $50\%$ confidence interval for the quadratic loss. $10~000$, $10~000$ and $20~000$ particles were used in the $n=4$, $n=8$ and $n=12$ cases respectively.}
\label{fig:QLEline_scale}
\end{figure}
\subsection{Errors in Likelihood Evaluations}
The numerical examples provided in the main body of the text assumed that the error in the inference process due to using a finite number of samples to compute the likelihoods $\Pr(D|\vec{x}_i)$ is negligible. Here we provide evidence showing that the learning process is robust to such errors for IQLE experiments on the Ising models considered previously.
In particular, let us define $\mathcal{P}$ to be the uncertainty in the estimated probability that results from estimating the likelihood function using the trusted quantum simulator. Here we simulate the use of MLE (Maximum Likelihood Estimation) or ALE (Adaptive Likelihood Estimation \cite{ferrie_likelihood-free_2013}) methods by adding normally distributed noise with zero mean and standard deviation $\mathcal{P}$, and then clip the likelihood to the interval $[0,1]$. This is chosen in preference to MLE or ALE because it is expedient to compute and it models the results of either method closely. We find that even if $\mathcal{P}$ is a large constant then our algorithm continues to reduce the quadratic loss at a rate that scales as $e^{-\gamma N}$; albeit at a reduced value of $\gamma$.
This clearly indicates that we do not need to take $\epsilon$ to be small in order to achieve a small error.
The robustness of Bayesian inference using the SMC approximation is illustrated in \fig{poison}, where we show that our algorithm is robust to sampling errors for $9$ qubits interacting on the line for $\mathcal{P}=0.1$ and $\mathcal{P}=0$. We see that the data for QLE experiments agrees with that of IQLE experiments for short times, which is expected because the probability distribution has not had time to reach its maximum support. At later times, QLE experiments with $\mathcal{P}=0.1$ fare much worse than IQLE experiments. Nonetheless, IQLE experiments (and QLE for this value of $\mathcal{P}$) still exhibit exponential scaling of the error with the number of experiments. This may be surprising because the errors in $\Pr(D|\vec{x}_i)$ can be as large as $0.1$, which one may assume would be catastrophic given that many of the outcomes will have probability less than $0.1$ in such models. We note that in particular, IQLE experiments are more robust to such noise than QLE experiments. This is because the inversion employed by IQLE concentrates the probability over a smaller number of outcomes; leading to smaller relative errors in the likelihood evaluations in such cases.
\fig{poisonscale} gives a more clear picture of the effects of sampling error on the resultant distribution for IQLE experiments. We observe that the presence of such noise does not qualitatively change the scaling of $\gamma$, where $\gamma$ is the decay exponent that describes how the quadratic loss shrinks as more experimental data is provided. Specifically, we find that for $\mathcal{P}=0$, $\gamma\propto d^{-1}$ whereas for $\mathcal{P}=0.4/n$ (which corresponds to $\epsilon \approx \sum_{i=1}^d 0.4/n \approx 0.4$) we find that $\gamma\propto d^{-3/2}$. This shows that large sampling errors do not necessarily prevent our algorithm from learning the Hamiltonian parameters at a rate that scales as $Ae^{-\gamma N}$ and further suggests that this learning process is efficient for the problem of learning unknown Ising couplings. It also suggests that a constant value of $\epsilon$ may suffice for certain experiments.
The surprising robustness of our method comes in part from the fact that the likelihood function must be approximated for each particle. This means that if the algorithm errs in the update of a particular particle due to inexact evaluation of the likelihood function then it may not err substantially in evolving the total probability in the region that many such particles are in. For example, consider a region $R$ that has 10~000 particles in it. The probability density in that region is then $\sum_{\vec{x}_i \in R} w_i/V(R)$, where $V(R)$ is the volume of the region. We then see that if errors are independent and identically distributed over each particle, then the total error in the update of the probability density will be roughly $1/100$ the error that would be expected if all the errors were in fact correlated. Thus the robustness of the algorithm may be understood in part as a consequence of the fact that the errors are (approximately) unbiased about the true likelihood and that the particle number will typically be large for high precision inferences.
\begin{figure}
\caption{This plot shows the median quadratic loss for a $9$ qubit Ising model on the line for the cases where inversion is used and when inversion is not used, for cases where the sample standard deviation is $\mathcal{P}=0.1$ and where $\mathcal{P}=0$.}
\label{fig:poison}
\end{figure}
\begin{figure}
\caption{This plot shows the median value of $\gamma$ computed for the case of IQLE experiments where the interaction graph is a line and $n$ ranges from $1$ to $12$ with different levels of noise. 20~000 particles were used for these numerical experiments.}
\label{fig:poisonscale}
\end{figure}
\section{Bayesian Inference of Hamiltonians}\label{app:bayes}
Sequential Monte Carlo (SMC) has before been considered in the context of quantum information \cite{huszar_adaptive_2011}, and in particular for its utility in estimating Hamiltonian dynamics \cite{granade_robust_2012}.
Here, we summarize and review the sequential Monte Carlo algorithm and approximation, as SMC is an important tool for the practical implementation of statistical inference according to Bayes' rule, and in particular for our proposed methods. The SMC approximation is of central importance here because Hamiltonian models are typically parameterized by a vector of real numbers rather than discrete numbers. This means that there are an infinite number of hypothetical Hamiltonians that could represent the system, which makes the update of the prior distribution intractable. The SMC approximation is used to model the continuous distribution over model parameters (which is computationally difficult to sample from) by a discrete distribution
that preserves the low--order moments of the distribution, thereby making estimation of the unknown Hamiltonian parameters tractable.
To clarify, suppose we have fixed an input state $\ket\psi$ and measurement basis $\{\ket{D}\}$, but that the Hamiltonian under which the state evolves is unknown.
Had we known that the Hamiltonian was $H$, we could apply Born's rule to obtain the probability distribution for the outcomes of the experiment:
\begin{equation}
\Pr(D|H)=|\bra{D}e^{-iHt}\ket{\psi}|^2.
\end{equation}
This is called the \emph{likelihood function}. When we write a probability distribution $\Pr(\vec{x}|y)$, we are stating how likely the proposition $\vec{x}$ is true \emph{given} $y$ is known to be true. In a Hamiltonian learning problem, $H$ is unknown and the measurement result is given.
Bayes' rule provides a way to invert the conditioning to provide the probability that $H$ is the true Hamiltonian given that datum $D$ is recorded:
\begin{equation}
\Pr(H|D) =\frac{\Pr(D|H)\Pr(H)}{\Pr(D)}=\frac{\Pr(D|H)\Pr(H)}{\int \Pr(D|H)\Pr(H)\,\mathrm{d} H}.
\end{equation}
Here, $\Pr(H)$ is called the \emph{prior} and formally encodes any \emph{a priori} knowledge of the unknown Hamiltonian. The probability of interest, $\Pr(H|D)$ is called the \emph{posterior} since it encodes our \emph{a posteriori} knowledge. The final term $\Pr(D)$ can simply be thought as a normalization factor that can be found implicitly by integrating over the unnormalized distribution.
Since each measurement is statistically independent given $H$, the processing of the data can be done on- or off-line; Bayesian updating (or Bayesian learning or Bayesian inference) allows us to sequentially update our knowledge of the Hamiltonian through a sequence of probability distributions $\Pr(H|\{D_1, D_2,\ldots\})$.
In practice, the Bayesian update rule and the expectations listed above are analytically and computationally intractable since they involve integrals over multidimensional parameter spaces. However, if we drop the requirement of a deterministic algorithm, we can efficiently compute them using Monte Carlo techniques. Our numerical algorithm fits within the subclass of Monte Carlo methods called \emph{sequential Monte Carlo} or SMC \cite{doucet_tutorial_2011}.
The first step in the approximation method is to think of $H$ as a function that maps a parameterization $\vec{x}$ of a Hamiltonian to a Hermitian operator $H(\vec{x})$.
Doing so allows us to reduce the dimension of the random variable that we are reasoning about, called the model dimension, by using knowledge about the class
of Hamiltonians which are plausible given the physics of the system.
We then approximate the probability distribution by a weighted sum of Dirac delta-functions,
\begin{equation}
\Pr(H(\vec{x})) \approx \sum_{j=1}^{|\{\vec{x}_i\}|} w_j \delta(\vec{x} - \vec{x}_j),
\end{equation}
where the weights at each step are iteratively calculated from the previous step via
\begin{equation}
w_j \mapsto \Pr(D|\vec{x}_j) w_j,
\end{equation}
followed by a normalization step. The elements of the set $\{\vec{x}_j\}_{j=1}^{|\{\vec{x}_i\}|}$ are called \emph{particles}. Here, ${|\{\vec{x}_i\}|}$ is the number of particles and controls the accuracy of the approximation. Like all Monte Carlo algorithms, the SMC algorithm approximates expectation values, such that
\begin{equation}
\mathbb{E}_{\vec{x}}[f(H(\vec{x}))] \approx \sum_{j=1}^{|\{\vec{x}_i\}|} w_j f(H(\vec{x}_j)).
\end{equation}
In other words, sequential Monte Carlo allows us to efficiently compute multidimensional integrals with respect to the measure defined by the probability distribution.
An iterative numerical algorithm such as SMC requires care to ensure stability. In the next section, we derive the conditions for stability of the algorithm. But first we describe one additional, and important, step in the iteration. The step is called \emph{resampling} and is required to ensure that the SMC particles explore the space of Hamiltonians rather than staying fixed at the ${|\{\vec{x}_i\}|}$ initially chosen hypotheses. This is necessary both intuitively and, as we will see next, computationally.
The idea is simple: if the weight associated to a particle is too small, move the particles to a region where the weight is large. We follow the methodology of Liu and West \cite{Liu2000Combined}. First, to determine when to resample, we compare the effective sample size $N_{\text{ess}} = 1/\sum_j w_j^2$ to a threshold (typically $|\{\vec{x}_i\}|/2$). If the threshold is not met, we randomly select ${|\{\vec{x}_i\}|}$ new particles according to the distribution of the current weights. Additionally, we incorporate randomness to search larger volumes of the parameter space. This randomness is inserted by
applying a random perturbation to the location of each new particle. Thus, the new particles are randomly spread around the previous locations of the old.
After drawing ${|\{\vec{x}_i\}|}$ new particles, we set the weight of each new particle to $1/{|\{\vec{x}_i\}|}$ so that $N_{\text{ess}} = |\{\vec{x}_i\}|$. To clarify, the Liu and West resampler algorithm updates the position of a particle $\vec{x}_i$, which is sampled from the posterior distribution, by drawing a particle from a Gaussian distribution with mean
\begin{equation}
\vec{\mu}_i = a\vec{x}_i + (1-a)\vec{\mu},
\end{equation}
where $\vec{\mu}=\mathbb{E}_{\vec{x}} [\vec{x}]$ is the posterior mean of the particle location and $a\in[0,1]$ is a constant. The covariance matrix for the Gaussian distribution is given by
\begin{equation}
\vec{\Sigma}=(1-a^2)\mathbb{C}ov(\vec{x}),
\end{equation}
where $\mathbb{C}ov(\vec{x})$ is the covariance matrix for the particle positions. The resampler therefore introduces randomness into the problem that depends on the current level of uncertainty in the unknown Hamiltonian parameters. We find for the learning problems that we consider $a=0.9$ performs well, whereas for simpler learning problems $a=0.98$ was found to be superior~\cite{granade_robust_2012,Liu2000Combined}.
Full algorithmic details of SMC, including resampling, are given in \cite{granade_robust_2012}.
The resultant posterior probability provides a full specification of our knowledge. However, in most applications, it is sufficient---and certainly more efficient---to summarize this distribution. In our context, the optimal single Hamiltonian to report is the mean of the posterior distribution (here, we have omitted for brevity the fact that the posterior is conditioned on the data)
\begin{equation}
\mu_H := \mathbb{E}_{\vec{x}}[H(\vec{x})] = \sum_{j=1}^{|\{\vec{x}_i\}|}w_j H(\vec{x}_j).
\end{equation}
However, a single point in the space of unknown Hamiltonians does not provide information about the uncertainties in this estimate. For that we turn to \emph{regions}. In particular, the set of Hamiltonians $X$ is an $\alpha$-credible region if
\begin{equation}
\Pr(H\in X) \geq 1-\alpha.
\end{equation}
That is, a set is an $\alpha$-credible region if no more than $\alpha$ probability mass exists outside the region or, equivalently, at least $1-\alpha$ probability mass is contained within the region.
After a sufficient number of experiments, we assume the posterior distribution will be approximately Gaussian in terms
of our chosen parameterization, so that $\vec{x} \sim \mathcal N(\hat{\vec{x}},\mathbb{C}ov[\vec{x}])$, where $\hat{\vec{x}} = \mathbb{E}[\vec{x}]$. Then, an $\alpha$-credible region estimate is given by the covariance ellipse
\begin{equation}
X = \{H(\vec{x}): (\vec{x}-\mathbb{E}_{\vec{x}}[\vec{x}])^{\mathrm{T}}\mathbb{C}ov[\vec{x}]^{-1}(\vec{x}-\mathbb{E}_{\vec{x}}[\vec{x}]) \leq Z_\alpha^2\},
\end{equation}
where $Z_\alpha^2$ is the $\alpha$-quantile of the $\chi_d^2$ distribution. Such estimates are important because they allow SMC methods to characterize the uncertainty in an estimate of the unknown Hamiltonian~\cite{granade_robust_2012}. We do not emphasize the ability of our learning algorithm to perform region estimation in the main body of the text, but the algorithm's capability of specifying the uncertainty in the unknown Hamiltonian through the form of a region estimation provides a powerful advantage over tomographic methods wherein such a characterization of the uncertainty is much less natural.
\section{Solution in tractable cases}
\label{app:derivations}
The mathematical tools we use to solve the Hamiltonian identification problem are those of decision theory and statistical learning. We have used a combination of methods from computation statistics to approximate the optimal solution to the problem of learning a Hamiltonian. However, for the case of a single unknown parameter, the equations can be solved analytically. These solutions provide much of the insight into designing the numerical algorithm to solve the general problem as well as serve to explain, in a broader context, why our method succeeds.
\subsection{Statistical decision theory of learning}
To evaluate the performance of any algorithm, we compare the estimated Hamiltonian parameters $\hat{\vec{x}}$ to the true parameters $\vec{x}_0$ by using the
quadratic loss $L(\hat{\vec{x}}, \vec{x}) = \|\hat{\vec{x}} - \vec{x}\|^2 $. This loss function generalizes the mean squared error to multiple parameters, and quantifies the error we incur due to the estimation procedure.
Our task is to choose an \emph{estimator} $\hat{\vec{x}}(D)$, a function from the possible data sets to valid parameters. This problem is most naturally cast in the language of decision theory. There is ostensibly one general approach: minimize---in some sense---the expected loss, or \emph{risk}. That is we choose the estimator which satisfies
\begin{equation}\label{eq:opt Bayes mean loss}
\hat{\vec{x}}_{\rm opt} := \operatorname{argmin}_{\hat{\vec{x}}}\mathbb E_{\vec{x},D}[\|\vec{x}-\hat{\vec{x}}(D)\|^2],
\end{equation}
where the expectation is with respect to the distribution of $\vec{x}$ and $D$ for the given experiment. This objective function is denoted $r(\vec{e})$ for the experiment $\vec{e}$, and called the \emph{Bayes risk}. Under some regularity conditions, the unique best strategy is the Bayesian one, selecting as the estimator the mean of the posterior distribution
\begin{equation}
\hat{\vec{x}}_{\rm opt} = \mathbb E_{\vec{x}|D}[\vec{x}].
\end{equation}
An important and useful consequence of using the quadratic loss is that the Bayes risk is equal to the expected trace of the covariance matrix of the posterior distribution (in the case of a single parameter it is simply the variance).
\subsection{Single parameter problem}
For the single parameter problem, the Hamiltonian reads
\begin{equation}
H(x) = x \sigma_z^{(1)}\sigma_z^{(2)},
\end{equation}
and the initial state is $\ket +$ and final measurement is in the basis $\{\ket +,\ket -\}$ (labeling the outcomes $\{0,1\}$). If we evolve for a time $t$ and allow an IQLE experiment with inversion Hamiltonian $H_{-}:=H(\vec{x}_{-})$, the output probability distribution (the likelihood function) is
\begin{equation}
\Pr(d|x;\vec{x}_{-},t) = \frac12(1 + (1-2d)\cos[2(x-\vec{x}_{-})t]).
\end{equation}
This model, for CLE experiments, was studied in detail in \cite{ferrie_how_2012}. To obtain asymptotic expressions, we assume that the probability distribution of $x$ is approximately Gaussian and remains so after a subsequent measurement. The risk incurred between these two measurements then provides an asymptotic approximation to the Bayes risk of the algorithm.
Formally, we assume
\begin{equation}
\Pr(x|\mu,\sigma) = \frac{1}{\sqrt{2\pi\sigma^2}}\exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right),
\end{equation}
with mean $\mu$ and variance $\sigma^2$. The posterior distribution
\begin{equation}
\Pr(x|d, \mu, \sigma; \vec{x}_{-},t) = \frac{\Pr(d|x,\mu,\sigma;\vec{x}_{-},t)\Pr(x|\mu,\sigma)}{\Pr(d|\mu,\sigma;\vec{x}_{-},t)}
\end{equation}
gives a mean of
\begin{equation}
\hat{x}_{\rm opt} = \mu + \frac{2i(1-2d)\sigma^2 t(e ^{4i\mu t}-e^{4i\vec{x}_{-} t})}{2 e^{2t(i(\mu+\vec{x}_{-})+\sigma^2 t)} + (1-2d)(e ^{4i\mu t}-e^{4i\vec{x}_{-} t})},
\end{equation}
which is the final estimator. The risk incurred by this estimator (which, recall, is the optimal one) is explored next.
\subsection{Asymptotic risk and the particle guess heuristic}
These calculations rapidly become too cumbersome to display. The behavior of these more complex systems, however, can be described concisely with a few graphs, as shown in \fig{risk_final}.
\begin{figure}
\caption{Mean squared error (risk) of the optimal estimator as a function of the experimental parameters $(t,\vec{x}_{-})$.}
\label{fig:risk_final}
\end{figure}
\fig{risk_final} shows the mean squared error for different choices of $(t,\vec{x}_{-})$. Notice the envelope
\begin{equation}
1-4\sigma^2 t^2 e^{-4\sigma^2t^2}\leq \frac{r(\vec{x}_{-},t)}{\sigma^2} \leq 1.
\end{equation}
This tells us that the posterior variance cannot increase on average. In other words, there is no such thing as a strictly bad experiment. It also gives us a theoretical lower bound; the ``risk envelope'' has a minimum at
\begin{equation}
t_{\rm opt} = \frac{1}{2\sigma}.
\end{equation}
This leads to
\begin{equation}
\min_{\vec{x}_{-},t} r(\vec{x}_{-},t) = (1-e^{-1})\sigma^2.
\end{equation}
That is, per measurement, the risk is reduced by a factor of about $0.63$, which leads to the exponential scaling observed in \cite{ferrie_how_2012}.
Notice, however, in \fig{risk_final} that the risk rapidly oscillates within the risk envelope. This shows, in particular, that the minimum corresponds to the solution of a challenging global optimization problem. This is where we see the advantage of inversion; the effect of inversion is to ``wash out'' these oscillations. Thus, errors in approximations misplacing the optimal evolution time are less severe. Based on numerical testing, the optimal experimental inversion parameter is
\begin{equation}\label{eq:exp design 1D}
\vec{x}_{-} \approx \mu \pm \sigma.
\end{equation}
These results and conclusions are only valid for the 1-dimensional parameter estimation problem. However, from these results we can gain intuition for what ought to happen in the multi-dimensional case. First, since the role of time is identical, we would expect that the optimal algorithm achieves exponential scaling, as we indeed observe. Second, we should expect that the optimal time for each experiment be proportional to some function of the inverse covariance matrix of the current distribution. Computing and inverting the covariance matrix is computationally inconvenient and so we use the heuristic
\begin{equation}
t = \frac{1}{\|\vec{x}' - \vec{x}\|},
\end{equation}
where $\vec{x}\neq \vec{x}'$ are two particles drawn at random from the distribution of particles weights. This is a proxy for the inverse of the standard deviation. Finally, a computationally efficient analog of the experiment design in equation \eq{exp design 1D} is to simply select $\vec{x}_{-}$ at random from the distribution of particle weights. As we will see next, this added randomization has a positive effect when additional errors are present.
One final note before we move on is that the above analysis assumes the distribution is approximately Gaussian. This will eventually be true but in practice we require a ``warm-up'' phase of experiment designs before we employ the heuristics motivated by the asymptotic analysis. Fortunately, the randomization included in the particle guess heuristic provides a way to adaptively warm up the learning algorithm without including an ad hoc warm-up heuristic, as was done in previous studies~\cite{granade_robust_2012}.
\subsection{Robustness of inversion to sampling error}
For a two-outcome model, the only possible errors (regardless of origin---physical, modeling, sampling, etc.) manifest as bit-flips. If we assume the process is symmetric, we have a noisy version of the likelihood function,
\begin{equation}
\Pr(d|x;\vec{x}_{-},t,\alpha) = \alpha + (1-2\alpha)\Pr(d|x;\vec{x}_{-},t),
\end{equation}
where $\alpha$ is the probability of a bit-flip. Now, since we assume that the algorithm is blind to this added noise, the posterior does not change. Thus, the estimator (the posterior mean) and the variance do not change either. If this seems odd, one must think of the posterior as a logical construct which is updated with the assumed model---not the true model. To evaluate the Bayes risk, however, we must take the average with respect to data from the true model:
\begin{equation}
r(\vec{x}_{-},t,\alpha) = \mathbb E_{d|\vec{x}_{-},t,\alpha}[{\rm Var}_{x|d;\vec{x}_{-},t}(x)].
\end{equation}
This quantity is shown in \fig{risk_noise_final} for various values of $\alpha$. The important thing to note is that the strategy with no inversion possesses a risk which can now \emph{increase}. Now ``bad'' experiments are just as likely as good ones near the optimal evolution time. Remarkably, the inversion model is completely insensitive to any strength of noise near the optimal evolution time. Moreover, the ``particle guess heuristic'' achieves the same performance independent of noise, which implies that the experimenter need not change their strategy depending on whether noise is present or not.
\begin{figure}
\caption{Bayes risk in the presence of bit-flip noise, shown for various values of the noise strength $\alpha$.}
\label{fig:risk_noise_final}
\end{figure}
\subsection{Consistency in multiple dimensions}
The above analysis considered the case of a single unknown parameter. While this makes it plausible that the statistical lessons learned remain valid when moving to more unknown parameters, it would be more comforting to have similar results for more than a single parameter. Unfortunately, the integrals required appear to be analytically intractable. We can, however, perform simulations to obtain an approximate functional form for the Bayes risk. To this end, we consider the 3-qubit problem:
\begin{equation}
H(x_{1},x_2) = x_1 \;\sigma_z^{(1)}\sigma_z^{(2)}+x_2\;\sigma_z^{(2)}\sigma_z^{(3)}.
\end{equation}
The results of the simulations, analogous to those presented in Figures \ref{fig:risk_final} and \ref{fig:risk_noise_final}, are shown in \fig{risk_2d_final}. The conclusions drawn from the 2-qubit case remain; inversion enhances the performance of the estimation algorithm by smoothing out the Bayes risk and leaving the improvement unchanged near the optimal evolution time.
\begin{figure}
\caption{Bayes risk for the two-parameter (3-qubit) problem, analogous to the results in Figures \ref{fig:risk_final} and \ref{fig:risk_noise_final}.}
\label{fig:risk_2d_final}
\end{figure}
\section{Conditions for Asymptotic Stability of Bayesian Inference}\label{app:stability}
We have already discussed the need for resampling as a means of maintaining the stability of performing Bayesian inference using the SMC approximation. Here we discuss why these instabilities arise, and whether there are other sources of instabilities that can arise in quantum Hamiltonian estimation. We show that the errors in the updating procedure will, on average, be small given that experiments are chosen that do not yield small likelihoods for probable events and given that the particle weights used in the SMC approximation do not become too small.
We consider, for now, only one step in the updating procedure. There are two sources of errors that can arise in the update procedure: (1) errors in the prior that have arisen due to previous approximate updates or numerical errors in the initial prior (2) errors in the likelihood evaluation. Let us assume that datum $D$ is obtained and let the error--free prior probability of Hamiltonian $H(\vec{x}_j)$, for any $j$, be denoted $\Pr(\vec{x}_j)$ and similarly the actual likelihood is $\Pr(D|\vec{x}_j)$. We then denote the approximate analogs of these distributions $\tilde{\Pr}(\vec{x}_j)$ and $\tilde{\Pr}(D|\vec{x}_j)$. The error in the posterior probability of $\vec{x}_j$ is
\begin{equation}
\epsilon_j =\left|\frac{\Pr(D|\vec{x}_j)\Pr(\vec{x}_j)}{\sum_j \Pr(D|\vec{x}_j)\Pr(\vec{x}_j)}-\frac{\tilde \Pr(D|\vec{x}_j)\tilde \Pr(\vec{x}_j)}{\sum_j \tilde \Pr(D|\vec{x}_j)\tilde \Pr(\vec{x}_j)} \right|.
\end{equation}
For simplicity, we will now introduce variables that describe the variation of the approximate probabilities from the precise probabilities. These deviations can, in many circumstances, be thought of as random variables since the majority of the error
in this protocol will arise from using sampling to estimate the likelihood function.
\begin{align}
\tilde \Pr(\vec{x}_j)&:=\eta'_j + \Pr(\vec{x}_j)\nonumber\\
\tilde \Pr(D|\vec{x}_j)&:=\eta_j + \Pr(D|\vec{x}_j)
\end{align}
We make use of the fact that
\begin{equation}
\eta := \sum_j \Pr(D|\vec{x}_j)|\eta'_j| +\Pr(\vec{x}_j)|\eta_j| +|\eta_j\eta'_j| \le \frac 1 2 \sum_j \Pr(D|\vec{x}_j)\Pr(\vec{x}_j).
\end{equation}
Then assuming that $\max\{|\eta'_j|,|\eta_j|\}\in O(\eta)$ we find by using Taylor's theorem and the triangle inequality that
\begin{align}
\epsilon_j &= \left|\frac{ [\Pr(D|\vec{x}_j)+\eta_j] [\Pr(\vec{x}_j)+\eta'_j]}{\sum_k [\Pr(D|H(\vec{x}_k))+\eta_k][\Pr(H(\vec{x}_k))+\eta'_k]}-\frac{\Pr(D|\vec{x}_j)\Pr(\vec{x}_j)}{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))} \right|\nonumber\\
&\le\Biggr|(\Pr(\vec{x}_j)+\eta'_j)(\Pr(D|\vec{x}_j)+\eta_j)\left(\frac{1}{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}+\frac{2\eta}{(\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k)))^2} \right)\nonumber\\
&\qquad\qquad-\frac{\Pr(D|\vec{x}_j)\Pr(\vec{x}_j)}{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))} \Biggr|\nonumber\\
&\le\left(\frac{\Pr(D|\vec{x}_j)|\eta'_j|+\Pr(\vec{x}_j)|\eta_j|}{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}+\frac{2\eta \Pr(D|\vec{x}_j)\Pr(\vec{x}_j)}{(\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k)))^2} \right)+O(\eta^2)\label{eq:epsilonj}
\end{align}
The overall error as measured by the $1$--norm is $\epsilon = \sum_j \epsilon_j$ and hence~\eq{epsilonj} gives
\begin{align}
\epsilon&\le\frac{3\sum_k \Pr(D|H(\vec{x}_k))|\eta'_k|+\Pr(H(\vec{x}_k))|\eta_k| }{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}+O(\eta^2)\nonumber\\
&\le\frac{3\left(\sqrt{\sum_k \Pr^2(D|H(\vec{x}_k))}\sqrt{\sum_k |\eta'_k|^2}+ \sqrt{\sum_k \Pr^2(H(\vec{x}_k))}\sqrt{\sum_k |\eta_k|^2} \right)}{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}+O(\eta^2).\label{eq:error}
\end{align}
Equation \eq{error} provides an upper bound for the error in the Bayesian update for a fixed measured datum $D$. In practice, surprising outcomes can destabilize the update according to~\eq{error}. The contribution of such surprising results to the overall error is small if
\begin{align}
\sqrt{\sum_j \eta_j'^2}&\ll \frac{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}{\sqrt{\sum_k \Pr^2(D|H(\vec{x}_k))}},\label{eq:delta'}\\
\sqrt{\sum_j \eta_j^2}&\ll \frac{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}{\sqrt{\sum_k \Pr^2(H(\vec{x}_k))}}=\sqrt{N_{\rm ess}}\left({\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right),\label{eq:delta}
\end{align}
where $N_{\rm ess}$ is the effective sample size.
These equations give two different criteria for the stability of the Bayesian update. Equation~\eq{delta'} states that if the weights of the particles are too small then an unreasonably small value of $\eta'_j$ may be required to ensure that the error in the update is small. This justifies the need for using resampling in SMC methods, and further justifies the criteria used for resampling in our algorithm: $N_{\rm ess}= (\sum_k \Pr^2(H(\vec{x}_k)))^{-1}\le |\{\vec{x}_i\}|/2$. Equation~\eq{delta} makes a more interesting claim. It states that the update rule can become unstable if the expectation value of $\Pr(D|H(\vec{x}_k))$ over the prior $\Pr(H(\vec{x}_k))$ is small for typical values of $D$.
Eqns.~\eq{error} and~\eq{delta} imply that the error due to estimating the likelihood via sampling is asymptotically bounded above by $\epsilon$ if
\begin{equation}
\sum_j \eta_j^2\in O\left( \epsilon^2N_{\rm ess}\left({\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)^2\right).\label{eq:deltaAssBd1}
\end{equation}
Since ${\sum_j \eta_j^2}\le |\{\vec{x}_i\}| \eta_{j\max}^2$, where $\eta_{j\max}=\max_j \eta_j$, we have that~\eq{deltaAssBd1} is satisfied if
\begin{equation}
\eta_{j\max}\in O\left( \epsilon\sqrt{\frac{N_{\rm ess}}{|\{\vec{x_i}\}|}}\left({\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)\right).\label{eq:deltaAssBd2}
\end{equation}
Our criterion for resampling is that $N_{\rm ess} \le |\{\vec{x}_i\}|/2$. So, we have that $N_{\rm ess}\in \Theta(|\{\vec{x}_i\}|)$ and hence~\eq{deltaAssBd1} is implied by
\begin{equation}
\eta_{j\max}\in O\left( \epsilon\left({\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)\right).\label{eq:deltaAssBd3}
\end{equation}
We use as our estimate of $\Pr(D|H(\vec{x}_k))$ the fraction of samples drawn from the simulator that yield outcome $D$.
The resultant distribution for the number of samples that yield $D$ is a binomial distribution with mean $N_{\rm samp} \Pr(D|H(\vec{x}_k))$ and variance $N_{\rm samp}\Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))$.
Hence, if $N_{\rm samp}$ samples are drawn from the simulator then the uncertainty in our estimate of $\Pr(D|H(\vec{x}_k))$ obeys
\begin{equation}
\eta_{j\max}\in O\left(\sqrt{ \frac{\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{{N_{\rm samp}}}}\right).
\end{equation}
Therefore we have from~\eq{deltaAssBd3} that~\eq{deltaAssBd1} is satisfied if
\begin{equation}
\sqrt{ \frac{\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{{N_{\rm samp}}}}\in O\left( \epsilon{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right),
\end{equation}
which is equivalent to saying that
\begin{equation}
N_{\rm samp} \in \Omega\left(\frac{\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{\left( \epsilon{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)^2} \right).
\end{equation}
We require that $N_{\rm samp}$ samples are drawn for each particle in $\{\vec{x}_i\}$ and hence it is sufficient to take a number of simulations that scales as
\begin{equation}
N_{\rm sim} \in \Theta\left(\frac{|\{\vec{x}_i\}|\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{\left( \epsilon{\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)^2} \right).
\end{equation}
Our method uses the mean of the posterior distribution as an estimator for the true Hamiltonian, $H(\vec{x})$, which means that more work is needed to determine how an error of $\epsilon$ in the update procedure propagates to errors in the mean and the variance of the posterior distribution. Let $\mu_H:=\sum_i \Pr(H(\vec{x}_i)|D)H(\vec{x}_i)$ be the posterior mean and $\tilde\mu_H:=\sum_i \tilde\Pr(H(\vec{x}_i)|D)H(\vec{x}_i)$ be the posterior mean calculated by approximate likelihood evaluation.
The error in the estimated Hamiltonian, as measured by the $2$--norm is then
\begin{equation}
\|\mu_H -\tilde\mu_H\|\le \max_i \|H(\vec{x}_i)\| \sum_i |\Pr(H(\vec{x}_i)|D)-\tilde{\Pr}(H(\vec{x}_i)|D)|= \max_i \|H(\vec{x}_i)\| \epsilon.
\end{equation}
Similarly, it is straightforward to see that
\begin{equation}
\left|\sum_{i}\Pr(H(\vec{x}_i)|D) \|H(\vec{x}_i)-\mu_H\|^2-\tilde\Pr(H(\vec{x}_i)|D) \|H(\vec{x}_i)-\tilde\mu_H\|^2\right|\in O( \max_i \|H(\vec{x}_i)\|^2\epsilon),
\end{equation}
where $\sum_{i} \Pr(H(\vec{x}_i)|D) \|H(\vec{x}_i)-\mu_H\|^2$ is the posterior variance.
It may be tempting to conclude that after $N$ steps, the error in the estimate is $N\max_i \|H(\vec{x}_i)\| \epsilon$, but because the Bayesian update rule is non-linear it is difficult to prove such a bound. Instead, note that Bayesian inference is robust to the choice of prior~\cite{granade_robust_2012} and thus the inference process will remain stable under such errors. We therefore can consider beginning the inference process using the erroneous posterior as the prior and expect convergence if the relative errors in the variance are small. In particular, we expect stability if
\begin{equation}
\max_i \|H(\vec{x}_i)\|^2\epsilon \in O( \delta),
\end{equation}
where $\delta\le Ae^{-\gamma N}$ is defined to be the error in the estimate of the unknown Hamiltonian and $\gamma$ is approximately a constant function in $N$ for the test cases considered in the main body of the paper. We therefore expect the algorithm to be stable if $\epsilon$ is chosen as above and hence, it will suffice to use a number of simulations in an update that approximately scales as
\begin{equation}
N_{\rm sim} \in \Theta\left(\frac{\max_i \|H(\vec{x}_i)\|^4|\{\vec{x}_i\}|}{\delta^2}\frac{\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{\left( {\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)^2} \right).
\end{equation}
It is then easy to see from Markov's inequality that with high probability over the experiments the cost of any given update will be at most a constant multiple of the cost of the expectation value over all prior distributions $\Pr(\vec{x}_i)$ that appear in the learning process and all outcomes $D$ observed. Therefore, our approximation to the total number of simulations required to learn the parameters within loss $\delta$ scales, with high probability, as
\begin{align}
N_{\rm total} &\in \Theta\left(\frac{N\max_i \|H(\vec{x}_i)\|^4|\{\vec{x}_i\}|}{\delta^2}\mathbb{E}\left(\frac{\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{\left( {\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)^2}\right) \right)\nonumber\\
&\in \Theta\left(\frac{\log(1/\delta)\max_i \|H(\vec{x}_i)\|^4|\{\vec{x}_i\}|}{\gamma\delta^2}\mathbb{E}\left(\frac{\max_k \Pr(D|H(\vec{x}_k))(1-\Pr(D|H(\vec{x}_k)))}{\left( {\sum_k \Pr(D|H(\vec{x}_k))\Pr(H(\vec{x}_k))}\right)^2}\right) \right).
\end{align}
This suggests that QLE and IQLE may be efficient, given that $\gamma\in \Omega(\poly(1/n))$, $\max_j\|H_j\|\in O(\poly (n))$ and experiments that yield, with high probability, $\Pr(D|H(\vec{x}_k))\in O(1/\poly(n))$ are avoided. We observed that these scalings are obeyed for the examples considered in the main body if IQLE and the particle guess heuristic are employed. More complex examples may require local optimization of the guesses in order to avoid multi-modal prior distributions, which can be problematic for the PGH; however, we saw no benefit to local optimization for the Ising models considered previously. Finally, the scaling predicted for $N_{\rm total}$ as a function of $\delta$ does not seem to be tight in the prior examples since $N_{\rm total}$ does not appear to strongly depend on $\delta$ in those examples. A more careful analysis of the uncertainty is likely to reveal that our conditions for stability are unnecessarily pessimistic.
\end{document}
\begin{document}
\title{On Antipodes Of Hom-Hopf algebras}
\author{Mohammad Hassanzadeh}
\date{University of Windsor\\
Windsor, Ontario, Canada\\
[email protected]
}
\begin{abstract}
In the recent definition of Hom-Hopf algebras the antipode $S$ is the relative Hom-inverse of the identity map with respect to the convolution product.
We observe that some fundamental properties of the antipode of Hopf algebras and Hom-Hopf algebras, with the original definition, do not hold generally in the new setting.
We show that the antipode is a relative Hom-anti-algebra and a relative anti-coalgebra morphism. It is also relative Hom-unital, and relative Hom-counital. Furthermore if the twisting maps of multiplications and comultiplications are invertible then $S$ is an anti-algebra and an anti-coalgebra map.
We show that any Hom-bialgebra map between two Hom-Hopf algebras is a relative Hom-morphism of Hom-Hopf algebras. In particular, if the corresponding twisting maps are all invertible then it is a Hom-Hopf algebra map.
If the Hom-Hopf algebra is commutative or cocommutative we observe that $S^2$ is equal to the identity map in some sense. At the end we study the images of primitive and group-like elements under the antipode.
\end{abstract}
\section{Introduction}
The examples of Hom-Lie algebras first appeared in $q$-deformations of
algebras of vector fields, such as Witt and Virasoro algebras \cite{as}, \cite{ckl}, \cite{cz}. The concept of Hom-Lie algebras generalizes the one for Lie algebras where the Jacobi identity is twisted by a homomorphism \cite{hls}, \cite{ls}.
Hom-associative algebras were introduced and studied in \cite{ms1}. Moreover Hom-coalgebras and Hom-bialgebras were studied in \cite{ms2}, \cite{ms3}, \cite{ya2}, \cite{ya3}, \cite{ya4}. In the last years, many classical algebraic concepts have
been extended to the framework of Hom-structures. For examples see \cite{hls}, \cite{gw}, \cite{pss}, \cite{hss}, \cite{aem}, \cite{gmmp}, \cite{gr}, \cite{cq}, \cite{cs}, \cite{zz}.
Hom-Hopf algebras were first introduced in \cite{ms2} and \cite{ms3}. In these works they defined a Hom-Hopf algebra $H$ to be a Hom-bialgebra $(H, \mu, \alpha, \eta, \Delta, \beta, \varepsilon)$, endowed with a map $S: H\longrightarrow H$, where it is the inverse of the identity map $\mathop{\rm Id}\nolimits_H$ with respect to the convolution product $\star$, i.e.,
$$ S \star \mathop{\rm Id}\nolimits = \mathop{\rm Id}\nolimits \star S= \eta\circ \varepsilon.$$
This definition of antipode is the same as the one for Hopf algebras. The universal enveloping algebra of a Hom-Lie algebra was introduced in \cite{ya4}. It has been shown that it is a Hom-bialgebra. However
it is not a Hom-Hopf algebra in the sense of \cite{ms2}, since it is shown in \cite{lmt} that the antipode is not an inverse of the identity map with respect to the convolution product. This was a motivation to change the definition of the antipode such that a Hom-Hopf algebra is a Hom-bialgebra which satisfies a weakened condition. For every $h\in H$ there exists $k\in \mathbb{N}$ satisfying the weakened condition
$$ \alpha^k(S \star \mathop{\rm Id}\nolimits )(h)= \alpha^k(\mathop{\rm Id}\nolimits \star S)(h)= \eta\circ \varepsilon(h). $$ This naturally suggests to change the definition of invertible elements of a Hom-algebra $A$ as being elements $a\in A$ such that there exists $b\in A$ and $k \in \mathbb{N}$ where $ \alpha^k( ab) = \alpha^k(ba) =1_A.$
This means the antipode is the relative Hom-inverse of the identity map.
In this paper we study this recent notion of Hom-Hopf algebras. More precisely by Definition \ref{Hom-Hopf} a Hom-Hopf algebra in the new setting is a Hom-bialgebra endowed with a unital, counital, anti-algebra and anti-coalgebra map $S: H\longrightarrow H$ which is relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits_H$, and it commutes with $\alpha$.
The Hom-Hopf algebras in Examples \ref{Sweedler} and \ref{2-dimensional Hopf} satisfy the conditions of both definitions.
The set of group-like elements and primitive elements are important to study Hopf type objects. The group-like elements gives a relation between Hom-Hopf algebras and Hom-groups while primitive elements connects Hom-Hopf algebras to Hom-Lie algebras.
The authors in \cite{lmt} showed that the set of group-like elements in a Hom-Hopf algebra is a Hom-group where the inverse elements are given by the antipode. In Example \ref{polynomial}, we introduce a Hom-bialgebra containing a group-like element which does not have any inverse. Therefore it does not have any antipode or Hom-Hopf algebra structure.
The main aim of this paper is to find out if one removes the important conditions unitality, counitality, anti-algebra map, anti-coalgebra map, and $S\circ \alpha=\alpha\circ S$, from Definition \ref{Hom-Hopf},
and only sticks with the relative Hom-invertibility condition of $S$, then how much of these properties can be recovered and what are the other properties of the antipode.
To investigate this, we consider a Hom-bialgebra endowed with a map $S$ which is a relative Hom-inverse of the identity map.
First we need to find out the relations between relative Hom-inverse elements with respect to the convolution product in Proposition \ref{important}. In Propositions \ref{relative anti algebra} and \ref{relative anti coalgebra}, we show that the antipode is a relative Hom-anti-algebra and a relative Hom-anti-coalgebra morphism.
It is also shown in Propositions \ref{hom unitality prop} and \ref{relative counitality} that the antipode is relative Hom-unital and relative Hom-counital.
Furthermore if the twisting maps $\alpha$ and $\beta$ are invertible then $S$ is an anti-algebra and an anti-coalgebra map.
Then in Proposition \ref{Hopf map} we prove that any Hom-bialgebra map between two Hom-Hopf algebras is a relative Hom-morphism of Hom-Hopf algebras. By Corollary \ref{hopf morphism}, if the corresponding twisting maps are all invertible then it is a Hom-Hopf algebra map. Furthermore we observe that if $\alpha=\beta$ then $S$ commutes with powers of $\alpha$. Later we study $S^2$ for commutative and cocommutative Hom-Hopf algebras. In these cases we prove that $S^2$ is equal to the identity map in some sense. If $\alpha$ and $\beta$ are invertible then $S^2=\mathop{\rm Id}\nolimits$. At the end we study the images of primitive and group-like elements under the antipode.
\bigskip
\textbf{Notations}: In this paper all (Hom)-algebras, (Hom)-coalgebras, (Hom)-bialgebras and (Hom)-Hopf algebras are defined over a field $\mathbb{K}$.
All tensor products $\otimes$ are over the field $\mathbb{K}$. We denote the set of natural numbers by $\mathbb{N}$.
\tableofcontents
\section{Hom-Hopf algebras}
In this section we recall the basics of Hom-algebras, Hom-coalgebras, Hom-bialgebras and Hom-Hopf algebras. To understand these structures we introduce some examples.
By \cite{ms1}, a Hom-associative algebra $A$ over a field $\mathbb{K}$ is a $\mathbb{K}$-vector space with a bilinear map $m: A\otimes A\longrightarrow A$, called multiplication, and a linear homomorphism
$\alpha: A\longrightarrow A$ satisfying the Hom-associativity condition
$$m \circ (m \otimes \alpha)= m \circ ( \alpha \otimes m).$$ In terms of elements $a,b,c\in A$, this can be written as $\alpha(a)(bc) = (ab)\alpha(c)$.
The Hom-associativity property in terms of a commutative diagram is
$$
\xymatrix{
A\otimes A\otimes A \ar[r]^{m\otimes \alpha} \ar[d]_{\alpha\otimes m} & A\otimes A \ar[d]^{m} \\
A \otimes A\ar[r]^{m} & A }
$$
A Hom-associative algebra $A$ is called unital if there exists a linear map $\eta: \mathbb{K}\longrightarrow A$ where $\alpha \circ \eta=\eta$, and
$$m \circ (\mathop{\rm Id}\nolimits \otimes \eta)= m \circ ( \eta\otimes\mathop{\rm Id}\nolimits) =\alpha.$$ The unit element of $A$ is $\eta(1_{\mathbb{K}})=1_A$.
These conditions in terms of an element $a\in A$ can be written as $\alpha(1_A)=1_A$ and $a 1_A= 1_A a =\alpha(a)$.
The unitality condition in terms of a commutative diagram is
$$
\xymatrix{
A \ar[r]^{\eta\otimes Id} \ar[rd]_{\alpha} &
A\otimes A \ar[d]^{m} &
A \ar[l]_{Id\otimes \eta} \ar[ld]^{\alpha}\\
&A}
$$
In many examples $\alpha$ is an algebra map, i.e., $\alpha(xy)= \alpha(x) \alpha(y)$ for all $x, y\in A$.
When $\alpha=\mathop{\rm Id}\nolimits$, we obtain the definition of associative algebras.
\begin{example}{\rm
Let $A$ be an algebra with multiplication $m: A\otimes A\longrightarrow A$, and $\alpha: A \longrightarrow A$ be an algebra map. We twist the multiplication of $A$ by $\alpha$ to obtain a new multiplication $m_{\alpha}(x,y)= m( \alpha(x), \alpha(y))$. Then $(A, m_{\alpha}, \alpha)$ is a Hom-algebra.}
\end{example}
\begin{example}\label{2d}
{\rm This example is a special case of the last example in \cite{ms2}.
In this example we define a $2$-dimensional Hom-algebra $A$ with a basis $B=\{ e_1, e_2\}$. We define the multiplication by
$$m(e_1, e_1)=e_1, ~~~~~m(e_1, e_2)=m(e_2, e_1)=e_2, ~~~~~m(e_2, e_2)=e_2.$$
We set $\alpha(e_1)= 2e_1-e_2$ and $\alpha(e_2)=e_2$. This Hom-algebra is unital and commutative with the unit element $\eta(1)=e_1$.
}
\end{example}
An element $x$ in a unital Hom-associative algebra $ (A, \alpha)$ is called Hom-invertible \cite{lmt}, if there exists an element $x^{-1}$ and a non-negative integer $k\in \mathbb{N}$ such that
$$\alpha^k(x x^{-1}) = \alpha^k(x^{-1} x)= 1.$$ The element $x^{-1}$ is called a Hom-inverse and the smallest $k$ is the invertibility index of $x$.
The Hom-inverse may not be unique if it exists. However the authors in \cite{lmt} showed that the unit element $1_A$ is Hom-invertible, the
product of any two Hom-invertible elements is Hom-invertible and every inverse of a Hom-invertible element
is Hom-invertible.
For two Hom-algebras $(A,\mu ,\alpha )$ and $(A^{\prime },\mu ^{\prime },\alpha ^{\prime })$
a linear map $f:A\rightarrow A^{\prime }$
is called a Hom-algebra morphism if
$$f(xy)= f(x) f(y), ~~~~~\text{and} ~~~ f(\alpha(x))= \alpha'(f(x)), ~~~\forall x, y\in A.$$
Now we recall the dual notion of a Hom-algebra which is called a Hom-coalgebra \cite{ms2}, \cite{ms3}. A Hom-coalgebra is a triple $(C, \Delta, \beta)$, where $C$ is a $\mathbb{K}$-vector space, $\Delta: C\longrightarrow C\otimes C$ is a linear map, called comultiplication, and $\beta: C\longrightarrow C $ a linear map satisfying the Hom-coassociativity condition,
$$(\Delta\otimes \beta ) \circ \Delta = ( \beta \otimes \Delta) \circ \Delta .$$
If we use the Sweedler notation $ \Delta (c) = c^{(1)} \otimes c^{(2)}$, then the coassociativity condition can be written as
$$ \beta( c^{(1)}) \otimes c^{(2)(1)} \otimes c^{(2)(2)} = c^{(1)(1)} \otimes c^{(1)(2)}\otimes \beta(c^{(2)}).$$
The coassociativity property in terms of a commutative diagram is the dual of the one for the Hom-associativity of Hom-algebras as follows:
$$
\xymatrix{
C\otimes C\otimes C & C\otimes C\ar[l]_{\Delta\otimes \beta} \\
C \otimes C \ar[u]^{\beta\otimes \Delta}& C\ar[l]_{\Delta}\ar[u]_{\Delta} }
$$
A Hom-coassociative coalgebra is said to be counital if there exists a linear map $\varepsilon: C \longrightarrow \mathbb{K}$ where
$$(\mathop{\rm Id}\nolimits\otimes \varepsilon ) \circ \Delta = ( \varepsilon \otimes \mathop{\rm Id}\nolimits) \circ \Delta =\beta .$$
This means $$c^{(1)} \varepsilon (c^{(2)})= \varepsilon (c^{(1)})c^{(2)} = \beta(c).$$
Furthermore the map $\beta$ is counital, i.e., $\varepsilon (\beta(c))= \varepsilon (c)$.
The counitality condition in terms of a commutative diagram is
$$
\xymatrix{
C \ar[r]^{\Delta} \ar[rd]_{\beta} &
C\otimes C \ar[d]^{\mathop{\rm Id}\nolimits \otimes \varepsilon}_{\varepsilon \otimes \mathop{\rm Id}\nolimits} &
C \ar[l]_{\Delta} \ar[ld]^{\beta}\\
&C}
$$
Moreover if the map $\beta$ is a coalgebra map then we have $ \Delta \circ \beta= (\beta \otimes \beta)\circ \Delta$.
\begin{example}\label{2dco}{\rm
This example is a special case of the last example in \cite{ms2}. The Hom-algebra introduced in Example \ref{2d} is a Hom-coalgebra by
\begin{align*}
&\Delta(e_1)=e_1\otimes e_1, ~~~~~~ \Delta(e_2)= e_1\otimes e_2 + e_2\otimes e_1 -2 e_2\otimes e_2\\
&\varepsilon(e_1)=1, ~~~~~~~~~~~~~~~ \varepsilon(e_2)=0.
\end{align*}
We set $\beta(e_1)= e_1+e_2$ and $\beta(e_2)= e_2$.
}
\end{example}
Let $(C, \Delta, \varepsilon, \beta)$ and $(C', \Delta', \varepsilon', \beta')$ be two Hom-coalgebras. A morphism $f: C\longrightarrow C'$ is called a Hom-coalgebra map if for all $x\in C$ we have $$ f(x)^{(1)}\otimes f(x)^{(2)}= f(x^{(1)})\otimes f(x^{(2)}), ~~~~~ f\circ \beta= \beta' \circ f. $$
A $(\alpha, \beta)$-Hom-bialgebra is a tuple $(B, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ where $(B, m, \eta, \alpha)$ is a Hom-algebra and $(B, \Delta, \varepsilon, \beta)$ is a Hom-coalgebra where $\Delta$ and $\varepsilon$ are morphisms of Hom-algebras, that is\\
i) $\Delta$ is a Hom-algebra map, $\Delta (hk)= \Delta(h) \Delta(k)$, which is
$$(hk)^{(1)}\otimes (hk)^{(2)}= h^{(1)}k^{(1)}\otimes h^{(2)}k^{(2)}, \quad \forall~~ h,k\in B,$$
ii) $\Delta$ is unital; $\Delta(1)= 1\otimes 1.$
iii) $\varepsilon$ is a Hom-algebra map; $\varepsilon(xy)= \varepsilon(x) \varepsilon (y)$.
iv) $\varepsilon$ is unital; $\varepsilon(1)=1$.
v) $\varepsilon (\alpha(x))= \varepsilon(x)$.
The algebra map property of $\Delta$ in terms of commutative diagrams is
\[ \underset{ { \text{{\bf{ }}}}} {
\xymatrixcolsep{5pc}\xymatrix{
B\otimes B \ar[r]^-{\Delta m} \ar[d]_-{\Delta \otimes \Delta} &B\otimes B \\
B\otimes B\otimes B\otimes B \ar[r]_-{id \otimes \tau \otimes id} & B\otimes B \otimes B\otimes B \ar[u]_-{m \otimes m} }
}
\] Here the linear map $\tau:B\otimes B \rightarrow B\otimes B$ is given by $\tau(h\otimes k)=k\otimes h$.
The map $\varepsilon$ being an algebra morphism in terms of a commutative diagram means
\[ \underset{ { \text{{\bf{ }}}}} {
\xymatrixcolsep{5pc}\xymatrix{
B \otimes B \ar[r]^-{m} \ar[d]_-{\varepsilon \otimes \varepsilon}&B\ \ar[ld]^-{\varepsilon}\\
\mathbb{K}\otimes \mathbb{K} =\mathbb{K} }}
\]
\begin{remark}{\rm
It can be proved that $\Delta$ and $\varepsilon$ are morphisms of unital Hom-algebras if and only if $m$ and $\eta$ are morphisms of Hom-coalgebras.}
\end{remark}
\begin{example}\label{2dbi}{\rm
The $2$-dimensional Hom-algebra in Example \ref{2d} is a $(\alpha, \beta)$-Hom-bialgebra by the coalgebra structure given in Example \ref{2dco}. See \cite{ms2}.
}
\end{example}
\begin{example}\label{twisted Hom-bialgebra}
{\rm
Let $ (B, m, \eta, \Delta, \varepsilon)$ be a bialgebra and $\alpha: B\longrightarrow B$ be a bialgebra map. Then $ (B, m_{\alpha}=\alpha\circ m, \eta, \alpha, \Delta_{\alpha}=\Delta\circ \alpha, \varepsilon, \alpha)$ is a $(\alpha, \alpha)$-Hom-bialgebra.
}
\end{example}
Let $(B, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ and $(B', m', \eta', \alpha', \Delta', \varepsilon', \beta')$ be two Hom-bialgebras. A morphism $f: B\longrightarrow B'$ is called a map of Hom-bialgebras if it is both a morphism of Hom-algebras and a morphism of Hom-coalgebras.
Let $(B, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a Hom-bialgebra. The authors in \cite{ms2}, \cite{ms3}, showed that $(\mathop{\rm Hom}\nolimits(B, B), \star, \gamma)$ is a unital Hom-algebra where $\star$ is the convolution product
$$f \star g = m \circ (f\otimes g) \circ \Delta,$$
and $ \gamma: \mathop{\rm Hom}\nolimits(B, B)\longrightarrow \mathop{\rm Hom}\nolimits(B, B)$ is defined by $\gamma(f)= \alpha \circ f \circ \beta.$ The unit is $\eta \circ \varepsilon$.
Similarly if $(A, m, \eta, \alpha)$ and $(C, \Delta, \varepsilon, \beta)$ are a Hom-algebra and a Hom-coalgebra, respectively, then $(\mathop{\rm Hom}\nolimits(C, A), \star, \gamma)$ is a unital Hom-algebra where $\star$ is the convolution product.\\
Here we recall the original definition of Hom-Hopf algebras.
\begin{remark}\label{old hom-hopf}{\rm
The notion of Hom-Hopf algebras first appeared in \cite{ms2} and \cite{ms3} as follows. A $(\alpha, \beta)$-Hom-bialgebra $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ with an antipode $S:H\longrightarrow H$ is called a $(\alpha, \beta)$-Hom-Hopf algebra. A map $S$ is called an antipode if it is an inverse of the identity map $\mathop{\rm Id}\nolimits: H\longrightarrow H$
in the Hom-associative algebra $\mathop{\rm Hom}\nolimits(H,H) $ with respect to the multiplication given by the convolution product, i.e. $S \star \mathop{\rm Id}\nolimits = \mathop{\rm Id}\nolimits \star S= \eta \circ \varepsilon$. In fact for all $h\in H$ we have
$$S(h^{(1)}) h^{(2)}= h^{(1)} S(h^{(2)})= \varepsilon(h) 1.$$
This is the same as the usual definition of an antipode for Hopf algebras. The following properties of antipodes of Hom-Hopf algebras with this definition were proved in \cite{cg} and \cite{ms2}. For all $x, y\in H$ we have;\\
i) If $\alpha=\beta$ then $S\circ \alpha=\alpha\circ S$.
ii) The antipode $S$ of a Hom-Hopf algebra is unique.
iii) $S$ is an anti-algebra map, i.e., $S(xy)= S(y) S(x)$.
iv) $S$ is an anti-coalgebra map, i.e., $S(x)^{(1)}\otimes S(x)^{(2)}= S(x^{(2)})\otimes S(x^{(1)})$.
v) $S$ is unital, i.e., $S(1)=1$.
vi) $S$ is counital, i.e., $ \varepsilon(S(x))= \varepsilon(x)$.
}
\end{remark}
In this paper we use the recent notion of Hom-Hopf algebras introduced in \cite{lmt}.
\begin{definition}\label{Hom-Hopf}\cite{lmt}
Let $(B, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a $(\alpha, \beta)$-Hom-bialgebra. An anti-algebra, anti-coalgebra morphism $S: B\longrightarrow B$ is
said to be an antipode if\\
a) $S\circ \alpha= \alpha \circ S$.
b) $S \circ \eta = \eta$ and $\varepsilon \circ S= \varepsilon$.
c) $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : B \longrightarrow B$ for the convolution product, i.e., for any $x\in B$, there exists $k\in \mathbb{N}$ such that
\begin{equation}
\alpha^k \circ m\circ (S\otimes \mathop{\rm Id}\nolimits)\circ\Delta(x) = \alpha^k \circ m\circ (\mathop{\rm Id}\nolimits \otimes S)\circ\Delta(x)= \eta \circ \varepsilon (x).
\end{equation}
A $(\alpha, \beta)$-Hom-bialgebra with an antipode is called a $(\alpha, \beta)$-Hom-Hopf algebra.
\end{definition}
One notes that Definition \ref{Hom-Hopf}(c) in terms of Sweedler notation can be written as follows:
\begin{equation}
\alpha^k(S(x^{(1)}) x^{(2)}) = \alpha^k (x^{(1)} S(x^{(2)}))= \varepsilon(x)1_B.
\end{equation}
\begin{remark}{\rm
There are some differences between the old definition of Hom-Hopf algebras in Remark \ref{old hom-hopf} and the recent one in Definition \ref{Hom-Hopf}.
Definition \ref{Hom-Hopf}(a) in the special case of $\alpha=\beta$ follows from the definition of Hom-Hopf algebras in Remark \ref{old hom-hopf}(i).
Also Definition \ref{Hom-Hopf}(b) is the result of the old definition in Remark \ref{old hom-hopf}(v)(vi).
Furthermore the antipodes of Hom-Hopf algebras in Definition \ref{Hom-Hopf} are relative Hom-inverses of the identity map whereas the antipode in Remark \ref{old hom-hopf} is actually the inverse of the identity map. Finally the antipode in Remark \ref{old hom-hopf} is unique however the antipode in Definition
\ref{Hom-Hopf} is not necessarily unique. In fact the authors in \cite{lmt} proved that if $S$ and $S'$ are two antipodes for the Hom-Hopf algebra $H$ in the sense of Definition \ref{Hom-Hopf}, then for every $x\in H$ there exists $k\in \mathbb{N}$ where
$$\alpha^{k+2} \circ S \circ \beta^2(x) = \alpha^{k+2} \circ S' \circ \beta^2(x).$$
In the special case when $\alpha$ and $\beta$ are both invertible then $S=S'$ and the antipode is unique.}
\end{remark}
\begin{proposition}
Any Hom-Hopf algebra $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ in the sense of Remark \ref{old hom-hopf} which satisfies the extra condition $S\circ \alpha= \alpha \circ S$, is a Hom-Hopf algebra in the sense of Definition \ref{Hom-Hopf} where $k=1$ for all elements $x\in H$.
\end{proposition}
\begin{proof}
By Remark \ref{old hom-hopf} the antipode $S$ is a unital, counital, anti-algebra, and anti-coalgebra map. Therefore for $k=1$ it satisfies all the conditions of Hom-Hopf algebras in Definition \ref{Hom-Hopf}.
\end{proof}
\begin{corollary}
If a Hom-Hopf algebra $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ in the sense of Remark \ref{old hom-hopf} satisfies $\alpha=\beta$ then $H$ is a Hom-Hopf algebra in the sense of Definition \ref{Hom-Hopf}.
\end{corollary}
\begin{proof}
If $\alpha=\beta$ then $\alpha$ is a map of Hom-bialgebras and by Remark \ref{old hom-hopf} we have $\alpha \circ S= S\circ \alpha$. Now the result follows from the previous Proposition.
\end{proof}
A Hom-Hopf algebra is called commutative if it is commutative as a Hom-algebra and it is called cocommutative if it is cocommutative as a Hom-coalgebra.
\begin{example}\label{HH}
{\rm
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ and $(K, m', \eta', \alpha', \Delta', \varepsilon', \beta', S')$ be two Hom-Hopf algebras. Then
$H\otimes K$ is also a Hom-Hopf algebra by the multiplication $m\otimes m'$, unit $\eta\otimes \eta'$, and $\alpha\otimes \alpha': H\otimes K \longrightarrow H\otimes K$, the coproduct $\Delta\otimes \Delta'$ and counit $\varepsilon \otimes \varepsilon'$ and the linear map $\beta\otimes \beta': H\otimes K \longrightarrow H\otimes K$.
}
\end{example}
\begin{definition}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ be a $(\alpha, \beta)$-Hom-Hopf algebra. An element $h\in H$ is called a group-like element if $\Delta(h)=h\otimes h$ and $\beta(h)=h$.
\end{definition}
\begin{remark}{\rm
If $h\in H$ is a group-like element then $ \varepsilon(h)h=\beta(h)=h$. Therefore $\varepsilon(h)=1$.}
\end{remark}
One notes that the authors in \cite{lmt} introduced group-like elements with the condition $\varepsilon(h)=1$ which in fact implies $\beta(h)=h$. Therefore their definition is equivalent to the one in this paper. However we preferred to have $\beta(h)=h$ as the definition and, as for ordinary Hopf algebras, the condition $\varepsilon(h)=1$ is the result of the fact that $h$ is a group-like element. The notion of Hom-groups was introduced in \cite{lmt} and studied in \cite{hassan}.
For any Hom-group $(G, \alpha)$, the author in \cite{hassan} introduced the Hom-group algebra $\mathbb{K}G$.
\begin{proposition}
For any Hom-group $(G, \alpha) $, the Hom-group algebra $\mathbb{K}G$ is a $(\alpha, \mathop{\rm Id}\nolimits)$-Hom-Hopf algebra.
\end{proposition}
\begin{proof}
We define the coproduct by $\Delta (g)= g\otimes g$, the counit by $\varepsilon(g)=1$, and the antipode by $S(g)=g^{-1}$.
Since $\beta=\mathop{\rm Id}\nolimits$ one verifies that $\mathbb{K}G$ is a $(\alpha, \mathop{\rm Id}\nolimits)$-Hom-bialgebra.
Since for any Hom-group we have $\alpha(g)^{-1}= \alpha(g^{-1})$ then $ S(\alpha(g))=\alpha(S(g))$. The unit element of $\mathbb{K}G$ is $1_G$ and therefore $S(1_G)=1_G^{-1}= 1_G$. Furthermore $\varepsilon (S(g))= \varepsilon (g^{-1})=1$. Finally if the invertibility index of $g\in G$ is $k$ then
$$\alpha^k( S(g) g)= \alpha^k(g^{-1} g)=1.$$
\end{proof}
One notes that all elements $g\in G$ are group-like elements of $\mathbb{K}G$. Also $\mathbb{K}G$ is a cocommutative Hom-Hopf algebra. If $G$ is an abelian Hom-group then $\mathbb{K}G$ is a commutative Hom-Hopf algebra.
The authors in \cite{lmt} proved that the set of group-like elements of a Hom-Hopf algebra is a Hom-group.
In the Hom-bialgebra structure of $\mathbb{K}G$ one can define the comultiplication by $\Delta(g) = \alpha(g)\otimes \alpha(g)$ to obtain a $(\alpha, \alpha)$-Hom-bialgebra.
\begin{example}\label{polynomial}
{\rm (Hom-bialgebra of quantum matrices)
In this example we study a Hom-bialgebra with four generators which is not a Hom-Hopf algebra. First we recall the construction of quantum matrices from \cite{es}, \cite{k}, \cite{m}, \cite{s}.
Let $q\in \mathbb{K}$ where $q\neq 0$ and $q^2\neq -1$.
Let $\mathcal{O}_q(M_2)= \mathbb{K}[ a, b, c, d]$ be the polynomial algebra with variables $a,b,c,d$ satisfying the following relations
\begin{align*}
&ab = q^{-1} ba, ~~~~~~ bd= q^{-1}db, ~~~~ac = q^{-1}ca, ~~~~ cd=q^{-1}dc\\
&bc=cb, ~~~~~ ad-da= (q^{-1}-q)bc.
\end{align*}
Clearly $\mathcal{O}_q(M_2)$ is not commutative except for $q=1$.
We define a coproduct as follows.
\begin{align*}
&\Delta(a) = a\otimes a + b\otimes c, ~~~~~~~ \Delta(b)= a\otimes b+ b\otimes d\\
& \Delta (c) = c\otimes a+ d\otimes c, ~~~~~~~ \Delta(d) = c\otimes b+ d\otimes d
\end{align*}
If we consider the elements of $\mathcal{O}(M_2)$ as $2 \times 2$ matrices with entries in $\mathbb{K}$ then \\
$~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\begin{bmatrix}
\Delta(a)& \Delta(b)\\ \Delta(c) &\Delta(d)
\end{bmatrix}_{\mathcal{O}(M_2)}= \begin{bmatrix}
a&b\\c&d
\end{bmatrix} \otimes \begin{bmatrix}
a&b\\c&d
\end{bmatrix} $\\
This comultiplication is not cocommutative.
We define the counit by
\begin{align*}
\varepsilon(a)=\varepsilon(d)=1, ~~~~~~~~~~~~~~~~~~~~ \varepsilon(b)=\varepsilon(c)=0.
\end{align*}
This coproduct and counit define a bialgebra structure on $\mathcal{O}(M_2)$. Now we explain the Hom-bialgebra structure from \cite{ya1}. We define a bialgebra map $\alpha : \mathcal{O}(M_2)\longrightarrow \mathcal{O}(M_2)$ by
\begin{align*}
\alpha(a)=a, ~~~ \alpha(b)=\lambda b, ~~~ \alpha(c)=\lambda^{-1}c, ~~~ \alpha(d)=d,
\end{align*}
where $\lambda \in \mathbb{K}$ is any invertible element. In fact
$$
\alpha(\begin{bmatrix}
a&b\\c&d
\end{bmatrix}) = \begin{bmatrix}
\alpha(a)&\alpha(b)\\\alpha(c)&\alpha(d)
\end{bmatrix}= \begin{bmatrix}
a&\lambda b\\\lambda^{-1}c&d
\end{bmatrix}
$$
It can be verified that $\alpha$ is a bialgebra morphism. One notes that $\varepsilon\circ \alpha=\varepsilon$. Now we use $\alpha$ to twist both the product and the coproduct of $\mathcal{O}(M_2)$ as explained in Example \ref{twisted Hom-bialgebra} to obtain a $(\alpha, \alpha)$-Hom-bialgebra $\mathcal{O}_q(M_2)_{\alpha}$. Therefore the coproduct of $\mathcal{O}_q(M_2)_{\alpha}$ is
\begin{align*}
&\Delta(a) = a\otimes a + b\otimes c, ~~~~~~~~~~~~~~ \Delta(b)= \lambda a\otimes b+ \lambda b\otimes d\\
& \Delta (c) = \lambda^{-1}c\otimes a+ \lambda^{-1} d\otimes c, ~~~~~~~ \Delta(d) = c\otimes b+ d\otimes d
\end{align*}
In fact
$~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\begin{bmatrix}
\Delta(a)& \Delta(b)\\ \Delta(c) &\Delta(d)
\end{bmatrix}_{\mathcal{O}_q(M_2)_{\alpha}}= \begin{bmatrix}
a&\lambda b\\\lambda^{-1}c&d
\end{bmatrix} \otimes \begin{bmatrix}
a&\lambda b\\\lambda^{-1}c&d
\end{bmatrix} $\\
Now we consider the quantum determinant element
$$det_q= \mu_{\alpha}(a, d) -q^{-1}\mu_{\alpha}(b, c )\in \mathcal{O}_q(M_2)_{\alpha}. $$
One notes that $$det_q= \alpha(a)\alpha(d)- q^{-1} \alpha(b)\alpha(c)= ad - q^{-1} (\lambda b) ( \lambda^{-1} c)= ad - q^{-1} bc.$$
Similarly $\alpha( det_q)= det_q$. Therefore
$$\Delta_{\mathcal{O}_q(M_2)_{\alpha}}(det_q) = \Delta \alpha(det_q) = \Delta (det_q).$$
It is shown in \cite{k} and \cite{s} that $\Delta(det_q) = det_q \otimes det_q$ which means $det_q$ is a group-like element.
Also $\varepsilon_{ \mathcal{O}(M_2)_{\alpha}}= \varepsilon_{\mathcal{O}(M_2)}.$
Therefore $\varepsilon(ad-q^{-1}bc)= 1$. Then $det_q$ is a group-like element of the Hom-bialgebra $\mathcal{O}_q(M_2)_{\alpha}$. Since the set of group-like elements of a Hom-Hopf algebra is a Hom-group \cite{lmt}, every group-like element is relative Hom-invertible. However $det_q$ is clearly not relative Hom-invertible by the definition of $\alpha$. Therefore $\mathcal{O}_q(M_2)$ is not a Hom-Hopf algebra.
\begin{example}\label{2-dimensional Hopf}{\rm
The $2$-dimensional bialgebra $H$, in Example \ref{2dbi}, is a $(\alpha, \beta)$-Hom-Hopf algebra by
\begin{align*}
S(e_1)=e_1, ~~~~~~~~ S(e_2)=e_2.
\end{align*}
It is straightforward to check that $S(h^{(1)}) h^{(2)}= h^{(1)} S(h^{(2)}) = \varepsilon(h) 1$ for all $h\in H$.
Therefore it is a Hom-Hopf algebra in the sense of Remark \ref{old hom-hopf}. Since $S=\mathop{\rm Id}\nolimits$ then $S\circ \alpha= \alpha \circ S$ and $H$ is also a Hom-Hopf algebra in the sense of Definition \ref{Hom-Hopf}.
}
\end{example}
\section{Properties of antipodes}
In this section we study the properties of antipodes for Hom-Hopf algebras. We remind the reader that we are using Definition \ref{Hom-Hopf}.
We need the following basic properties of the convolution product for later results.
\begin{remark}\label{convolution}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a $(\alpha, \beta)$-Hom-bialgebra.
We consider the convolution Hom-algebra $\mathop{\rm Hom}\nolimits( H, H)$. If $f, g\in \mathop{\rm Hom}\nolimits(H, H)$, then the authors in \cite{lmt} showed that \\
i) $ \alpha^n ( f\star g) = \alpha^n f \star \alpha^n g$.
ii) $f \star (\eta \circ\varepsilon )= \alpha \circ f \circ \beta= (\eta \circ\varepsilon ) \star f$.
\end{remark}
The following Proposition will play an important role in the further results of this paper.
\begin{proposition}\label{important}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ and $(A, m', \eta', \alpha', \Delta', \varepsilon', \beta')$ be a $(\alpha, \beta)$- and a $(\alpha', \beta')$-Hom-bialgebra, respectively, and let $\mathop{\rm Hom}\nolimits(H, A)$ be the convolution Hom-algebra.
If $f, g, \varphi \in \mathop{\rm Hom}\nolimits(H, A)$ where $f$ and $g$ are relative Hom-inverses of $\varphi$, then for every $x\in H$ there exists $k\in\mathbb{ N}$ where
$$\alpha'^{k+2} \circ f\circ \beta^2 (x)= \alpha'^{k+2} \circ g \circ \beta^2(x).$$
\end{proposition}
\begin{proof}
For $x\in H$ there exist $k', k''\in \mathbb{N}$ where
$$ \alpha'^{k'} ( f\star \varphi)(x) = \alpha'^{k'} (\varphi \star f)(x)= \varepsilon(x)1 = \eta \circ \varepsilon(x) ,$$ and
$$ \alpha'^{k''} ( g\star \varphi)(x) = \alpha'^{k''} (\varphi \star g)(x)=\varepsilon(x)1 =\eta \circ \varepsilon(x). $$
Let $k= \max (k', k'')$. We ignore the composition sign for easier computation. We have
\begin{align*}
\alpha'^{k+2} f \beta^2 &= \alpha' (\alpha'^{k+1} f\beta) \beta\\
&=(\alpha'^{k+1} f\beta) \star (\eta \varepsilon)= (\alpha'^{k+1} f\beta)\star \alpha'^k(\varphi \star g)\\
&=(\alpha'^{k+1} f\beta)\star (\alpha'^k\varphi \star \alpha'^kg)= ((\alpha'^{k} f\beta)\star \alpha'^k \varphi) \star \alpha'^{k+1}g\\
&=(\alpha'^k f \star \alpha'^k \varphi)\star \alpha'^{k+1}g\beta= \alpha'^k ( f\star \varphi) \star \alpha'^{k+1} g\beta\\
&=\alpha'^k(\eta \varepsilon) \star \alpha'^{k+1} g\beta=\eta \varepsilon \star \alpha'^{k+1} g\beta\\
&= \alpha'^{k+2}g\beta^2.
\end{align*}
We used Remark \ref{convolution}(ii) in the second equality, Remark \ref{convolution}(i) in the fourth equality, the Hom-associativity in the fifth equality, the Hom-coassociativity in the sixth equality, Remark \ref{convolution}(i) in the seventh equality, the unitality of $\alpha'$ in the eighth equality, and Remark \ref{convolution}(ii) in the last equality.
\end{proof}
The previous proposition shows that relative Hom-inverses are unique in some sense. In fact if $\alpha'$ and $\beta$ are invertible then $f=g$.
\begin{proposition}\label{relative anti algebra}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a Hom-bialgebra with multiplicative $\alpha$, endowed with a map $S: H\longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H \longrightarrow H$ in $\mathop{\rm Hom}\nolimits(H, H)$.
Let $P(x, y)=S(x y)$, $N(x,y)= S(y) S(x)$, and $ M(x, y)= xy$.
Then $S(xy)$ and $S(y)S(x)$ are both relative Hom-inverses of the multiplication $M(x, y)=xy$ in $\mathop{\rm Hom}\nolimits(H\otimes H, H)$, with respect to the convolution product, and for every $x, y\in H$ there exists $K\in \mathbb{N}$ where
\begin{equation}\label{relative hom-anti algebra}
\alpha^{K+2}( S[\beta^2(x) \beta^2(y)])= \alpha^{K+2} [S(\beta^2(y)) S(\beta^2(x))].
\end{equation}
\end{proposition}
\begin{proof}
For any $x, y\in H$ there exist $k', k''\in \mathbb{N}$, such that
$$ \alpha^{k'}(S(x^{(1)}) x^{(2)}) = \alpha^{k'} (x^{(1)} S(x^{(2)}))= \varepsilon(x)1_B, $$
and
$$ \alpha^{k''}(S(y^{(1)}) y^{(2)}) = \alpha^{k''} (y^{(1)} S(y^{(2)}))= \varepsilon(y)1_B. $$
Let $k=\max(k', k'')+2$. Then
\begin{align*}
\alpha^k(M\star N)( x,y) = &\alpha^k[M(x^{(1)}, y^{(1)}) N(x^{(2)},y^{(2)})]\\
&=\alpha^k([x^{(1)} y^{(1)}][S(y^{(2)}) S(x^{(2)})])\\
&=[\alpha^k(x^{(1)})\alpha^k(y^{(1)})] \alpha^k ( S(y^{(2)}) S(x^{(2)}))\\
&=\alpha^{k+1} (x^{(1)}) [\alpha^k (y^{(1)}) \alpha^{k-1} ( S(y^{(2)}) S(x^{(2)})) ]\\
&=\alpha^{k+1} (x^{(1)}) [\alpha^k (y^{(1)}) [ \alpha^{k-1} ( S(y^{(2)}))\alpha^{k-1}( S(x^{(2)}))] ]\\
&=\alpha^{k+1} (x^{(1)}) [[\alpha^{k -1}(y^{(1)}) \alpha^{k-1}( S(y^{(2)}))]\alpha^{k}( S(x^{(2)})) ]\\
&=\alpha^{k+1} (x^{(1)}) [\alpha^{k -1}(y^{(1)} S(y^{(2)}))\alpha^{k}( S(x^{(2)})) ]\\
&=\alpha^{k+1} (x^{(1)})[\varepsilon(y) 1\alpha^{k}( S(x^{(2)}))]\\
&=\alpha^{k+1} (x^{(1)})\alpha^{k+1}( S(x^{(2)})) \varepsilon(y)\\
&=\alpha^{k+1} [x^{(1)} S(x^{(2)})] \varepsilon(y)\\
&=\alpha^{k+1}( \varepsilon (x) 1) \varepsilon(y)\\
&= \varepsilon(x) \varepsilon(y)1.
\end{align*}
We used the Hom-associativity property in the fourth equality, and the Hom-unitality in the ninth equality. Therefore $ N(x,y)= S(y) S(x)$ is a relative Hom-inverse of
$ M(x,y)= xy$. Now for $xy\in H$ there exists $n\in \mathbb{N}$ where
$$ \alpha^{n}(S((xy)^{(1)}) (xy)^{(2)}) = \alpha^{n} ((xy)^{(1)} S((xy)^{(2)}))= \varepsilon(xy)1_B. $$
Then
\begin{align*}
&\alpha^n( P\star M)(x, y)= \alpha^n(P(x^{(1)}, y^{(1)}) M(x^{(2)},y^{(2)}))\\
&= \alpha^n [S(x^{(1)} y^{(1)}) x^{(2)}y^{(2)}]= \alpha^n [S ((xy)^{(1)}) (xy)^{(2)}]= \varepsilon (xy) 1.
\end{align*}
Therefore $ P(x, y) = S(xy)$ is a relative Hom-inverse of $ M(x,y)=xy$.
Then $S(xy)$ and $S(y)S(x)$ are both relative Hom-inverses of the multiplication $M(x, y)=xy$ in $\mathop{\rm Hom}\nolimits(H\otimes H, H)$ with respect to the convolution product. Therefore by Proposition \ref{important} there exists $K\in \mathbb{N}$ such that
$$ \alpha^{K+2} \circ P \circ \beta_{H\otimes H}^2 (x,y)= \alpha^{K+2} \circ N \circ \beta_{H\otimes H}^2(x,y) .$$
By Example \ref{HH}, we have $\beta_{H\otimes H}= \beta_H\otimes \beta_H$ and therefore we obtain the result.
\end{proof}
The relation \ref{relative hom-anti algebra} is called the relative Hom-anti-algebra map property of $S$.
\begin{corollary}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a $(\alpha, \beta)$-Hom-bialgebra, where $\alpha$ is multiplicative and $\alpha$ and $\beta$ are invertible. If $H$ is endowed with a linear map $S: H \longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H \longrightarrow H$ in $\mathop{\rm Hom}\nolimits(H, H)$,
then $S$ is an anti-algebra map.
\end{corollary}
\begin{proof}
By the previous Proposition we have $ \alpha^{K+2} \circ P \circ \beta_{H\otimes H}^2 (x,y)= \alpha^{K+2} \circ N \circ \beta_{H\otimes H}^2(x,y) .$ Since $\alpha$ and $\beta$ are invertible then $P= N$, that is, $S(xy)=S(y) S(x)$.
\end{proof}
Similarly we have the following proposition.
\begin{proposition}\label{relative anti coalgebra}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a Hom-bialgebra where $\beta$ is a coalgebra map. Assume $H$ is endowed with a map $S: H\longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H\longrightarrow H$ with respect to the convolution product.
Let $P(x)=\Delta(S(x))$, $N= \tau(S\otimes S)\Delta$, and $ \Delta(x)= x^{(1)} \otimes x^{(2)}$ where $\tau(x,y)= (y, x)$ for all $x, y\in H$.
Then $P$ and $N$ are both relative Hom-inverses of the comultiplication $\Delta$ in $\mathop{\rm Hom}\nolimits(H, H\otimes H)$ with respect to the convolution product and for every $x\in H$ there exists $k\in \mathbb{N}$ such that
\begin{equation}\label{relative hom-anti coalgebra}
\alpha^{k+2}[ S(\beta^2(x) )^{(1)}] \otimes \alpha^{k+2}[ S(\beta^2(x) )^{(2)}] = \alpha^{k+2} [S(\beta^2(x^{(2)}))]\otimes \alpha^{k+2} [S(\beta^2(x^{(1)}))].
\end{equation}
\end{proposition}
\begin{proof}
Using Proposition \ref{important} and Example \ref{HH}, the proof is similar to the previous Proposition.
\end{proof}
The relation \ref{relative hom-anti coalgebra} is called the relative Hom-anti-coalgebra map property of $S$.
As a special case we have the following.
\begin{corollary}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a $(\alpha, \beta)$-Hom-bialgebra, where $\beta$ is a coalgebra morphism and $\alpha$ and $\beta$ are invertible. Assume $H$ is endowed with a linear map $S: H \longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H \longrightarrow H$ with respect to the convolution product.
Then $S$ is an anti-coalgebra map.
\end{corollary>
\begin{proposition}\label{hom unitality prop}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a Hom-bialgebra, endowed with a map $S: H\longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H \longrightarrow H$ with respect to the convolution product. Then there exists $k\in \mathbb{N}$ such that
\begin{equation}\label{hom-unitality}
\alpha^{k+1}( S(1))=1.
\end{equation}
\end{proposition}
\begin{proof}
We apply the relative Hom-invertibility of $S$ for $h=1$. So there exists $k\in \mathbb{N}$ where
$$
1= \varepsilon(1) 1= \alpha^k( \mathop{\rm Id}\nolimits \star S)(1)= \alpha^k(1S(1)) = \alpha^{k+1}(S(1)).
$$
\end{proof}
The condition \ref{hom-unitality} is called the relative Hom-unitality property of $S$.
\begin{proposition}\label{relative counitality}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a Hom-bialgebra, endowed with a map $S: H\longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H \longrightarrow H$ with respect to the convolution product. Then there exists $k\in \mathbb{N}$ such that
\begin{equation}\label{hom-counitality}
\varepsilon (\alpha^k(S(h)))=1 \varepsilon(h).
\end{equation}
\end{proposition}
\begin{proof}
For any $h\in H$ there exists $k\in \mathbb{N}$ such that
$$\varepsilon(h) 1= \alpha^k( S(h^{(1)}) h^{(2)}).$$
Therefore $$\varepsilon (\varepsilon(h)1) = \varepsilon(\alpha^k( S(h^{(1)}) h^{(2)})).$$
Since $\varepsilon$ is unital and it commutes with $\alpha$ then
$$1\varepsilon(h) = \alpha^k ( \varepsilon(S(h^{(1)}) h^{(2)}) ) = \alpha^k (\varepsilon(S(h^{(1)})) \varepsilon(h^{(2)}) ). $$
Therefore
$$1\varepsilon(h) = \alpha^k( \varepsilon(S(h^{(1)}\varepsilon(h^{(2)}))) ) = \alpha^k \varepsilon(S(h))= \varepsilon (\alpha^k(S(h))).$$
\end{proof}
The condition \ref{hom-counitality} is called the relative Hom-counitality property of $S$.
\begin{lemma}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S )$ be a Hom-Hopf algebra, $(A, m', \eta', \alpha' )$ be a Hom-algebra and $f: H\longrightarrow A$ be a Hom-algebra map. Then $f \circ S$ is a relative Hom-inverse of $f$ in $\mathop{\rm Hom}\nolimits (H, A)$.
\end{lemma}
\begin{proof}
We show that $f\circ S$ is a relative Hom-inverse of $f$ in $ \mathop{\rm Hom}\nolimits( H, A)$.
For any $h\in H$ there exists $k\in \mathbb{N}$ where $\alpha^k(S(h^{(1)}) h^{(2)}) = \alpha^k (h^{(1)} S(h^{(2)}))= \varepsilon(h)1.$ Therefore
\begin{align*}
&\alpha'^k( (f\circ S)\star f)(h) = \alpha'^k (f( S(h^{(1)})) f(h^{(2)})) = \alpha'^k(f(S(h^{(1)})h^{(2)}) )\\
& = f (\alpha^k(S(h^{(1)})h^{(2)}))= f(\varepsilon(h)1)=\varepsilon(h)1.
\end{align*}
Similarly since $ \alpha^k (h^{(1)} S(h^{(2)}))= \varepsilon(h)1$ we have $\alpha'^k( f \star(f\circ S) )(h)=\varepsilon(h)1$.
\end{proof}
\begin{lemma}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S )$ be a Hom-Hopf algebra, $(C, \Delta', \varepsilon', \beta' )$ be a Hom-coalgebra and $f: C\longrightarrow H$ be a Hom-coalgebra map. Then $ S\circ f$ is a relative Hom-inverse of $f$ in $\mathop{\rm Hom}\nolimits (C, H)$.
\end{lemma}
\begin{proof}
Similar to the previous Lemma.
\end{proof}
\begin{proposition}\label{Hopf map}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ and $(K, m', \eta', \alpha', \Delta', \varepsilon', \beta', S')$ be a $(\alpha, \beta)$- and a $(\alpha', \beta')$-Hom-Hopf algebra, respectively.
If $f: H \longrightarrow K$ is a map of Hom-bialgebras then there exists $k\in \mathbb{N}$ such that
\begin{align}\label{Hopf map condition}
\alpha'^k \circ ( f\circ S)\circ \beta^2(h) = \alpha'^k\circ ( S'\circ f)\circ\beta^2(h).
\end{align}
\end{proposition}
\begin{proof}
By the previous Lemmas $f \circ S$ and $ S' \circ f $ are relative Hom-inverses of $f$ in $ \mathop{\rm Hom}\nolimits( H, K)$. Then the result follows from Proposition \ref{important}.
\end{proof}
The condition \ref{Hopf map condition} is called the relative Hom-Hopf algebra map property of $S$.
As a special case of the previous Proposition we have the following result.
\begin{corollary}\label{hopf morphism}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ and $(K, m', \eta', \alpha', \Delta', \varepsilon', \beta', S')$ be Hom-Hopf algebras where $\alpha, \beta, \alpha',\beta'$ are invertible. Then any Hom-bialgebra map $f: H \longrightarrow K$ is a Hom-Hopf algebra map, i.e.,
$$f\circ S= S' \circ f.$$
\end{corollary}
\begin{corollary}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ be a Hom-Hopf algebra where $\alpha=\beta$. Then there exists $k\in\mathbb{ N}$ such that
\begin{equation}
\alpha^k \circ (\alpha\circ S)\circ \alpha^2 (h) = \alpha^k\circ ( S\circ \alpha)\circ\alpha^2 (h).
\end{equation}
If $\alpha$ is invertible then $ \alpha \circ S = S\circ \alpha$.
\end{corollary}
\begin{proof}
Since $\alpha=\beta$ then $\alpha$ is a map of Hom-bialgebras and the result follows from Proposition \ref{Hopf map}.
\end{proof}
Here we summarize some of the results in this section.
\begin{theorem}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta)$ be a Hom-bialgebra where $\alpha$ and $\beta$ are morphisms of algebra and coalgebra, respectively. Assume $H$ is endowed with a map $S: H\longrightarrow H$ where $S$ is a relative Hom-inverse of the identity map $\mathop{\rm Id}\nolimits : H \longrightarrow H$ with respect to the convolution product. Then $S$ is a relative Hom-anti-algebra map, a relative Hom-anti-coalgebra map, relative Hom-unital, and relative Hom-counital, i.e., there exists $k\in\mathbb{N}$ such that\\
i) $\alpha^{k+2}( S(\beta^2(x) \beta^2(y)))= \alpha^{k+2} (S(\beta^2(y)) S(\beta^2(x))).$
ii) $\alpha^{k+2}[ S(\beta^2(x) )^{(1)}] \otimes \alpha^{k+2}[ S(\beta^2(x) )^{(2)}] = \alpha^{k+2} [S(\beta^2(x^{(2)}))]\otimes \alpha^{k+2} [S(\beta^2(x^{(1)}))].$
iii) $\alpha^{k+1}( S(1))=1.$
iv) $ \varepsilon (\alpha^k(S(h)))=1 \varepsilon(h)$.
v) If $\alpha=\beta$ then $\alpha^k (\alpha\circ S) \alpha^2 (h) = \alpha^k( S\circ \alpha)\alpha^2(h).$\\
Furthermore if $\alpha$ and $\beta$ are invertible then $S$ is an anti-morphism of algebras and coalgebras, and $\alpha \circ S= S \circ \alpha$.
\end{theorem}
\begin{proposition}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ be a commutative Hom-Hopf algebra. Then for every $x\in H$ there exists $k\in \mathbb{N}$ where
$$\alpha^{k+2} \circ S^2\circ \beta^2 (x)= \alpha^{k+2} \circ \mathop{\rm Id}\nolimits \circ \beta^2(x).$$
\end{proposition}
\begin{proof}
We show that $S^2$ is a relative Hom-inverse of $S$. For any $h\in H$ there exists $k\in \mathbb{N}$ where $\alpha^k(S(h^{(1)}) h^{(2)}) = \alpha^k (h^{(1)} S(h^{(2)}))= \varepsilon(h)1.$ Therefore
\begin{align*}
\alpha^k(S\star S^2)(h)= & \alpha^k [S(h^{(1)}) S^2(h^{(2)})] \\
&= \alpha^k [S[S(h^{(2)}) h^{(1)} ]] = \alpha^k [S[ h^{(1)}S(h^{(2)}) ]]\\
&= S [\alpha^k[ h^{(1)}S(h^{(2)})] ]= S(\varepsilon(h) 1)= \varepsilon(h)1.
\end{align*}
We used the anti-algebra map property of $S$ in the second equality, commutativity of $H$ in the third equality, commutativity of $S$ and $\alpha$ in the fourth equality, and the unitality of $S$ in the last equality. Similarly it can be shown that $\alpha^k (S^2 \star S)(h)= \varepsilon(h)1$. Therefore $S^2$ and the identity map $\mathop{\rm Id}\nolimits_H$ are both relative Hom-inverses of $S$. Now the result follows from Proposition \ref{important}.
\end{proof}
As a special case of the previous Proposition, we have the following result.
\begin{corollary}
If $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ is a commutative Hom-Hopf algebra with invertible $\alpha$ and $\beta$ then
$$S^2=\mathop{\rm Id}\nolimits.$$
\end{corollary}
\begin{proposition}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ be a cocommutative Hom-Hopf algebra. Then for every $x\in H$ there exists $k\in \mathbb{N}$ where
$$\alpha^{k+2} \circ S^2\circ \beta^2 (x)= \alpha^{k+2} \circ \mathop{\rm Id}\nolimits \circ \beta^2(x).$$
\end{proposition}
\begin{proof}
We show that $S^2$ is a relative Hom-inverse of $S$. For any $h\in H$ there exist $k', k''\in \mathbb{N}$ where $\alpha^{k'}(S(h^{(1)}) h^{(2)}) = \alpha^{k'} (h^{(1)} S(h^{(2)}))= \varepsilon(h)1,$ and $\alpha^{k''}(S(S(h)^{(1)}) S(h)^{(2)}) = \alpha^{k''} (S(h)^{(1)} S(S(h)^{(2)}))= \varepsilon(S(h))1.$ Let $k=\max (k', k'')$.
Therefore
\begin{align*}
\alpha^k(S\star S^2)(h)= & \alpha^k [S(h^{(1)}) S^2(h^{(2)})] \\
&= \alpha^k [S(h)^{(2)} S (S(h)^{(1)} )] = \alpha^k [S(h)^{(1)} S ( S(h)^{(2)}) ]\\
&= \alpha^k(\varepsilon(S(h))1)= \varepsilon(S(h))\alpha^k(1)= \varepsilon(h) 1.
\end{align*}
We used the anti-coalgebra map property of $S$ in the second equality, cocommutativity of $H$ in the third equality, and the counitality of the antipode in the fifth equality. Similarly $\alpha^k (S^2 \star S)(h)= \varepsilon(h) 1$. Therefore $S^2$ and the identity map $\mathop{\rm Id}\nolimits_H$ are both relative Hom-inverses of $S$. Then the result follows from Proposition \ref{important}.
\end{proof}
As a special case of the previous Proposition we have the following result.
\begin{corollary}
If $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ is a cocommutative Hom-Hopf algebra with invertible $\alpha$ and $\beta$ then
$$S^2=\mathop{\rm Id}\nolimits.$$
\end{corollary}
\begin{proposition}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ be a Hom-Hopf algebra and $h\in H$ a primitive element, i.e., $\Delta(h)= 1\otimes h+ h\otimes 1$. Then there exists $k\in \mathbb{N}$ where
\begin{equation}
\alpha^{k+1} (S(h))= -\alpha^{k+1} (h).
\end{equation}
\end{proposition}
\begin{proof} There exists $k\in \mathbb{N}$ such that $\alpha^{k}(S(h^{(1)}) h^{(2)}) = \alpha^{k} (h^{(1)} S(h^{(2)}))= \varepsilon(h)1.$ Therefore
$$\alpha^k( hS(1)) + \alpha^k(1S(h))=\varepsilon(h)1.$$ Since $S$ is unital and for any $x\in H$, we have $1x=\alpha(x)$, then
$$\alpha^{k+1}(h) + \alpha^{k+1}(S(h)) = \varepsilon(h)1.$$
By \cite{lmt}, for any primitive element $h$, we have $\varepsilon(h)=0$. Therefore $\alpha^{k+1} (S(h))= -\alpha^{k+1} (h).$
\end{proof}
\begin{proposition}
Let $(H, m, \eta, \alpha, \Delta, \varepsilon, \beta, S)$ be a Hom-Hopf algebra and $h\in H$ a group-like element, i.e., $\Delta(h)= h\otimes h$. Then there exists $k\in \mathbb{N}$ where
\begin{equation}
\alpha^{k} (S(h)h)= \alpha^k ( h S(h))=1.
\end{equation}
\end{proposition}
\begin{proof}
There exists $k\in \mathbb{N}$ such that $\alpha^{k}(S(h^{(1)}) h^{(2)}) = \alpha^{k} (h^{(1)} S(h^{(2)}))= \varepsilon(h)1.$ Then
$$\alpha^k(S(h)h)= \alpha^k( hS(h))=\varepsilon(h)1.$$
Now the result follows from the fact that $\varepsilon(h)=1$.
\end{proof}
By the previous Proposition the relative Hom-inverse of a group-like element $h$ is $S(h)$.
\begin{thebibliography}{9}
\bibitem[AEM]{aem} F. Ammar, Z. Ejbehi, and A. Makhlouf, \emph{Cohomology and deformations
of Hom-algebras}, J. Lie Theory 21 (2011), no. 4, 813--836.
\bibitem[AS]{as} N. Aizawa and H. Sato, \emph{q-deformation of the Virasoro algebra with central extension}, Phys. Lett. B, 256, (1991), p. 185-190.
\bibitem[CG]{cg} S. Caenepeel, and I. Goyvaerts, \emph{Monoidal Hom-Hopf algebras}, Comm. Algebra 39(6)(2011), p. 2216-2240.
\bibitem[CKL]{ckl} M. Chaichian, P. Kulish, and J. Lukierski, \emph{q-deformed Jacobi identity, q-oscillators and q-deformed infinite-dimensional
algebras}, Phys. Lett. B, 237 (1990), p. 401-406.
\bibitem[CQ]{cq} Y. Cheng and H. Qi, \emph{Representations of BiHom-Lie algebras}, arXiv:1610.04302v1, (2016).
\bibitem[CZ]{cz} T. L. Curtright and C. K. Zachos, \emph{Deforming maps for quantum algebras}, Phys. Lett. B, 243 (1990), p. 237-244.
\bibitem[CS]{cs} A. J. Calder\'on and J. M. S\'anchez, \emph{The structure of split regular BiHom-Lie algebras}, Journal of Geometry and Physics
Volume 110, December (2016), P. 296-305.
\bibitem[DM]{dm} K. Dekkar and A. Makhlouf, \emph{
Gerstenhaber--Schack cohomology for Hom-bialgebras and
deformations}, Communications in Algebra,
2017, VOL. 45, NO. 10, P. 4400-4428.
\bibitem[ES]{es} P. Etingof and O. Schiffmann, \emph{Lectures on quantum groups}, 2nd ed., Int. Press of Boston, Cambridge, (2002).
\bibitem[GMMP]{gmmp} G. Graziani, A. Makhlouf, C. Menini, and F. Panaite,
\emph{BiHom-associative algebras, BiHom-Lie algebras and BiHom-bialgebras}, SIGMA 11 (2015), 086, 34 pages.
\bibitem[GW]{gw} S. Guo and S. Wang, \emph{Symmetric pairs and pseudosymmetries in Hom-Yetter--Drinfeld categories},
J. Algebra Appl. 16, 1750125 (2017), 21 pages.
\bibitem[GR]{gr} M. Goze and E. Remm, \emph{On the algebraic variety of Hom-Lie algebras}, https://arxiv.org/abs/1706.02484, (2017).
\bibitem[H]{hassan} M. Hassanzadeh, \emph{Hom-groups, Representations and homological algebra}, accepted in Colloquium Mathematicum, August 2018.
\bibitem[HLS]{hls} J. T. Hartwig, D. Larsson, and S. D. Silvestrov, \emph{Deformations of Lie
algebras using $\sigma$-derivations}, J. Algebra 295 (2006), no. 2, p. 314--361.
\bibitem[HSS]{hss} M. Hassanzadeh, I. Shapiro and S. S\"utl\"u, \emph{Cyclic homology for Hom-algebras},
Journal of Geometry and Physics, Volume 98, December (2015), P. 40-56.
\bibitem[K]{k} C. Kassel, \emph{Quantum groups}, Grad. Texts in Math. 155, Springer-Verlag, New York, (1995).
\bibitem[LS]{ls} D. Larsson and S. D. Silvestrov, \emph{Quasi-hom-Lie algebras, central exten-
sions and 2-cocycle-like identities}, J. Algebra 288 (2005), no. 2, p. 321--344.
\bibitem[LMT]{lmt} C. Laurent-Gengouxa, A. Makhlouf, and J. Teles, \emph{Universal algebra of a Hom-Lie algebra and group-like elements}
, Journal of Pure and Applied Algebra, Volume 222, Issue 5, (2018), P. 1139-1163.
\bibitem[MS1]{ms1} A. Makhlouf and S. D. Silvestrov, \emph{Hom-algebra structures}, J. Gen. Lie
Theory Appl. 2 (2008), no. 2, p. 51--64.
\bibitem[MS2]{ms2} A. Makhlouf and S. Silvestrov, \emph{Hom-Lie admissible Hom-coalgebras and
Hom-Hopf algebras}, Generalized Lie theory in mathematics, physics and
beyond, Springer, Berlin, (2009), p. 189--206.
\bibitem[MS3]{ms3} A. Makhlouf and S. Silvestrov, \emph{Hom-algebras and Hom-coalgebras}, J. Algebra Appl. 9 (2010),
no. 4, p. 553--589.
\bibitem[M]{m} S. Majid, \emph{Foundations of quantum group theory}, Cambridge U. Press, Cambridge, UK, (1995).
\bibitem[PSS]{pss} F. Panaite, P. Schrader, and M. D. Staic, \emph{Hom-Tensor Categories and the Hom-Yang-Baxter Equation},
https://arxiv.org/abs/1702.08475, (2017).
\bibitem[S]{s} R. Street, \emph{Quantum groups: a path to current algebra}, Australian Math. Soc. Lecture Series 19, Cambridge
Univ. Press, Cambridge, (2007).
\bibitem[Ya1]{ya1} D. Yau, \emph{Hom-quantum groups II: Cobraided Hom-bialgebras and Hom-quantum geometry}, (2009) e-Print
arXiv:0907.1880.
\bibitem[Ya2]{ya2} D. Yau, \emph{Hom-bialgebras and comodule Hom-algebras}, Int. Electron. J.
Algebra 8 (2010), p. 45-64.
\bibitem[Ya3]{ya3} D. Yau, \emph{Hom-quantum groups: I. Quasi-triangular Hom-bialgebras}, J.
Phys. A 45 (2012), no. 6.
\bibitem[Ya4]{ya4} D. Yau, \emph{Enveloping algebras of Hom-Lie algebras}, Journal of Generalized Lie Theory and Applications, 2,
(2008), p. 95-108.
\bibitem[ZZ]{zz} X. Zhao, and X. Zhang, \emph{Lazy 2-cocycles over monoidal Hom-Hopf algebras}, Colloq. Math. 142(1), (2016), p. 61-81.
\end{thebibliography}
\end{document}
\begin{document}
\begin{center} {\bf \LARGE Shadowing of non-transversal heteroclinic chains }\\
\vskip 0.5cm
\vskip 0.5cm
{\large Amadeu Delshams\footnote{Partially supported by the MINECO-FEDER Grant MTM2015-65715-P and the Russian
Scientific Foundation Grant 14-41-00044 at the Lobachevsky University of
Nizhny Novgorod.}, Adri\`a Simon\footnote{Partially supported by
the MINECO-FEDER Grant MTM2012-31714 and the Catalan
Grant 2014SGR504.}} \\
Departament de Matem\`atiques, Universitat Polit\`ecnica de Catalunya \\
Av. Diagonal 647, 08028 Barcelona \\
[email protected], [email protected]
\vskip\baselineskip
{\large Piotr Zgliczy\'nski}\footnote{Research has been supported by Polish National Science Centre grant 2011/03B/ST1/04780} \\
Jagiellonian University, Institute of Computer Science and Computational Mathematics, \\
{\L}ojasiewicza 6, 30--348 Krak\'ow, Poland \\ e-mail:
[email protected]
\vskip 0.5cm
\today
\end{center}
\begin{abstract}
We present a new result about the shadowing of nontransversal chain of heteroclinic connections based on the idea
of dropping dimensions. We illustrate this new mechanism with several examples.
As an application we discuss this mechanism in a simplification of a toy model system derived by Colliander \emph{et al.}
in the context of cubic defocusing nonlinear Schr\"odinger equation.
\end{abstract}
\input intro.tex
\input notation.tex
\input nonTransDiff.tex
\input geom-idea.tex
\input covrel.tex
\input top-proof.tex
\input lin-model-cv.tex
\input toytoymodel.tex
\input ref.tex
\end{document} |
\begin{document}
\title[]{The Extended Power Distribution: A new distribution on (0, 1)}
\author{C. ~E. Ogbonnaya, S. ~P. Preston, A. ~T. ~A. Wood}
\address{School of Mathematical Sciences, University of Nottingham}
\thanks{[email protected]}
\keywords{Beta distribution; Kumaraswamy distribution; bounded support; proportions; power function distribution}
\begin{abstract}
We propose a two-parameter bounded probability distribution called the extended power distribution. This distribution on $(0, 1)$ is similar to the beta distribution, however there are some advantages which we explore. We define the moments and quantiles of this distribution and show that it is possible to give an $r$-parameter extension of this distribution ($r>2$). We also consider its complementary distribution and show that it has some flexibility advantages over the Kumaraswamy and beta distributions. This distribution can be used as an alternative to the Kumaraswamy distribution since it has a closed form for its cumulative function. However, it can be fitted to data where there are some samples that are exactly equal to 1, unlike the Kumaraswamy and beta distributions which cannot be fitted to such data or may require some censoring. Applications considered show the extended power distribution performs favourably against the Kumaraswamy distribution in most cases.
\end{abstract}
\begin{nouppercase}
\maketitle
\end{nouppercase}
\section{Introduction}
In this work, we propose an interesting two-parameter probability distribution with bounded support and propose this as an alternative to the Kumaraswamy and beta distributions. The proposed distribution is bounded on $(0, 1)$, just like the beta and Kumaraswamy distributions. However, the extended power distribution has some advantage over the beta distribution, since its cumulative distribution can be obtained in closed form. We will explore properties of this distribution such as moments, quantiles and cumulative distribution. We even go further to give a closed form for its complementary distribution as discussed in \cite{jones2002complementary} and \cite{jones2009kumaraswamy} for the beta and Kumaraswamy distributions respectively.
The extended power distribution is an extension of the power function distribution (which is a special case of the beta distribution). However, it has the advantage of being easily extendable to a multi-parameter case, which add extra flexibility when fitting to observed samples. We can also easily obtain the cumulative distribution function of the generalised extended power distribution in closed form unlike the generalised beta distribution.
Other distributions with bounded support have been investigated in statistical literature, such as the beta distribution, truncated normal distribution, log-Lindley distribution (\cite{gomez2014log}) and the Kumaraswamy distribution (\cite{kumaraswamy1980generalized}). The beta distribution has been used to model data arising from distribution of proportions and is widely used in bayesian analysis as a conjugate prior for sampling proportions from the binomial distribution. A beta regression model has been proposed by \cite{ferrari2004beta} for modelling responses that are proportions. Using the idea of the uniform distribution, \cite{jones2004families} and \cite{eugene2002beta} have proposed generating a new class of distributions from the beta distribution with the shape parameters controlling asymmetry. In \cite{eugene2002beta}, a new distribution called the beta-normal distribution was proposed and other properties such as moments were explored. Using the idea of \cite{eugene2002beta}, other distributions arising from the beta distribution have been proposed such as the beta-exponential distribution (\cite{nadarajah2006beta}), beta-Gumbel distribution (\cite{nadarajah2004beta}), beta generalised exponential distribution (\cite{barreto2010beta}), beta-Pareto distribution (\cite{akinsete2008beta}), beta linear failure rate distribution (\cite{jafari2012beta}) among others. In \cite{jones2002complementary}, a new distribution arising from the quantile function of the beta distribution named the complementary beta distribution is proposed. However, a drawback of the beta distribution is the non-availability of its cumulative distribution in closed form. To deal with this \cite{kumaraswamy1980generalized} proposed a double bounded distribution (renamed Kumaraswamy distribution by \cite{jones2009kumaraswamy}). 
This distribution was originally proposed for modelling data in the field of hydrology but later suggested as an alternative to the beta distribution with a closed form for its cumulative distribution and a simple density function without any special functions. A new class of distributions arising from the Kumaraswamy distribution was proposed by \cite{cordeiro2011new}. These are sometimes called the Kumaraswamy-G distribution. Some new distributions proposed include the Kumaraswamy Weibull distribution (\cite{cordeiro2010kumaraswamy}), Kumaraswamy Gumbel distribution (\cite{cordeiro2012kumaraswamy}) and the Kumaraswamy generalised gamma distribution (\cite{de2011kumaraswamy}) among others. However, the Kumaraswamy distribution (just like the beta distribution) is unable to fit data in which some sample points are exactly $1$. The extended power distribution has some interesting advantages as an alternative to the beta distribution with its simple quantile function and interesting complementary distribution.
In this work, we propose a new bounded distribution with a closed form cumulative distribution function and with additional flexibility through generalisation. We will calculate moments of this distribution for the two parameter case and give its quantile function to enable simulations. We also show that for the generalised case with multiple parameters, simulation simply involves finding the feasible solution to some polynomial equation. We use applications to show that the extended power distribution performs favourably against the Kumaraswamy distribution. In section 2, we explore the origin and basic properties of the extended power distribution as well as special cases of the distribution such as the linear failure rate distribution, exponential and Raleigh distribution. In section 3, we calculate the moments and quantile of this distribution and give a procedure for calculating the maximum likelihood estimates of the parameters. We also explore the distribution of order statistics for the minimum and maximum and give a closed form for the generalised extended power distribution as well as discuss its basic properties. In section 4, we propose the complementary extended power distribution using the quantile distribution of the extended power distribution. We also obtain the moments, quantiles and give special cases of the complementary distribution. Conclusion and further discussions are given in section 5.
\section{Basics and special cases}
The name ``extended power distribution'' is obtained from the fact that the cumulative distribution function of the extended power function is derived from an extension of the power function. This was motivated by the extension of the single parameter power warping function to a warping function with $r$ parameters (the warping functions are used in functional data analysis for aligning curves). The power function is given by
\begin{equation}\label{eqn:eqn2}
G(t)=t^{\alpha}=\exp(\alpha \log (t)).
\end{equation}
We extended equation \ref{eqn:eqn2} in powers of $\log(t)$ and the two parameter case is what we have as the cumulative distribution function of the extended power function which is given in equation \ref{eqn:eqn3}
\begin{equation}\label{eqn:eqn3}
F(t)=\exp\{\alpha_{0} \log(t)-\alpha_{1}(\log (t))^{2}\}.
\end{equation}
The probability density function of the extended power distribution (EPD) with parameters $\alpha_{0}$ and $\alpha_{1}$ is given as follows
\begin{equation}\label{eqn:eqn1}
f(t)=\left\{\frac{\alpha_{0}-2\alpha_{1}\log (t)}{t}\right\}\exp\left\{\alpha_{0}\log (t) -\alpha_{1}(\log (t))^{2}\right\}, \quad t\in (0, 1).
\end{equation}
The shape parameters for this distribution satisfy $\alpha_{0}>0$ and $\alpha_{1}\geq 0$.
An implication of the relationship between the power function and the extended power distribution is that for $\alpha_{1}=0$, the extended power function reduces to a special case of beta distribution.
\begin{figure}
\caption{Plots of the extended power distribution for different values of $\alpha_{0}$ and $\alpha_{1}$.}
\label{fig:fig1}
\end{figure}
If $\alpha_{1}=0$, then
\begin{equation}\label{eqn:eqn4}
f(t)=\alpha_{0}t^{\alpha_{0}-1}
\end{equation}
which is a special case of the beta distribution with $\beta=1$. This special case is in fact the power function distribution, and is obtainable from the Kumaraswamy distribution by setting $\beta=1$. Recall that the density function for the beta distribution is
\begin{equation*}
g(t)=\frac{t^{\alpha-1}(1-t)^{\beta-1}}{B(\alpha, \beta)}
\end{equation*}
where $B(\alpha, \beta)$ is beta function. The density function for the Kumaraswamy distribution is
\begin{equation*}
g(t)=\alpha \beta t^{\alpha-1}(1-t^{\alpha})^{\beta-1}
\end{equation*}
For $\alpha_{0}=1$ and $\alpha_{1}=0$, the extended power distribution reduces to the uniform distribution on $(0, 1)$. In a similar manner, the Beta(1, 1) and Kumaraswamy(1, 1) gives the uniform on $(0, 1)$ (see \cite{jones2009kumaraswamy}). If T follows the extended power distribution with parameters $\alpha_{0}$ and $\alpha_{1}$, then $V=-\log (T)$ is a random variable from the linear failure rate distribution (\cite{bain1974analysis}, \cite{sarhan2009generalized}) with density function
\begin{equation*}
f(v)=(\alpha_{0}+2\alpha_{1}v)\exp\{-\alpha_{0}v-\alpha_{1}v^{2}\}, \quad 0<v<\infty.
\end{equation*}
Properties of this distribution including moments and quantiles have been studied by \cite{sen1995inference} and \cite{sen2006linear}.
For $\alpha_{1}=0$, we have
\begin{equation*}
f(v)=\alpha_{0}\exp\{-\alpha_{0}v\}, \quad 0<v<\infty
\end{equation*}
which is an exponential distribution with parameter $\alpha_{0}$. If we allow $\alpha_{0}=0$ in the linear failure rate distribution, then $V$ reduces to a random variable from the Rayleigh distribution with scale parameter $\sqrt{\frac{1}{2\alpha_{1}}}$ and density
\begin{equation*}
f(v)=2\alpha_{1}v\exp\{-\alpha_{1}v^{2}\}, \quad 0<v<\infty.
\end{equation*}
As stated earlier, an advantage of the extended power distribution over the beta distribution is that we have its cumulative distribution function in closed form. With a closed form for the cumulative distribution and invertibility, it is possible to easily use the probability integral transform for simulation. If we define the $U\sim U(0, 1)$, then equating $F(T)=U$ from equation \ref{eqn:eqn3}, we have
\begin{equation*}
\alpha_{1}(\log(T))^{2}-\alpha_{0}\log(T)+\log(U)=0
\end{equation*}
hence,
\begin{align}\label{eqn:eqn31}
T=\begin{cases}
\exp\left\{\frac{\alpha_{0}-\sqrt{\alpha_{0}^{2}-4\alpha_{1}\log(U)}}{2\alpha_{1}}\right\}, & \mbox{if } \alpha_{1}\neq 0 \\
U^{\frac{1}{\alpha_{0}}}, & \mbox{otherwise}.
\end{cases}
\end{align}
This random variable generator like that specified for the Kumaraswamy distribution (as mentioned in \cite{jones2009kumaraswamy}) is less complicated than those required to simulate from the beta distribution. To simulate a random variate T from the extended power distribution, we simply simulate U from $U(0, 1)$ and evaluate equation \ref{eqn:eqn31}. The limiting behaviour of the extended power distribution is as follows
\begin{eqnarray*}
\lim_{t \to 0}\frac{f(t)}{t^{\alpha_{0}-1}} &=& 0 \\
\lim_{t \to 1}f(t) &=&\alpha_{0}.
\end{eqnarray*}
\section{Moments, Quantiles and Estimators}
In this section, we will estimate some relevant quantities related to the extended power distribution. An interesting property of the extended power distribution is that we can estimate quantiles in nice closed form without any special functions as against the beta distribution whose median requires special functions. However, the moments of the extended power distribution are a bit more complicated than those of the beta and Kumaraswamy distributions. We need the complementary error integral functions (sometimes denoted by erfc(.)) to specify the moments of the extended power distribution.
\subsection{Moments}
We can derive a general formula for the kth moment of the extended power distribution.
\begin{thm}\label{cor1}
The kth moment of the extended power distribution is given as
\begin{equation}\label{eqn:eqn7}
E(T^{k})=1-\frac{k}{2}\sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(\alpha_{0}+k)^{2}}{4\alpha_{1}}\right\}\text{erfc}\bigg(\frac{\alpha_{0}+k}{2\sqrt{\alpha_{1}}}\bigg).
\end{equation}
\end{thm}
\begin{proof}
\begin{equation*}
E(T^{k})=\int_{0}^{1}t^{k}f(t)dt=\int_{0}^{1}t^{k-1}(\alpha_{0}-2\alpha_{1}\log(t))\exp\left\{\alpha_{0}\log(t)-\alpha_{1}(\log(t))^{2}\right\}dt
\end{equation*}
Defining $u=\alpha_{0}\log(t)-\alpha_{1}(\log(t))^{2}$, we have
\begin{equation*}
E(T^{k})=\exp\left\{\frac{\alpha_{0}k}{2\alpha_{1}}\right\}\int_{-\infty}^{0}\exp\left\{\frac{k(\alpha_{0}^{2}-4\alpha_{1}u)^{1/2}}{-2\alpha_{1}} \right\}\exp\{u\}du.
\end{equation*}
Let $v=k(\alpha_{0}^{2}-4\alpha_{1}u)^{1/2}$, this implies
\begin{equation*}
E(T^{k})=\frac{\exp\left\{\frac{(\alpha_{0}+k)^{2}}{4\alpha_{1}}\right\}}{2\alpha_{1}k^{2}}\int_{\alpha_{0}k}^{\infty}v\exp\left\{-\frac{(v+k^{2})^{2}}{4\alpha_{1}k^{2}}\right\}dv.
\end{equation*}
Therefore,
\begin{equation*}
E(T^{k})=1-\frac{k}{2}\sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(\alpha_{0}+k)^{2}}{4\alpha_{1}}\right\}\text{erfc}\bigg(\frac{\alpha_{0}+k}{2\sqrt{\alpha_{1}}}\bigg).
\end{equation*}
\end{proof}
From theorem \ref{cor1}, we have the following results for $E(T)$ and $Var(T)$
\begin{align*}
E(T)=&1-\frac{1}{2}\sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(\alpha_{0}+1)^{2}}{4\alpha_{1}}\right\}\text{erfc}\bigg(\frac{\alpha_{0}+1}{2\sqrt{\alpha_{1}}}\bigg)\\
Var(T)=&\bigg( \sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(\alpha_{0}+1)^{2}}{4\alpha_{1}}\right\}\text{erfc}\bigg(\frac{\alpha_{0}+1}{2\sqrt{\alpha_{1}}}\bigg)\bigg)\bigg( 1-\frac{1}{4}\sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(\alpha_{0}+1)^{2}}{4\alpha_{1}}\right\}\text{erfc}\bigg(\frac{\alpha_{0}+1}{2\sqrt{\alpha_{1}}}\bigg)\bigg)\\
&-\sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(\alpha_{0}+2)^{2}}{4\alpha_{1}}\right\}\text{erfc}\bigg(\frac{\alpha_{0}+2}{2\sqrt{\alpha_{1}}}\bigg)
\end{align*}
where $\text{erfc}(x)=2(1-\Phi(x\sqrt{2}))$ and $\Phi(.)$ is the normal CDF.
\subsection{Quantiles and Mode}
The $p$th quantile function for the extended power distribution is easily obtainable from the cumulative distribution function and is given as
\begin{equation}\label{eqn:eqn8}
Q_{p}=\exp\left\{\frac{\alpha_{0}-\sqrt{\alpha_{0}^{2}-4\alpha_{1}\log(p)}}{2\alpha_{1}}\right\}.
\end{equation}
Estimating the median in closed form is straightforward from equation \ref{eqn:eqn8} and it can be written as shown in equation \ref{eqn:eqn9}.
\begin{equation}\label{eqn:eqn9}
Q_{0.5}=\exp\left\{\frac{\alpha_{0}-\sqrt{\alpha_{0}^{2}-4\alpha_{1}\log(0.5)}}{2\alpha_{1}}\right\}
\end{equation}
This expression for the median is obviously an improvement on the beta distribution, where the median is expressed in terms of the incomplete beta function. Since the first derivative of the density ($f(t)$) is easily obtainable, we can calculate the mode of the extended power distribution in closed form.
The mode for the extended power distribution is given as
\begin{equation}\label{eqn:eqn10}
M=\exp\left\{ \frac{(2\alpha_{0}-1)-\sqrt{1+8\alpha_{1}}}{4\alpha_{1}}\right\}.
\end{equation}
\subsection{Maximum Likelihood Estimators}
Like in the beta and Kumaraswamy distributions, there is no simple form for the maximum likelihood estimators (MLEs) of $\alpha_{0}$ and $\alpha_{1}$. However, we can use a non-linear optimisation procedure to estimate these parameters numerically.
Given n random samples from the extended power distribution $t_{1}, t_{2}, \ldots, t_{n}$, the log-likelihood function is
\begin{equation*}
\ell(\alpha_{0}, \alpha_{1})=\sum_{i=1}^{n}\log (\alpha_{0}-2\alpha_{1}\log t_{i})-\sum_{i=1}^{n}\log t_{i}+\alpha_{0}\sum_{i=1}^{n}\log t_{i}-\alpha_{1}\sum_{i=1}^{n}(\log t_{i})^{2}.
\end{equation*}
Differentiating $\ell(\alpha_{0}, \alpha_{1})$ w.r.t $\alpha_{0}$ and $\alpha_{1}$, we obtain the system of equations
\begin{eqnarray}
\frac{\partial \ell(\alpha_{0}, \alpha_{1}) }{\partial \alpha_{0}} &=& \sum_{i=1}^{n}\frac{1}{(\alpha_{0}-2\alpha_{1}\log t_{i})}+\sum_{i=1}^{n}\log t_{i}=0 \\ \label{eqn:eqn11}
\frac{\partial \ell(\alpha_{0}, \alpha_{1}) }{\partial \alpha_{1}} &=& 2\sum_{i=1}^{n}\frac{\log t_{i}}{(\alpha_{0}-2\alpha_{1}\log t_{i})}+\sum_{i=1}^{n}(\log t_{i})^{2}=0. \label{eqn:eqn12}
\end{eqnarray}
The observed Fisher's information matrix is
\begin{equation*}
\bm{H}=\left[
\begin{array}{cc}
\sum_{i=1}^{n}\frac{1}{(\hat{\alpha}_{0}-2\hat{\alpha}_{1}\log t_{i})^{2}} & -2\sum_{i=1}^{n}\frac{\log t_{i}}{(\hat{\alpha}_{0}-2\hat{\alpha}_{1}\log t_{i})^{2}} \\
-2\sum_{i=1}^{n}\frac{\log t_{i}}{(\hat{\alpha}_{0}-2\hat{\alpha}_{1}\log t_{i})^{2}} & 4\sum_{i=1}^{n}\frac{(\log t_{i})^{2}}{(\hat{\alpha}_{0}-2\hat{\alpha}_{1}\log t_{i})^{2}} \\
\end{array}
\right].
\end{equation*}
Estimating the method of moments estimators will be more complicated, because the parameters of the distribution are contained in special functions of the population moment.
In table \ref{tab:table1}, we simulate random samples using the probability integral transform and use the MLE method to estimate the parameters of these samples. In each case, 5000 random samples were generated with specified parameter values for $\alpha_{0}$ and $\alpha_{1}$ and estimated parameters are compared to the actual parameter values.
\begin{table}[h!]
\begin{tabular}{|p{4.8cm}||p{5.9cm}|}
\hline
Actual Parameters $(\alpha_{0}, \alpha_{1})$ &Maximum Likelihood estimates $(\hat{\alpha}_{0}, \hat{\alpha}_{1})$\\
\hline
(2, 1)& (2.0042, 1.0088)\\
\hline
(1, 1)&(1.0110, 1.0022)\\
\hline
(1.2, 3.3)&(1.2093, 3.3191)\\
\hline
(0.02, 5)&(0.0174, 5.0162)\\
\hline
(3, 8)&(3.0528, 8.0279)\\
\hline
(0.8, 5)&(0.8301, 4.9695)\\
\hline
(0.8, 25)&(0.8555, 25.5528)\\
\hline
(1, 0.01)&(1.0047, 0.0063)\\
\hline
\end{tabular}
\caption{MLE for different simulated samples}
\label{tab:table1}
\end{table}
\subsection{Distribution of Order Statistics}
Let $T_{(1)}\leq T_{(2)} \leq T_{(3)} \leq \ldots \leq T_{(n)}$ be the order statistics of a random sample of size n from the extended power distribution with parameters $\alpha_{0}$ and $\alpha_{1}$ (EPD($\alpha_{0}$, $\alpha_{1}$)). Then the minimum, $T_{(1)}$ has density function
\begin{equation}\label{eqn:eqn28}
f(t_{(1)})=\left\{\frac{\alpha_{0}n-2\alpha_{1}n\log (t)}{t}\right\}\exp\left\{\alpha_{0}\log (t) -\alpha_{1}(\log (t))^{2}\right\}\bigg[1- \exp\left\{\alpha_{0}\log (t) -\alpha_{1}(\log (t))^{2}\right\}\bigg]^{n-1}.
\end{equation}
The minimum has the Kumaraswamy-G (Kw-G) distribution with parameters $a=1$ and $b=n$. The Kw-G distribution was introduced by \cite{cordeiro2011new} (motivated by the work of \cite{jones2004families} on distributions arising from the beta distribution) and has density function
\begin{equation}\label{eqn:eqn29}
f(t)=abg(t)G(t)^{a-1}\bigg[1-G(t)^{a}\bigg]^{b-1}
\end{equation}
where $G(t)$ is the parent continuous cumulative distribution function.
Similarly, the maximum $T_{(n)}$ has density function
\begin{equation}\label{eqn:eqn30}
f(t_{(n)})=\left\{\frac{\alpha_{0}n-2\alpha_{1}n\log (t)}{t}\right\}\exp\left\{\alpha_{0}n\log (t) -\alpha_{1}n(\log (t))^{2}\right\}
\end{equation}
which is an extended power distribution with parameters $\alpha_{0}n$ and $\alpha_{1}n$ (EPD ($\alpha_{0}n$, $\alpha_{1}n$)).
\subsection{A Generalisation}
An important advantage of the extended power distribution is that it can be easily generalised and will have a similar form to the two parameter case. Generalisations of the beta and Kumaraswamy distributions have been studied by \cite{gordy1998generalization}, \cite{nadarajah2003generalized}, \cite{mcdonald1995generalization} and these usually have complicated forms with cumulative distribution needing special functions (which makes simulations more complex and requires algorithms like rejection sampling). The generalised beta distribution has density function
\begin{equation}\label{eqn:eqn27}
h(t)=\frac{\mid a\mid t^{ap-1}(1-(1-c)(t/b)^{a})^{q-1}}{b^{ap}B(p, q)(1+c(t/b)^{a})^{p+q}}, \quad 0<t^{a}<b^{a}/(1-c).
\end{equation}
We can generalise the extended power distribution to have $r$ parameters and the density function is given as follows
\begin{equation}\label{eqn:eqn22}
f(t)=\frac{1}{t}\sum_{h=1}^{r}\alpha_{h-1}(\log t)^{h-1}h(-1)^{h-1}\exp\left\{\sum_{h=1}^{r}(-1)^{h-1}\alpha_{h-1}(\log t)^{h}\right\}, \quad t \in (0, 1).
\end{equation}
The $r$ parameters of this density function, $\alpha_{0}>0, \alpha_{1}\geq0, \alpha_{2}\geq0, \ldots, \alpha_{r-1}\geq0$, determine the shape of the distribution. Figure \ref{fig:fig6} gives plots of the density function of this distribution for the three-parameter and four-parameter cases.
\begin{figure}
\caption{Shapes of the three-parameter case}
\label{fig:fig6a}
\caption{Shapes of the four-parameter cases}
\label{fig:fig6b}
\caption{The EPD for the three and four-parameter cases}
\label{fig:fig6}
\end{figure}
This generalisation reduces to the two parameter extended power distribution if we set $\alpha_{2}=\alpha_{3}=\ldots=\alpha_{r-1}=0$ and to the $\text{Beta}(\alpha_{0}, 1)$, if we set $\alpha_{1}=\alpha_{2}=\alpha_{3}=\ldots=\alpha_{r-1}=0$. In a similar manner, setting $\alpha_{0}=1, \alpha_{1}=\alpha_{2}=\alpha_{3}=\ldots=\alpha_{r-1}=0$ gives the uniform distribution on $(0, 1)$. For this generalisation, the cumulative distribution function is
\begin{equation}\label{eqn:eqn23}
F(t)=\exp\left\{\sum_{h=1}^{r}(-1)^{h-1}\alpha_{h-1}(\log t)^{h}\right\}
\end{equation}
and we can simulate random variates using probability integral transform by finding the root of the polynomial
\begin{equation*}
\sum_{h=1}^{r}(-1)^{h-1}\alpha_{h-1}(\log T)^{h}-\log U=0
\end{equation*}
which lies on $(0, 1)$. This is more complicated than in the two parameter case, however, there are a number of available mathematical programs for calculating roots of a polynomial and we can easily utilise the roots function in MATLAB for this purpose. A similar scenario applies when calculating the median of the generalised distribution and we obtain the median ($M$) as a root of the polynomial
\begin{equation*}
\sum_{h=1}^{r}(-1)^{h-1}\alpha_{h-1}(\log M)^{h}-\log 0.5=0.
\end{equation*}
Maximum likelihood estimators can be derived for the generalised extended power distribution by first obtaining the likelihood function and optimising using numerical methods.
The log-likelihood function for the generalised density is
\begin{align*}\label{eqn:eqn24}
\ell(\alpha_{0}, \alpha_{1}, \ldots, \alpha_{r-1})=&\sum_{i=1}^{n}\log\bigg(\sum_{h=1}^{r}\alpha_{h-1}(\log t_{i})^{h-1}h(-1)^{h-1}\bigg) -\sum_{i=1}^{n}\log t_{i}\\
&+\sum_{h=1}^{r}(-1)^{h-1}\alpha_{h-1}\sum_{i=1}^{n}(\log t_{i})^{h}.
\end{align*}
Differentiating the log-likelihood function with respect to $\alpha_{h-1}$, we get
\begin{equation}\label{eqn:eqn25}
\frac{\partial \ell}{\partial \alpha_{h-1}}=\sum_{i=1}^{n}\frac{(\log t_{i})^{h-1}h(-1)^{h-1}}{\sum_{h=1}^{r}\alpha_{h-1}(\log t_{i})^{h-1}h(-1)^{h-1}}+(-1)^{h-1}\sum_{i=1}^{n}(\log t_{i})^{h}
\end{equation}
and second derivative
\begin{equation}\label{eqn:eqn26}
\frac{\partial^{2} \ell}{\partial \alpha_{h-1}\alpha_{k-1}}=\sum_{i=1}^{n}\frac{(\log t_{i})^{h+k-2}hk(-1)^{h+k-1}}{\bigg(\sum_{h=1}^{r}\alpha_{h-1}(\log t_{i})^{h-1}h(-1)^{h-1}\bigg)^{2}}.
\end{equation}
\subsection{Applications and Examples}
In this subsection, we show examples where the extended power distribution is applicable. We also compare how well the extended power function fits actual data to how the Kumaraswamy distribution compares in performance. Finally, we will simulate data using the extended power distribution and try fitting the data with the Kumaraswamy distribution to show cases where the extended power distribution has particular advantages over other bounded distributions. To fit the observed data, we will obtain maximum likelihood estimators for the parameters of the distribution by maximising the log-likelihood functions for both the extended power distribution and the Kumaraswamy distribution. Table \ref{tab:table2} shows the Akaike information criterion (AIC) of the fitted distributions in each application. The corrected AIC (AICc) and the Bayesian information criterion (BIC) which penalises more for extra parameters are given in tables \ref{tab:table4} and \ref{tab:table5} respectively. The MLE for the fitted distributions are detailed in table \ref{tab:table3}. In most of the examples, the extended power distribution performed better than the Kumaraswamy distribution (using AIC, AICc and BIC). The only exception is example 2, where the Kumaraswamy had the least BIC. The difference between this BIC and the BIC of the 3 parameter EPD is quite negligible.
\begin{table}[h!]
\begin{tabular}{|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|}
\hline
Distribution & Kumaras. &2-Par EPD &3-Par EPD & 4-Par EPD\\
\hline
Example 1 & -88.6054 & -70.4173 & -91.9646 & $\mathbf{-92.8209}$ \\
\hline
Example 2 & -82.7203 & -67.5975 & $\mathbf{-84.6515}$ & -83.0609\\
\hline
Example 3 & -153.3079 & -170.5573 & $\mathbf{-177.6280}$ & -175.6280\\
\hline
Example 4 & -82.4570 & $\mathbf{-83.0053}$ & -81.0053 & -79.0053 \\
\hline
Example 5 & -669.9358 & -719.9494 & -814.5105 & $\mathbf{-850.8778}$\\
\hline
Example 6 & -795.4145 & -965.8150 & $\mathbf{-996.5056}$ & $-994.5056$\\
\hline
Example 7 & &$\mathbf{-302.7052}$ & -300.7052 & -298.7052 \\
\hline
\end{tabular}
\caption{AIC for fitted distributions in examples 1-7, with the best fitting distribution in bold}
\label{tab:table2}
\end{table}
\begin{table}[h!]
\begin{tabular}{|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|}
\hline
Distribution & Kumaras. &2-Par EPD &3-Par EPD & 4-Par EPD\\
\hline
Example 1 & -88.4054 & -70.2173 & -91.5579 & $\mathbf{-92.1313}$ \\
\hline
Example 2 & -82.5203 & -67.3975 & $\mathbf{-84.3447}$ & -82.3712\\
\hline
Example 3 & -153.1364 & -170.3859 & $\mathbf{-177.2802}$ & -175.0398\\
\hline
Example 4 & -82.2570 & $\mathbf{-82.8053}$ & -80.5985 & -78.3156 \\
\hline
Example 5 & -669.9052 & -719.9188 & -814.4491 & $\mathbf{-850.7753}$\\
\hline
Example 6 & -795.4024 & -965.8030 & $\mathbf{-996.4815}$ & $-994.4654$\\
\hline
Example 7 & &$\mathbf{-302.6230}$ & -300.5396 & -298.4274 \\
\hline
\end{tabular}
\caption{AICc for fitted distributions in examples 1-7, with the best fitting distribution in bold}
\label{tab:table4}
\end{table}
\begin{table}[h!]
\begin{tabular}{|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|}
\hline
Distribution & Kumaras. &2-Par EPD &3-Par EPD & 4-Par EPD\\
\hline
Example 1 & -84.3191 & -66.1311 & $\mathbf{-85.5352}$ & -84.2484 \\
\hline
Example 2 & $\mathbf{-78.4340}$ & -63.3113 & -78.2221 & -74.4883\\
\hline
Example 3 & -148.7269 & -165.9764 & $\mathbf{-170.7566}$ & -166.4662\\
\hline
Example 4 & -78.1708 & $\mathbf{-78.7190}$ & -74.5759 & -70.4327 \\
\hline
Example 5 & -661.9780 & -711.9916 & -802.5738 & $\mathbf{-834.9623}$\\
\hline
Example 6 & -785.5990 & -955.9995 & $\mathbf{-981.7823}$ & $-974.8745$\\
\hline
Example 7 & &$\mathbf{-296.6973}$ & -291.6933 & -286.6894 \\
\hline
\end{tabular}
\caption{BIC for fitted distributions in examples 1-7, with the best fitting distribution in bold}
\label{tab:table5}
\end{table}
\begin{table}[h!]
\begin{tabular}{|p{2cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|}
\hline
Distribution & Kumaras. &2-Par EPD &3-Par EPD & 4-Par EPD\\
\hline
Example 1 & $\hat{\alpha}=4.51, \hat{\beta}=15.21$& $\hat{\alpha}_{0}=0.00, \hat{\alpha}_{1}= 1.72$ & $\hat{\alpha}_{0}=0.00, \hat{\alpha}_{1}= 0.00, \hat{\alpha}_{2}=2.00$ & $\hat{\alpha}_{0}=0.00, \alpha_{1}=0.00, \hat{\alpha}_{2}= 0.73, \hat{\alpha}_{3}=1.39$ \\
\hline
Example 2 & $\hat{\alpha}=4.31, \hat{\beta}=13.06$& $\hat{\alpha}_{0}=0.00, \hat{\alpha}_{1}= 1.68$ & $\hat{\alpha}_{0}=0.00, \hat{\alpha}_{1}= 0.00, \hat{\alpha}_{2}=1.90$ & $\hat{\alpha}_{0}=0.00, \alpha_{1}=0.00, \hat{\alpha}_{2}= 1.45, \hat{\alpha}_{3}=0.47$\\
\hline
Example 3 & $\hat{\alpha}=0.66, \hat{\beta}=3.44$& $\hat{\alpha}_{0}=0.01, \hat{\alpha}_{1}=0.10$ & $\hat{\alpha}_{0}=0.04, \hat{\alpha}_{1}= 0.00, \hat{\alpha}_{2}=0.02$ & $\hat{\alpha}_{0}=0.04, \hat{\alpha}_{1}=0.00, \hat{\alpha}_{2}=0.02, \hat{\alpha}_{3}=0.00$\\
\hline
Example 4 & $\hat{\alpha}=5.85, \hat{\beta}=3.03$& $\hat{\alpha}_{0}=0.25, \hat{\alpha}_{1}=7.03$ & $\hat{\alpha}_{0}=0.25, \hat{\alpha}_{1}= 7.03, \hat{\alpha}_{2}=0.00$ & $\hat{\alpha}_{0}=0.25, \hat{\alpha}_{1}= 7.03, \hat{\alpha}_{2}=0.00, \hat{\alpha}_{3}=0.00$ \\
\hline
Example 5 & $\hat{\alpha}=1.00, \hat{\beta}=5.28$& $\hat{\alpha}_{0}=0.00, \hat{\alpha}_{1}=0.17$ & $\hat{\alpha}_{0}=0.03, \hat{\alpha}_{1}= 0.00, \hat{\alpha}_{2}=0.06$ & $\hat{\alpha}_{0}=0.07, \hat{\alpha}_{1}= 0.00, \hat{\alpha}_{2}=0.00, \hat{\alpha}_{3}=0.02$\\
\hline
Example 6 & $\hat{\alpha}=3.58, \hat{\beta}= 2.05$& $\hat{\alpha}_{0}=0.51, \hat{\alpha}_{1}=3.38$ & $\hat{\alpha}_{0}=0.90, \hat{\alpha}_{1}= 0.62, \hat{\alpha}_{2}=3.22$ & $\hat{\alpha}_{0}=0.90, \hat{\alpha}_{1}= 0.62, \hat{\alpha}_{2}=3.22, \hat{\alpha}_{3}=0.00$\\
\hline
Example 7 & & $\hat{\alpha}_{0}=6.53, \hat{\alpha}_{1}=0.00$ & $\hat{\alpha}_{0}=6.53, \hat{\alpha}_{1}=0.00, \hat{\alpha}_{2}=0.00$ & $\hat{\alpha}_{0}=6.53, \hat{\alpha}_{1}= 0.00, \hat{\alpha}_{2}=0.00, \hat{\alpha}_{3}=0.00$\\
\hline
\end{tabular}
\caption{MLE for fitted distributions in examples 1-7}
\label{tab:table3}
\end{table}
\subsubsection{Example 1}
In this example, we explore data on US senate voting patterns from 1953 to 2015. The observed data are proportion of party unity votes in which a majority of voting Democrats opposed a majority of voting Republicans. The data is taken from the Brookings Institution (\cite{Brookings2017}) and is available for free.
A histogram of the actual data and its empirical cumulative distribution along with fitted density and distribution functions are given in figure \ref{fig:fig3}. From the fitted distributions, we see that the four-parameter extended power distribution best fits the actual data.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig3a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig3b}
\caption{Fitting proportion of US Senate party unity votes using the Kumaraswamy and extended power distributions}
\label{fig:fig3}
\end{figure}
\subsubsection{Example 2}
This second data explores proportion of unity votes in the US House of Representatives from 1953 to 2015. The data is also from \cite{Brookings2017}. A histogram (and empirical cumulative distribution) of the actual data and fitted distributions using MLE are shown in figure \ref{fig:fig4}. In this case, the three-parameter and four-parameter extended power distribution give the best fit, with the four-parameter case having a slightly higher peak. From the cumulative distribution plot, we see that at the lower tail, the three and four-parameter extended power distribution function best fits the empirical distribution function.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig4a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig4b}
\caption{Fitting proportion of US House of Representatives party unity votes using the Kumaraswamy and extended power distributions}
\label{fig:fig4}
\end{figure}
\subsubsection{Example 3}
This data, which is taken from \cite{jodra2016note}, is a measure of a firm's risk management cost effectiveness given as a proportion. This data has been used in \cite{jodra2016note} for regression modelling using the beta and log-Lindley distributions. The extended power distribution gives the best fit for this data and even has smaller AIC than that obtained for the log-Lindley distribution (\cite{gomez2014log}) which is $-149.2083$.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig13a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig13b}
\caption{Fitting risk management cost effectiveness using the Kumaraswamy and extended power distributions}
\label{fig:fig13}
\end{figure}
\subsubsection{Example 4}
This example involves proportion of presidential bill victories in the US Senate from President Einsenhower in 1953 to President Obama in 2015 (obtained from \cite{Brookings2017}). The fitted distributions are shown in figure \ref{fig:fig9}. In this case, the two-parameter extended power distribution seems to give the best fit.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig9a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig9b}
\caption{Fitting proportion of US senators of who support presidential bills using the Kumaraswamy and extended power distributions}
\label{fig:fig9}
\end{figure}
\subsubsection{Example 5}
This example is taken from the 2011 census data on the proportion of minority ethnic groups across different local authorities in England and Wales. The data is obtained from the Office of National Statistics 2011 census (\cite{ONSrace}) and the minorities include groups such as white Irish, white and black Caribbean, Arabian, etc. The extended power distribution has the best fit for this example.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig10a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig10b}
\caption{Fitting proportion of ethnic minorities in different local authorities in England and Wales using the Kumaraswamy and extended power distributions}
\label{fig:fig10}
\end{figure}
\subsubsection{Example 6}
In this example, we simulate data ($n=1000$) from the three-parameter extended power distribution with parameters $\alpha_{0}=1$, $\alpha_{1}=0.001$ and $\alpha_{2}=4$. We then fit the simulated data using the Kumaraswamy distribution. From figure \ref{fig:fig7}, we see that the Kumaraswamy distribution fails to properly fit the data as values of the random variable approach 1.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig7a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig7b}
\caption{Fitting a Kumaraswamy distribution and extended power distribution to data simulated from the three-parameter extended power distribution}
\label{fig:fig7}
\end{figure}
\subsubsection{Example 7}
This example, obtained from the UNICEF website (\cite{UNICEFeducation}) contains the proportion of literate youths in 149 different countries (updated in October, 2015). Figure \ref{fig:fig11} shows a plot of the actual data as well as the maximum likelihood fits. In this example, the Kumaraswamy distribution is not applicable, because some countries have youth literacy of $100\%$ (that is proportion is 1), however, the Kumaraswamy distribution has density converging to 0 (for $\beta>1$) and to $\infty$ (for $\beta<1$) when $t$ is exactly 1, making the log-likelihood function undefined at $t=1$ and the MLE cannot be computed. For this example, the two-parameter extended power distribution has the smallest AIC values, hence gives the best fit.
\begin{figure}
\caption{Density functions fitted to histogram}
\label{fig:fig11a}
\caption{Fitting cdfs to the empirical cdf}
\label{fig:fig11b}
\caption{Fitting youths literacy rates in 149 different countries using the Kumaraswamy and extended power distributions}
\label{fig:fig11}
\end{figure}
\section{Complementary Distribution}
The complementary distribution for the beta distribution has been defined in \cite{jones2002complementary} and is obtained by considering the quantile function as the cumulative distribution function of a probability distribution on $(0, 1)$. This same procedure can be applied to other bounded distributions on $(0, 1)$ to get a new bounded distribution. In the complementary beta distribution, the forms of the density function and moments involve the use of special functions as given in \cite{jones2002complementary}. However, for the complementary Kumaraswamy distribution (\cite{jones2009kumaraswamy}), even though the density function can be written in a nice form, there is nothing new to be seen in its form because it is equivalent to $g(1-t, \frac{1}{\beta}, \frac{1}{\alpha})$, where $g\sim \text{Kumaraswamy}(t, \alpha, \beta)$.
In this section, we will define the complementary extended power distribution, its properties such as moments, quantiles and parameter estimation. The density function of the complementary extended power distribution is obtained by taking the first derivative of the quantile function. The quantile function is given as
\begin{equation*}
Q(t)=F^{-1}(t)=\exp\left\{\frac{-\alpha_{0}+(\alpha_{0}^{2}-4\alpha_{1}\log(t))^{1/2}}{-2\alpha_{1}}\right\},
\end{equation*}
hence the density function of the proposed complementary extended power distribution is
\begin{equation}\label{eqn:eqn16}
q(t)=\frac{1}{t}(\alpha_{0}^{2}-4\alpha_{1}\log t)^{-1/2}\exp\left\{\frac{-\alpha_{0}+(\alpha_{0}^{2}-4\alpha_{1}\log(t))^{1/2}}{-2\alpha_{1}}\right\}.
\end{equation}
An interesting property of this distribution at first sight is that both its density function and distribution function are available in a simple closed form without the need for any special function. Secondly, it has a form which is not exactly similar to the extended power distribution, implying some new information may be gained.
An interesting special case of the complementary extended power distribution is obtained by setting $\alpha_{1}=0$. Simply replacing $\alpha_{1}=0$ in equation \ref{eqn:eqn16}, will make the exponent indeterminate. To deal with this problem, we use binomial expansion on $(\alpha_{0}^{2}-4\alpha_{1}\log(t))^{1/2}$, hence the density function becomes
\begin{equation*}
q(t)= \frac{1}{t}(\alpha_{0}^{2}-4\alpha_{1}\log t)^{-1/2}\exp\left\{ \frac{1}{\alpha_{0}}\log t +\frac{\alpha_{1}(\log t)^{2}}{\alpha_{0}^{3}}+\frac{2\alpha_{1}^{2}(\log t)^{3}}{\alpha_{0}^{5}}+ \ldots\right\}, \quad t \in (0, 1)
\end{equation*}
and setting $\alpha_{1}=0$ gives
\begin{equation*}
q(t)=\frac{1}{\alpha_{0}}t^{\frac{1}{\alpha_{0}}-1}
\end{equation*}
which corresponds to a $\text{Beta}(\frac{1}{\alpha_{0}}, 1)$ or a $\text{Kumaraswamy}(\frac{1}{\alpha_{0}}, 1)$. Like in the extended power distribution considered earlier, fixing $\alpha_{0}=1$ and $\alpha_{1}=0$, $q(t)$ reduces to the density for a uniform distribution on $(0, 1)$. If T is a random variable from the two-parameter complementary extended power distribution, with $\alpha_{1}=0$, then the random variable $V=-\log T$ follows an exponential distribution with density function
\begin{equation*}
f(v)=\frac{1}{\alpha_{0}}\exp\{-\frac{v}{\alpha_{0}}\}.
\end{equation*}
Generating random variates from the complementary extended power distribution is straightforward using the probability integral transform. If $U\sim U(0, 1)$, then we simulate random variables T using,
\begin{equation}\label{eqn:eqn19}
T=\exp\left\{ \alpha_{0}\log U-\alpha_{1}(\log U)^{2}\right\}.
\end{equation}
\subsection{Moments, Quantiles and Mode}
In this section, we will give formulas for the moments, quantiles and mode of the complementary extended power distribution.
We will begin by giving a formula for the $kth$ moment of the complementary extended power distribution. This makes it easier to calculate quantities such as the variance, skewness and kurtosis.
\begin{thm}
The $kth$ moment of the complementary extended power distribution is
\begin{equation}\label{eqn:eqn18}
E(T^{k})=\frac{1}{2}\sqrt{\frac{\pi}{\alpha_{1}k}}\exp\left\{\frac{(4\alpha_{0}\alpha_{1}k+4\alpha_{1})^{2}}{64\alpha_{1}^{3}k}\right\}\text{erfc}\bigg(\frac{4\alpha_{0}\alpha_{1}k+4\alpha_{1}}{8\alpha_{1}^{3/2}k^{1/2}}\bigg).
\end{equation}
\end{thm}
\begin{proof}
\begin{equation*}
E(T^{k})=\int_{0}^{1}t^{k-1}(\alpha_{0}^{2}-4\alpha_{1}\log t)^{-1/2}\exp\left\{\frac{-\alpha_{0}+(\alpha_{0}^{2}-4\alpha_{1}\log(t))^{1/2}}{-2\alpha_{1}}\right\}dt.
\end{equation*}
If we define $u=\frac{-\alpha_{0}+(\alpha_{0}^{2}-4\alpha_{1}\log(t))^{1/2}}{-2\alpha_{1}}$, we have
\begin{equation*}
E(T^{k})=\exp\left\{ \alpha_{1}k\bigg(\ \frac{4\alpha_{0}\alpha_{1}k+4\alpha_{1}}{8\alpha_{1}^{2}k}\bigg)^{2}\right\}\int_{-\infty}^{0}\exp\left\{-\alpha_{1}k\bigg[u- \bigg(\ \frac{4\alpha_{0}\alpha_{1}k+4\alpha_{1}}{8\alpha_{1}^{2}k}\bigg)\bigg]^{2}\right\}du.
\end{equation*}
Simplifying further, we have the final result as
\begin{equation*}
E(T^{k})=\frac{1}{2}\sqrt{\frac{\pi}{\alpha_{1}k}}\exp\left\{\frac{(4\alpha_{0}\alpha_{1}k+4\alpha_{1})^{2}}{64\alpha_{1}^{3}k}\right\}\text{erfc}\bigg(\frac{4\alpha_{0}\alpha_{1}k+4\alpha_{1}}{8\alpha_{1}^{3/2}k^{1/2}}\bigg).
\end{equation*}
\end{proof}
With this result, we have $E(T)$ and $Var(T)$ as
\begin{align*}
E(T)=&\frac{1}{2}\sqrt{\frac{\pi}{\alpha_{1}}}\exp\left\{\frac{(4\alpha_{0}\alpha_{1}+4\alpha_{1})^{2}}{64\alpha_{1}^{3}}\right\}\text{erfc}\bigg(\frac{4\alpha_{0}\alpha_{1}+4\alpha_{1}}{8\alpha_{1}^{3/2}}\bigg)\\
Var(T)=&\frac{1}{2}\sqrt{\frac{\pi}{2\alpha_{1}}}\exp\left\{\frac{(8\alpha_{0}\alpha_{1}+4\alpha_{1})^{2}}{128\alpha_{1}^{3}}\right\}\text{erfc}\bigg(\frac{8\alpha_{0}\alpha_{1}+4\alpha_{1}}{(2^{7/3}\alpha_{1})^{3/2}}\bigg)\\
&-\frac{1}{4}\frac{\pi}{\alpha_{1}}\exp\left\{\frac{(4\alpha_{0}\alpha_{1}+4\alpha_{1})^{2}}{32\alpha_{1}^{3}}\right\}\left\{\text{erfc}\bigg(\frac{4\alpha_{0}\alpha_{1}+4\alpha_{1}}{8\alpha_{1}^{3/2}}\bigg)\right\}^{2}.\\
\end{align*}
The median for the complementary extended power distribution is
\begin{equation*}
Q_{0.5}=\exp\left\{ \alpha_{0}\log(0.5)-\alpha_{1}(\log(0.5))^{2}\right\}.
\end{equation*}
The mode is obtainable in a closed form by calculating the first derivative of $q(t)$ and equating it to zero. Hence, we can calculate the mode by solving the cubic equation
\begin{equation}\label{eqn:eqn20}
A_{0}+A_{1}\log t+A_{2}(\log t)^{2}+A_{3}(\log t)^3=0
\end{equation}
where
\begin{eqnarray*}
A_{0} &=& \alpha_{0}^{2}-4\alpha_{0}^{4}\alpha_{1}+4\alpha_{0}^{6}\alpha_{1}^{2}-1 \\
A_{1} &=& -48\alpha_{0}^{4}\alpha_{1}^{3}-4\alpha_{1} \\
A_{2} &=& 192\alpha_{0}^{2}\alpha_{1}^{4}-64\alpha_{1}^{3} \\
A_{3} &=& -256\alpha_{1}^{5}.
\end{eqnarray*}
\subsection{Maximum Likelihood Estimation}
The log-likelihood function of the complementary extended power distribution is
\begin{equation}\label{eqn:eqn21}
\ell(\alpha_{0}, \alpha_{1})=\sum_{i=1}^{n}\log (\alpha_{0}^{2}-4\alpha_{1}\log t_{i})^{-1/2}-\sum_{i=1}^{n}\log t_{i}+\frac{\alpha_{0}n}{2\alpha_{1}}-\frac{1}{2\alpha_{1}}\sum_{i=1}^{n}(\alpha_{0}^{2}-4\alpha_{1}\log t_{i})^{1/2}
\end{equation}
and we have the following system of equations by differentiating the log-likelihood
\begin{eqnarray*}
\frac{\partial \ell(\alpha_{0}, \alpha_{1})}{\partial \alpha_{0}} &=& -\alpha_{0}\sum_{i=1}^{n}(\alpha_{0}^{2}-4\alpha_{1}\log t_{i})^{-1}+\frac{n}{2\alpha_{1}}-\frac{\alpha_{0}}{2\alpha_{1}}\sum_{i=1}^{n}(\alpha_{0}^{2}-4\alpha_{1}\log t_{i})^{-1/2}=0 \\
\frac{\partial \ell(\alpha_{0}, \alpha_{1})}{\partial \alpha_{1}} &=& 2\sum_{i=1}^{n}\log t_{i}(\alpha_{0}^{2}-4\alpha_{1}\log t_{i})^{-1}-\frac{\alpha_{0}n}{2\alpha_{1}^{2}}-\frac{\alpha_{1}^{-3}}{2}\sum_{i=1}^{n}(\alpha_{0}^{2}-4\alpha_{1}\log t_{i})^{-1/2}=0.
\end{eqnarray*}
Like in the extended power function, we can apply non-linear optimisation to estimate the unknown parameters.
\section{Conclusion}
A bounded probability distribution motivated by warping functions in functional data analysis is proposed. We have explored properties of the extended power distribution (EPD), such as moments and applications. We have given special cases of the distribution which are related to the Raleigh distribution, exponential distribution, beta distribution and linear hazard rate distribution (\cite{bain1974analysis}).
In this work, closed forms for the mode, median and other quantiles were given. An important property of this distribution over the beta distribution is that we have its cumulative distribution function and quantile function available in simple mathematical forms which makes it easy to simulate using the probability integral transform.
\begin{figure}
\caption{Plots of the 5-parameter extended power distribution for different parameter values showing some extra flexibilities}
\label{fig:fig8}
\end{figure}
We note that the Kumaraswamy distribution and extended power distribution have some interesting properties in common, such as a closed form for their cumulative distribution and quantile functions. However, unlike the Kumaraswamy distribution, the extended power distribution is easily extendable from a two-parameter to a multi-parameter distribution. This multi-parameter extension comes with added flexibility as we have seen in some applications in this work. In figure \ref{fig:fig8} for example, we show a five-parameter case of the extended power distribution with different parameter choices. Another advantage of the extended power distribution is that as $t$ nears $1$, the density approaches the parameter $\alpha_{0}$, while the density function of the Kumaraswamy distribution (and the beta distribution) approaches $0$ or $\infty$, as $t$ approaches $1$. This is useful in applications where there is a high proportion of observed values closer to the upper bound.
The generalisation of the extended power distribution makes computing moments more complicated and would involve numerical integration. However, simulations using the probability integral transform are less complicated and would involve finding the roots of a polynomial (same applies to the median). We can also estimate the parameters of the generalised extended power distribution numerically, using maximum likelihood.
Like in \cite{jones2002complementary}, we have proposed a complementary extended power distribution, which is the distribution derived from the quantile function of the extended power distribution. We have shown that this distribution is linked to the beta distribution and the exponential distribution with inverted parameters. Simulations from this distribution are easy using the probability integral transform because of the closed form of the cumulative distribution function of the extended power distribution. However, the mode of the complementary extended power distribution is not available in closed form, but can be calculated as the solution to a cubic equation.
\begin{figure}
\caption{Fitting distributions to skewed data with high peak}
\label{fig:fig12}
\end{figure}
We note that for skewed data (either left or right skewed) with very high peaks, the Kumaraswamy distribution gives a better fit than both the beta distribution and extended power distribution. This is seen in figure \ref{fig:fig12}, for data containing information on employment rates in different counties in the US (\cite{BLSemploy}). The Kumaraswamy distribution has a higher peak than the two, three and four-parameter extended power distribution.
In the future, more applications peculiar to the extended power distribution needs to be explored. It will also be interesting to consider a new family of distributions by combining the extended power distribution and its complementary distribution. Just like the beta regression models are used for modelling bounded responses, we believe the extended power distribution can be alternative to the beta distribution. Programs for calculating different quantities related to extended power distribution are available in MATLAB.
\end{document} |
\begin{document}
\title{
Forcing clique immersions through chromatic number
\thanks{This work supported by the European Research Council under the European Union's Seventh Framework Programme (FP7/2007-2013)/ERC Grant Agreement no. 279558.}}
\author{
Gregory Gauthier
\thanks{ Princeton University, Princeton, NJ, USA, \texttt{[email protected]}},
Tien-Nam Le
\thanks{Laboratoire d'Informatique du Parall\'elisme,
\'Ecole Normale Sup\'erieure de Lyon, France, \texttt{[email protected]}}, \\ and
Paul Wollan
\thanks{Department of Computer Science, University of Rome, ``La Sapienza'',
Rome, Italy, \texttt{[email protected]}}}
\date{}
\maketitle
\begin{abstract}
Building on recent work of Dvo\v{r}\'ak and Yepremyan, we show that every simple graph of minimum degree $7t+7$ contains $K_t$ as an immersion and that every graph with chromatic number at least $3.54t + 4$ contains $K_t$ as an immersion. We also show that every graph on $n$ vertices with no stable set of size three contains $K_{2\lfloor n/5 \rfloor}$ as an immersion.
\end{abstract}
Keywords: Graph immersion, Hadwiger conjecture, chromatic number.
\section{Introduction} \label{section:introduction}
\subsection{Hadwiger's conjecture}
The graphs in this paper are simple and finite, while multigraphs may have loops and multiple edges.
A fundamental question in graph theory is the relationship between the chromatic number of a graph $G$ and the presence of certain structures in $G$. One of the most well-known specific examples of this type of question is the Four Color Theorem, which states that every planar graph is 4-colorable.
Hadwiger \cite{Had} in 1943 proposed a far-reaching generalization of the Four Color Theorem, which asserts that for all positive integers $t$, every graph of chromatic number $t$ contains $K_t$, the clique on $t$ vertices, as a minor. In 1937, Wagner \cite{wa} proved that Hadwiger's conjecture for $t=5$ is equivalent to the Four Color Theorem. Robertson, Seymour, and Thomas \cite{RST} settled the conjecture for $t=6$, while the conjecture is still open for $t\ge 7$.
On the other hand, it was independently proved in 1984 by Kostochka and Thomason \cite{Ko,Tho} that a graph without a $K_t$-minor is $O(t\sqrt{\log t})$-colorable for every $t\ge 1$, and there has been no improvement in the order $t\sqrt{\log t}$ since then.
For graphs with no stable set of size three (i.e. there do not exist three vertices, all pairwise nonadjacent), Duchet and Meyniel \cite{DM} proposed a conjecture analogous to Hadwiger's conjecture, that every graph with $n$ vertices and no stable set of size three contains a $K_{\lceil n/2\rceil}$-minor, and proved that such graphs contain $K_{\lceil n/3\rceil}$ as a minor, which remains the best bound to date. Plummer, Stiebitz, and Toft \cite{PST} showed that the conjecture of Duchet and Meyniel is indeed equivalent to Hadwiger's conjecture for graphs with no stable set of size three.
\subsection{Graph immersion}
In this paper, we focus on the immersion relation on graphs, which is a variant of minor relation (see \cite{RS}).
We follow the definitions in \cite{Wo}.
Given loopless multigraphs $G,H$, we say that $G$ admits an \emph{immersion} of $H$ if there exist functions $\pi_1:V(H)\to V(G)$ and $\pi_2$ mapping the edges of $H$ to paths of $G$ satisfying the following:
\begin{itemize}
\item the map $\pi_1$ is an injection;
\item for every edge $e\in E(H)$ with endpoints $x$ and $y$, $\pi_2(e)$ is a path with endpoints equal to $\pi_1(x)$ and $\pi_1(y)$; and
\item for edges $e,e'\in E(H)$, $e\ne e'$, $\pi_2(e)$ and $\pi_2(e')$ have no edge in common.
\end{itemize}
We say that $G$ admits a \emph{strong immersion} of $H$ if the following condition holds as well.
\begin{itemize}
\item For every edge $e\in E(H)$ with endpoints $x$ and $y$, the path $\pi_2(e)$ intersects the set $\pi_1(V(H))$ only in its endpoints.
\end{itemize}
The vertices $\{\pi_1(x) : x \in V (H)\}$ are the \emph{branch vertices} of the immersion. We will
also say that $G$ (strongly) immerses $H$ or alternatively that $G$ contains $H$ as a (strong) immersion.
We can alternately define immersions as follows. Let $e_1$ and $e_2$ be distinct edges in $G$ such that the endpoints of $e_1$ are $x, y$ and the endpoints of $e_2$ are $y, z$. To \emph{split off} the edges $e_1$ and $e_2$, we delete the edges $e_1$ and $e_2$ from $G$ and add
a new edge $e$ with endpoints $x$ and $z$ (note
that this might result in a multi-edge or a loop). Then $G$ contains $H$ as an immersion if and
only if $H$ can be obtained from a subgraph of $G$ by repeatedly splitting off pairs of edges and deleting isolated vertices.
We consider a variant of Hadwiger's conjecture for graph immersions due to Lescure and Meyniel \cite{LM} in 1989 and, independently, to Abu-Khzam and Langston \cite{AL} in 2003.
The conjecture explicitly states the following.
\begin{conjecture}[\cite{AL}, \cite{LM}]\label{conj:main}
For every positive integer $t$, every graph with no $K_t$ immersion is properly colorable with at most $t-1$ colors.
\end{conjecture}
Conjecture \ref{conj:main} is trivial for $t \le 4$, and was independently proved by
Lescure and Meyniel \cite{LM} and DeVos et al. \cite{DKMO10} for $5 \le t \le 7$.
One can immediately show that a minimum counterexample to Conjecture \ref{conj:main} has minimum degree $t-1$. Thus, the conjecture provides additional motivation for the natural question of what is the smallest minimum degree necessary to force a clique immersion. DeVos et al. \cite{mohar} showed that minimum degree $200t$ suffices to force a $K_t$ immersion in a simple graph. This implies that every graph without a $K_t$-immersion is $200t$-colorable, providing the first linear bound for Conjecture \ref{conj:main}, while, as we discussed above, the best known bound for the Hadwiger's conjecture is superlinear.
The bound $200t$ was recently improved by Dvo\v{r}\'ak and Yepremyan \cite{dvo} to $11t+7$.
\begin{theorem}[Dvo\v{r}\'ak--Yepremyan, \cite{dvo}]\label{theorem:mindeg11}
Every graph with minimum degree at least $11t+7$ contains an immersion of $K_t$.
\end{theorem}
We give a new result on clique immersions in dense graphs; we leave the exact statement for Section \ref{section:dense} below. As a consequence, it is possible to improve the analysis in \cite{dvo} and obtain the following bound.
\begin{theorem}\label{theorem:mindeg}
Every graph with minimum degree at least $7t+7$ contains an immersion of $K_t$.
\end{theorem}
Conjecture \ref{conj:main} can be relaxed to consider the following question.
\begin{problem}
What is the smallest function $f$ such that for all positive integers $t$ and all graphs $G$ with $\chi(G) \ge f(t)$, it holds that $G$ contains $K_t$ as an immersion?
\end{problem}
As observed above, a minimum counterexample to Conjecture \ref{conj:main} has minimum degree $t-1$. Thus by Theorem \ref{theorem:mindeg}, we get that chromatic number at least $f(t) = 7t+8$ forces a $K_t$ immersion. By combining our results for dense graphs with arguments based on analyzing Kempe chains in proper colorings of graphs, we obtain the following improved bound.
\begin{theorem}\label{theorem:chromatic}
Every graph with chromatic number at least $3.54t+4$ contains an immersion of $K_t$.
\end{theorem}
For graphs with no stable set of size three, Vergara \cite{V17} proposed a similar conjecture as that of Duchet and Meyniel that every graph with $n$ vertices and no stable set of size three contains a strong $K_{\lceil n/2\rceil}$-immersion and proved that it is equivalent to Conjecture \ref{conj:main} for graphs with no stable set of size three. In the same paper, Vergara showed that a relaxation to $K_{\lceil n/3\rceil}$-immersion holds. We improve this to $K_{2\lfloor n/5 \rfloor}$.
\begin{theorem}\label{theorem:2.5}
For every integer $n\ge 1$, every graph $G$ with $n$ vertices and no stable set of size three has a strong immersion of $K_{2\lfloor n/5 \rfloor}$.
\end{theorem}
An extended abstract presenting Theorems \ref{theorem:mindeg} and \ref{theorem:chromatic} appeared in 2016 \cite{LW}.
\subsection{Notation}
Given a multigraph $G$ and distinct vertices $u,v\in V(G)$, if there are $k\ge 2$ edges between $u$ and $v$, we say that $uv$ is a \emph{multi-edge} with \emph{multiplicity} $k$, and if $u$ is not adjacent to $v$, we say that $uv$ is a \emph{missing edge}.
We denote by $N_G(v)$ the (non-repeated) set of neighbors of $v$ in $G$, and by $d_G(v)$ the {degree} of $v$ in $G$ (where a loop is counted $2$ and a multi-edge with multiplicity $k$ is counted $k$).
We denote by $E_G(v)$ the multi-set of edges (loops are excluded) incident with $v$ (if $uv$ is a multi-edge of multiplicity $k$ then there are $k$ edges $uv$ in $E_G(v)$).
Given $X\subseteq V(G)$, we denote by $f_G(v|X)$ the number of vertices in $X\backslash\{v\}$ which are not adjacent to $v$ in $G$, and we write $f_G(v)=f_G(v|V(G))$ for short.
When it is clear in the context, we omit the subscript $G$ in this notation. Note that if $G$ is simple, then $d(v)=|N(v)|=|E(v)|=|V(G)|-f(v)-1$, but this may not be the case if $G$ is a multigraph.
Given a multigraph $G$ and a subset $M$ of $V(G)$, let $G[M]$ denote the subgraph of $G$ induced by $M$.
Given a path linking vertices $u$ and $v$, to \emph{split off the path}, we delete the edges of the path and add an edge $uv$ to $G$. Given a vertex $v$ with $|E_G(v)|$ even, to \emph{suppress} $v$, we first match all edges of $E_G(v)$ into pairs; then we split off every pair $\{vu,vw\}$ of the matching, and finally delete $v$ and its loops (if any).
Note that after suppressing a vertex, the degrees of the other vertices are unchanged. Both operations (splitting off a path and suppressing a vertex) can be expressed as a sequence of splitting off pairs of edges.
Given two multigraphs $G$ and $G'$, we define the \emph{union} of $G$ and $G'$, denoted $G \cup G'=G^*$ to be the multigraph with vertex set $V(G) \cup V(G')$ and the following edge set. For every two vertices $u$ and $v$ in $V(G) \cup V(G')$, the number of edges $uv$ in $G^*$ is equal to the sum of the number of edges $uv$ in $G$ and $G'$.
The structure of the paper is as follows. In Section \ref{section:dense}, we give some results on clique immersion in dense graphs, which are necessary for the proofs of Theorems \ref{theorem:mindeg} and \ref{theorem:chromatic}. Then we prove Theorems \ref{theorem:mindeg}, \ref{theorem:chromatic}, and \ref{theorem:2.5} in Sections \ref{section:minimum}, \ref{section:chromatic}, and \ref{section:2.5}, respectively.
\section{Clique immersion in dense graphs} \label{section:dense}
In the following lemma, we show that if $G$ contains a set $M$ of $t$ vertices where the total sum of ``missing degree'' is small, then $G$ immerses a $K_t$ on $M$.
\begin{lemma} \label{lemma:average1}
Let $G=(V,E)$ be a graph with $n$ vertices and $M$ be a subset of $V$ with $t$ vertices.
If
\begin{equation}\label{eq:dense1}
\sum_{v\in M} f_G(v)\le \Big(n-t-\max_{v\in M}f_G(v)\Big)t,
\end{equation}
then $G$ contains an immersion of $K_t$.
\end{lemma}
\begin{proof}
Let $\overline{M}=V\backslash M$ and let $b=\max_{v\in M}f_{G}(v)$.
Suppose that there are distinct vertices $v,v'\in M$ and $w\in \overline{M}$ such that $vv'\notin E(G)$ and $vw,wv'\in E(G)$. By splitting off the path $vwv'$, we obtain the edge $vv'$ while $f(v)$ and $f(v')$ are unchanged, and so (\ref{eq:dense1}) still holds for the new graph. Thus by repeatedly finding such triples and splitting off, we obtain new graphs satisfying (\ref{eq:dense1}) while the number of edges strictly decreases after each step.
Therefore the process must halt and return a graph $G_1=(V,E_1)$ satisfying
\begin{equation}\label{en:dense1}
\sum_{v\in M} f_{G_1}(v)\le \big(n-t-b\big)t, \text{ and}
\end{equation}
\begin{enumerate}[label=(\roman*)]
\item \label{en:dense2} there are no $v,v'\in M$ and $w\in \overline{M}$ such that $vv'\notin E_1$ and $vw,wv'\in E_1$.
\end{enumerate}
For the rest of the proof, we write $f$ instead of $f_{G_1}$. Let $r$ be the number of missing edges of $G_1$ with two endpoints in $M$, and $X$ be the set of endpoints of these missing edges. If $r=0$, then $G_1[M]$ is a copy of $K_t$, which proves the lemma. Hence we may suppose that $r\ge 1$.
For every $v\in X$, there is $v'\in M$ such that $vv'\notin E_1$. From \ref{en:dense2} we have
$f(v|\overline{M})+f(v'|\overline{M})\ge |\overline{M}|=n-t;$
otherwise, there exists $w\in \overline{M}$ such that $vw,wv'\in E_1$.
Hence
$$n-t\le f(v|\overline{M})+f(v'|\overline{M})\le f(v|\overline{M})+f(v')\le f(v|\overline{M})+b,$$ and so $f(v|\overline{M})\ge n-t-b$ for every $v\in X$. This gives
\begin{equation}
\sum_{v\in X} f(v)= \sum_{v\in X}f(v|\overline{M})+\sum_{v\in X} f(v|M)\ge (n-t-b)|X| + 2r.\label{equation:XY}
\end{equation}
We will construct a $K_t$ immersion in $G_1$ as follows: for every non-adjacent pair of vertices $v,v'$ in $X$, we will obtain the edge $vv'$ by splitting off path $vwuw'v'$ for some $u\in Y= M \setminus X$ and $w,w' \in \overline{M}$.
As a first step to finding such 4-edge paths, for all $u \in Y$, define $$h(u)=\max\Big(0,\Big\lfloor\frac{n-t-b-f(u)+1}{2}\Big\rfloor\Big).$$
It holds that $2h(u)\ge n-t-b-f(u)$. Hence
$$2\sum_{u\in Y}h(u) \ge (n-t-b)|Y|-\sum_{u\in Y}f(u).$$
Combining with \eqref{equation:XY}, and then with (\ref{en:dense1}) yields
\begin{align*}
2\sum_{u\in Y}h(u) -2r&\ge \Big((n-t-b)|Y|-\sum_{u\in Y}f(u)\Big)+\Big( (n-t-b)|X|-\sum_{v\in X}f(v)\Big)\\
&\ge (n-t-b)(|X|+|Y|)-\sum_{v\in M}f(v)\\
&\ge (n-t-b)t-(n-b-t)t= 0.
\end{align*}
Hence $\sum_{u\in Y}h(u) \ge r$.
Choose arbitrarily two non-adjacent vertices $v,v'$ in $M$ (clearly $v,v'\in X$), and an arbitrary vertex $u\in Y$ such that $h(u)\ge 1$.
Such a vertex $u$ always exists as $\sum_{u\in Y}h(u)\ge r\ge 1$ and $h(u)$ is an integer for every $u$.
By definition of function $h$, we have
$$f(u|\overline{M})\le f(u)\le n-t-b+1-2h(u)\le n-t-b-1.$$
From $f(v)\le b$, we have $$f(u|\overline{M})+f(v|\overline{M})\le (n-t-b-1)+f(v)\le n-t-1<|\overline{M}|,$$
so $u$ and $v$ have a common neighbor $w\in \overline{M}$.
Similarly $u$ and $v'$ have a common neighbor $w'\in \overline{M}$. If $w=w'$ then $vw,wv'\in E_1$, contrary to \ref{en:dense2}.
By splitting off the path $vwuw'v'$, we get the edge $vv'$. In doing so,
we have that $f(v)$ and $f(v')$ remain unchanged while $f(u)$ increases by 2, i.e., $h(u)$ decreases by 1.
Thus $\sum_{u\in Y}h(u)$ decreases by 1.
However, the number of missing edges in $G_1[M]$ also decreases by 1, so we still have that $\sum_{u\in Y}h(u)$ is at least the number of missing edges in $G_1[M]$.
We repeat the process above until we link all pairs of non-adjacent vertices in $M$, and so obtain a complete graph on $M$. Thus $G_1$ contains an immersion of $K_t$, and consequently, $G$ contains $K_t$ as an immersion as well. This proves the lemma.
\end{proof}
As a corollary of Lemma \ref{lemma:average1}, the following lemma provides a more general bound for clique immersion of a graph by its average ``missing degree''.
\begin{lemma}\label{lemma:average2}
Let $G$ be a graph on $n$ vertices, and let $\gamma=\sum_{v\in V(G)}f_G(v)/n$ be the average ``missing degree" of $G$.
If $\gamma \le n/2$, then $G$ contains an immersion of $K_t$ where $t=\min\big(
\lfloor n/2 \rfloor,\lfloor n -2\gamma\rfloor\big)$.
\end{lemma}
\begin{proof}
Let $M$ be a set of $t=\min\big(
\lfloor n/2 \rfloor,\lfloor n -2\gamma\rfloor\big)$ vertices minimizing $\sum_{v\in M}f(v)$. Let $b=\max_{v\in M}f(v)$ and $\overline{M}=V(G)\backslash M$.
If $2b\le n-t$, note that $f(v)\le b$ for every $v\in M$, and so $\sum_{v\in M}f(v)\le bt\le (n-t-b)t$, and we apply Lemma \ref{lemma:average1} to complete the proof.
Otherwise, $2b> n-t$. By the minimality of $f$ on $M$, we have $f(w)\ge b$ for every $w\in \overline{M}$. Hence
\begin{equation}
\sum_{v\in M}f(v)= \sum_{v\in V(G)}f(v)- \sum_{w\in \overline{M}}f(w) \le \gamma n-b(n-t).\label{equation:0}
\end{equation}
We now show that $\gamma n-b(n-t) \le (n-t-b)t$.
Indeed,
\begin{align}
\gamma n-b(n-t) &\le (n-t-b)t\nonumber \\
\Longleftrightarrow 2(\gamma n-bn+bt) &\le 2(n-t-b)t\nonumber \\
\Longleftrightarrow\ \ 2\gamma n-n^2+tn &\le 2b(n-2t)-(n-t)(n-2t) \nonumber\\
\Longleftrightarrow\ \ \ (2\gamma+t-n)n &\le (2b-n+t)(n-2t).
\label{equation:1}
\end{align}
Since $t=\min\big(
\lfloor n/2 \rfloor,\lfloor n -2\gamma\rfloor\big)$, we have $2\gamma \le n-t$ and $2t\le n$. Combining with $2b>n-t$ yields
$$(2\gamma +t-n)n\le 0\le (2b-n+t)(n-2t).$$ Hence (\ref{equation:1}) holds, and so $\gamma n-b(n-t) \le (n-t-b)t$. This, together with equality (\ref{equation:0}), implies that $\sum_{v\in M}f(v)\le (n-t-b)t$, and we apply Lemma \ref{lemma:average1} to complete the proof.
\end{proof}
In the case $n/4\le \gamma\le n/2$, by tightening the analysis, we can slightly improve the bound in Lemma \ref{lemma:average2} to $t=\lfloor n-2\gamma \rfloor+1$, which is sharp even if $\gamma$ is the maximum missing degree (see \cite{FW16}, Lemma 2.1). In the case $\gamma<n/4$, the above technique could yield $t=\max\big(\lfloor n/2\rfloor,\lfloor n-\sqrt{2\gamma n}\rfloor\big)$; however, $t=\lfloor n/2\rfloor$
is enough for our purpose.
\section{Forcing a clique immersion via minimum degree} \label{section:minimum}
In this section, we show how the proof of Theorem \ref{theorem:mindeg11} can be refined to give the proof of Theorem \ref{theorem:mindeg}.
The main idea is as follows. Suppose, to reach a contradiction, that there is a graph with high minimum degree which does not contain a $K_t$-immersion. We choose such a graph $G$ with as few vertices as possible. If $G$ is dense, then we can find a $K_t$ immersion, a contradiction. Otherwise, $G$ is sparse, and so we can suppress a vertex to get a smaller graph, which still has high minimum degree and does not contain a $K_t$-immersion, a contradiction again. The main difficulty is how to suppress a vertex of $G$ so that the new graph is still simple.
We first state several results from \cite{dvo}.
\begin{proposition}[\cite{dvo}, Lemma 6]\label{lemma:completemul}
Every complete multipartite graph of minimum degree at least $t$ contains an immersion of $K_t$.
\end{proposition}
A graph on odd number of vertices is \emph{hypomatchable} if deleting any vertex results in a graph with a perfect matching.
\begin{proposition}[\cite{dvo}, Lemma 8]\label{lemma:edmonds} Fix $t$ and let $H$ be a graph not containing any complete multipartite subgraph with minimum degree at least $t$.
Suppose that the complement graph $\overline{H}$ of $H$ neither has a perfect matching nor is hypomatchable.
Then there exist disjoint subsets $W,L$ of $V(H)$ such that
\begin{itemize}
\item $|W|\le t-1$ and $|L|\ge |V(H)|-2|W|$;
\item $f_H(v)\le |W|$ for every $v\in W$; and
\item $uv\in E(H)$ for every $u\in W$ and $v\in L$.
\end{itemize}
\end{proposition}
Given a multigraph $G$, we say that a vertex $v$ of $G$ can be {well-suppressed} (in $G$) if we can suppress $v$ without creating any new loop or multi-edge in $G$. Precisely, $v$ can be \emph{well-suppressed} if there is a matching of edges of $E_G(v)$ such that
\begin{itemize}
\item for every pair $\{vu_1,vu_2\}$, we have $u_1\ne u_2$ and $u_1u_2\notin E(G)$, and
\item for every two pairs $\{vu_1,vu_2\}$ and $\{vu'_1,vu'_2\}$ we have $\{u_1,u_2\}\ne \{u_1',u_2'\}$.
\end{itemize}
A vertex $v$ can be \emph{nearly well-suppressed} if for all edges $e \in E_G(v)$, the vertex $v$ can be well-suppressed after deleting $e$.
Given a simple graph $G$, it is straightforward that if a vertex $v$ can be well-suppressed (nearly well-suppressed), then the complement graph of the induced subgraph $G[N(v)]$ has a perfect matching (is hypomatchable, respectively). The situation is more complex when $G$ is a multigraph. In the next lemma, we consider the case where some multi-edges are allowed.
\begin{lemma}\label{lem:hypo-inside}
Fix $t\ge 1$ and let $G'$ be a loopless multigraph with vertex set $V\cup \{z\}$ (where $z\notin V$) such that for every $v\in V$, $zv$ is either an edge or a multi-edge with multiplicity $2$.
Let $R$ be the set of vertices incident with $z$ by a multi-edge. If
\begin{itemize}
\item $|V|-2|R|\ge 3t$,
\item $G:=G'[V]$ is simple and does not contain $K_t$ as an immersion, and
\item $z$ cannot be well-suppressed or nearly well-suppressed in $G'$,
\end{itemize}
then there is a set $W\subseteq V$ such that $|W|\le t-1$ and $f_G(v)\le |W|+|R|$ for every $v\in W$.
\end{lemma}
\begin{proof}
We define an auxiliary (simple) graph $H$ as follows.
Beginning with $G$, for every vertex $v\in R$, we add a \emph{clone} vertex $v_c$ to $H$ which has the following neighbors: all the vertices of $R$, all the neighbors of $v$ in $G$, and every other clone vertex $u_c$.
Explicitly, $H$ has vertex set $V \cup \{v_c| v \in R\}$ and edge set
$$E(H)=E(G) \cup \{u_cv| u,v \in R\}\cup \{u_cv_c| u, v \in R\} \cup \{v_cx| v\in R, vx\in E(G)\} .$$
Each vertex in $H$ indeed corresponds to an edge of $E_{G'}(z)$, where each clone vertex $v_c$ represents the additional edge in the multi-edge $zv$.
Let $\overline{H}$ be the complement graph of $H$.
We will show that $\overline{H}$ neither has a perfect matching nor is hypomatchable.
If $\overline{H}$ has a perfect matching, then by the construction of $H$, that perfect matching corresponds to a matching of edges in $E_{G'}(z)$ such that
\begin{itemize}
\item for every pair $\{zu_1,zu_2\}$, we have $u_1\ne u_2$ and $u_1u_2\notin E(G')$, and
\item for every two pairs $\{zu_1,zu_2\}$ and $\{zu'_1,zu'_2\}$ we have $\{u_1,u_2\}\ne \{u_1',u_2'\}$.
\end{itemize}
Thus we can well-suppress $z$ in $G'$, a contradiction to the third assumption of the lemma.
If $\overline{H}$ is hypomatchable, then for every $v\in V(H)$, there is a perfect matching of $V(H)\backslash \{v\}$ in $\overline{H}$. The same argument shows that $z$ can be nearly well-suppressed in $G'$, a contradiction.
We conclude that $\overline{H}$ neither has a perfect matching nor is it hypomatchable.
Observe that removing a vertex of a complete multipartite graph with minimum degree $d$ results in a complete multipartite graph with minimum degree at least $d-1$.
Hence suppose that $H$ contains a complete multipartite subgraph of minimum degree at least $|R|+t$.
By removing all clone vertices of $H$, we obtain $G$, which still contains a complete multipartite subgraph with minimum degree at least $(|R|+t)-|R|=t$. By Proposition \ref{lemma:completemul}, $G$ contains $K_t$ as an immersion, a contradiction. We conclude that $H$ does not contain any complete multipartite subgraph of minimum degree at least $|R|+t$.
Applying Proposition \ref{lemma:edmonds} to $H$, we obtain disjoint subsets $W',L'$ of $V(H)$ such that
\begin{enumerate}[label=(\alph*)]
\item \label{en:3.1} $|W'|\le |R|+t-1$ and $|L'|\ge |V(H)|-2|W'|$;
\item \label{en:3.2} $f_H(v)\le |W'|$ for every $v\in W'$; and
\item \label{en:3.3} $uv\in E(H)$ for every $u\in W'$ and $v\in L'$.
\end{enumerate}
Let $R_c$ be the set of clone vertices of $H$ and $W=W'\backslash R_c$ and $L=L'\backslash R_c$. We will show that $W$ is a desired set.
By \ref{en:3.1} we have
$$|L'|\ge |V(H)|-2|W'|> (|V|+|R|)-2(|R|+t)\ge |V|-|R|-2t.$$
Thus $|L'|-|R|\ge|V|-2|R|-2t$.
Recall from the hypothesis that $|V|-2|R|\ge 3t$, and so $|L|\ge |L'|-|R|\ge t$. Note that by \ref{en:3.3}, $uv\in E(H)$ for every $u\in W$ and $v\in L$, and hence $uv\in E(G)$ for every $u\in W$ and $v\in L$. If $|W|\ge t$, then $G[W\cup L]$ contains a complete bipartite graph with minimum degree at least $t$, and so contains $K_t$ as an immersion by Proposition \ref{lemma:completemul}, a contradiction. Thus it holds that $|W|\le t-1$.
Note that $f_G(v)\le f_H(v)$ since $G$ is an induced subgraph of $H$. It follows from \ref{en:3.2} that $f_G(v)\le f_H(v)\le |W'|\le |W|+|R|$ for every $v\in W$. This completes the proof of the lemma.
\end{proof}
Given an integer $t>1$, we call a graph $t$-deficient if it can be obtained from a graph with minimum degree $t$ by removing a few edges. Precisely, a graph $G$ is \emph{$t$-deficient} if $\sum_{v\in V(G)}\max(0,t-d_{G}(v))<t$.
\begin{proposition}[\cite{dvo}, Lemma 13]\label{lemma:eulerian}
If $G$ is a graph of minimum degree at least $7t+7$ that does not contain an immersion of $K_t$, then $G$ contains an immersion of some $7t$-deficient eulerian graph $G'$.
\end{proposition}
\begin{proposition}[\cite{dvo}, Lemma 15]\label{lemma:eulerian2}
Every $7t$-deficient eulerian graph contains a vertex of degree at least $7t$.
\end{proposition}
The main technical step in the proof of Theorem \ref{theorem:mindeg} is the following lemma. Dvo\v{r}\'ak and Yepremyan proved a similar result for $(11t+7)$-deficient eulerian graphs in \cite{dvo}.
\begin{lemma}\label{lemma:mainmindeg}
Every $7t$-deficient eulerian graph contains an immersion of $K_t$.
\end{lemma}
Theorem \ref{theorem:mindeg} follows easily from Lemma \ref{lemma:mainmindeg}.
Suppose for a contradiction that there exists a graph $G$ of minimum degree at least $7t+7$ that does not have an immersion of $K_t$.
By Proposition \ref{lemma:eulerian}, $G$ contains an immersion of a $7t$-deficient eulerian graph $G'$.
By Lemma \ref{lemma:mainmindeg}, $G'$ contains an immersion of $K_t$, a contradiction.
\begin{proof}[Proof of Lemma \ref{lemma:mainmindeg}]
Suppose that there exists a $7t$-deficient simple eulerian graph which does not contain an immersion of $K_t$. Let $G=(V,E)$ be such a graph with as few vertices as possible. The idea of the proof is as follows. If $G$ has few edges, we show it would be possible to well-suppress some vertex of $G$ to get a smaller counterexample, a contradiction. Hence $G$ has many edges. We are then able to find in $G$ two disjoint sets of vertices $A$ and $B$ of size around $t$ and $6t$, respectively, such that there are very few missing edges between $A$ and $B$. We apply Lemma \ref{lemma:average1} to obtain an immersion of $K_t$ and so reach a contradiction.
Let $z_1$ be a vertex in $G$ with $d(z_1)\ge 7t$, as guaranteed by Proposition \ref{lemma:eulerian2}.
Let $1\le p< t$ be the maximum integer such that there exists an ordered set $A=\{z_1,z_2,...,z_p\}$ satisfying
\begin{equation}
f(z_i|B)\le p+i+r_i,\ \text{for all } i\ge 2, \label{equation:induction}
\end{equation}
where $B=N(z_1)\backslash A$ and $r_i=\big|\{j\le i: z_j\notin N(z_1)\}\big|$ for every $i\ge 2$.
Such number $p$ clearly exists since \eqref{equation:induction} trivially holds for $A=\{z_1\}$.
Since $|N(z_1)\cap A|=p-r_p$, we have
\begin{equation}\label{eq:B}
|B|=|N(z_1)\backslash A|=d(z_1)-|N(z_1)\cap A|\ge 7t-p+r_p.
\end{equation}
Let $\overline{A}=V\backslash A$.
Starting with $G_p = G$, we will attempt to sequentially split off the vertices of $A$ in order $z_p, z_{p-1},\dots, z_1$ to create graphs $G_{p-1},G_{p-2}, \dots, G_0$. At each step, if we could find the complement of a perfect matching in $N_{G_i}(z_i)$, we could split off $z_i$ to obtain $G_{i-1}$ and maintain the property that $G_{i-1}$ is simple. However, the requirement that $N_{G_i}(z_i)$ have the complement of a perfect matching is too strong and so we will have to slightly relax it. In doing so, we will need to introduce parallel edges into the graphs $G_i$, but we will want to do so in a tightly controlled manner. This leads us to the following definition.
Fix $q$, $0 \le q \le p$ and multigraphs $G_i$, $q \le i \le p$ which satisfy the following.
\begin{enumerate}[label=(\roman*)]
\item \label{en:e.1} $G_p =G$ and for all $i$, $q \le i < p$, $G_i$ is obtained from $G_{i+1}$ by suppressing $z_{i+1}$.
\item \label{en:e.2} For all $i$, $q \le i \le p$, $G_i[\overline{A}]$ is simple.
\item \label{en:e.3} For all $i$, every multi-edge of $G_i$ with an endpoint in $\overline{A}$ has multiplicity 2.
\item \label{en:e.5} For all $j,2\le j \le q$, there are at most $r_p - r_q$ multi-edges from $z_j$ to vertices of $\overline{A}$ in $G_q$, and there are at most $p - q$ multi-edges from $z_1$ to vertices of $\overline{A}$ in $G_q$,
\item \label{en:e.4} There are at least $|\overline{A}|-p+q$ vertices in $\overline{A}$ not incident with any multi-edge in any $G_i,q \le i \le p$.
\item \label{en:e.6} Given $v\in \overline{A}$ and $z\in A$, if $vz$ is a multi-edge in some $G_i,q\le i
\le p$, then for every $z'\in A, z'\ne z$ and every $j, q\le j \le p$, $vz'$ is not a multi-edge in $G_j$.
\item \label{en:e.7} Subject to \ref{en:e.1} -- \ref{en:e.6}, we choose $q$ and $G_i$, $q \le i \le p$ to minimize $q$.
\end{enumerate}
Such a number $q$ and multigraphs $G_i$, $q \le i \le p$ trivially exist, given the observation that $q = p$ and $G_p = G$ satisfy \ref{en:e.1} -- \ref{en:e.6} as $G$ is simple.
We begin with the observation that $q>0$. Otherwise, the graph $G_0$ does not contain $K_t$ as an immersion because $G_0$ itself immerses in $G$ by construction. Moreover, $G_0$ is simple by \ref{en:e.2}, and for all $v \in V(G_0)$, $d_{G_0}(v) = d_{G}(v)$. We conclude that $G_0$ is both eulerian and $7t$-deficient, contrary to our choice of $G$ to be a counterexample on a minimum number of vertices.
We now consider the graph $G_q$ and keep in mind that by the minimality of $q$ in \ref{en:e.7}, we cannot suppress $z_q$ to obtain $G_{q-1}$ which satisfies all \ref{en:e.1} -- \ref{en:e.6}.
Let $X=N_{G_{q}}(z_q)\cap \overline{A}$. We will show that $G':=G_q[X\cup\{z_q\}]$ satisfies all hypotheses of Lemma \ref{lem:hypo-inside}.
From \ref{en:e.2} and \ref{en:e.3}, we have $G'$ is a loopless multigraph with vertex set $X\cup \{z_q\}$ such that for every $v\in X$, $z_qv$ is either an edge or a multi-edge with multiplicity 2. Let $R$ be the set of vertices in $X$ incident with $z_q$ by a multi-edge. Then by \ref{en:e.5} we have
\begin{equation} \label{eq:R}
\left\{ \begin{array}{ll}
|R|\le p-1 \ \ \ \ \ \ \ \ \ \text{ if } q=1,\\
|R|\le r_p-r_q \ \ \ \ \ \ \ \text{ if } q>1. \end{array} \right.
\end{equation}
\begin{claim}
$G'$ satisfies all hypotheses of Lemma \ref{lem:hypo-inside}.
\end{claim}
\begin{cproof}
We verify the hypotheses one by one.
\begin{itemize}
\item $G'[X]=G_q[X]$ is simple and does not contain $K_t$ as an immersion.
\end{itemize}
$G_q[X]$ is simple by \ref{en:e.2}, and does not contain $K_t$ as an immersion by \ref{en:e.1} and the assumption that $G$ does not contain $K_t$ as an immersion.
\begin{itemize}
\item $|X|-2|R|\ge 3t$.
\end{itemize}
To prove $|X|-2|R|\ge 3t$, note that $|B\backslash X|$ is the number of vertices in $B$ not adjacent to $z_q$ in $G_q$, which is at most the number of vertices in $B$ not adjacent to $z_q$ in $G$ since no edge between $z_q$ and $B$ has been removed in suppressing $z_p, \dots, z_{q+1}$. Thus $|B\backslash X|\le f_G(z_q|B)$. Combining with \eqref{equation:induction} we have
\begin{equation} \label{eq:BX}
\left\{ \begin{array}{ll}
|B\backslash X|=0 \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ if } q=1,\\
|B\backslash X|\le p+q+r_q \ \ \ \text{ if } q>1. \end{array} \right.
\end{equation}
In the case $q>1$, by \eqref{eq:B},
$$|X|\ge |B|-|B\backslash X|\ge (7t-p+r_p)-(p+q+r_q).$$
From the fact that $t\ge \max(p,q,r_p)$ and \eqref{eq:R}, we have
$$|X|-2|R|\ge 7t-2p-q-r_p+r_q\ge 3t.$$
In the case $q=1$, by \eqref{eq:B},
$$|X|\ge |B|-|B\backslash X|\ge |B|\ge 7t-p+r_p.$$ Hence from \eqref{eq:R} we have $|X|-2|R|\ge 7t-3p+r_p\ge 3t$.
\begin{itemize}
\item $z_q$ cannot be well-suppressed or nearly well-suppressed in $G'$.
\end{itemize}
Suppose that $z_q$ can be well-suppressed in $G'$. We first split off all edges from $z_q$ to $X$ by that matching. Then there are even number of edges incident with $z_q$ remaining in $G_q$, all from $z_q$ to $A$ since $X=N_{G_{q}}(z_q)\cap \overline{A}$. We now suppress $z_q$ in $G_q$ arbitrarily to obtain $G_{q-1}$. Since we do not create any new edge between $A$ and $\overline{A}$, \ref{en:e.1} -- \ref{en:e.6} hold trivially for $G_{q-1}$, which contradicts \ref{en:e.7}.
As the second case, suppose that $z_q$ can be nearly well-suppressed in $G'$. Pick a vertex $v\in X$ which is not incident with any multi-edge in $G_i$, for all $q \le i \le p$.
Such a vertex $v$ exists since by \ref{en:e.4}, there are at most $p-q$ distinct vertices of $\overline{A}$ incident with some multi-edge over all $G_i$, $q \le i \le p$, while $|X|\ge 3t>p-q$ (as we showed above that $|X|-2|R|\ge 3t$).
Since $z_q$ can be nearly well-suppressed in $G'$, if we remove the edge $z_qv$ in $G'$, we can well-suppress $z_q$ (in $G'$), and we do so.
Since $d_{G_q}(z_q)$ is even, $z_q$ must be adjacent to some vertex $z_s$ with $s<q$. We choose such $s$ as small as possible and split off $z_sz_qv$. We now suppress $z_q$ in $G_q$ arbitrarily to obtain $G_{q-1}$ and will show that $G_{q-1}$ satisfies \ref{en:e.1} -- \ref{en:e.6} and hence violates \ref{en:e.7}.
Properties \ref{en:e.1} and \ref{en:e.2} hold trivially. The only possible new multi-edge that we have created is $z_sv$. Since $v$ is not incident with any multi-edge in $G_i$ for all $q \le i \le p$, \ref{en:e.3}, \ref{en:e.4} and \ref{en:e.6} hold for $G_{q-1}$.
To prove \ref{en:e.5}, first observe that \ref{en:e.5} clearly holds if $z_s=z_1$. If $z_s\ne z_1$, then $z_1$ is not incident with $z_q$ by the choice of $s$, and so $r_{q-1}= r_{q}-1$ by the definition of function $r$.
Thus $r_p - r_{q-1}= r_p - r_q +1$ and therefore \ref{en:e.5} holds.
\end{cproof}
Hence $G'$ satisfies the hypotheses of Lemma \ref{lem:hypo-inside}, and so there is a set $W\subseteq X$ such that $|W|\le t-1$ and $f_{G_q[X]}(v)\le |W|+|R|$ for every $v\in W$.
We next show that $|W|\ge t-p$. To do so, we need the following claim.
\begin{claim}\label{cl:W-main}
$f_G(v|B)\le |W|+2p+r_p$ for every $v\in W$.
\end{claim}
\begin{cproof}
We first show that $f_{G_q}(v|B)\le|W|+p+q+r_p.$
Note that $f_{G_q}(v|X)= f_{G_q[X]}(v)$ for every $v\in X$, and so
$$f_{G_q}(v|B) \le f_{G_q}(v|X)+f_{G_q}(v|B\backslash X)\le (|W|+|R|)+|B\backslash X|.$$
If $q>1$, recall that $|B\backslash X|\le p+q+r_q$ from \eqref{eq:BX} and $|R|\le r_p-r_q$ from \eqref{eq:R}. Hence we have
$$f_{G_q}(v|B)\le (|W|+r_p-r_q)+(p+q+r_q)\le |W|+p+q+r_p.$$
If $q=1$, recall that $|B\backslash X|=0$ from \eqref{eq:BX} and $|R|\le p-1$ from \eqref{eq:R}. Thus we have
$$f_{G_q}(v|B)\le |W|+p<|W|+p+q+r_p.$$
We conclude that $f_{G_q}(v|B)\le|W|+p+q+r_p$ in all cases. To complete the claim, it suffices to
show that $$f_G(v|B)\le f_{G_q}(v|B)+(p-q)$$ for every $v\in X$. Fix $v\in X$. By property \ref{en:e.6}, there exists a value $s$ such that $z_{i}v$ is not a multi-edge in $G_i$ for every $i\ne s,q\le i\le p$.
Thus for every $i\ne s, q< i\le p$, there is at most one edge $z_iv$ in $G_i$, and so when we suppress $z_i$ in $G_i$ to obtain $G_{i-1}$, we add at most one edge between $v$ and $B$ into $G_{i-1}$.
If $s>q$, note that there are at most two edges $z_sv$ in $G_s$ by property \ref{en:e.3}. Hence when we suppress $z_s$ in $G_s$ to obtain $G_{s-1}$, we add at most two edges between $v$ and $B$ into $G_{s-1}$. Thus from $G=G_p$, when we suppress $z_p,...,z_{q+1}$ to get $G_q$, we add in total at most $p-q-1+1=p-q$ edges from $v$ to $B$, and so $$f_G(v|B)=f_{G_p}(v|B)\le f_{G_q}(v|B)+(p-q).$$
This proves the claim.
\end{cproof}
\begin{claim}
$|W|\ge t-p$.
\end{claim}
\begin{cproof}
Suppose for a contradiction that $|W|+p=p^*<t$. Let $A^*=A\cup W$ where elements in $W$ are enumerated $z_{p+1},...,z_{p^*}$, and let $B^*=N(z_1)\backslash A^*=B\backslash W.$
Then
\begin{itemize}
\item $f_{G}(z_i|B^*) \le f_{G}(z_i|B) \le p+i+r_i\le p^*+i+r_i$ for every $i,2\le i\le p$.
\item $f_{G}(z_i|B^*) \le f_{G}(z_i|B) \le |W|+2p+r_p\le p^*+i+r_i$ for every $i> p$ (note that $r_i\ge r_p$ since by definition $r$ is a non-decreasing function).
\end{itemize}
Hence \eqref{equation:induction} holds for $p^*$ and $A^*$, contrary to the maximality of $p$. Thus $|W|\ge t-p$.
\end{cproof}
Let $\hat{A}$ be an arbitrary set of $t-p$ vertices in $W$ and enumerate them $z_{p+1},...,z_t$.
Let $M=A\cup \hat{A}$ and $\overline{M}=B\backslash \hat{A}$. Let $U=M\cup \overline{M}$ and $H=G[U]$. We will apply Lemma \ref{lemma:average1} to $H$ and deduce that $H$ must contain an immersion of $K_t$, which contradicts the assumption that $G$ does not contain an immersion of $K_t$, and so completes the proof of Lemma \ref{lemma:mainmindeg}. We first give some bounds for function $f$ in $H$. Observe that $f_H(z_i|\overline{M}) = f_{G}(z_i|\overline{M}) \le f_{G}(z_i|B)$ for every $i,1\le i\le p$. Note also that $f_{G}(z_1|B) =0$, and $f_{G}(z_i|B) \le 2p+i$ for every $i,1<i\le p$, and by Claim \ref{cl:W-main},
$$ f_{G}(z_i|B)\le |W|+2p+r_p\le t+2p+r_p$$ for every $i,p<i\le t$ (recall that $|W|\le t$). Thus
\begin{equation} \label{eq:HH}
\left\{ \begin{array}{ll}
f_H(z_i|\overline{M})\le 2p+i \ \ \ \ \ \ \ \ \ \text{ if } i\le p,\\
f_H(z_i|\overline{M})\le t+2p+r_p \ \ \ \text{ if } i>p. \end{array} \right.
\end{equation}
Also note that $|M|=t$, and from \eqref{eq:B}, $$|\overline{M}|\ge |B|-|\hat{A}|\ge 7t-p+r_p-(t-p)=6t+r_p.$$
\begin{claim}\label{cl:HH}
$H$ contains an immersion of $K_t$.
\end{claim}
\begin{cproof}We consider two cases.
\textbf{Case 1:} $p\le t/2$.
We have $f_H(z_i|M)\le |M|\le t$ for every $z_i$, and so
\begin{align*}
\sum_{z_i\in M}f_H(z_i)&\le \sum_{z_i\in M}f_H(z_i|M)+\sum_{z_i\in M}f_H(z_i|\overline{M})\\
&\le t^2+\sum_{1\le i\le p}f_H(z_i|\overline{M})+\sum_{p< i\le t}f_H(z_i|\overline{M})\\
&\le t^2+\sum_{i\le p}(2p+i)+\sum_{p< i\le t}(t+2p+r_p)\\
&\le t^2+3p^2+(t-p)(t+3p)\\
&\le 2t^2+2tp \le 3t^2.
\end{align*}
Since $2p\le t$, we have
$$\max_{z_i\in M}f_H(z_i)\le t+\max_{z_i\in M}f_H(z_i|\overline{M})\le t+(t+2p+r_p)\le 3t+r_p.$$
Note that $|U|=|M|+|\overline{M}|=7t+r_p$. Hence
$$\sum_{z_i\in M}f_H(z_i)\le 3t^2\le \Big(|U|-t-\max_{z_i\in M}f_H(z_i)\Big)t.$$
Apply Lemma \ref{lemma:average1} to obtain an immersion of $K_t$ on ${H}$.
\textbf{Case 2:} $p>t/2$. Set $q=|\hat{A}|=t-p$, and so $p>q$. The analysis of this case is more involved. Even though $\sum_{z_i\in M}f_H(z_i)$ is small, $\max_{z_i\in M}f_H(z_i)$ could be very large, and so we cannot apply Lemma \ref{lemma:average1} directly.
However, we can still use a similar argument to that in the proof of Lemma \ref{lemma:average1}. We present the argument as an algorithm to explicitly find a series of splitting off of edges to yield a $K_t$ immersion by finding edge disjoint paths of length two or four linking the desired pairs of vertices.
Consider an arbitrary loopless multigraph $H'$ with vertex set $U$ and distinct vertices $z_i,z_j\in M$.
We first define a subroutine called \textsc{Link$(H',z_i,z_j)$}: the algorithm finds $w\in \overline{M}$ such that $z_iw,wz_j\in E(H')$ and then split off the path $z_iwz_j$ to obtain an edge $z_iz_j$. The algorithm then returns $H'$ after splitting off the path. Such a $w$ can be found by checking all possible choices for $w$. In the case that multiple choices exist for $w$, the algorithm arbitrarily chooses one.
In order to successfully run, the algorithm \textsc{Link$(H',z_i,z_j)$} assumes that the input satisfies:
\begin{equation}
f_{H'}(z_i|\overline{M})+f_{H'}(z_j|\overline{M})< 6t+r_p\le |\overline{M}|.
\label{equation:link}
\end{equation}
Under assumption (\ref{equation:link}), such a $w \in \overline{M}$ must exist and therefore, the algorithm correctly terminates. Note also that $z_i,z_j$ are adjacent after performing \textsc{Link$(H',z_i,z_j)$}, and that the input $H'$ contains the output graph as an immersion.
We now present the main algorithm to split off edges of $H$ to obtain a complete graph on $M=A\cup \hat{A}$. Set $H':=H$. The algorithm proceeds in stages. In stage 1, we link all vertices between $\{z_{q+1},...,z_p\}$ and $\hat{A}$. In stage 2, we link each pair of vertices between $\{z_{1},...,z_q\}$ and $\hat{A}$ with multi-edges of order two. Thus after stages 1 and 2, we obtain two edge-disjoint complete bipartite subgraphs, one between $A$ and $\hat{A}$ and another between $\{z_{1},...,z_q\}$ and $\hat{A}$ (the latter will be used later to obtain a complete graph on $\hat{A}$). In stage 3, we link all vertices inside $A$, and then obtain a complete graph on $M$.
\begin{mdframed}
{\sc Main}($H'$)
\begin{enumerate}
\item {Start with $s:=p$ and repeat the following whenever $s>q$.
\begin{enumerate}
\item[] Start with $i:=p+1$ and repeat the following whenever $i\le t$.
\textbf{\ \ \ \ \ } \textsc{Link$(H',z_s,z_i)$}, $i:=i+1$.
\item[] $s:=s-1$.
\end{enumerate}
}
\item {Start with $s:=q$ and repeat the following whenever $s\ge 1$.
\begin{enumerate}
\item[] Start with $i:=p+1$ and repeat the following whenever $i\le t$.
\textbf{\ \ \ \ \ } \textsc{Link$(H',z_s,z_i)$}, \textsc{Link$(H',z_s,z_i)$}, $i:=i+1$.
\item[] $s:=s-1$.
\end{enumerate}
}
\item {Start with $s:=p$ and repeat the following whenever $s\ge 1$.
\begin{enumerate}
\item[] Start with $i:=s-1$ and repeat the following whenever $i\ge 1$.
\textbf{\ \ \ \ \ } \textsc{Link$(H',z_s,z_i)$}, $i:=i-1$.
\item[] $s:=s-1$.
\end{enumerate}
}
\item Return $H'$.
\end{enumerate}
\end{mdframed}
Suppose that we have performed \textsc{Main($H'$)} successfully. The output $H'$ contains two edge-disjoint complete bipartite subgraphs, $H_1$ from $A$ to $\hat{A}$, and $H_2$ from $\{z_{1},...,z_q\}$ to $\hat{A}$, and a complete graph $H_3$ on $A$.
We now show how to obtain from $H_2$ a complete graph $H_4$ on $\hat{A}$.
Since $|\hat{A}|=q$, by Vizing's theorem, we can color the edges of an imagined complete graph on $\hat{A}$ by $q$ colors $\{1,2,...,q\}$ so that any two incident edges have different colors.
Now for every $z_i,z_j\in \hat{A}$, if the edge $z_iz_j$ in that imagined graph has color $s$, then we split off the path $z_iz_sz_j$ in the complete bipartite graph $H_2$ to get an edge $z_iz_j$, and so obtain a complete graph $H_4$ on $\hat{A}$. Hence $H_1\cup H_3\cup H_4$ is a complete graph on $M$. Thus the output $H'$ contains $K_t$ as an immersion, which implies that $H$ contains $K_t$ as an immersion.
It only remains to show that we can perform \textsc{Main($H'$)} successfully, which is equivalent to verifying that for each call to the subroutine \textsc{Link($H',z_i,z_j$)} we have that \eqref{equation:link} is satisfied.
We omit the subscript $H'$ of $f$ in the rest of this proof.
Observe that after performing \textsc{Link($H',z_i,z_j$)}, $f(z_i|\overline{M})$ and $f(z_j|\overline{M})$ each increases by at most 1.
Consider step $(s,i)$ of stage 1. The vertex $z_s$ has been linked $i-p-1$ times and so from \eqref{eq:HH} we have $f(z_s|\overline{M})< p+s+i$, and $z_i$ has been linked $p-s-1$ times and so $f(z_i|\overline{M})< t+3p+r_p-s$. Then
$$f(z_s|\overline{M})+f(z_i|\overline{M})< t+4p+i+r_p\le 6t+r_p,$$
and so \eqref{equation:link} holds for every step $(s,i)$ of stage 1.
From \eqref{eq:HH} and the definition of the algorithm, we have that at the end of stage 1:
\begin{equation}\notag
\begin{array}{ll}
f(z_s|\overline{M})\le 2p+s\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ if } s\le q,\\
f(z_s|\overline{M})\le (2p+s) + q\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ if } q<s\le p,\\
f(z_i|\overline{M})\le (t+2p+r_p)+(p-q) \ \ \ \text{ if } i>p. \end{array}
\end{equation}
Consider step $(s, i)$ of stage 2.
The vertex $z_s$ has been linked $2(i-p-1)$ times during stage 2 and so
$f(z_s|\overline{M})\le 2p+s+2q-2=2t+s-2$ (since $t=p+q$), and $z_i$ has been linked $2(q-s-1)$ times. Thus
$f(z_i|\overline{M})\le 2t+2p+r_p-2s-2$. It follows that
$$f(z_s|\overline{M})+f(z_i|\overline{M})\le 4t+2p+r_p-s-4\le 6t+r_p-4.$$
We can perform \textsc{Link$(H',z_s,z_i)$} twice. At the end of stage 2, we have
\begin{equation}\notag
\begin{array}{ll}
f(z_s|\overline{M})\le (2p+s)+2q\le 2t+s\ \ \ \ \text{ if } s\le q,\\
f(z_s|\overline{M})\le (2p+s)+ q\le 2t+s\ \ \ \ \ \ \text{ if } q<s\le p. \end{array}
\end{equation}
Consider step $(s,i)$ of stage 3.
The vertex $z_s$ has been linked $(p-s)+(s-i-1)$ times during stage 3 (in which $p-s$ times with $z_{r},s<r\le p$ and $s-i-1$ times with $z_{j},i<j<s$) and so
$f(z_s|\overline{M})< (2t+s)+p-i$, and $z_i$ has been linked $p-s-1$ times and so
$f(z_i|\overline{M})<(2t+i)+p-s$. Then
$$f(z_s|\overline{M})+f(z_i|\overline{M})< 4t+2p\le 6t+r_p,$$
and so \eqref{equation:link} holds for every step $(s,i)$ of stage 3. Claim \ref{cl:HH} now follows.
\end{cproof}
This proves Lemma \ref{lemma:mainmindeg}, and so proves Theorem \ref{theorem:mindeg}.
\end{proof}
\section{Forcing a clique immersion via the chromatic number} \label{section:chromatic}
In this section we shall prove Theorem \ref{theorem:chromatic}. Recall that given $\ell \ge 1$, a graph $G$ is \emph{$\ell$-critical} if the chromatic number of $G$ is $\ell$, and deleting any vertex of $G$ results in a subgraph with chromatic number $\ell-1$. A well-known property of critical graphs is that if $G$ is a graph
with chromatic number $\ell$, then $G$ contains an $\ell$-critical subgraph. Let us restate Theorem \ref{theorem:chromatic}.
\begin{theorem}\label{theorem:chromatic2}
Every graph with chromatic number at least $3.54t+4$ contains an immersion of $K_t$.
\end{theorem}
\begin{proof}
Assume the theorem is false, and suppose that there exists a graph of chromatic number $\ell \ge 3.54t+4$ but which does not immerse $K_t$.
Let $G^*$ be an $\ell$-critical subgraph of that graph. Let $v_0$ be a vertex of $G^*$ with minimum degree. By Theorem \ref{theorem:mindeg}, $d_{G^*}(v_0)\le 7t+6$.
Let $N=N_{G^*}(v_0)$, and let $G$ be the graph obtained from $G^*$ by deleting $v_0$. It follows that $G$ does not immerse $K_t$.
The graph $G^*$ is $\ell$-critical, so $G$ has chromatic number $\ell-1$. Furthermore, for any coloring of $G$, $N$ always has at least one vertex of each of the $\ell-1$ colors, otherwise we could color $G^*$ with $\ell-1$ colors. The proof uses Kempe-chains, introduced by Alfred Kempe in an 1879 attempt to prove that planar graphs are 4-colorable, to build a clique immersion with branch vertices in $N$.
Given a coloring of $G$, we call a vertex $v\in N$ a \textit{singleton} if $v$ is the unique vertex in $N$ with its color. Two vertices $v,v'\in N$ of the same color form a \textit{doubleton} if they are the only two vertices with a given color in $N$.
Let $\mathcal{C}$ be an $\ell - 1$ coloring of $G$ which maximizes the number of singletons.
Let $X=\{x_1,...,x_\alpha\}$ and $Y=\{y_1,y_1',...,y_\beta,y'_\beta\}$ be the sets of singletons and doubletons, respectively, where $x_i$ has color $a_i$ and $y_i,y'_i$ share color $b_i$.
All other colors appear at least 3 times in $N$. Thus, $\ell - 1$, the number of colors in $N$, is at most $$\alpha +\beta+ \frac{|N|-\alpha-2\beta}{3}=\frac{|N|+2\alpha+\beta}{3}.$$
Since $|N|= d_{G^*}(v_0)\le 7t+6$, we have
$$3.54t+3\le \ell-1 \le \frac{7t+6+2\alpha+\beta}{3}$$
\begin{equation}
\Longrightarrow\ 2\alpha+\beta\ge 2.62t+3 \label{equation:2alpha}.
\end{equation}
Given colors $a,b$, an \textit{$(a,b)$-chain} is a path with vertices colored alternately by colors $a$ and $b$. Clearly, if $\{a,b\}\ne \{a',b'\}$, then any $(a,b)$-chain and $(a',b')$-chain are edge-disjoint.
The idea is as follows. We first show that there are many chains with endpoints in $X\cup Y$. Since these chains are edge-disjoint, we can split them off to get a dense graph on $X\cup Y$, then apply Lemma \ref{lemma:average2} to obtain a $K_t$ immersion, which leads to the contradiction.
\begin{claim}\label{claim:chains}
The following hold.
\begin{enumerate}[label=(\alph*)]
\item For all pairs of distinct colors $a_i, a_j$, there is an $(a_i,a_j)$-chain from $x_i$ to $x_j$. \label{enumerate:sing}
\item For any colors $a_i,b_j$, there is an $(a_i,b_j)$-chain from $x_i$ to $y_j$, or from $x_i$ to $y'_j$. \label{enumerate:sing-doub}
\item \label{enumerate:doub} For all pairs of distinct colors $b_i,b_j$, one of the following holds:
\begin{enumerate}[label=(\roman*)]
\setcounter{enumi}{2}
\item \label{enumerate:alph1} there exist two edge-disjoint $(b_i,b_j)$-chains linking $y_i$ to $y_j$ and $y'_i$ to $y'_j$;
\item \label{enumerate:alph2} there exist two edge-disjoint $(b_i,b_j)$-chains linking $y_i$ to $y'_j$ and $y'_i$ to $y_j$;
\item \label{enumerate:alph3} there exist $(b_i,b_j)$-chains from any of $y_i,y_i'$ to any of $y_j,y_j'$ but they cannot be chosen edge-disjoint.
\end{enumerate}
\end{enumerate}
\end{claim}
\begin{cproof}
For every color $a$, let $V_{a}\subseteq V(G)$ be the set of all vertices of color $a$ in $\mathcal{C}$.
To prove \ref{enumerate:sing}, suppose that there exist two distinct colors $a_i,a_j$ such that there is no $(a_i,a_j)$-chain from $x_i$ to $x_j$. Then $x_i,x_j$ are disconnected in $G[V_{a_i}\cup V_{a_j}]$.
Let $U$ be the connected component containing $x_i$ in $G[V_{a_i}\cup V_{a_j}]$. We exchange the color of all vertices in $U$ from color $a_i$ to $a_j$ and vice versa and obtain a new coloring $\mathcal{C}'$ in $G$. Clearly $\mathcal{C}'$ is a proper coloring in $G[V_{a_i}\cup V_{a_j}]$, and so is a proper coloring in $G$.
Now both $x_i$ and $x_j$ have color $a_j$, so $\mathcal{C}'$ has no vertex of color $a_i$, contrary to the fact that $N$ has all colors for every $(\ell-1)$-coloring of $G$.
To prove \ref{enumerate:sing-doub}, the same argument works. Suppose that there exist two distinct colors $a_i,b_j$ such that there is no $(a_i,b_j)$-chain from $x_i$ to $\{y_j,y_j'\}$. Then $x_i$ is disconnected from $\{y_j,y_j'\}$ in $G[V_{a_i}\cup V_{b_j}]$.
Let $U$ be the connected component containing $x_i$ in $G[V_{a_i}\cup V_{b_j}]$. We exchange the color of all vertices in $U$ from color $a_i$ to $b_j$ and vice versa and obtain a new coloring $\mathcal{C}'$ in $G$. Then $\mathcal{C}'$ is a proper coloring in $G$ and has a smaller number of colors on $N$ than $\mathcal{C}$, contrary to the fact that $N$ has all colors for every $(\ell-1)$-coloring of $G$.
To prove \ref{enumerate:doub}, we first prove that
\begin{enumerate}[label=(\alph*)]\setcounter{enumi}{3}
\item \label{en:d} for every pair of distinct colors $b_i,b_j$, there is a $(b_i,b_j)$-chain from $y_i$ to $y_j$, or from $y_i$ to $y'_j$.
\end{enumerate}
Suppose that there exist two distinct colors $b_i,b_j$ such that there is no $(b_i,b_j)$-chain from $y_i$ to $\{y_j,y_j'\}$. Then $y_i$ is disconnected from $\{y_j,y_j'\}$ in $G[V_{b_i}\cup V_{b_j}]$.
Let $U$ be the connected component containing $y_i$ in $G[V_{b_i}\cup V_{b_j}]$. We exchange the color of all vertices in $U$ from color $b_i$ to $b_j$ and vice versa and obtain a new coloring $\mathcal{C}'$ in $G$. Then $\mathcal{C}'$ is a proper coloring in $G$.
If $y_i'\in U$, then $\mathcal{C}'$ has no vertex of color $b_i$ in $N$, contrary to the fact that $N$ has all colors for every $(\ell-1)$-coloring of $G$.
If $y_i'\notin U$, then $\mathcal{C}'$ has exactly one vertex of color $b_i$ in $N$, and so has more singletons than $\mathcal{C}$, which contradicts our choice of $\mathcal{C}$ to maximize the number of singletons.
We now show how \ref{en:d} implies \ref{enumerate:doub}. From \ref{en:d}, for every pair of distinct colors $b_i,b_j$, there is a $(b_i,b_j)$-chain from $y_i$ to $\{y_j,y_j'\}$ and another $(b_i,b_j)$-chain from $y_i'$ to $\{y_j,y_j'\}$.
If one chain goes to $y_j$ and another goes to $y_j'$, then there are three possibilities.
First, these chains are edge-disjoint and between $y_i,y_j$ and $y_i',y_j'$, then \ref{enumerate:alph1} holds. Second, these chains are edge-disjoint and between $y_i,y_j'$ and $y_i',y_j$, then \ref{enumerate:alph2} holds. Third, they are not edge-disjoint, then all $\{y_i,y_i',y_j,y_j'\}$ are connected by these two chains, and \ref{enumerate:alph3} holds.
Otherwise, say these chains both go from $y_i,y_i'$ to $y_j$. Then by \ref{en:d}, there is a $(b_i,b_j)$-chain from $y_j'$ to either $y_i$ or $y_i'$. Hence all $\{y_i,y_i',y_j,y_j'\}$ are connected by some $(b_i,b_j)$-chains, and \ref{enumerate:alph3} holds.
\end{cproof}
For every pair of colors, we fix a subgraph based on the appropriate outcome of Claim \ref{claim:chains}. For every $i, j$, $1 \le i < j \le \alpha$, fix $C_a(i,j)$ to be an $(a_i, a_j)$-chain from $x_i$ to $x_j$. For all $i, j$, $1 \le i \le \alpha$, $1 \le j \le \beta$, fix $C_b(i,j)$ to be an $(a_i, b_j)$-chain from $x_i$ to either $y_j$ or $y_j'$. Let $i, j$ be such that $1 \le i < j \le \beta$; one of \ref{enumerate:alph1} - \ref{enumerate:alph3} holds for the colors $b_i$ and $b_j$. If either \ref{enumerate:alph1} or \ref{enumerate:alph2} holds, fix $C_c(i, j)$ to be the subgraph consisting of two edge disjoint $(b_i, b_j)$-chains linking $\{y_i, y_i'\}$ and $\{y_j, y_j'\}$. If \ref{enumerate:alph3} holds, fix $C_c(i,j)$ to be an edge minimal subgraph containing $(b_i, b_j)$-chains linking each of $y_i, y_i'$ to each of $y_j, y_j'$.
For $i, j$, $1 \le i < j \le \beta$, we say that $C_c(i,j)$ has one of 3 \emph{types}, namely \ref{enumerate:alph1}, \ref{enumerate:alph2}, or \ref{enumerate:alph3}, depending on which outcome of \ref{enumerate:doub} holds. Note that $C_a(i,j)$, $C_b(i,j)$, and $C_c(i,j)$ are all pairwise edge disjoint.
If we split off all the possible edge disjoint paths contained in subgraphs from the previous paragraph, it will not necessarily be the case that we will have sufficient edges on $X\cup Y$ to apply Lemma \ref{lemma:average2}. To get around this problem, we focus instead on the vertex set $X \cup \{y_1, \dots, y_\beta\}$. The subgraphs $C_c(i,j)$ of type \ref{enumerate:alph1} or type \ref{enumerate:alph3} contain a path which can be split off to yield the edge $y_iy_j$. Moreover, if we flip the labels $y_i$ and $y_i'$, every $C_c(i,j)$ subgraph of type \ref{enumerate:alph2} becomes a $C_c(i,j)$ subgraph of type \ref{enumerate:alph1} (and vice versa). Thus, we can increase the density of the resulting graph on $X \cup \{y_1, \dots, y_\beta\}$ by flipping the appropriate pairs of labels $y_i$, $y_i'$.
Unfortunately, this greedy approach will still not yield enough edges on $X \cup \{y_1, \dots, y_\beta\}$ to apply Lemma \ref{lemma:average2}. To further increase the final edge density, we will group together multiple $C_c(i,j)$ subgraphs of type \ref{enumerate:alph2} to split off paths and add further edges to the set $\{y_1, \dots, y_\beta\}$. The remainder of the argument carefully orders how the subgraphs are grouped together so that when we split them off we get as dense a subgraph as possible on the vertex set $X \cup \{y_1, \dots, y_\beta\}$.
We begin by defining the subgraphs $G_1$, $G_2$ and the auxiliary graph $H$ as follows. Split off all paths of the form $C_a(i, j)$, $C_b(i, j)$, and the two edge disjoint $\{y_i,y_i'\}$--$\{y_j,y_j'\}$ paths contained in the subgraphs $C_c(i,j)$ of type \ref{enumerate:alph1} and \ref{enumerate:alph2}. Let $G_1$ be the graph with vertex set $V(G)$ and edge set the set of all new edges arising from splitting off these paths. Let $G_2$ be the subgraph of $G$ with vertex set $V(G)$ and edge set the union of $E(C_c(i,j))$ for all subgraphs $C_c(i,j)$ of type \ref{enumerate:alph3}. Observe that $G_1\cup G_2$ is an immersion of $G$ and therefore does not immerse $K_t$.
Clearly, $G_1[X]$ is a complete graph obtained from splitting off all the subgraphs $C_a(i,j)$, and so
\begin{equation}\label{claim:alpha}
\alpha=|X|\le t-1.
\end{equation}
We define an auxiliary graph $H$ by replacing each pair of vertices $y_i,y_i'$ with a single vertex $z_i$, and we color the edges incident with $z_i$ to describe the behavior of $y_i,y_i'$. Precisely, let $H$ be a graph
with vertex set $X \cup Z$ where $Z=\{z_1,...,z_\beta\}$ and edge set
\begin{align*}
E(H) = {} & \{x_iz_j: 1\le i \le \alpha,\ 1 \le j \le \beta\} \\
{} \cup {} & \{z_iz_j: 1 \le i < j \le \beta \text{ and $C_c(i,j)$ is not of type \ref{enumerate:alph3}}\}.
\end{align*}
The edges of $H$ are improperly colored by two colors \textit{odd, even} as follows:
\begin{itemize}
\item $x_iz_j$ is even if $x_iy_j\in E(G_1)$, and is odd if $x_iy'_j\in E(G_1)$.
\item $z_iz_j$ is even if $y_iy_j,y_i'y_j'\in E(G_1)$, and is odd if $y_iy_j',y_i'y_j\in E(G_1)$.
\end{itemize}
To perform a \emph{swap} at a vertex $z_i$, we exchange the colors of all edges incident with $z_i$ in $H$; a swap is equivalent to switching the labels of $y_i$ and $y_i'$ in $G_1\cup G_2$.
To \emph{swap} a set $S\subseteq Z$, we swap vertices in $S$ sequentially in an arbitrarily chosen order. One can easily show that to swap a set $S$ is equivalent to switching the color of every edge between $S$ and $V(H)\backslash S$.
A triangle in $H$ is \textit{odd} if it has an odd number of odd-edges. A key property of odd-triangles is that an odd-triangle is still odd after any swap.
Each odd-triangle either has 3 vertices in $Z$ or exactly two vertices in $Z$ -- call them type 1 and type 2 odd-triangles, respectively. Given a type 1 odd-triangle $z_iz_jz_k$, the set of edges in $G_1$ with endpoints in $\{y_i,y_i',y_j,y'_j,y_k,y'_k\}$ are called the \emph{corresponding edges} of $z_iz_jz_k$. Similarly, given a type 2 odd-triangle $x_iz_jz_k$, the set of edges in $G_1$ with endpoints in $\{x_i,y_j,y_j',y_k,y'_k\}$ are called the \emph{corresponding edges} of $x_iz_jz_k$. Clearly, the set of corresponding edges of two edge-disjoint odd-triangles are disjoint. In Figure \ref{fig}, we describe all possibilities (up to permutation of indices) of the set of corresponding edges of a type 1 odd-triangle (upper figures) and of a type 2 odd-triangle (lower figures).
\begin{figure}
\caption{Possibilities of corresponding edges of odd-triangles.}
\label{fig}
\end{figure}
Looking at Figure \ref{fig}, we can easily verify the following.
\begin{enumerate}[label=(\Alph*)]
\item \label{enumerate:Alph1} If $z_iz_jz_k$ is an odd-triangle of type 1, we can split off its corresponding edges to obtain edges $y_iy_j,y_jy_k,y_ky_i$.
\item \label{enumerate:Alph3} If $x_iz_jz_k$ is an odd-triangle of type 2, we can split off its corresponding edges to obtain the edge $y_jy_k$.
\item \label{enumerate:Alph2}
If $x_iz_jz_k$ is an odd-triangle of type 2, we can alternatively split off its corresponding edges to obtain two edges from the set $\{x_iy_j,y_jy_k,y_kx_i\}$ (exactly which two edges depends on which case from Figure \ref{fig} we find ourselves in).
\end{enumerate}
Let $H_1$ be a graph obtained from $H$ by removing an (inclusion-wise) maximal set $\mathcal{T}_1$ of pairwise edge-disjoint odd-triangles of type 1, and
let $H_2$ be a graph obtained from $H_1$ by removing an (inclusion-wise) maximal set $\mathcal{T}_2$ of pairwise edge-disjoint odd-triangles of type 2. In the following claims, we employ the assumption that $G$ does not contain a $K_t$-immersion to bound the degree of vertices in $H_1[Z]$ and $H_2$.
\begin{claim}\label{claim:odd1}
$d_{H_1[Z]}(z)< t$ for every $z\in Z$.
\end{claim}
\begin{cproof}
Suppose for a contradiction that there exists $z\in Z$ such that $d_{H_1[Z]}(z)\ge t$.
Let $M_o$ ($M_e$) be the set of vertices adjacent to $z$ in $H_1[Z]$ by an odd-edge (by an even-edge, respectively). Then $|M_o|+|M_e|=d_{H_1[Z]}(z)\ge t$.
Every edge $uv$ in $H_1[Z]$ with $u,v\in M_o$ ($u,v\in M_e$, respectively) must be even; otherwise, $uvz$ is an odd-triangle of type 1, contradicting the maximality assumption on ${\cal T} _1$.
Similarly, every edge $uv$ in $H_1[Z]$ with $u\in M_o$ and $v\in M_e$ must be odd.
We now swap $M_o$, and then the new graph $H_1[M_o\cup M_e]$ contains only even-edges.
Let $M=\{y_i:z_i\in M_o\cup M_e\}$. Then $|M|=|M_o|+|M_e|\ge t$.
For every odd-triangle in $\mathcal{T}_1$, we split off corresponding edges in $G_1$ by method \ref{enumerate:Alph1} to get $y_iy_j,y_jy_k,y_ky_i$. Then for any distinct vertices $y_i,y_j\in M$, we have
\begin{itemize}
\item if $z_iz_j\in H_1$, then $z_iz_j$ is even, and hence $y_iy_j\in G_1$.
\item if $z_iz_j\in H\backslash H_1$, then $z_iz_j$ belongs to some odd-triangle in $\mathcal{T}_1$, and we showed above that we can obtain $y_iy_j$ by splitting off edges of $G_1$ by method \ref{enumerate:Alph1}.
\item if $z_iz_j\notin H$, then $C_c(i,j)$ is of type \ref{enumerate:alph3} and so there exists a $y_i - y_j$ path in $C_c(i,j)$ which can be split off to yield the edge $y_iy_j$.
\end{itemize}
We end up with a complete graph on $M$, and so conclude that $G_1\cup G_2$ contains $K_t$ as an immersion (since $|M|\ge t$), which is a contradiction.
\end{cproof}
\begin{claim}\label{claim:odd2}
$d_{H_2}(x)<t$ for every $x\in X$.
\end{claim}
\begin{cproof}
The proof is quite similar to the proof of Claim \ref{claim:odd1}. We suppose that there exists $x\in X$ such that $d_{H_2}(x)\ge t$. Note that $X$ is a stable set in $H$, and so all neighbors of $x$ in $H_2$ are in $Z$.
Let $M_o$ ($M_e$) be the set of vertices adjacent to $x$ in $H_2$ by an odd-edge (by an even-edge, respectively). Then $M_o\cup M_e\subseteq Z$ and $|M_o|+|M_e|\ge t$.
Every edge $uv$ in $H_2$ with $u,v\in M_o$ ($u,v\in M_e$, respectively) must be even; otherwise, $uvx$ is an odd-triangle of type 2, contradicting the maximality assumption of ${\cal T} _2$.
Similarly, every edge $uv$ in $H_2$ with $u\in M_o$ and $v\in M_e$ must be odd.
We now swap $M_o$. The new graph $H_2[M_o\cup M_e]$ contains only even-edges.
Let $M=\{y_i:z_i\in M_o\cup M_e\}$. Then $|M|=|M_o|+|M_e|\ge t$.
For every odd-triangle in $\mathcal{T}_1$, we split off corresponding edges in $G_1$ by method \ref{enumerate:Alph1} to get $y_iy_j,y_jy_k,y_ky_i$.
For every odd-triangle in $\mathcal{T}_2$, we split off corresponding edges in $G_1$ by method \ref{enumerate:Alph3} to get $y_iy_j$.
Then for any distinct vertices $y_i,y_j\in M$, we have
\begin{itemize}
\item if $z_iz_j\in H_2$, then $z_iz_j$ is even, and hence $y_iy_j\in G_1$.
\item if $z_iz_j\in H_1 - E(H_2)$, then $z_iz_j$ belongs to some odd-triangle in $\mathcal{T}_2$, and we showed above that we can obtain $y_iy_j$ by splitting off edges of $G_1$ by method \ref{enumerate:Alph3}.
\item if $z_iz_j\in H - E(H_1)$, then $z_iz_j$ belongs to some odd-triangle in $\mathcal{T}_1$, and we showed above that we can obtain $y_iy_j$ by splitting off edges of $G_1$ by method \ref{enumerate:Alph1}.
\item if $z_iz_j\notin H$, we split off a $y_i - y_j$ path in $C_c(i,j)$ in $G_2$ to obtain $y_iy_j$.
\end{itemize}
We end up with a complete graph on $M$, and so $G_1\cup G_2$ contains $K_t$ as an immersion (since $|M|\ge t$), which is a contradiction.
\end{cproof}
The next claim guarantees that at least half of edges in $H_2$ are even.
\begin{claim}\label{claim:bigswap}
There exists a subset $S$ of vertices such that after swapping $S$ in $H_2$, the number of even-edges in $H_2$ is at least the number of odd-edges.
\end{claim}
\begin{cproof}
We first show that there exists a sequence of swaps resulting in the number of even-edges in $H_2[Z]$ being at least the number of odd-edges in $H_2[Z]$.
If there is $z\in Z$ such that $z$ is incident with more odd-edges than even-edges in $H_2[Z]$, we swap $z$, then repeat.
The process will halt since the number of even-edges in $H_2[Z]$ strictly increases after each swap.
When the process halts, every $z\in Z$ is incident with at least as many even-edges as with odd-edges in $H_2[Z]$, and so in total, the number of even-edges in $H_2[Z]$ is at least the number of odd-edges in $H_2[Z]$.
If the number of even-edges from $Z$ to $X$ in $H_2$ is less than the number of odd-edges from $Z$ to $X$ in $H_2$, we swap the set $Z$. After the switch, the number of even-edges from $Z$ to $X$ in $H_2$ is at least the number of odd-edges from $Z$ to $X$ in $H_2$. Moreover, edges in $H_2[Z]$ are not affected by swapping $Z$. Finally, note that there is no edge in $H_2[X]$.
Thus at the end of this series of swaps, the number of even-edges in $H_2$ is at least the number of odd-edges in $H_2$, proving the claim.
\end{cproof}
By Claim \ref{claim:bigswap}, we may assume that at least half of edges in $H_2$ are even.
We now split off edges in $G_1\cup G_2$ to obtain a dense graph on $X\cup \{y_1,...,y_\beta\}$ as follows.
For every odd-triangle in $\mathcal{T}_1$, we split off its corresponding edges in $G_1$ by method \ref{enumerate:Alph1}. For every odd-triangle in $\mathcal{T}_2$, we split off its corresponding edges in $G_1$ by method \ref{enumerate:Alph2}, which implies that we obtain two of three edges in the set $\{x_iy_j, y_jy_k, y_kx_i\}$.
For every pair $b_i,b_j$ in case \ref{enumerate:alph3}, we also split off a path in $C_c(i,j)$ to get the edge $y_iy_j$ as guaranteed by \ref{enumerate:alph3}.
We denote by $\hat G$ the induced subgraph of the new graph on $X\cup \{y_1,...,y_\beta\}$. Note that $\hat G$ is an immersion of $G_1\cup G_2$, and so does not contain an immersion of $K_t$.
We will show that $\hat G$ is dense, specifically by counting the number of non-edges in $\hat G$. We first observe that by construction, $\hat G[X]$ is complete.
Thus, all non-edges in $\hat G$ arise from odd-edges of $H$ for which the corresponding edge of $\hat G$ cannot be reconstructed through odd-triangles.
Observe that $H=(H - E(H_1)) \cup (H_1 - E(H_2)) \cup H_2$. We consider each of the subgraphs $H - E(H_1)$, $H_1 - E(H_2)$, and $H_2$ and how they can contribute non-edges to $\hat G$ separately.
\begin{itemize}
\item Each odd-triangle $z_iz_jz_k\in \mathcal{T}_1$ contributes no missing edges to $\hat G$ since we obtain $y_iy_j,y_jy_k,y_ky_i$ by method \ref{enumerate:Alph1}. Hence $H - E(H_1)$ (the union of odd-triangles in $\mathcal{T}_1$) contributes no missing edges to $\hat G$.
\item Each odd-triangle $x_iz_jz_k\in \mathcal{T}_2$ contributes exactly one missing edge to $\hat G$ since we obtain two edges among $x_iy_j,y_jy_k,y_kx_i$ by method \ref{enumerate:Alph2}. Hence $H_1 - E( H_2)$ (the union of odd-triangles in $\mathcal{T}_2$) contributes $|\mathcal{T}_2|$ missing edges to $\hat G$.
\item Each odd-edge (even-edge) in $H_2$ contributes exactly one (zero, respectively) missing edge to $\hat G$. Hence $H_2$ contributes at most $|E(H_2)|/2$ missing edges to $\hat G$ by Claim \ref{claim:bigswap}.
\end{itemize}
We conclude that the number of missing edges in $\hat G$ is at most $|E(H_2)|/2+ |\mathcal{T}_2|$. We next give an explicit bound for the number of missing edges in $\hat G$.
\begin{claim}\label{cl:df}
The number of missing edges in $\hat G$ is at most $(\alpha\beta+\alpha t+\beta t)/4$.
\end{claim}
\begin{cproof}
Let $p= |E(H_2)|/2$ and $q=|\mathcal{T}_2|$. Then the number of missing edges in $\hat G$ is at most $p+q$.
By Claim \ref{claim:odd1}, $H_1[Z]$ has $\beta$ vertices and maximum degree less than $t$, and so $|E(H_1[Z])|< \beta t/2$. Hence
\begin{align*}
2p+3q &\le |E(H_2)|+|E(H_1 - E(H_2))|\\
&= |E(H_1)|\\
&= |X||Z|+\big|E(H_1[Z])\big|\\
&\le \alpha\beta+\beta t/2.
\end{align*}
Claim \ref{claim:odd2} states that every $x\in X$ is adjacent to fewer than $t$ vertices of $Z$ in $H_2$, and so is adjacent to at least $|Z|-t$ vertices of $Z$ in $H_1 - E(H_2)$.
This implies that for every $x\in X$, there are at least $(|Z|-t)/2=(\beta-t)/2$ odd-triangles in $\mathcal{T}_2$ containing $x$ (since $H_1 - E(H_2)$ is the union of odd-triangles in $\mathcal{T}_2$). This means that $$q=|\mathcal{T}_2|\ge |X|(\beta-t)/2= \alpha(\beta-t)/2.$$
Hence
$$p+q = \frac{(2p+3q)-q}{2}\le \frac{(\alpha\beta+\beta t/2)-\alpha(\beta-t)/2}{2}=\frac{\alpha\beta+\alpha t+\beta t}{4}.$$
Hence the number of missing edges in $\hat G$ is at most $(\alpha\beta+\alpha t+\beta t)/4$.
\end{cproof}
We next show that if $|V(\hat G)|=\alpha+\beta$ is large, then we can apply Lemma \ref{lemma:average2} to yield a contradiction that $\hat G$ contains an immersion of $K_t$. Hence $\alpha+\beta$ is small, which contradicts \eqref{equation:2alpha}, and the proof of Theorem \ref{theorem:chromatic} is complete.
\begin{claim}\label{claim:alphabeta}
$\alpha+\beta<2.62(t+1)$.
\end{claim}
\begin{cproof}
Let $n=|V(\hat G)|=\alpha+\beta$ and suppose for a contradiction that $n\ge2.62(t+1)$. Let $\gamma=\frac{1}{n}\sum_{v\in \hat G}f_{\hat G}(v)$. Then $n\gamma/2$ is the number of missing edges in $\hat G$, and so by Claim \ref{cl:df} we have
\begin{equation}
2\gamma\le\frac{\alpha\beta+\alpha t+\beta t}{n}=\frac{\alpha\beta}{n} +t
\label{equation:ff}.
\end{equation}
If $\gamma<n/4$, then by Lemma \ref{lemma:average2}, $\hat G$ contains an immersion of $K_{t'}$, where $t'=\lfloor n/2\rfloor\ge t$.
Hence $\hat G$ contains an immersion of $K_t$, a contradiction.
Otherwise, since $\alpha\beta \le (\alpha+\beta)^2/4=n^2/4$, we have $2\gamma<n/4 +t<n$. Thus by applying Lemma \ref{lemma:average2}, $\hat G$ contains an immersion of $K_{t'}$, where $t'=\lfloor n-2\gamma \rfloor> n-2\gamma-1$.
We conclude that $n-2\gamma-1<t$ since $\hat G$ does not contain $K_t$ as an immersion.
Recall that by \eqref{claim:alpha}, we have that $\alpha<n/2$. For every $x$ such that $\alpha<x<n/2$, we have
$$\alpha\beta =\alpha(n-\alpha) <x(n-x).$$
Since $\alpha<t+1<n/2$, we can choose $x:=t+1$, and so
\begin{align*}
(n-2\gamma-1)-t &\ge n-\bigg(\frac{\alpha\beta}{n}+t\bigg)-t-1\\
&\ge n-\frac{\alpha\beta}{n}-2x\\
&> \frac{n^2-3nx+x^2}{n}.
\end{align*}
The assumption of the claim is that $n\ge 2.62x$, and hence $n^2-3nx+x^2\ge 0$ (since $2.62\ge(3+\sqrt 5)/2$, the larger root of the quadratic in $n/x$). This gives $n-2\gamma-1\ge t$, which contradicts what we obtained above that $n-2\gamma-1<t$. This proves the claim.
\end{cproof}
Combining Claim \ref{claim:alphabeta} with \eqref{claim:alpha}, we obtain $2\alpha+\beta< 3.62t+3$, which contradicts \eqref{equation:2alpha}. This completes the proof of Theorem \ref{theorem:chromatic}.
\end{proof}
\section{Immersion in graphs with no stable set of size 3} \label{section:2.5}
We begin by reformulating Theorem \ref{theorem:2.5}.
\begin{theorem}\label{theorem:2.5.2}
For all $t \ge 1$, every graph $G$ with at least $5t$ vertices and no stable set of size three has a strong immersion of $K_{2t}$.
\end{theorem}
\begin{proof}
Assume that the theorem is false, and pick a counterexample $G$ which minimizes $|V(G)| + |E(G)|$; say $G$ is a counterexample for parameter $t+1$, that is, $G$ has at least $5t+5$ vertices and no strong immersion of $K_{2t+2}$. Since every graph on at least 5 vertices with no stable set of size three contains an edge, we may assume that $t \ge 1$. By minimality, we may assume that $n = |V(G)| = 5t+5$. Furthermore, as $G-e$ does not contain a strong immersion of $K_{2t+2}$ for any edge $e$, by minimality it follows that deleting any edge results in a stable set of size three. All index arithmetic in the following proof is done mod 5.
\begin{claim}
$G$ contains an induced cycle of length $5$.
\end{claim}
\begin{cproof}
If $G$ were the disjoint union of cliques, since it contains no stable set of size three then it must be a disjoint union of at most two cliques. One of the two cliques has at least $\lceil n/2\rceil \ge 2t+2$ vertices, and so $G$ contains a strong $K_{2t+2}$-immersion, a contradiction.
Thus $G$ is not a disjoint union of cliques, and there exist two adjacent vertices $a_1,a_2$ such that $N(a_1)\ne N(a_2)$. Without loss of generality, we may suppose that $N(a_2)\backslash N(a_1)\ne \emptyset$ and let $a_3\in N_G(a_2)\backslash N_G(a_1)$.
This gives $a_1a_3\notin E$ and $a_2a_3\in E$.
Observe that there is $a_4$ with $a_1a_4,a_2a_4\notin E$; otherwise, we can remove the edge $a_1a_2$ without creating any stable set of size three, which contradicts the minimality of $G$.
By the same argument, there is $a_5$ with $a_2a_5,a_3a_5\notin E$. Note that $G$ does not contain any stable set of size three and $a_1a_4,a_1a_3\notin E$, and so $a_3a_4\in E$. Similarly, $a_1a_5,a_4a_5\in E$. Thus $a_1a_2a_3a_4a_5$ forms an induced cycle of length 5 in $G$.
\end{cproof}
Let $C=\{a_i:1\le i\le 5\}$ induce a cycle of length five, and let $U=V\backslash C$. Then $G[U]$ has $n-5 = 5t$ vertices and $G[U]$ contains no stable set of size three. By minimality of $G$, $G[U]$ contains a strong immersion of $K_{2t}$ with some set of branch vertices $M$.
Let $Q=U\backslash M$, and for every $i, 1\le i\le 5$, let $M_i$ be the set of vertices in $M$ not adjacent to $a_i$.
In the following claim, we show that if there are two large disjoint sets $X_1,X_3$ in $Q$ with some desired property, then for every $v\in M$, we can split off paths $a_1xv$ or $a_1xa_iv$ (of length 2 or 3) with $x\in X_1,i\in\{2,4,5\}$ to get the edge $a_1v$, and similarly to get the edge $a_3v$, and so get a strong clique immersion of size $2t+2$ on $M\cup \{a_1,a_3\}$, which is a contradiction.
\begin{claim}\label{claim:2.5X}
Suppose that there are disjoint sets $X_1,X_3\subseteq Q$ satisfying
\begin{enumerate}[label=(\roman*)]
\item \label{enum:2.5.2} $|X_1|\ge |M_1|$, and $|X_3|\ge |M_3|$;
\item \label{enum:2.5.1}
for every $x\in X_1$, we have $xa_1,xa_5\in E$ and either $xa_2\in E$ or $xa_4\in E$; and
\item \label{enum:2.5.3} for every $x\in X_3$, we have $xa_3,xa_4\in E$ and either $xa_2\in E$ or $xa_5\in E$.
\end{enumerate}
Then $G$ has a strong immersion of $K_{2t+2}$, where the set of branch vertices is $M\cup \{a_1,a_3\}$.
\end{claim}
\begin{cproof}
Let $E_1$ be the set of edges in $G$ from $C$ to $M_1\cup X_1$.
We wish to split off paths in $E_1$ to obtain edges from $a_1$ to every vertex in $M_1$. The process of splitting off is as follows.
\begin{itemize}
\item Arbitrarily pair each vertex $v\in M_1$ with a vertex $x_v\in X_1$ such that $x_v\ne x_{v'}$ for every $v\ne v'$ (such a choice of $x_v$ exists by \ref{enum:2.5.2}).
\item For every $v\in M_1$, note that $va_4\in E$ (otherwise, $\{a_1,v,a_4\}$ is a stable set of size three) and either $va_2\in E$ or $va_5\in E$ (otherwise, $\{a_2,v,a_5\}$ is a stable set of size three).
If $va_5\in E$, we split off the path $va_5x_va_1$ to get an edge $va_1$.
\item Otherwise, $va_4\in E$ and $va_2\in E$. By \ref{enum:2.5.1}, either $x_va_2\in E$ or $x_va_4\in E$.
If $x_va_2\in E$, we split off the path $va_2x_va_1$ to get the edge $va_1$. Otherwise, we split off the path $va_4x_va_1$ to get the edge $va_1$.
\end{itemize}
Note that in this process we only use edges of $E_1$ and at the end we obtain all edges from $a_1$ to $M_1$, and so obtain all edges from $a_1$ to $M$.
Let $E_3$ be the set of edges in $G$ from $C$ to $M_3\cup X_3$. Note that $E_1\cap E_3=\emptyset$, and hence we can split off paths in $E_3$ in the same manner to obtain all edges from $a_3$ to $M_3$, and so obtain all edges from $a_3$ to $M$.
By minimality, we can split off edges of $G[U]$ to obtain a $K_{2t}$ on $M$. Note that $E_1$, $E_3$ and $E(G[U])$ are pairwise disjoint, so we never split off an edge twice. By splitting off $a_1a_2,a_2a_3$, we obtain $a_1a_3$, and hence obtain a complete graph on $M\cup\{a_1,a_3\}$.
Clearly, all split off paths are internally edge-disjoint from $M\cup\{a_1,a_3\}$.
Hence $G$ contains a strong immersion of $K_{2t+2}$, where the set of branch vertices is $M\cup\{a_1,a_3\}$, a contradiction.
\end{cproof}
To reach the contradiction, it only remains to show that such sets $X_1,X_3$ exist up to shifting indices.
For every $i,1\le i\le 5$, let $A_i$ be the set of non-neighbors of $a_i$ in $G[U]$. Note that $A_i\cup\{a_{i-2},a_{i+2}\}$ is a clique, and so $|A_i|\le 2t$ since $G$ does not contain any $K_{2t+2}$-immersion.
Also note that since $G$ contains no stable set of size three, we have $A_i\cap A_{i+2}=\emptyset$ for every $i$.
As discussed above, we wish to find sets $X_1,X_3$ satisfying Claim \ref{claim:2.5X}. One might hope to choose $X_1:=A_3\cap Q$ and $X_3:=A_1\cap Q$; these sets indeed satisfy \ref{enum:2.5.1} and \ref{enum:2.5.3} but may fail to meet \ref{enum:2.5.2} in the case either $|A_1|$ or $|A_3|$ is small. This problem can be avoided by enlarging $A_1$ and $A_3$. This leads to the following definition of $A_1',\ldots,A_5'$.
Let $A_1',...,A_5'$ be subsets of $U$ such that $\sum_{i=1}^{5}|A'_i|$ is as large as possible, and
\begin{equation}
\label{eq:2.5}
\left\{\begin{array}{l}
A_i\subseteq A_i',\\
|A_i'|\le 2t,\\
A_i'\cap A'_{i+2}=\emptyset,
\end{array}\right.
\ \ \ \forall 1\le i\le 5.
\end{equation}
\begin{claim}\label{claim:sum2t}
There exists $i$ such that $|A_i'|=|A_{i+2}'|=2t$.
\end{claim}
\begin{cproof}
Assume the claim is false.
Then there exists $j$ such that $|A'_{j}|,|A'_{j+1}|,|A'_{j+2}|<2t$.
Without loss of generality, assume $|A_1'|,|A'_2|,|A'_3|<2t$.
For every $i$, let $B_{i,i+1}=A'_i\cap A'_{i+1}$, and $D_i=A'_i\backslash (A'_{i-1}\cup A'_{i+1})$. Then all 10 sets $D_i, B_{i,i+1}$ are pairwise disjoint, and $A'_i=B_{i-1,i}\cup D_i \cup B_{i,i+1}$.
Note also that $D_i\cap A'_{i+1}=\emptyset$ and $D_i\cap A'_{i-1}=\emptyset$ for every $i$.
Suppose that there exists $v\in U$ such that $v\notin \bigcup_{i=1}^{5}A'_i$. Then $A_1'\cup\{v\},A_2',...,A_5'$ satisfy (\ref{eq:2.5}), while the sum of their cardinalities is larger, a contradiction.
This gives $\bigcup_{i=1}^{5}A'_i=U$. In other words,
$$\Big(\bigcup_{i=1}^{5}D_i\Big)\cup \Big(\bigcup_{i=1}^{5}B_{i,i+1}\Big)=U.$$ Since all these sets are pairwise disjoint, we have
\begin{equation}\label{eq:2.5.2}
\sum_{i=1}^{5}|D_i|+\sum_{i=1}^{5}|B_{i,i+1}|=|U|\ge 5t.
\end{equation}
Observe that if $|A'_i|<2t$ and there exists $v\in D_{i-1}\cup D_{i+1}$, then $(A'_i\cup\{v\})\cap A'_{i+2}=\emptyset$ and $(A'_i\cup\{v\})\cap A'_{i-2}=\emptyset$. Hence
$A'_i\cup\{v\},A'_{i+1},...,A'_{i+4}$ satisfy (\ref{eq:2.5}), violating our choice to maximize the sum of their cardinalities. Hence if $|A_i'|<2t$, then $D_{i-1}=\emptyset$ and $D_{i+1}=\emptyset$.
Recall the assumption that $|A'_1|,|A'_2|,|A'_3|<2t$.
By the observation in the previous paragraph, we have $D_j=\emptyset$ for every $j$. Hence from (\ref{eq:2.5.2}) we have $\sum_{i=1}^{5}|B_{i,i+1}|\ge 5t$. Also note that $|A_i'|=|B_{i,i-1}|+|D_i|+|B_{i,i+1}|=|B_{i,i-1}|+|B_{i,i+1}|$ for every $i$. This gives
$$10t\le2\sum_{i=1}^{5}|B_{i,i+1}|=\sum_{i=1}^{5}|A'_i|<10t,$$
a contradiction. This proves the claim.
\end{cproof}
Without loss of generality, we may suppose that $|A'_1|=|A'_3|=2t$.
\begin{claim}\label{cl:2.5s}
Let $X_1=A_3'\cap Q$ and $X_3=A_1'\cap Q$. Then $X_1,X_3$ satisfy conditions in Claim \ref{claim:2.5X}.
\end{claim}
\begin{cproof}
We first show that \ref{enum:2.5.3} holds for $X_3$.
Recall that $A_3\subseteq A_3'$, $A_4\subseteq A_4'$ and
$A_1'\cap (A_3'\cup A_4')=\emptyset$. Then $A_1'\cap (A_3\cup A_4)=\emptyset$, and so $X_3\cap (A_3\cup A_4)=\emptyset$ since $X_3\subseteq A_1'$.
Hence for every $v\in X_3$, we have $va_3\in E$ and $va_4\in E$ (otherwise, $G$ contains a stable set of size three).
Note that $B_{5,1}\cap B_{1,2}=\emptyset$. Hence for every $v\in X_3$, either $v\notin B_{5,1}$ or $v\notin B_{1,2}$.
If $v\notin B_{5,1}$ then $v\notin A_5'$ (since $v\in A'_1$), and so $v \notin A_5$. This means that $v$ is adjacent to $a_5$. Otherwise, $v\notin B_{1,2}$, and by the same argument, $v$ is adjacent to $a_2$.
Hence, \ref{enum:2.5.3} holds for $X_3$.
We now show that \ref{enum:2.5.2} holds for $X_3$. Let $M_1'=A_1'\cap M$ and $M_3'=A_3'\cap M$. Then by (\ref{eq:2.5}), we have $M_1\subseteq M_1'$, $M_3\subseteq M_3'$, and $M_1'\cap M_3'=\emptyset$. Besides, $X_3\cap M_1'\subseteq Q\cap M=\emptyset$ and $$X_3\cup M_1'=(A_1'\cap Q)\cup(A_1'\cap M)=A_1'\cap U=A_1'.$$
This gives $|X_3|+|M_1'|=|A_1'|=2t$, and so
$$|X_3|=2t-|M_1'|= |M|-|M_1'|=|M\backslash M_1'|\ge |M_3'|\ge |M_3|.$$
Hence \ref{enum:2.5.2} holds for $X_3$.
By the same arguments, \ref{enum:2.5.2} and \ref{enum:2.5.1} hold for $X_1$. This proves the claim.
\end{cproof}
Claims \ref{claim:2.5X} and \ref{cl:2.5s} complete the proof of Theorem \ref{theorem:2.5}.
\end{proof}
\end{document} |
\begin{document}
\title{The Complexity of Max-Min $k$-Partitioning}
\author{Anisse Ismaili\vspace*{5mm}}
\begin{abstract}
In this paper we study a max-min $k$-partition problem on a weighted graph,
that could model a robust $k$-coalition formation.
We settle the computational complexity of this problem as complete for class $\Sigma_2^P$. This hardness holds even for $k=2$ and arbitrary weights, or $k=3$ and non-negative weights, which matches what was known on \textsc{MaxCut} and \textsc{Min-3-Cut} one level higher in the polynomial hierarchy.
\end{abstract}
\keywords{$k$-Partition; Robustness; Complexity}
\maketitle
\vspace*{5mm}
\section{Preliminaries}
A max-min $k$-partition instance is defined by $\langle N, L, w, k, m, \theta \rangle$.
\begin{itemize}
\item $(N, L, w)$ is a weighted undirected graph.
$N=[n]$, where $n \in \mathbb{N}$
is a set of nodes.\footnote{Given $n \in \mathbb{N}$, $[n]$ is shorthand of $\{1, \ldots, n\}$.}
The set of links $L \subseteq {N \choose 2}$ consists of unordered node pairs.
Link $\ell=\{i,j\}$ maps to weight $w_{ij} \in \mathbb{Z}$.
{Equivalently, $w:N^2\rightarrow\mathbb{Z}$ satisfies for any $(i,j)\in N^2$ that $w(i,i)=0$, $w(i,j)=w(j,i)$ and $w(i,j)\neq 0\Rightarrow\{i,j\}\in L$.}
\item $k$ is the size of a partition, $2 \le k < n$.
\item $m \in \mathbb{N}$ is the number of nodes that could be removed.
\item $\theta \in \mathbb{Z}$ is a threshold value.
\end{itemize}
Let $\pi$ denote a $k$-partition of $N$, which is a collection of node-subsets
$\{S_1, \ldots, S_k\}$,
such that for each $i \in [k]$, $S_i \subseteq N$, and
$\forall S_i, S_j\in\pi$, where $i\neq j$, $S_i\cap S_j=\emptyset$ holds.
We say that a $k$-partition $\pi$ is complete when
$\bigcup_{i \in [k]} S_i = N$ holds (otherwise, it is incomplete).
For a complete partition $\pi$ and an incomplete partition $\pi'$,
we say that $\pi$ subsumes $\pi'$ when
$S_i \supseteq S'_i$ holds for all $i \in [k]$.
For node $i\in N$, $\pi(i)$ is the node-subset to which it belongs.
For any $S \subseteq N$,
define
$$W(S)=\sum\nolimits_{\{i,j\}\subseteq S}w(i,j).$$
Then, let $W(\pi)$ denote $\sum_{S \in \pi} W(S)$. We require that no
node-subset be empty; hence, if some node-subset is empty, we set $W(\pi)=-\infty$.
Given a $k$-partition $\pi=\{S_1, \ldots, S_k\}$ and a set $M \subseteq N$,
the remaining incomplete partition $\pi_{-M}$ after removing $M$ is
defined as $\{S'_1, \ldots, S'_k\}$, where $S'_i = S_i \setminus M$.
Let $W_{-m}(\pi)$ denote the minimum value after removing at most $m$ nodes,
i.e., it is defined as:
$$W_{-m}(\pi)=\min\limits_{M\subseteq N,|M|\leq m}\{W(\pi_{-M})\}.$$
To obtain $W_{-m}(\pi)\neq-\infty$,
every $S\in\pi$ needs to contain at least $m+1$ nodes,
so that no node-subset of $\pi_{-M}$ is emptied.
For partition $\pi=\{S_1, \ldots, S_k\}$, we define its deficit count
$\text{df}(\pi)$ as $\sum_{i\in[k]} \max(0, m+1 - |S_i|)$.
Thus, $\text{df}(\pi)=0$ must hold in order to obtain
$W_{-m}(\pi)\neq-\infty$.
\begin{definition}
The decision version (1) of our main problem is defined below. It may also be referred to as the defender's problem.
\begin{enumerate}
\item \textsc{Max-Min-$k$-Partition}:
Given a max-min $k$-partition instance,
is there any $k$-partition $\pi$
satisfying $W_{-m}(\pi)\geq \theta$?
\item \textsc{Max-Min-$k$-Partition/Verif}:
Given an instance of a max-min $k$-partition and
a partition $\pi$,
does $W_{-m}(\pi)\geq \theta$ hold?
\end{enumerate}
A key step is to study the natural verification problem (2), whose complement we refer to as the attacker's problem. (Does an attack $M\subseteq N,|M|\leq m$ on $\pi$ exist such that $W(\pi_{-M})\leq\theta-1$?)
\end{definition}
\vspace*{5mm}
\section{Complexity of \textsc{Max-Min-$k$-Partition}}
\label{sec:complexity}
\vspace*{2mm}
In this section, we address the computational complexity of the defender's problem.
The verification (resp. attacker's) problem itself turns out to be coNP-complete (resp. NP-complete),
which places the main problem one more level up in the polynomial hierarchy (PH).
We show that \textsc{Max-Min-$k$-Partition} is complete for class $\Sigma_2^P$, even in two cases:
\begin{enumerate}
\item[(a)] when $k=2$ for arbitrary link weights $w\lessgtr 0$, or
\item[(b)] when $k=3$ for non-negative link weights $w\geq 0$.
\end{enumerate}
These results seem to match what was known on
\textsc{MaxCut} \cite{karp1972reducibility} (contained in \textsc{Min-2-Cut} when $w\lessgtr 0$ and NP-complete) and
\textsc{Min-3-Cut} \cite{Dahlhaus:1992:CMC:129712.129736} (NP-complete for $w\geq 0$ when one node is fixed in each node-subset),
but one level higher in PH.
\begin{observation}\label{rk:1}
\textsc{Max-Min-$k$-Partition/Verif} is coNP-complete.
It holds even for $k=1$, weights $w$ in $\{0,1\}$ and threshold $\theta=1$.
\end{observation}
\begin{proof}
Decision problem \textsc{Max-Min-$k$-Partition/Verif} is in class coNP, since for any no-instance, a failing set $M$ such that $W(\pi_{-M})\leq\theta-1$ is a no-certificate verifiable in polynomial-time.
We show coNP-hardness by reduction from \textsc{MinVertexCover} to the (complement) attacker's problem.
Let graph $G=(V,E)$ and vertex number $m\in\mathbb{N}$ be any instance of \textsc{MinVertexCover}.
\textsc{MinVertexCover} asks whether there exists a vertex-subset $U\subseteq V,|U|\leq m$ such that $\forall \{i,j\}\in E, i\in U\mbox{ or } j\in U$, i.e. every edge is covered by a vertex in $U$. We reduce it to an attacker's instance with nodes $N\equiv V$, weights $w(i,j)\in\{0,1\}$ equal to one if and only if $\{i,j\}\in E$ and threshold $\theta=1$. The verified partition is simply $\pi=\{N\}$. The idea is that constraint $W(\pi_{-M})\leq 0$ is equivalent to damaging every link, hence to finding a vertex-cover $U\equiv M$ with $|M|\leq m$.
\end{proof}
We now proceed with the computational complexity of the main defender's problem under $w\lessgtr 0$ and $w\geq 0$.
We show $\Pi_2^P$-hardness of the $\forall\exists$ complement by reduction from \textsc{MaxMinVertexCover} or \textsc{$\forall\exists$3SAT}.
The idea is to (1) enforce that only some \emph{proper} partitions are meaningful. One possible proper partition corresponds to one choice on $\forall$ in the original problem. Then, (2) within one particular node-subset of a proper partition, we represent the subproblem (e.g. \textsc{VertexCover} or $\textsc{3-SAT}\leq \textsc{IndependentSet}=\textsc{VertexCover}$).
\begin{theorem}\label{th:k2}
Problem \textsc{Max-Min-$k$-Partition} is $\Sigma_2^P$-complete,
even for $k=2$ node-subsets and $w\in\{-n^2,1,2\}$.
\end{theorem}
\begin{figure}
\caption{Reduction from \textsc{MaxMinVertexCover} to the complement of \textsc{Max-Min-$2$-Partition}.}
\label{fig:th:2}
\end{figure}
\begin{proof}
Decision problem \textsc{Max-Min-$k$-Partition} asks whether
$\exists~k\mbox{-partition } \pi,
\forall~M\subseteq N, |M|\leq m,
W(\pi_{-M})\geq\theta$.
Therefore, it lies in class $\Sigma_2^P$, since, for yes-instances, such a $k$-partition $\pi$ is a certificate that can be verified by an NP-oracle on the remaining coNP problem \textsc{Max-Min-$k$-Partition/Verif}. We show $\Sigma_2^P$-hardness by a (complementary) reduction from $\Pi_2^P$-complete problem \textsc{MaxMinVertexCover}, defined as follows. Given graph $G=(V,E)$ whose vertices are partitioned by index set $I$ into $V=\bigcup_{i\in I}(V_{i,0}\cup V_{i,1})$, for a function $p:I\rightarrow\{0,1\}$, we define $V^{(p)}=\bigcup_{i\in I}V_{i,p(i)}$ and induced subgraph $G^{(p)}=(V^{(p)},E^{(p)})$. Given $m\in\mathbb{N}$, it asks whether:
$$
\forall p\!:\!I\!\rightarrow\!\{0,1\},\quad
\exists U\subseteq V^{(p)}\!\!,|U|\!\leq\! m,\quad
U\mbox{ is a vertex cover of } G^{(p)}.
$$
where ``$U$ is a vertex cover of $G^{(p)}$'' means $\forall \{u,v\}\in E[V^{(p)}]$, $u\in U$ or $v\in U$.
Since edges between $V_{i,0}$ and $V_{i,1}$ are never relevant, we can remove them. By \cite[Th. 10, proof]{Ko1995}, all $V_{i,j}$ sets have the same size, hence set $V^{(p)}$ has a constant size $n$ for any $p$.
The reduction is described in Figure \ref{fig:th:2}.
We reduce any instance of \textsc{MaxMinVertexCover} (as described above) to the following \emph{complementary} instance of \textsc{Max-Min-$k$-Partition}.
Nodes $N\equiv V$ are identified with vertices, hence can also be partitioned by $I\times\{0,1\}$ into $N=\bigcup_{i\in I}(N_{i,0}\cup N_{i,1})$ with $N_{i,j}\equiv V_{i,j}$ . We ask for $k=2$ node-subsets and choose a large number $\Lambda$, e.g. $\Lambda=n^2$. For every link $\{i,j\}\in{N \choose 2}$, if $\{i,j\}\in E$, we define synergy $w(i,j)=2$; otherwise if $\{i,j\}\notin E$, we define $w(i,j)=1$. However, for every $\ell\in I$ and every $(i,j)\in N_{\ell,0}\times N_{\ell,1}$, we define negative weight $w(i,j)=-\Lambda$. Here, up to $2m$ nodes might fail, and threshold $\theta=f_{n,m}(m)+1$ is defined in the proof.
Since we are working on a complementary instance, the question is whether
$$
\forall 2\mbox{-partition }\pi,\quad
\exists M\subseteq N,|M|\leq 2m,\quad
W(\pi_{-M})\leq f_{n,m}(m),
$$
where $f_{n,m}:[0,2m]\rightarrow[0,n^2]$ is defined later.
This condition is trivially satisfied on 2-partitions $\pi$ where for some $\ell\in[I]$, two nodes $(i,j)\in N_{\ell,0}\times N_{\ell,1}$ are in the same node-subset. Indeed, even with an empty attack $M=\emptyset$, weight $W(\pi_{-\emptyset})$ incurs synergy $w(i,j)=-\Lambda$ and $W(\pi_{-\emptyset})<0\leq f_{n,m}(m)$.
Therefore, the interesting part of this condition is on the other 2-partitions: the \emph{proper} 2-partitions $\pi=\{S_1,S_2\}$, which satisfy $\forall\ell\in[I],\forall (i,j)\in N_{\ell,0}\times N_{\ell,1}, \pi(i)\neq\pi(j)$.
It's easy to see that $\pi$ can be characterized by a function $p\!:\!I\!\rightarrow\!\{0,1\}$ such that $S_1=\bigcup_{i\in I}N_{i,p(i)}$ and $S_2=\bigcup_{i\in I}N_{i,1-p(i)}$, and $|S_1|=|S_2|=n$.
Since the remaining weights inside $S_1$ and $S_2$ are positive, larger failing sets are at least as damaging, and so we may assume that $|M|=2m$ holds.
We now define function $f_{n,m}$. It maps $x\in[0,2m]$ to the number of in-subset pairs in a proper 2-partition $\pi=\{S_1,S_2\}$ ($|S_1|\!=\!|S_2|\!=\!n$) after $x$ nodes fail in $S_1$ and $2m-x$ in $S_2$ (total $2m$ failures). One has:
$$
f_{n,m}(x)
\enskip=\enskip 2{n\choose 2}-\sum\limits_{i=1}^{x}(n\!-\!i)-\sum\limits_{j=1}^{2m-x}(n\!-\!j)
\enskip=\enskip g_{n,m}+ x(x-2m),
$$
where $g_{n,m}$ is constant w.r.t. $x$.
Since $f'_{n,m}(x)=2(x-m)$ and $f''_{n,m}(x)=2$,
it is a strictly convex function with minimum point at $x=m$.
Therefore, for integers $x\in[2m]$, if $x\neq m$, the inequality $f_{n,m}(x) > f_{n,m}(m)$ holds.
By definition, $f_{n,m}(x)$ is a lower bound on $W(\pi_{-M})$ (by assuming that all remaining weights in $\pi_{-M}$ have a value of $1$, instead of $1$ or $2$). Therefore, the main condition can only be satisfied by \emph{balanced} failures $M=M_1\cup M_2$ such that $M_1\subseteq S_1$, $M_2\subseteq S_2$ and crucially: $|M_1|=|M_2|=m$.
(yes$\Rightarrow$yes) Any subgraph $G^{(p)}$ admits a vertex cover $U\subseteq V^{(p)}$ with size $|U|\leq m$. Let us show that any proper 2-partition $\pi=\{S_1,S_2\}$ (characterized by a function $p:I\rightarrow\{0,1\}$) can be failed down to $f_{n,m}(m)$. Let $M_1\subseteq S_1$ correspond to the vertex cover of subgraph $G^{(p)}$ and $M_2\subseteq S_2$ to the vertex cover of subgraph $G^{(1-p)}$. Then, the failing set $M=M_1\cup M_2$ has a size of $|M|\leq 2m$, is balanced, and any node pair $\{i,j\}$ of weight two in $\pi$ (edge in $E$) has $i$ or $j$ in $M$, by the vertex covers. All in all, $W(\pi_{-M})=f_{n,m}(m)$.
(yes$\Leftarrow$yes) Any proper 2-partition $\pi=\{S_1,S_2\}$ (characterized by function $p:I\rightarrow\{0,1\}$) admits a well balanced failing set $M=M_1\cup M_2$ such that $W(\pi_{-M})\!\leq\!f_{n,m}(m)$. Then it must be the case that $M_1$ (and $M_2$) covers all the node pairs of synergy two in $S_1$ (resp. $S_2$) that correspond to the edges of $G^{(p)}$ (resp. $G^{(1-p)}$).
Then, for any subgraph $G^{(p)}$, attack $U\equiv M_1$ is a vertex cover.
\end{proof}
Adding a constant to all weights does not preserve optimal solutions.
Thus, we cannot modify a problem with negative weights to an
equivalent non-negative weight problem.
Still, a hardness result for $k=3$ can also be obtained
from \textsc{$\forall\exists$3SAT}.
\begin{theorem}\label{th:k3}
\textsc{Max-Min-$k$-Partition} is $\Sigma_2^P$-complete,
even for $k=3$ node-subsets and weights $w\in\{0,\Lambda,\Lambda+1\}$, where $\Lambda\geq n^2$.
\end{theorem}
\begin{proof}
Let us first recall a classical reduction from \textsc{3SAT} to \textsc{IndependentSet}, and how the later relates to \textsc{VertexCover}.
Let any 3SAT instance be defined by formula $F=C_1\wedge\ldots\wedge C_{\alpha}$, where $C_i$ is a 3-clause on variables $X$. Every clause $C_i=\ell_{i,1}\vee\ell_{i,2}\vee\ell_{i,3}$ is reduced to triangle of vertices $V_i=\{v_{i,1},v_{i,2},v_{i,3}\}$ representing the literals of the clause. The set of $3\alpha$ vertices is then $V=\cup_{i=1}^{\alpha} V_i$. Between any two subsets $V_i,V_j$, edges exist between two vertices if and only if the corresponding literals are on the same variable and are complementary (hence incompatible). It is easy to see that an independent-set $U\subseteq V$ of size $\alpha$ must have exactly one vertex per triangle $V_i$, and will exist (no edges within) if and only if there exists an instantiation of $X$ that makes at least one literal per clause $C_i$ true.
Given a graph $G=(V,E)$, if $U\subseteq V$ is an independent-set,
it means that
$i\in U\wedge j\in U\Rightarrow \{i,j\}\notin E$.
Hence, contraposition
$\{i,j\}\in E\Rightarrow (i\!\in\!V\!\setminus\!U)\vee (j\!\in\!V\!\setminus\!U)$
means that $V\!\setminus\!U$ is a vertex cover.
For instance, in the reduction from \textsc{3SAT}, one can equivalently ask for a vertex cover $V\setminus U$ of size $2\alpha$, that is, two vertices per triangle $V_i$:
the remaining third vertices (one per triangle) must then have no edge left to cover, i.e., they form an independent set.
Let any instance of \textsc{$\forall\exists$3SAT} be defined by 3CNF formula $F(X,Y)=\bigwedge_{i=1}^{\alpha}C_i$ on variables $X=\{x_1,\ldots,x_{|X|}\}$ and $Y=\{y_1,\ldots,y_{|Y|}\}$. This problem asks whether:
$$
\forall \tau_x:X\rightarrow\{0,1\},\quad
\exists \tau_y:Y\rightarrow\{0,1\},\quad
F(\tau_x,\tau_y)\text{ is true}.
$$
Without loss of generality, one can assume there is at most one $X$-literal per clause $C$.
Indeed, if there are three $X$-literals, some $\tau_x$ can make the clause false, and it is trivially a no-instance.
If there are two $X$-literals: $C=x\vee x'\vee y$, then by adding a fresh $Y$-variable $z$, one easily obtains $C=(x\vee z\vee y)\wedge(x'\vee \neg z\vee y)$.
For ease of presentation, we assume exactly one $X$-literal and two $Y$-literals. We extend this proof to including clauses with no $X$-literal, in its final remark. Let $X(C)$ be the $X$-\emph{literal} in clause $C$.
We build a \textsc{Max-Min-3-Partition} instance on $n\!=\!10\alpha + 2$ nodes with $m=2\alpha$ failures. We first describe the nodes.
To every clause $C_i=\ell^{x}_{i}\vee\ell^{y}_{i}\vee\ell^{y'}_{i}$, we associate two node tetrads $N_{i,0}=\{v^{x}_{i,0},v^{y}_{i,0},v^{y'}_{i,0},v^{z}_{i,0}\}$ and $N_{i,1}=\{v^{x}_{i,1},v^{y}_{i,1},v^{y'}_{i,1},v^{z}_{i,1}\}$ (both depicted in Figure \ref{fig:tetrads}) which represent the two scenarios on $X$-literal $\ell^{x}_{i}$: false or true. Hence, there are $2\alpha$ node tetrads and a total of $4m=8\alpha$ nodes in $T=\cup_{i=1}^{\alpha}\cup_{j\in\{0,1\}}N_{i,j}$. There is also a set $K$ of $m=2\alpha$ nodes, and two nodes $v^{1/2},v^{2/2}$. This construct is depicted in Figure \ref{fig:many-tetrads}.
\begin{figure}
\caption{For clause $C_i=\ell^x_i\vee\ell^{y}_i\vee\ell^{y'}_i$, the two node tetrads $N_{i,0}$ and $N_{i,1}$.}
\label{fig:tetrads}
\end{figure}
To describe the weights, we define a number $\Lambda\gg 1$,
and only three different link weights $0,\Lambda,\Lambda+1$.
We call $\Lambda$-link any link with weight $\Lambda$ or $\Lambda+1$.
We call 1-link any link with weight $\Lambda+1$.
Every pair of nodes in $\cup_{i=1}^{\alpha}\cup_{j\in\{0,1\}}N_{i,j}$
are linked by weight $\Lambda$ or $\Lambda+1$,
except ($\star$) we set weights \emph{zero} (and no link) for every $i,i'\in[\alpha]$:
\begin{itemize}
\item[when] $X(C_i)=X(C_{i'})$ between $N_{i,j}$ and $N_{i',1-j}$ for $j\in\{0,1\}$, or
\item[when] $X(C_i)= \neg X(C_{i'})$ between $N_{i,j}$ and $N_{i',j}$ for $j\in\{0,1\}$.
\end{itemize}
The rationale is to forbid two inconsistent scenarios on a same $X$-variable to coexist in one node-subset.
Whether the $\Lambda$-link is also a 1-link is determined as follows.
Inside every node tetrad $N_{i,j}=\{v^{x}_{i,j},v^{y}_{i,j},v^{y'}_{i,j},v^{z}_{i,j}\}$,
there is a triangle of 1-links:
$\{v^{x}_{i,j},v^{y}_{i,j}\}$,
$\{v^{y}_{i,j},v^{y'}_{i,j}\}$ and
$\{v^{y'}_{i,j},v^{x}_{i,j}\}$.
Only in negative tetrads $N_{i,0}$, there is a 1-link $\{v^{x}_{i,0},v^{z}_{i,0}\}$.
Given any tetrad $N_{i,j}$, node $v^{x}_{i,j}$ is not involved in any outgoing 1-link,
but only links with weight $\Lambda$.
Between any tetrads $N_{i,j}$ and $N_{i',j'}$ except ($\star$),
there is a 1-link between complementary nodes of $Y$-literals; that is, a 1-link exists when the latter's literal is the negation of the former's.\footnote{It is the same idea as in the standard reduction from 3SAT to \textsc{IndependentSet}.}
Assuming w.l.o.g. that $\alpha$ is even,
let $\mu_1$ be the number of 1-links in $\bigcup_{i=1}^{i=\alpha/2} N_{i,0}$
and $\mu_2$ the number of 1-links in $\bigcup_{i=\alpha/2+1}^{i=\alpha} N_{i,0}$.
Inside $K$, every pair of nodes is linked by weight $\Lambda$.
Also, every node in $K$ is linked to every node in tetrads $T$ by weight $\Lambda$.
Node $v^{1/2}$ is linked to every node in $\bigcup_{i=1}^{i={\alpha}/{2}}\bigcup_{j\in\{0,1\}}N_{i,j}$
by weight $\Lambda$, except for nodes $v^z_{i,1}$ by weight $\Lambda+1$; the same holds from node $v^{2/2}$
to every node in $\bigcup_{i={\alpha}/{2}+1}^{i=\alpha}\bigcup_{j\in\{0,1\}}N_{i,j}$.
All other weights are zeros. We achieve this construct by defining threshold $\theta$ as:
$$
\theta-1 \quad=\quad {2m\choose 2}\Lambda + 2{m+1\choose 2}\Lambda + \mu_1 + \mu_2,
$$
and asking whether
$
\forall 3\text{-part }\pi,
\exists M\!\subseteq\!N,|M|\!\leq\!m,
W(\pi_{-M})\leq \theta\!-\!1.
$
A \emph{proper}-3-partition $\pi=\{S^{(p)},S^{1/2},S^{2/2}\}$ is characterized by an instantiation $p:X\rightarrow\{0,1\}$ of $X$ variables extended to literals by $p(\neg x)=1-p(x)$, and which defines:
$$
\begin{array}{cccclr}
S^{(p)} &=& K &\cup& \bigcup_{i=1}^{i=\alpha} N_{i,p(X(C_i))} & (3m\text{ nodes})\\
S^{1/2} &=& \{v^{1/2}\} &\cup& \bigcup_{i=1}^{i={\alpha}/{2}} N_{i,1-p(X(C_i))} & (m+1\text{ nodes})\\
S^{2/2} &=& \{v^{2/2}\} &\cup& \bigcup_{i={\alpha}/{2}+1}^{i=\alpha} N_{i,1-p(X(C_i))} & (m+1\text{ nodes})
\end{array}
$$
Note that in $S^{1/2}$ (resp. $S^{2/2}$) the number of 1-links is constant $\mu_1$ (resp. $\mu_2$) for any $p$, since the formula on $Y$-literals is the same and 1-link $\{v^{1/2},v^z_{i,1}\}$ (resp. $\{v^{2/2},v^z_{i,1}\}$) compensates for $\{v^x_{i,0},v^z_{i,0}\}$.
\begin{figure}
\caption{From \textsc{$\forall\exists$3SAT}: the overall construct, with the node tetrads in $T$, the clique $K$, and the two nodes $v^{1/2}$ and $v^{2/2}$.}
\label{fig:many-tetrads}
\end{figure}
We show that in our construct, any 3-partition which is not a \emph{proper}-3-partition
does trivially satisfy the complement question above.
First, let us reason as if all three node-subsets were cliques of $\Lambda$-links.
Crucially, in a node-subset of size $\nu$, the number of links ${\nu\choose 2}$ is quadratic.
Therefore, the largest node-subsets will be the first attacked, and the only way $\pi_{-M}$ contains as many as ${2m\choose 2}+2{m+1\choose 2}$ $\Lambda$-links is if the node-subsets of $\pi$ had sizes $3m$, $m+1$ and $m+1$.
Second, assume $\Lambda$-links are missing in some node-subsets.
Then, an attack would focus on more connected subsets and $\pi_{-M}$ cannot contain as many as ${2m\choose 2}+2{m+1\choose 2}$ $\Lambda$-links.
Therefore, 3-partition $\pi$ must consist of $\Lambda$-link cliques of size $3m$, $m+1$ and $m+1$.
If the largest did not follow consistently some instantiation $p:X\rightarrow\{0,1\}$,
then some $\Lambda$-links would be missing (see ($\star$)). Also, the only way to obtain two $\Lambda$-linked cliques of size $m+1$ on $N\setminus S^{(p)}$ is by $S^{1/2}$ and $S^{2/2}$.
We also know that $S^{1/2}$ and $S^{2/2}$ contain $\mu_1+\mu_2$ 1-links.
Crucially, attack $M$ always occurs where it does the largest damage w.r.t. $\Lambda$-links: on node-subset $S^{(p)}$, and the number of remaining $\Lambda$-links is ${2m\choose 2} + 2{m+1\choose 2}$.
Given a proper-3-partition, what could make the inequality false would be a surviving 1-link in $S^{(p)}\setminus M$. Consequently, condition $\exists M,W(\pi_{-M})\leq\theta-1$ amounts to a $2\alpha$ node attack $M$ that covers every 1-link in $\bigcup_{i=1}^{i=\alpha} N_{i,p(X(C_i))}$.
A crucial observation is that we necessarily attack/cover exactly two nodes per tetrad $N_{i,j}$, since each tetrad contains a triangle.
In negative tetrads $N_{i,0}$, because of 1-link $\{v^{x}_{i,0},v^{z}_{i,0}\}$, one of these nodes has to be $v^{x}_{i,0}\in M$.
In positive tetrads $N_{i,1}$, since node $v^{x}_{i,1}$ is not involved in other 1-links than the triangle,
choosing both $v^{y}_{i,1}$ and $v^{y'}_{i,1}$ in 1-link cover $M$ is the best choice.
As in $\textsc{3SAT}\leq\textsc{IndependentSet}$, this amounts to a 1-link-independent-set $\overline{M}= S^{(p)}\setminus (K\cup M)$ with size $2\alpha$ and two nodes per tetrad $N_{i,j}$: first, node $v^{z}_{i,j}$, second if $j=0$ then $v^{y}_{i,0}$ xor $v^{y'}_{i,0}$, otherwise if $j=1$ then $v^{x}_{i,1}$.
(yes$\Rightarrow$yes) Assume that for every $\tau_x:X\rightarrow\{0,1\}$, there exists $\tau_y:Y\rightarrow\{0,1\}$ such that in every clause $C_i$ with $\tau_x(X(C_i))=0$, a $Y$-literal is made true by instantiation $\tau_y$. We show that given any proper-3-partition $\{S^{(p)},S^{1/2},S^{2/2}\}$, in $S^{(p)}\setminus K=\bigcup_{i=1}^{i=\alpha} N_{i,p(X(C_i))}$, there exists a 1-link-independent-set $\overline{M}$ of size $2\alpha$, as below.
Taking $\tau_x\equiv p$, let $\tau_y:Y\rightarrow\{0,1\}$ be as above mentioned.
Then,
\begin{eqnarray*}
\overline{M} &=& \bigcup_{i\in[\alpha]}\left\{
\begin{array}{ll}
\text{if } p(X(C_i))\!=\!0 \text{:} & \{v^z_{i,0}, \text{one } v^y_{i,0}\mid\tau_y(\ell_i^y)=1\}\\
\text{if } p(X(C_i))\!=\!1 \text{:} & \{v^z_{i,1}, v^x_{i,1}\}
\end{array}\right.
\end{eqnarray*}
is a 1-link-independent-set of size $2\alpha$: node $v^y_{i,0}$ exists since instantiation $\tau_y$ gives at least one true literal per clause where $\tau_x(X(C_i))=0$, and nodes are not 1-linked (no literal contradiction).
(yes$\Leftarrow$yes) Assume that for any $\tau_x\equiv p:X\rightarrow\{0,1\}$, a 1-link-independent-set $\overline{M}$ with size $2\alpha$ exists in node-subset $S^{(p)}\setminus K=\bigcup_{i=1}^{i=\alpha} N_{i,p(X(C_i))}$.
Then, nodes $v^y_{i,0}\in\overline{M}$ consistently define $\tau_y:Y\rightarrow\{0,1\}$ that makes any clause $C_i$ true whenever $\tau_x(X(C_i))=0$.
Crucially, we also include clauses without any $X$-literal in the same construct.
Assume w.l.o.g. that there are fewer than $\alpha/2$ such $Y$-clauses, occupying the first indexes in $[\alpha]$. To any $Y$-clause $C=\ell^{y}_{i}\vee\ell^{y'}_{i}\vee\ell^{y''}_{i}$, one associates
two tetrads $N_{i,j}=\{v^{y}_{i,j}, v^{y'}_{i,j}, v^{y''}_{i,j}, v^{z}_{i,j}\}$, $j\in\{0,1\}$.
For $C_{i},C_{i'}$ $Y$-clauses, between $N_{i,0}$ and $N_{i',1}$ weights are zero.
Negative tetrads $N_{i,0}$ are fully $\Lambda$-linked inside, between themselves, with previous tetrads of one $X$-variable and set $K$.
Positive tetrads $N_{i,1}$ are fully $\Lambda$-linked inside, between themselves and with $v^{1/2}$.
Given a $Y$-clause $C$, we define $X(C)=\emptyset$.
For proper-3-partitions, we extend $p(\emptyset)=0$;
hence in $\{S^{(p)},S^{1/2},S^{2/2}\}$, for $C_i$ a $Y$-clause, one has $N_{i,0}\subseteq S^{(p)}$ and $N_{i,1}\subseteq S^{1/2}$.
Similarly, in any $Y$-clause tetrad $N_{i,j}$, there are 1-links
$\{\{v^{y}_{i,j}, v^{y'}_{i,j}\},
\{v^{y'}_{i,j}, v^{y''}_{i,j}\},
\{v^{y''}_{i,j}, v^{y}_{i,j}\}\}$,
(optional 1-links $\{v^{z}_{i,1},v^{1/2}\}$),
and whenever two $Y$-literals are complementary.
It follows that the same proof holds.
\end{proof}
\section{Related Work}
Partitioning of a set into (non-empty) subsets may also be referred as coalition structure formation of a set of agents into coalitions.
When a number of coalitions $k$ is required and there are synergies between vertices/agents, this problem is referred as $k$-cut, or $k$-way partition, where one minimizes the weight of edges/synergies between the coalitions, or maximizes it inside the coalitions.
For positive weights and $k\geq 3$, this problem is NP-complete \cite{Dahlhaus:1992:CMC:129712.129736}, when one vertex is fixed in each coalition.
For positive weights and fixed $k$, a polynomial-time $O(n^{k^2}T(n,m))$ algorithm exists \cite{10.2307/3690374}, when no vertex is fixed in coalitions, and where $T(n,m)$ is the time to find a minimum $(s,t)$ cut on a graph with $n$ vertices and $m$ edges.
When not too many negative synergies exist (that is, negative edges can be covered by $O(\log(n))$ vertices), an optimal $k$-partition can be computed in polynomial-time \cite{SLESS2018217}.
\end{document} |
\begin{document}
\title{A Class of Continued Radicals}
\author{Costas J. Efthimiou}
\date{}
\maketitle
\begin{abstract}
We compute the limits of a class of continued radicals extending the results of a previous note in which only periodic radicals of the class were considered.
\end{abstract}
\section{Introduction.}
In \cite{Efthimiou} the author discussed the values for a class of periodic continued radicals of the form
{\footnotesize
\begin{equation}
a_0\sqrt{2+a_1\sqrt{2+a_2\sqrt{2+a_3\sqrt{2+\cdots}}}} ~,
\label{eq:OurRadical}
\end{equation}
}
where for some positive integer $n$,
$$
a_{n+k} ~=~ a_k~,~~~k=0,1,2,\dots~,
$$
and
$$
a_k\in\{-1,+1\}~,~~~k=0,1,\dots,n-1~.
$$
It was also shown that the radicals given by equation \eqref{eq:OurRadical} have limits two times the
fixed points of the Chebyshev polynomials $T_{2^n}(x)$, thus unveiling an interesting relation between these topics.
In \cite{ZH}, the authors defined the set $S_2$ of all continued radicals of the form \eqref{eq:OurRadical} (with $a_0=1$) and they
investigated some of their properties by assuming that the limit of the radicals exists. In particular, they showed that all elements
of $S_2$ lie between 0 and 2, any two radicals cannot be equal to each other, and $S_2$ is uncountable.
My previous note hence partially bridged this gap but left unanswered the question `\textit{what are the limits if the radicals
are not periodic?}' I answer the question in this note. The result is easy to establish, but I realized it only as I was reading the proof of
my previous note. Such is the working of the mind!
\section{The Limits.}
Towards the desired result, I present the following lemma from \cite{Shklarsky}, also used in the periodic case, which is an extension of the well known trigonometric formulas of the angles $\pi/2^n$.
\begin{lemma}
For $a_i\in\{-1,1\}$, with $i=0,1,\dots,n-1$, we have that
{\footnotesize
$$
2\, \sin \left\lbrack \left( a_0+{a_0a_1\over2}+\dots+{a_0a_1\cdots a_{n-1}\over2^{n-1}} \right) {\pi\over4} \right\rbrack
~=~
a_0\sqrt{2+a_1\sqrt{2+a_2\sqrt{2+\dots+a_{n-1}\sqrt{2}}}}~.
$$
}
\end{lemma}
The lemma is proved in \cite{Shklarsky} using induction.
According to this lemma, the finite truncations $x_n$ of the continued radical \eqref{eq:OurRadical} are given by
$$
x_n ~=~ 2\sin \left\lbrack \left( a_0+{a_0a_1\over2}+\dots+{a_0a_1\cdots a_{n-1}\over2^{n-1}} \right) {\pi\over4} \right\rbrack~.
$$
The series
\begin{equation*}
a_0+{a_0a_1\over2}+\dots+{a_0a_1\cdots a_{n-1}\over2^{n-1}} +\cdots
\end{equation*}
is absolutely convergent and thus it converges to some number $a$. Therefore, the original continued radical
converges to the real number
$$
x ~=~ 2\sin{a\pi\over4}~.
$$
We can find a concise formula for $x$. For this calculation it is more useful to use the products
$$
P_m ~=~ \prod_{k=0}^{m} a_k~,~~~\text{for } m=0,1,2,\dots~,
$$
which take the values $\pm1$. We will refer to these as partial parities. (When the pattern is periodic of period $n$ only the first $n$
parities $P_0, P_1, \dots, P_{n-1}$ are independent.) Using the notation with the partial parities, set
\begin{eqnarray*}
a &=& P_0 + {P_1\over2} + {P_2\over 2^2} + \cdots+ {P_{n-1}\over 2^{n-1}} + {P_n\over 2^{n}} + \cdots ~.
\end{eqnarray*}
We now define
$$
Q_m ~=~ {1+P_m\over2}~.
$$
Since $P_m\in\{-1,1\}$, it follows that $Q_m\in\{0,1\}$. Inversely, $P_m=2Q_m-1$. Thus
\begin{equation*}
a ~=~ \sum_{m=0}^\infty {P_m\over 2^m}
~=~ \sum_{m=0}^\infty {Q_m\over 2^{m-1}} - \sum_{m=0}^\infty {1\over 2^m}
~=~ 4\, \sum_{m=0}^\infty {Q_m\over 2^{m+1}} - 2~.
\end{equation*}
Notice that the sum
$$
Q ~=~ \sum_{m=0}^\infty {Q_m\over 2^{m+1}}
$$
in the previous equation is the number $Q$ whose binary expansion is $0.Q_0Q_1Q_2\cdots$.
Therefore $a=4Q-2$.
In \cite{ZH}, the authors noticed that all continued radicals of the form \eqref{eq:OurRadical} (with $a_0=1$) are in one-to-one
correspondence with the set of decimals between 0 and 1 as written in binary notation (and that's how they determined that the set
$S_2$ is uncountable). But, with the above calculation, this correspondence is made deeper. It gives the limit of the radical
\eqref{eq:OurRadical} as follows
$$
x ~=~ -2\cos \left( Q\pi \right) ~.
$$
For example, if $a_k=1$ for all $k$, then also $Q_k=1$ for all $k$ and the number $Q=0.111111111\cdots$ written in the binary system
is the number $Q=1$ in the decimal system; hence $x=2$. We thus recover the well known result
{\footnotesize
\begin{equation*}
2 ~=~ \sqrt{2+\sqrt{2+\sqrt{2+\sqrt{2+\cdots}}}} ~.
\end{equation*}
}
\section{Conclusion.}
Having found the limit of \eqref{eq:OurRadical}, the next obvious question is to determine the limit of the radical
{\footnotesize
\begin{equation*}
a_0\sqrt{y+a_1\sqrt{y+a_2\sqrt{y+a_3\sqrt{y+\cdots}}}} ~,
\end{equation*}
}
for values of the variable $y$ that make the radical (and the limit) well defined. However, a direct application of the above method fails and
so far a convenient variation has been elusive. Therefore, the limit of the last radical in the general case remains an open problem although it is
known in at least two cases \cite{ZH}.
\noindent\textit{
Department of Physics, University of Central Florida, Orlando, FL 32816 \\
[email protected]
}
\end{document} |
\begin{document}
\title{Joint models as latent Gaussian models - not
reinventing the wheel}
\begin{abstract}
Joint models have received increasing attention during recent
years with extensions into various directions; numerous hazard
functions, different association structures, linear and non-linear
longitudinal trajectories amongst others. Many of these resulted
in new R packages and new formulations of the joint model.
However, a joint model with a linear bivariate Gaussian
association structure is still a latent Gaussian model (LGM) and
thus can be implemented using most existing packages for LGM's. In
this paper, we will show that these joint models can be
implemented from a LGM viewpoint using the \textit{R-INLA}
package. As a particular example, we will focus on the joint model
with a non-linear longitudinal trajectory, recently developed and termed the
partially linear joint model. Instead of the usual spline
approach, we argue for using a Bayesian smoothing spline framework for the
joint model that is stable with respect to knot selection and
hence less cumbersome for practitioners.
\end{abstract}
\section{Introduction}
Latent Gaussian models (LGM's) is a group of models that contains most
statistical models used in practice. Indeed, most generalized linear mixed models (GLMM's) and general additive models (GAM's) that we are able to perform inference with, are
examples of LGMs. In the context of
joint models, this viewpoint has largely been under-represented and
merely mentioned in \cite{martino2011}. A joint model is unique in the
sense that there are two different likelihoods and shared random
effects in the model. Extensions of linear joint models like spatial
random effects, non-linear trajectories and multiple end-points
amongst others, are used in the context of joint models to address
certain practical challenges. Each of these new joint models is still
a latent Gaussian model and thus no special implementation package is
needed for each one. The \textit{R-INLA} package based on the INLA
methodology \citep{rue2009}, has been used extensively for latent
Gaussian models and could thus be used for joint models as well. Most
longitudinal likelihoods and hazard assumptions can be facilitated in
this framework, leaving no need for developing a new joint model for
each set of assumptions. As a particular example, we will focus on the
recently proposed partially linear joint model by
\cite{kim2017}.\\ \\
Non-linear or partially linear joint models, in particular, is a
natural extension to the linear joint model since this is often the
case in real datasets. \cite{kim2017} introduced a
frequentist approach to fit a partially linear joint model using
splines and presented a selection method for the knot set based on
some model selection metrics. A Bayesian P-splines approach is adopted
in a joint model framework by \cite{kohler2017} where the number of
knots is also based on the value of some model selection metrics. The
approach proposed by \cite{kohler2017} uses the R package
\textit{bamlss} and the authors commented that the implementation of
this model is not computationally feasible. \\ \\ In this paper, we
present a Bayesian approach embedded within the \textit{R-INLA}
package \citep{rue2009} to fit a partially linear/non-linear joint
model, without the burden of choosing a specific set of knots. We use
a Bayesian smoothing spline model described by \cite{lindgren2008} and
\cite{yue2014}, that is the solution of a stochastic
differential equation (SDE) resulting in a second-order random walk
(see \cite{lindgren2008} and \cite{simpson2012} for further details)
in contrast to the semi-parametric Bayesian method proposed by
\cite{rizopoulos2011} that also depends on knot selection. Using this
methodology, the model is stable with regards to the choice of the
number and placement of the knots needed to form a spline basis as in
\cite{kim2017}, since it is a continuous time model. Additionally, our approach introduces a hyperparameter
pertaining to the spline component that is interpretable and can be
used to assess the appropriateness of the non-linear component. \\ \\
In Section \ref{jointmodelsection}, we present the partially linear
joint model as defined by \cite{kim2017}. Also, we present various
forms of the linear shared random effect that can be fitted using
\textit{R-INLA}, but not with most of the other available packages for
joint models. The Bayesian smoothing spline is discussed in
Section \ref{splines}. Latent Gaussian models and a synopsis of the
INLA methodology underpinning the \textit{R-INLA} package is presented
in Section \ref{lgmjointsec}. In this section, we also discuss how
joint models fit into the LGM framework. In Section
\ref{applicationsection}, we present an example of our approach and
compare it to that presented in \cite{kim2017} using the simulated PSA
dataset presented in the \textit{jplm} function in the \textit{JointModel} package. The paper is concluded
by concluding remarks in Section \ref{conclusionsection}.
\section{Partially linear joint model}\label{jointmodelsection}
A joint model consists of two marginal models, linked by shared,
correlated random effects. The motivation for the construction of such
a model is found in the biological or physical process generating
the data, since multiple types of data generated by the same
individual is inherently correlated. The joint modeling of time to
event and longitudinal data is a fundamental tool in these types of
studies since insights about the survival component can be gained from
the longitudinal series (see
\cite{wulfsohn1997,hu2003,tsiatis2004,guo2004} amongst others for more
details). This is especially beneficial in studies where the events
are lengthy to observe or scarce. Usually, the model is constructed as
the combination of a longitudinal model to analyze data measured at
multiple time points based on the same investigative subject and a
survival model for the time to event data. This setup is quite common
in most studies where subjects are followed in two ways: biomarkers are
collected at multiple time points to investigate the behaviour of some
physical process (usually after some intervention/treatment) as well
as the absence/presence of a certain linked event (usually a relapse
or a fatal event).\\ \\ The models are jointly fitted by sharing a set
of random effects from the longitudinal submodel to the survival
submodel. This provides insights into the biological process acting as
the driving force behind various diseases such as prostate cancer
\citep{serrat2015}, ovarian cancer \citep{huang2018}, AIDS
\citep{guo2004,huang2018}, Dermatomyositis \citep{van2018} and Renal
disease \citep{rizopoulos2011}, to mention but a few. The exact form of
the shared random effect can vary. The most popular form currently
used is a linear random effect in time as the sum of a random
intercept and random slope over time, as implemented in the R packages
\textit{JMbayes, JointModel} and the function \textit{jplm} of which the latter can
incorporate a non-linear trajectory over time, in the longitudinal
submodel. Both Bayesian and frequentist methods have been developed
for joint models as summarized in the aforementioned packages, amongst
others. The linear shared random effect assumption has recently been
challenged by \citep{andrinopoulou2016}.
In this paper, however, we will focus on the case of linear shared
random effects.\\ \\
We denote $\pmb{y}$ and $\pmb{s}$ as the response vectors of the
longitudinal and survival submodels, respectively. Additionally,
$\pmb{X}$ and $\pmb{Z}$ is a set of available covariates for the
longitudinal and survival submodels, respectively.
\subsection{Longitudinal submodel}\label{longsec}
In various real-life situations, numerous datapoints are collected
from the same individual at different timepoints. This forms a
longitudinal series of data and cannot be modelled using standard
techniques like generalized linear models since the assumption of
independent and identically distributed observations does not hold.
Instead, conditional on the subject and/or group-specific random
effects, the observations are independent and identically distributed
in the context of a generalized linear mixed model. \\ \\ For each individual $i,i=1,...,N$ we have a vector of observations
$y_{ijk}=y_{ij}(t_{ijk})$ at various timepoints $t_{ijk}$, for groups $j=1,...,N_t$ where $ k=1,...,N_{ij}$, such that $\sum_i\sum_j N_{ij}=N_L$.
The longitudinal submodel
is a generalized mixed model for the longitudinal outcome in
continuous time. We assume that the conditional longitudinal outcomes
$y_{ijk}|(\pmb{\beta},\pmb{X}_{ijk},u_{ijk})$ are conditionally independent and follow a well-defined
distribution, $\mathscr{G}$ with some density function $g$, linear
predictor $\eta^L$ and hyperparameters $\pmb\theta_L$. In practice, a
Gaussian likelihood is often assumed, although this is not necessary
and any well-defined distribution can easily be facilitated in our
computational procedure. The longitudinal submodel is as follows:
\begin{eqnarray}
y_{ijk}|(\pmb{\beta},\pmb{X}_{ijk},u_{ijk})\sim
\mathscr{G}(\eta^L=\alpha(t_{ijk})+\pmb{\beta}^{T}\pmb{X}_{ijk}+u_{ijk})
\label{longsub}
\end{eqnarray}
In essence, the submodel is composed by a set of fixed effects,
$\pmb{\beta}^T\pmb{X}_{ijk}$, and a set of random effects,
$\alpha(t_{ijk})+u_{ijk}$. In this specification, $\pmb{\alpha}$
denotes the longitudinal trajectory which can assume any form, also
non-linear, with hyperparameters $\pmb{\theta}_{\pmb\alpha}$. The
random effects $\pmb{u}$ are the shared components, linear in time,
which forms the basis of the joint model. Specifically, we formulate,
\begin{equation}
u_{ijk}=u_{ij}(t_{ijk})=w_{ij}+v_{ij}t_{ijk}
\label{sharedrandomeff}
\end{equation}
where $w_{ij}$ and $v_{ij}$ follow a bivariate Gaussian distribution
with zero mean, precision matrix $\pmb{Q}_u$ (inverse covariance matrix) and correlation
coefficient $\rho$. This Gaussian assumption is the one mostly used in the literature.
\subsection{Survival submodel}\label{survsec}
Survival datasets are unique in the sense that the response for
subject $l$ consists of an event time, $s_l$, as well as a censoring
variable, $c_l$, to indicate if the time was censored $(c_l=0)$ or not
$(c_l=1)$. Right censoring is most commonly found in practice since
this results from terminating a study before all subjects experienced
the event. Let $s^*_l$ be the event time if the subject experienced
the event $(c_l=1)$, and suppose $s^X$ is the last timepoint in the
study period, then, in the case of right censoring,
$s_l=\min(s^*_l,s^X)$. The construction of the time variable $s_l$, is
slightly different under different censoring schemes, so we will focus on right censoring in this paper.\\ \\The
specification of the form of the baseline hazard function $h_0(t)$ can
be achieved parametrically (exponential (constant hazard), Weibull
(monotonic hazard), log-Gaussian or log-Logistic (non-monotonic)
baseline hazard function) or semi-parametrically (Cox piecewise
constant model). Each of the afore-mentioned models can be used in our
computational procedure, so we will propose the survival submodel in a
general form and later present the specific details for each case. The
survival submodel is defined using the hazard rate as:
\begin{equation}
h_l(s)=h_0(s)\exp\left(\pmb{\gamma}^T\pmb{Z} +
\pmb{\nu}\circ(w_{l},v_{l}s)+m_l\right).
\label{survsub}
\end{equation}
The submodel also has some hyperparameters $\pmb{\theta}_S$. If
$\pmb{\nu}\circ(w_{l},v_{l}s)$ is independent of time, then we have a
proportional hazards model. The plausibility of proportional hazards
should be investigated using exploratory analysis of the empirical
survival/hazard curves. The random effect $m_l$ is used to model
subject-specific variability in the survival time, often called a
frailty component, resulting in a frailty variable $\exp(m_l)$. The
association between the longitudinal and survival sub-models is
established by the term $\pmb{\nu}\circ(w_{l},v_{l}s)$.
\subsection{Possible linear association structures}
The joint model is solely developed based on the shared effects from
both submodels. A subset of the random effects in the longitudinal
model enter the hazard rate model through the well-defined combination
$\pmb{\nu}\circ(w_{l},v_{l}s)$ as in \eqref{survsub}. This combination
can be time-dependent, resulting in an accelerated failure time model.
The functional form of $\pmb{\nu}\circ(w_{l},v_{l}s)$, where $\circ$ is the component-wise product, can assume
various structures, some commonly found are summarized below
\citep{henderson2000}:
\begin{eqnarray}
\pmb{\nu}\circ(w_{l},v_{l}s)&=&\nu
w_l \label{share1}\\
\pmb{\nu}\circ(w_{l},v_{l}s)&=&\nu(v_ls)\label{share2}\\
\pmb{\nu}\circ(w_{l},v_{l}s)&=&\nu(w_l+v_ls)\label{share3}\\
\pmb{\nu}\circ(w_{l},v_{l}s)&=&\nu_1w_l+\nu_2(v_ls)\label{share4}
\end{eqnarray}
With the addition of \eqref{share3} in the \textit{R-INLA} package, all these functional forms, \eqref{share1}-\eqref{share4}, can be assumed in \textit{R-INLA}, while most
currently available R-packages for joint models, as well as
\textit{jplm}, can only facilitate \eqref{share3}. Within our
computational framework, the frailty variable should have a
log-Gaussian distribution, a priori. The log-gamma frailty has been
included in a test version and is discussed thoroughly in
\cite{martins2014}. In this paper, we will only focus on log-Gaussian
frailties to not distract from the main aim.
\section{Bayesian smoothing spline model}\label{splines}
As noted previously, the main aim of this paper is to formulate a
partially/non-linear joint model that can capture non-linear
trajectories presented by the data as an LGM. Traditionally, these
spline models have been based on selecting a number of knots or basis
functions, $B_k$, and then formulating a dependency structure through
coefficients $\lambda_k$, to give the random effect
$$\alpha(t) = \pmb{B}(t) \pmb{\lambda}.$$
The choice of the placement and number of knots has been addressed in
various different ways. \cite{zhou2001} proposed a knot
relocation and search method instead of the stepwise addition and
deletion approach. A two-stage knot selection approach using wavelet
decomposition and then statistical model selection techniques was
proposed by \cite{he2001}, while
\cite{spiriti2013} introduced a stochastic search algorithm as an
improvement on multivariate adaptive regression splines (MARS)
\citep{friedman1991} to produce a near-optimal knot set in the squared
error sense. A Bayesian approach based on the joint posterior of the
placement and number of knots using piecewise polynomials is presented
in \cite{denison1998}, while
\cite{leitenstorfer2007} proposed using boosting techniques with
radial basis functions. Irrespective of the method used to find a knot
set, the main issue is that the number and locations of knots or basis
functions can change the model in fundamental ways. The reason the
model is unstable with respect to the choice of knots, is that the
covariance structure is built on the spline coefficients $\lambda$,
instead of on the spline $\alpha(t)$.\\ \\ A different approach to spline models is implemented in the
\textit{R-INLA} package and described by \cite{lindgren2008} and \cite{yue2014}. This approach
is based on finite element methods, frequently used in numerics and
mathematical modelling in general, where the focus is on approximating
some continuous spline $\alpha(t)$ on a discrete set of knots. The
covariance structure on $\lambda$ is derived by approximating the
desired covariance structure of $\alpha(t)$, following the theory of
numerical discrete approximations to continuous equations. Different
choices of knots or basis functions will approximate the same
continuous model, and, as the number of knots grow large, become
stable (and converge to the continuous model), contrary to most used
methods for spline regression.\\ \\ For the second-order random walk, the continuous SDE model is
$$\alpha''(t) = \mathcal W, $$
where $\alpha''$ denotes the second derivative and $\mathcal W$ the Gaussian white noise
process.
For regular intervals, this can be approximated by
\begin{equation}
\alpha(t-1) -2 \alpha(t) + \alpha(t+1) \,{\buildrel d \over =}\ w_t,
\label{rw2}
\end{equation}
where $w_t$ is Gaussian white noise with precision $\tau_\alpha$. The
use of irregular locations is found in \cite{lindgren2008}, and all
good approximations (the definition of ``good'' is studied in
numerical mathematics) will give very similar models. The next
modelling challenge with the random walk order 2, is that the size of
the spline (the range of values the spline can take) is difficult to
interpret, and it depends on the total number of observations.
This
challenge was resolved by \cite{sorbye2014scaling},
and is implemented in R-INLA through the scale.model option.
With this approach, we can interpret the ``size'' of the
spline to be the overall deviation from a straight line (the straight
line has second derivative equal to zero).
\section{Latent Gaussian joint model}\label{lgmjointsec}
In this section we will briefly present the concept of latent Gaussian
models and the INLA methodology. Preference is given to the details
useful for this paper, further details can be found in \cite{rue2009}.
Joint models as presented in this paper, are shown to be LGM's and
hence fit into the INLA framework.
\subsection{Latent Gaussian models and INLA}\label{lgmsection}
Hierarchical Bayesian additive models are widely used in various
applications. A specific subset of Bayesian additive models is the
class of latent Gaussian models (LGM). An LGM can be efficiently
modelled using the INLA methodology implemented in the \textit{R-INLA}
package. This class comprises well-known models such as mixed
models, temporal and spatial models. An LGM is defined as a model
having a specific hierarchical structure, as follows: The likelihood
is conditionally independent based on the likelihood parameters
(hyperparameters), $\pmb{\theta}$ and the linear predictors,
$\eta_i$, such that the complete likelihood can be expressed as
\begin{equation}
\pi(\pmb{y}|\pmb{\eta},\pmb{\theta})=\prod_{i=1}^{N}
\pi(y_i|\eta_i(\pmb{\mathcal{X}}),\pmb{\theta}).
\end{equation}
The linear predictor
is formulated as follows:
\begin{equation}
\eta_i=\beta_0+\pmb{\beta}^T\pmb{X}_i+\pmb{u}_i(\pmb{z}_i)+\epsilon_i
\label{additive predictor}
\end{equation}
where $\pmb{\beta}$ represent the linear fixed effects of the
covariates $X$, $\pmb{\epsilon}$ is the unstructured random effects
and $\pmb{\gamma}$ represents the known weights of the unknown
non-linear functions $\pmb{u}$ of the covariates $\pmb{z}$. The
unknown non-linear functions, also known as structured random effects,
$\pmb{u}$ include spatial effects, temporal effects, non-seperable
spatio-temporal effects, frailties, subject or group-specific
intercepts and slopes etc. This class of models include most models
used in practice since time series models, spline models and spatial
models, amongst others, are all included within this class. The main
assumption is that the data, $\pmb{Y}$ is conditionally independent
given the partially observed latent field, $\pmb{\mathcal{X}}$ and some
hyperparameters $\pmb{\theta}_1$. The latent field $\pmb{\mathcal{X}}$ is
formed from the structured predictor as
$(\pmb{\beta},\pmb{u},\pmb{\eta})$ which forms a Gaussian Markov
random field with sparse precision matrix $\pmb{Q}(\pmb{\theta}_2)$,
i.e.\ $\pmb{\mathcal{X}}\sim N(\pmb{0},\pmb{Q}^{-1}(\pmb{\theta}_2))$. A prior,
$\pmb{\pi}(\pmb{\theta})$ can then be formulated for the set of
hyperparameters $\pmb{\theta}=(\pmb{\theta}_1,\pmb{\theta}_2)$. The
joint posterior distribution is then given by:
\begin{equation}
\pmb{\pi}(\pmb{\mathcal{X}},\pmb{\theta})\varpropto\pmb{\pi}(\pmb{\theta})\pmb{\pi}
(\pmb{\mathcal{X}}|\pmb{\theta})\prod_{i}\pi(Y_i|\pmb{\mathcal{X}},\pmb{\theta})
\label{postINLA}
\end{equation}
The goal is to approximate the joint posterior density \eqref{postINLA} and subsequently compute the marginal posterior densities,
$\pmb{\pi}(\mathcal{X}_i|\pmb{Y}),i=1,\dots,n$ and
$\pmb{\pi}(\pmb{\theta}|\pmb{Y})$. Due to the possibility of a non-Gaussian likelihood, the Laplace approximation is used to approximate this analytically intractable joint posterior density. The sparseness assumption on the precision of
the latent Gaussian field ensures efficient computation \citep{rue2005}.
\subsection{Joint models as latent Gaussian models}
In this section, we will briefly show that the joint model is indeed
an LGM as defined in Section \ref{lgmsection}. The likelihood for the
survival submodel from Section \ref{survsec} is
\begin{equation*}
\pi_S(\pmb s|\pmb{Z},\pmb{\gamma})=\prod_{l=1}^{N}\pi_l(s|\pmb{Z},\pmb{\gamma})
=\prod_{l=1}^N f_l(s)^{c}[1-F_l(s)]^{1-c}
\label{longlik}
\end{equation*}
where $f_l(s)=h_l(s)\exp\left(-\int_0^s h_l(u)du\right)$ from
\eqref{survsub}. The likelihood for the longitudinal biomarker from
Section \ref{longsec} is
\begin{equation*}
\pi_L(\pmb{y}|\pmb{X},\pmb{\beta})=\prod_{i=1}^{N_L} g(y_i).
\label{survlik}
\end{equation*}
The associated linear predictors are
\begin{eqnarray}
\eta^S&=&\pmb\gamma^T\pmb Z+\pmb{\nu}\circ(w_{l},v_{l}s)+m_l\notag\\
\eta^L&=&\alpha(t)+\pmb\beta^T\pmb X+u.\label{jointexample}
\end{eqnarray}
Note that each longitudinal observation is connected to the latent
field through the linear predictor $\eta^L$ in \eqref{longsub} and each
survival time through the linear predictor $\eta^S$ in \eqref{survsub}.
Now consider the hyperparameters
$\pmb\theta=\{\pmb\theta_l,\pmb\theta_S,\pmb\theta_m,\tau_\alpha,\pmb\nu,\pmb
Q_u,\rho\}$, the latent field
$\pmb{\mathcal{X}}=(\pmb\eta^L,\pmb\eta^S,\pmb\beta,\pmb\gamma,\pmb{v},\pmb{w},\pmb{m},\pmb{\alpha})$
conditioned on $\pmb\theta$ has a Gaussian distribution with precision
matrix $\pmb Q(\pmb\theta )$. From this construction of the latent
field, the observations $(\{y_{ijk}\},\{s_l,c_l\})$ have a complete
likelihood that depends on $\pmb{\mathcal{X}}$ only through one of the
linear predictors, $(\{\pmb\eta^L\},\{\pmb\eta^S\})$. Finally the
hyperparameters $\pmb\theta$ are assigned a prior distribution
$\pi(\pmb\theta)$. Hence, the partially linear joint model as
presented here, is an LGM and we can thus use the INLA methodology for
efficient Bayesian inference. A simple example using a simulated dataset with a non-linear longitudinal trend is available in Appendix \ref{appendixjoint} for illustration purposes.
\section{Example: PSA study}\label{applicationsection}
In prostate cancer studies, Prostate-specific Antigen (PSA) has been
identified as a biomarker for the status of prostate cancer. High
levels of PSA are indicative of increased risk of prostate cancer or
recurrence. Radiation therapy is a common course of treatment often
prescribed for patients with prostate cancer. If successful, the PSA
levels are expected to drop and remain at a low level. On the
contrary, PSA levels will drop initially and then rise again
\citep{zagars1995}. Hence, it is desirable to develop a flexible model
to capture this nonlinear temporal trend of PSA levels per patient. A
challenge is that the follow-up of PSA is stopped when salvage hormone
therapy is initiated, which is known to change the PSA level or when
prostate cancer recurred, resulting in possibly informative drop-out.
If this informative drop-out is unaccounted for, it can lead to
considerable bias in the PSA trajectory estimation. The objective of
this analysis is thus, to identify the trajectory of post-radiation
PSA change, while correctly accounting for the informative drop-out.
More details about the clinical impact of such a study can be found in
\cite{proust2009}.
\subsection{Partially linear joint model}
In \cite{kim2017} a partially linear joint model is proposed utilizing
a spline component to capture the non-linear trajectory. They
developed a procedure using BIC for the knot selection needed to fit
this spline. In this paper, however, we use the Bayesian
smoothing spline model as presented in Section \ref{splines} to
capture the non/semi-linear trajectory of PSA levels using INLA. This
approach facilitates a computationally efficient and user-friendly
implementation of these types of models, and is stable with regards to
the knot set. This approach produces reproducible and reliable
results. The joint model under consideration in this application from
\eqref{jointexample}, is:
\begin{eqnarray}
\log(\text{PSA})(t)&=&\eta^L+\epsilon(t)\notag \\
h(s)&=&h_0(s)\exp(\eta^S)\notag
\end{eqnarray}
where $\epsilon\sim N(0,\sigma^2_\epsilon)$. We assume a Weibull
baseline hazard function, hence $h_0(s)=\kappa s^{\kappa -1}$ which is
non-constant over time. The exponential baseline hazard function with
constant hazard can be achieved as a special case when $\kappa=1$. In
\cite{kim2017} the functional form $\pmb{\nu}\circ(w,vs)=\nu(w+vs)$ as
in \eqref{surv1} is used, which is the most commonly used form of
shared effects in joint models. This form has now been included in the
\textit{R-INLA} package using the model ``intslope''. To facilitate a more
general structure, we also consider
$\pmb{\nu}\circ(w,vs)=\nu_1w+\nu_2(vs)$ as in \eqref{surv2}, hence the
linear predictors are formulated as:
\begin{eqnarray}
\eta^L&=&\alpha(t)+\beta \log(\text{PSA}_\text{base})+w+vt\notag\\
\eta^S_{1}(s)&=&\gamma \log(\text{PSA}_\text{base})+\nu(w+vs)\label{surv1}\end{eqnarray}
and
\begin{eqnarray}
\eta^L&=&\alpha(t)+\beta \log(\text{PSA}_\text{base})+w+vt\notag\\
\eta^S_{2}(s)&=&\gamma \log(\text{PSA}_\text{base})+\nu_1w+\nu_2(vs)\label{surv2}
\end{eqnarray}
where
$$\begin{bmatrix} w \\ v \end{bmatrix}\sim
N \begin{pmatrix} \begin{bmatrix}0 \\ 0 \end{bmatrix}, \begin{bmatrix}
\sigma^2_w & \rho\sigma_w\sigma_v\\ \rho\sigma_w\sigma_v &
\sigma^2_v \end{bmatrix} \end{pmatrix}$$ and $\alpha(t)$ is a
second order random walk model as described in Section \ref{splines}.
Within the INLA framework, the number of groups for the local spline
should be specified. This number should have minimal influence on the
estimated result due to the construction presented in Section
\ref{splines}. On the contrary, it is well-known that the number of
knots greatly influence the estimated spline using more traditional
methods as in \cite{kim2017}. This conjecture is further discussed in
the presentation of the results for the dataset under discussion.
\subsection{Bayesian inference}
The linear predictors under consideration \eqref{jointexample}
contains various components of the latent field and also some
hyperparameters. The prior for the latent field is assumed to be
multivariate Gaussian. The regression coefficients for the fixed
effects are assigned vague independent Gaussian priors. Following
\cite{simpson2017}, we assign penalized complexity priors for the
hyperparameters in the model as far as possible.
The random walk order two model $\alpha(t)$ in \eqref{rw2} has one
hyperparameter, $\tau_\alpha$, which is assigned a penalized complexity prior
with prior density
$$\pi(\tau_\alpha)=\lambda_\alpha\tau_\alpha^{-\frac{3}{2}}
\exp(-\lambda_\alpha\tau_\alpha^{-\frac{1}{2}})$$ such that
$P(\frac{1}{\sqrt{\tau_\alpha}}>1)=0.01$, i.e.\
$\lambda_{\alpha}=-\ln(0.01)$, which is the Gumbel type 2 distribution.
The bivariate random effect $\begin{bmatrix} w \\ v \end{bmatrix}$
assumes a bivariate Gaussian prior with covariance matrix
$\tau^{-1}_{w,v}\pmb{R}^{-1},\pmb{R}\geq\pmb{0}$ with a penalized
complexity prior for $\tau_{w,v}$ as the Gumbel type 2 distribution
with parameter $-\log(0.01)$, as well. \\ \\The motivation for employing
penalized complexity priors for the precision hyperparameters are
founded in the fact that the usual priors for the variance components,
i.e. independent inverse-gamma priors as in \cite{huang2018}, overfits
and cannot contract to the simpler model in which the respective model
component has trivial variance. This is especially important in the
case of joint models since the effect of overfitting is exacerbated by
the influence of the shared random effect on all the linear
predictors.
\subsection{Results}\label{secpsaresults}
The two aforementioned models \eqref{surv1} and \eqref{surv2} were
both fitted using \textit{R-INLA} (for more details see the Appendix)
and \eqref{surv1} was also fitted using \textit{jplm} for comparison
purposes (the code is available in Web Appendix A). Firstly, the estimated post-treatment PSA trajectories are
presented in Figure \ref{fignonlin}. It is apparent that the number of
knots changes the shape of the estimated trajectory to a large extent.
For a low number of knots, the estimated trajectory is strictly convex
but as the number of knots increases, the trajectory contains concave
and convex parts. This challenge is not present in the trajectories
estimated using \textit{R-INLA}. Even for differing number of groups and
knot placement, the shape of the estimated trajectory is preserved. It
is clear from Figure \ref{fignonlin} that the trajectories estimated
from \textit{R-INLA} are supported by the data to a larger extent than
some of the trajectories estimated from \textit{jplm}. This behaviour
of a spline is inherent in the formulation and construction of the
spline model as a combination of basis functions with associated
random weights, as opposed to the formulation as presented in Section
\ref{splines} and implemented in \textit{R-INLA}.
\begin{figure}
\caption{Estimated non-linear post PSA trajectory using
\textit{R-INLA}}
\label{fignonlin}
\end{figure}
Secondly, the resulting estimated joint model is presented. The
results are summarized in Table \ref{tableres1}. It is evident that
the two estimation procedures provide similar results although the
uncertainty from using \textit{R-INLA} is higher. This is an expected
result from a Bayesian viewpoint.
\begin{table}[h]
\begin{tabular}{|c||c|c||c|c|}
\hline
\multirow{2}{6em}{\textbf{Parameter}} & \textbf{Posterior Mode} & \textbf{Posterior SD} & \textbf{Point estimate} & \textbf{Standard error} \\ \cline{2-5}
& \multicolumn{2}{c||}{\textbf{Joint model 1 - INLA}} & \multicolumn{2}{c|}{\textbf{Joint model 1 - jplm}} \\ \hline
$\beta$ & $0.450$ & $0.061$& \cellcolor{gray!10} $0.443$ & \cellcolor{gray!10}$0.004$ \\
$\gamma$ & $0.743$ & $0.198$ & \cellcolor{gray!10}$0.742$ &\cellcolor{gray!10} $0.041$\\
$\sigma^2_\epsilon$ & $0.091$ & $0.005$ & \cellcolor{gray!10}$0.089$ &\cellcolor{gray!10} $0.003$\\
$\sigma^2_\alpha$ & $0.226$ & $0.143$ &\cellcolor{gray!10} NA &\cellcolor{gray!10} NA\\
$\sigma^2_w$ & $0.342$ & $0.053$ & \cellcolor{gray!10}$0.328$ &\cellcolor{gray!10} $0.003$\\
$\sigma^2_v$ & $0.216$ & $0.054$& \cellcolor{gray!10}$0.181$ &\cellcolor{gray!10} $0.002$ \\
$\rho$ & $-0.131$ &$0.149$&\cellcolor{gray!10} $-0.172$ & \cellcolor{gray!10}$0.025$ \\
$\nu$ & $0.921$ & $0.137$ &\cellcolor{gray!10} $1.122$ &\cellcolor{gray!10} $0.121$ \\
$\kappa$ & $0.806$ & $0.092$ &\cellcolor{gray!10} NA &\cellcolor{gray!10} NA \\\hline
\end{tabular}
\caption{Results for the PSA dataset using \textit{R-INLA} and
\textit{jplm} for the specification in \eqref{surv1}}
\label{tableres1}
\end{table}
It is quite clear from Table \ref{tableres1} that the hazard of
informative dropout is correlated with the longitudinal PSA biomarker
since $\nu=0.919$ with $95\%$ credible interval $(0.645;1.193)$. This
result confirms that the joint model approach is supported by the data
and should be preferred to the separate models. The structure of the
association term as in \eqref{surv1} is quite restrictive but has been
used extensively. We also investigate the possibility of changing the
association structure to \eqref{surv2} and present the results in
Table \ref{tableres2}.
\begin{table}[h]
\begin{tabular}{|c||c|c|}
\hline
\multirow{2}{6em}{\textbf{Parameter}} & \textbf{Posterior Mode} & \textbf{Posterior SD} \\ \cline{2-3}
& \multicolumn{2}{c|}{\textbf{Joint model 2 - INLA}} \\ \hline
$\beta$ & $0.389$ & $0.098$ \\
$\gamma$ & $0.698$ & $0.193$ \\
$\sigma^2_\epsilon$ & $0.125$ & $0.007$ \\
$\sigma^2_\alpha$ & $0.166$ & $0.105$ \\
$\sigma^2_w$ & $0.201$ & $0.032$ \\
$\sigma^2_v$ & $0.365$ & $0.203$ \\
$\rho$ & $-0.431$ & $0.257$ \\
$\nu_1$ & $1.025$ & $0.270$ \\
$\nu_2$ & $0.562$ & $0.308$ \\
$\kappa$ & $0.817$ & $0.093$ \\ \hline
\end{tabular}
\caption{Results for the PSA dataset using \textit{R-INLA} for the
specification in \eqref{surv2}}
\label{tableres2}
\end{table}
In comparison, the results between models 1 and 2 are very similar for
the fixed effects and variance hyperparameters. The interesting
difference between the two models as presented in Tables
\ref{tableres1} and \ref{tableres2}, is that the values of $\nu_1$ and
$\nu_2$ are quite different from each other, and from $\nu$ in Table
\ref{tableres1}. This implies that the structure of the shared effect
presented in \eqref{surv1} is not supported by the data in this
example and the more flexible model as in \eqref{surv2} should rather
be used. The model in \eqref{surv2} is not available in most of the
packages mentioned throughout the paper, but is feasibly implemented
in the \textit{R-INLA} package.\\ \\
In Figure \ref{figpat}, we present some of the longitudinal
trajectories and survival curves (or in the context of this
application, the non-dropout probabilities) for individual patients
based on \eqref{surv2}. The vertical line indicates the time at which
the dropout (solid) or censoring (dashed) occurred. The stepwise curve
is the Kaplan-Meier estimate of the survival curve for all patients,
the solid curve indicates the estimated mean survival curve from our
model and the dashed curve is the patient-specific survival curve.
\begin{figure}
\caption{Post PSA trajectories and Survival functions for specific
patients}
\label{figpat}
\end{figure}
\noindent The association between the PSA biomarker and the risk of
dropout is evident from Figure \ref{figpat}. Patients with the
distinctive decrease-increase behaviour are at higher risk of dropout
(lower survival function) and most eventually dropped out as indicated
by the solid vertical line. We have included two patients (8 and 15)
whose dropout times are censored but based on their PSA biomarker
levels, their survival functions are higher than the mean survival and
they would thus be considered for non-dropout. On the contrary,
patients 37 and 38 display the typical decrease-increase behaviour and
their survival functions indicate the higher probability of dropout.
The patient specific results can be used for dynamic predictions to
identify those patients who are most at risk of dropout, amongst other
things. The appropriateness of our proposed model is clear from the
detailed discussion of this specific example. The method can be
applied to various other datasets usually used in joint model analysis
using the \textit{R-INLA} package.
\section{Conclusion}\label{conclusionsection}
Joint models are one of the most common approaches used to analyze clinical time-to-event data. Consequently, various extensions and generalizations have been
developed, each with its own implementation structure. There are
various R packages available as mentioned, from both frequentist and
Bayesian viewpoints. In this paper, however, we showed that any joint
model with linear association structure, is indeed a simple latent
Gaussian model and all tools for LGM's can thus be applied in the
context of joint models. One of the most established and popular tools
for LGM's, is the INLA framework embedded in the \textit{R-INLA}
package. This affords the use of complicated joint models with relative ease, even as the models evolve in complexity. Model based evaluation of the assumptions, like the assumed association structure or non-linearity, is done with little effort within the \textit{R-INLA} framework since a multitude of joint model structures can be facilitated in this framework. \\ \\
As an example, we focused on a partially linear joint model with a
spline component to accommodate for non-linear longitudinal
trajectories. The usual splines approach is based on a set of
basis functions and corresponding regression coefficients; from a
Bayesian perspective, priors are usually assumed for these regression
coefficients. Instead, we proposed an alternative approach that assumes priors
for the spline itself. This results in a spline component where
the user is relieved of the burden of knot selection. Subsequently, we
assumed penalized complexity priors to achieve shrinkage in the joint
model. The applicability of this proposal was illustrated using data
from a Prostate cancer study using PSA levels and time to dropout.\\ \\
Ultimately, the developments presented in this paper grant the use of complex components in joint models, such as non-linear or spatial components, not readily available for practical use in most other R packages. The proposed approach is useful and practical for practitioners and
statisticians alike, using the \textit{R-INLA} package for efficient
implementation.
\section*{Supplementary Materials}
Web Appendix A, referenced in Section~\ref{secpsaresults}, is available with
this paper at the Biometrics website on Wiley Online
Library.\vspace*{-8pt}
\section{Appendix}
\subsection{Computational considerations for joint models in using INLA}\label{appendixjoint}
The likelihood of a joint model basically consists of two types of likelihoods and this can be facilitated in the INLA framework. It is essential to construct the response matrix and the covariate matrices correctly for the estimation procedure. For the purpose of this paper, we will present only the case where the joint model consists of longitudinal and survival submodels. This can be extended to include more marginal submodels in the case of multiple endpoint modeling. \\ \\
Within the context of this paper, consider the following structured predictors of the longitudinal and survival submodels, respectively:
\begin{eqnarray}
\eta^L_{ijk}&=&\alpha(t_{ijk})+\pmb{\beta}^{T}\pmb{X}_{ijk}+w_{ij}+v_{ij}t_{ijk}\notag\\
\eta^S_{l}(s)&=&\pmb{\gamma}^{T}\pmb{Z}_{l}+\pmb{\nu}\circ(w_{l},v_{l}s)\label{jointexample1}
\end{eqnarray}
Consider the case where the data consists of $N_i,i=1,...,N$ observations for each of the $N$ individuals, so that in total there are $N_L$ longitudinal observations and correspondingly $N_S=N$ event times and censoring indicators $(s_i,c_i),i=1,...,N$. The data is then composed as a list in which each variable consists of $N_L+N_S$ elements. To achieve this, we include zeros for fixed effects if the covariate is not included in that specific submodel and NA's for the random effects. In the case of \eqref{jointexample1}, the new response is defined as a list of the $y_{ijk}$ and $(s_i, c_i)$. The fixed effect covariates are constructed as $(\pmb{X},\pmb{0}_{1,...,N_S})$ and $(\pmb{0}_{1,...,N_L},\pmb{Z})$ while the random effects are constructed as $(\pmb{\alpha},\pmb{NA}_{1,...,N_S})$. \\ \\
The main contribution in this area is the estimation of $\pmb{\alpha}$. Most of the commonly used approaches to estimate the non-linear trend involve the use of knots. This method was also used in \cite{kim2017}. In this paper we propose the use of a time-continuous spline model manifested as a second-order random walk presented in Section \ref{splines}.
\subsection{Example: Simulated joint model}\label{appsim}
In this example we simulated data from the following scenario:
\begin{eqnarray*}
\eta^L(t)=t^2+v_i\notag\\
\eta^S=\beta_Sv_i\notag
\end{eqnarray*}where $v_i\sim N(0,\sigma^2_v)$ are the subject-specific random effects that are shared in this joint model. The aim of this example is to illustrate the practical method to fit a joint model in \textit{R-INLA}. The R code is available at \url{http://www.r-inla.org/examples/case-studies/van-niekerk-bakka-and-rue-2019}.
\subsection{Example: PSA study - computational framework information}\label{apppsa}
The R code used to obtain the results as presented in Section \ref{applicationsection} is available at \url{http://www.r-inla.org/examples/case-studies/van-niekerk-bakka-and-rue-2019}.\\
The computational time needed was $83.2$ and $15.7$ seconds, respectively, for models 2 and 1 fitted using the \textit{INLA} package.
The computer used is an Apple Macbook Pro i5 3.1GHz with 16GB 2133 MHz LPDDR3.
\end{document} |
\begin{document}
\leftline{The paper will appear in International Journal of Number
Theory.}
\par\quad\par\quad
\centerline {\bf On the number of representations of $n$ as a}
\centerline {\bf \qq \qq linear combination of four triangular
numbers}
$$\quad$$
\centerline{Min Wang$^1$ and Zhi-Hong Sun$^2$}
\par\quad\newline
\centerline{$\ ^1$School of Mathematical Sciences, Soochow
University,}
\centerline{Suzhou, Jiangsu 215006,
P.R. China}
\centerline{Email: [email protected]}
\par\quad\newline
\centerline{$\ ^2$School of Mathematical Sciences, Huaiyin Normal
University,} \centerline{Huaian, Jiangsu 223001, P.R. China}
\centerline{Email: [email protected]} \centerline{Homepage:
http://www.hytc.edu.cn/xsjl/szh}
\abstract{Let $\Bbb Z$ and $\Bbb N$ be the set of integers
and the set of positive integers, respectively. For
$a,b,c,d,n\in\Bbb N$ let $t(a,b,c,d;n)$ be the number of
representations of $n$ by $ax(x-1)/2+by(y-1)/2+cz(z-1)/2
+dw(w-1)/2$ $(x,y,z,w\in\Bbb Z)$. In this paper we obtain explicit
formulas for $t(a,b,c,d;n)$ in the cases
$(a,b,c,d)=(1,2,2,4),\ (1,2,4,4),\ (1,1,4,4),\ (1,4,4,4)$,
$(1,3,9,9),\ (1,1,3,9)$, $(1,3,3,9)$,
$(1,1,9,9),\ (1,9,9,9)$ and $(1,1,1,9).$
\par\quad
\newline Keywords: representation; triangular number
\newline Mathematics Subject Classification 2010: Primary 11D85,
Secondary 11E25}
\endabstract
\let\thefootnote\relax \footnotetext {The second author is the corresponding
author.}
\section*{1. Introduction}
\par\quad Let $\Bbb Z$ and $\Bbb N$ be the set of integers
and the set of positive integers, respectively.
For $n \in \Bbb N$ let
$$\sigma(n)=\sum_{d \mid n,d\in\Bbb N}d.$$ For convenience
we define $\sigma(n)=0$ for $n\notin \Bbb N$. For $a,b,c,d\in\Bbb N$ and $n\in\Bbb N \cup \{0\}$ set
$$N(a,b,c,d;n)=\big|\{(x,y,z,w)\in \Bbb Z^4\ |\ n=ax^2+by^2+cz^2+dw^2
\}\big|$$ and $$t(a,b,c,d;n)=\Big|\Big\{(x,y,z,w)\in \Bbb Z^4\ |\ n\
=a\frac{x(x-1)}2+ b\frac{y(y-1)}2+c\frac{z(z-1)}2+d\frac{w(w-1)}2\Big\}\Big|.$$
The numbers $\frac{x(x-1)}2\ (x\in\Bbb Z)$ are called triangular
numbers.
\par In 1828 Jacobi showed that
$$N(1,1,1,1;n)=8\sum_{d\mid n,4\nmid d}d.\tag 1.1$$
In 1847 Eisenstein (see [D]) gave formulas for $N(1,1,1,3;n)$ and
$N(1,1,1,5;n)$. From 1859 to 1866 Liouville made about 90
conjectures on $N(a,b,c,d;n)$ in a series of papers. Most
conjectures of Liouville have been proved. See [A1, A2,
AALW1-AALW4], Cooper's survey paper [C], Dickson's historical
comments [D] and Williams' book [W2].
\par
Let
$$t'(a,b,c,d;n)=\Big|\Big\{(x,y,z,w)\in \Bbb N^4\ |\ n=a\frac{x(x-1)}2+
b\frac{y(y-1)}2+c\frac{z(z-1)}2+d\frac{w(w-1)}2\Big\}\Big|.$$ As $\frac12
a(a+1)=\frac12(-a-1)(-a-1+1)$ we have
$$t(a,b,c,d;n)=16t'(a,b,c,d;n).$$
In [L] Legendre stated that
$$t'(1,1,1,1;n)=\sigma(2n+1).\tag 1.2$$
In 2003, Williams [W1] showed that
$$t'(1,1,2,2;n)=\frac 14\sum_{d\mid 4n+3}\big(d-(-1)^{\frac{d-1}2}\big).$$
For $a,b,c,d\in\Bbb N$ with $1<a+b+c+d\le 8$ let
$$C(a,b,c,d)=16+4i_1(i_1-1)i_2+8i_1i_3,$$
where $i_j$ is the number of elements in $\{a,b,c,d\}$ which are
equal to $j$. When $1<a+b+c+d\le 7$, in 2005 Adiga, Cooper and Han
[ACH] showed that
$$C(a,b,c,d)t'(a,b,c,d;n)=N(a,b,c,d;8n+a+b+c+d).\tag 1.3$$ When
$a+b+c+d=8$, in 2008 Baruah, Cooper and Han [BCH] proved that
$$C(a,b,c,d)t'(a,b,c,d;n)=N(a,b,c,d;8n+8)-N(a,b,c,d;2n+2).\tag 1.4$$
In 2009,
Cooper [C] determined $t'(a,b,c,d;n)$ for $(a,b,c,d)=(1,1,1,3),\
(1,3,3,3),$ $(1,2,2,3),\ (1,3,6,6),\ (1,3,4,4),\ (1,1,2,6)$ and
$(1,3,12,12)$.
\par In this paper, by using some formulas for $N(a,b,c,d;n)$
in [A1, A2, AALW1-AALW4] and Ramanujan's theta functions we obtain
explicit
formulas for $t(a,b,c,d;n)$ in the cases
$(a,b,c,d)=(1,2,2,4),\ (1,2,4,4),\ (1,1,4,4),\ (1,4,4,4)$,
$(1,3,3,9)$,
$(1,1,9,9),\ (1,9,9,9)$, $(1,1,1,9)$, $(1,3,9,9)$ and $(1,1,3,9).$
\par For $m,n\in \Bbb N$ with $2\mid m$ and $2\nmid n$ we define
$$S_m(n) =\sum\Sb(r,s)\in \Bbb Z \times
\Bbb Z\\n=r^2+m{s^2}\\r\equiv 1\pmod 4
\endSb r.$$
As ${r^2}+2{s^2}\equiv 0,1,2,3,4,6\pmod 8$ for $r,s\in\Bbb Z$, we see
that $S_2(n)=0\qtq{for} n\equiv 5,7\pmod 8.$ Also, ${r^2}+4{s^2}\equiv
0,1\pmod 4$ for $r,s\in\Bbb Z$ implies that $S_4(n)=0\qtq{for} n\equiv
2,3\pmod 4.$ In this paper, following [AALW4] we also define
$$S(n)=\sum_{d\mid n} \frac nd\Ls 2d,$$ where $\sls am$ is the
Legendre-Jacobi-Kronecker symbol.
\section*{2. Formulas for $t(1,3,9,9;n)$ and $t(1,1,3,9;n)$}
\par Ramanujan's theta functions $\varphi(q)$ and $\psi(q)$ are defined
by
$$\varphi(q)=\sum_{n=-\infty}^{\infty}q^{n^2}=1+2\sum_{n=1}^{\infty}
q^{n^2}\qtq{and} \psi(q)=\sum_{n=0}^{\infty}q^{n(n+1)/2}\ (|q|<1).$$
It is evident that for $|q|<1$,
$$\sum_{n=0}^{\infty}N(a,b,c,d;n)q^{n}=\varphi(q^a)
\varphi(q^b)\varphi(q^c)\varphi(q^d),$$
$$\sum_{n=0}^{\infty}t'(a,b,c,d;n)q^{n}=\psi(q^a)\psi(q^b)
\psi(q^c)\psi(q^d).$$ From [BCH, Lemma 4.1] we know that for
$|q|<1$,
$$\varphi(q)=\varphi(q^4)+2q\psi(q^8)\tag 2.1$$
and
$$\psi(q)\psi(q^3)=\varphi(q^6)\psi(q^4)+q\psi(q^{12})
\varphi(q^2).\tag
2.2$$
\pro{Theorem 2.1} Let $n\in\Bbb N$. Then
$$N(1,3,9,9;8n+22)=40t'(1,3,9,9;n).$$
\endpro
Proof. By (2.1), for $|q|<1$ we have
$$\varphi(q^k)=\varphi(q^{4k})+2q^k\psi(q^{8k})
=\varphi(q^{16k})+2q^{4k}\psi(q^{32k})+2q^k\psi(q^{8k}).$$ Thus, for
$|q|<1$ we have
$$\align&\sum_{n=0}^{\infty}N(1,3,9,9;n)q^{n}\\&=
\varphi(q)\varphi(q^3)\varphi(q^9)^2
\\&=(\varphi(q^{16})+2q^4\psi(q^{32})+2q\psi(q^8))(\varphi(q^{48})+2q^{12}\psi(q^{96})+
2q^3\psi(q^{24}))\\&\quad\times(\varphi(q^{144})+2q^{36}\psi(q^{288})+
2q^9\psi(q^{72}))^2
\\&=\big(\varphi(q^{16})\varphi(q^{48})+4q^{16}\psi(q^{32})\psi(q^{96})
+2q\psi(q^{8})\varphi(q^{48})+2q^3 \psi(q^{24})\varphi(q^{16})
\\&\quad+2q^4 \psi(q^{32})\varphi(q^{48})
+4q^4\psi(q^8)\psi(q^{24})
+2q^{12}\psi(q^{96})\varphi(q^{16})+4q^7 \psi(q^{24})\psi(q^{32})
\\&\quad+4q^{13} \psi(q^8)\psi(q^{96})\big)
\big(\varphi(q^{144})^2+4q^{72}\psi (q^{288})^2
+4q^{36}\varphi(q^{144})\psi(q^{288})+4q^{18}\psi(q^{72})^2
\\&\quad+4q^9\varphi(q^{144})\psi(q^{72})
+8q^{45}\psi(q^{288})\psi(q^{72})\big).
\endalign$$
Since
$$\varphi(q^{8k})=1+2\sum_{n=1}^{\infty}
q^{8kn^2}\qtq{and} \psi(q^{8k})=\sum_{n=0}^{\infty}q^{8kn(n+1)/2}\
(|q|<1),$$ we see that for any nonnegative integers $k_1,k_2,m_1$
and $m_2$,
$$\varphi(q^{8k_1})^{m_1}\psi(q^{8k_2})^{m_2}
=\sum_{n=0}^{\infty}b_nq^{8n}\quad (|q|<1).$$
Now from the above we deduce that for $|q|<1$,
$$\align &\sum_{n=0}^{\infty}N(1,3,9,9;8n+6)q^{8n+6}
\\&=2q\psi(q^{8})\varphi(q^{48})\cdot
8q^{45}\psi(q^{288})\psi(q^{72})+2q^4 \psi(q^{32})\varphi(q^{48})
\cdot 4q^{18}\psi(q^{72})^2
\\&\quad+4q^4\psi(q^8)\psi(q^{24})\cdot
4q^{18}\psi(q^{72})^2+2q^{12}\psi(q^{96})\varphi(q^{16})
\cdot 4q^{18}\psi(q^{72})^2
\\&\quad+4q^{13} \psi(q^8)\psi(q^{96})
\cdot 4q^9\varphi(q^{144})\psi(q^{72})
\\&=16q^{46}\varphi(q^{48})\psi(q^8)\psi(q^{72})\psi(q^{288})
+8q^{22}\varphi(q^{48})\psi(q^{32})\psi(q^{72})^2
+16q^{22}\psi(q^8)\psi(q^{24})\psi(q^{72})^2
\\&\quad+8q^{30}\varphi(q^{16})\psi(q^{96})\psi(q^{72})^2
+16q^{22}\varphi(q^{144})\psi(q^8)\psi(q^{72})\psi(q^{96})
\endalign$$
and so
$$\align&\frac 18\sum_{n=0}^{\infty}N(1,3,9,9;8n+6)q^{8n-16}
\\&=2q^{24}\varphi(q^{48})\psi(q^8)\psi(q^{72})\psi(q^{288})
+\varphi(q^{48})\psi(q^{32})\psi(q^{72})^2
+2\psi(q^8)\psi(q^{24})\psi(q^{72})^2
\\&\quad+q^8\varphi(q^{16})\psi(q^{96})\psi(q^{72})^2
+2\varphi(q^{144})\psi(q^8)\psi(q^{72})\psi(q^{96}).
\endalign$$
Replacing $q$ with $q^{1/8}$ in the above we obtain
$$\align &\frac 18\sum_{n=0}^{\infty}N(1,3,9,9;8n+22)q^n
\\&=\frac 18\sum_{n=0}^{\infty}N(1,3,9,9;8n+6)q^{n-2}
\\&=2q^3\varphi(q^{6})\psi(q)\psi(q^{9})\psi(q^{36})
+\varphi(q^{6})\psi(q^{4})\psi(q^{9})^2
+2\psi(q)\psi(q^{3})\psi(q^{9})^2
\\&\quad+q\varphi(q^{2})\psi(q^{12})\psi(q^{9})^2
+2\varphi(q^{18})\psi(q)\psi(q^{9})\psi(q^{12}).
\endalign$$
Now applying (2.2) we get
$$\align&\frac 18\sum_{n=0}^{\infty}N(1,3,9,9;8n+22)q^n
\\&=2\psi(q)\psi(q^3)\psi(q^9)^2
+\psi(q^9)^2\psi(q)\psi(q^3)+2\psi(q)\psi(q^9)\psi(q^3)\psi(q^9)
\\&=5\psi(q)\psi(q^3)\psi(q^9)^2
=5\sum_{n=0}^{\infty}t'(1,3,9,9;n)q^n.\endalign$$ Comparing the
coefficients of $q^n$ in the above expansion we obtain the result.
$\square$
\par\quad\par For $n\in \Bbb N$ following [AALW3] we define
$$\aligned &A(n)=\sum_{d\mid n}d\Ls{12}{n/d},
\quad B(n)=\sum_{d\mid n}d\Ls{-3}{d}\Ls{-4}{n/d},
\\&C(n)=\sum_{d\mid n}d\Ls{-3}{n/d}\Ls{-4}{d}
\qtq{and}D(n)=\sum_{d\mid n}d\Ls{12}{d}.\endaligned$$ Let $(a,b)$
be the greatest common divisor of integers $a$ and $b$. Suppose that
$n\in\Bbb N$ and $n=2^{\alpha}3^{\beta}n_1$, where $\alpha$ and $\beta$ are
non-negative integers, $n_1\in\Bbb N$ and
$(n_1,6)=1$. From [AALW3, Theorem 3.1] we know that
$$\aligned
&A(n)=2^{\alpha}3^{\beta}A(n_1), \quad
B(n)=(-1)^{\alpha+\beta}2^{\alpha}\Ls{-3}{n_1}A(n_1),
\\&C(n)=(-1)^{\alpha+\beta+\frac{n_1-1}2}3^{\beta}A(n_1)
\qtq{and} D(n)=\Ls 3{n_1}A(n_1).\endaligned\tag 2.3$$
\pro{Lemma 2.1 ([A1, Theorem 1.2])} Let $n\in\Bbb N$. Then
$$\align N(1,3,9,9;n)=\cases 2A(n/3)+2B(n/3)-C(n/3)-D(n/3)
&\text{if $n\equiv0\pmod3$,}
\\2A(n)-\frac23B(n)+C(n)-\frac13D(n)&\text{if $n\equiv1\pmod3$,}
\\0&\text{if $n\equiv2\pmod3$.}\endcases\endalign$$
\endpro
\pro{Theorem 2.2} Let $n\in\Bbb N$. Then
$$\alphalign &t(1,3,9,9;n)\\&=\cases 0&\text{if $3\mid n-2$},
\\\frac 43\sum_{d\mid 4n+11}d\sls 3d&\text{if $3\mid n$},
\\2\binomig(3^{\binometa-1}\sls 3{n_1}-1\binomig)\sum_{d\mid n_1}d\sls 3d
&\text{if $3\mid n-1$
and $4n+11=3^{\binometa}n_1$ $(3\nmid n_1)$}.
\equivndcases\equivndalign$$
\equivndpro
Proof. By Theorem 2.1,
$$t(1,3,9,9;n)=16t'(1,3,9,9;n)=\frac 25N(1,3,9,9;8n+22).$$
Now applying Lemma 2.1 and (2.3) we deduce that
$$\alphalign t(1,3,9,9;n)=\cases 0&\text{if $3\mid n-2$},
\\\frac 43A(4n+11)&\text{if $3\mid n$},
\\2(3^{\binometa-1}-\sls 3{n_1})A(n_1)&\text{if $3\mid n-1$
and $4n+11=3^{\binometa}n_1$ $(3\nmid n_1)$}.
\equivndcases\equivndalign$$
To see the result, we note that
$$A(m)=\sum_{d\mid m}d\ls{12}m\ls{12}d=\ls 3m\sum_{d\mid m}
d\ls 3d\quadtq{for $m\in\Bbb N$ with $(6,m)=1$.}\square\textag 2.4$$
\pro{Lemma 2.2 ([A1, Theorem 1.3])} Let $n\in\Bbb N$. Then
$$\alphalign N(1,1,3,9;n)=\cases 2A(n/3)+2B(n/3)-C(n/3)-D(n/3)
&\text{if $n\equiv0\pmod3$,}
\\4A(n)-\frac43B(n)+2C(n)-\frac23D(n)&\text{if $n\equiv1\pmod3$,}
\\2A(n)-\frac23B(n)+C(n)-\frac13D(n)&\text{if $n\equiv2\pmod3$.}
\equivndcases\equivndalign$$
\equivndpro
\pro{Theorem 2.3} Let $n\in\Bbb
N$. Then
$$ t(1,1,3,9;n)=\cases -\frac 83\sum_{d\mid 4n+7}d\sls 3d
&\text{if $3\mid n$},
\\\frac 83\sum_{d\mid 4n+7}d\sls 3d &\text{if $3\mid n-1$},
\\2\binomig(3^{\binometa-1}\sls 3{n_1}-1\binomig)\sum_{d\mid n_1}d\sls 3d
&\text{if $3\mid n-2$ and $4n+7=3^{\binometa}n_1$ $(3\nmid
n_1)$}.\equivndcases$$
\equivndpro
Proof. Suppose $|q|<1$. Then clearly
$$\sum_{n=0}^{\infty}N(1,1,3,9;n)q^{n}
=\varphi(q)^2\varphi(q^3)\varphi(q^9).$$
Since $\varphi(q^k)=\varphi(q^{4k})+2q^k\psi(q^{8k})
=\varphi(q^{16k})+2q^{4k}\psi(q^{32k})+2q^k\psi(q^{8k})$ by (2.1),
we see that
$$\alphalign&\varphi(q)^2\varphi(q^3)\varphi(q^9)
\\&=
\binomig(\varphi(q^{16})+2q^4\psi(q^{32})+2q\psi(q^8)\binomig)^2
\binomig(\varphi(q^{48})+2q^{12}\psi(q^{96})+2q^3\psi(q^{24})\binomig)
\\&\quad\textimes
\binomig(\varphi(q^{144})+2q^{36}\psi(q^{288})+2q^9\psi(q^{72})\binomig)
\\&=\binomig(\varphi(q^{16})^2+4q^4\psi(q^{32})\varphi(q^{16})
+4q^8\psi(q^{32})^2
\\&\quad+4q^2\psi(q^{8})^2+4q\varphi(q^{16})\psi(q^{8})+8q^{5}
\psi(q^{8})\psi(q^{32})\binomig)
\\&\quad\textimes
\binomig(\varphi(q^{48})\varphi(q^{144})
+2q^{36}\varphi(q^{48})\psi(q^{288})
+2q^9\varphi(q^{48})\psi(q^{72})+2q^{12}\psi(q^{96})\varphi(q^{144})
\\&\quad\ +4q^{48}\psi(q^{96})\psi(q^{288})+ 4q^{21}\psi(q^{96})\psi(q^{72})+2q^3\psi(q^{24})
\varphi(q^{144}\binomig)
\\&\quad\ +4q^{39}\psi(q^{24})\psi(q^{288})
+4q^{12}\psi(q^{24})\psi(q^{72})\binomig).
\equivndalign$$
Note that $\varphi(q^{8k_1})^{m_1}\psi(q^{8k_2})^{m_2}
=\sum_{n=0}^{\infty}b_nq^{8n}$ for $|q|<1$ and any nonnegative
integers $k_1,k_2,m_1$ and $m_2$. From the above we deduce that
$$\alphalign&\sum_{n=0}^{\infty}N(1,1,3,9;8n+6)q^{8n+6}
\\&=4q\varphi(q^{16})\psi(q^{8})\cdot 4q^{21}\psi(q^{96})\psi(q^{72})
+8q^{5} \psi(q^{8})\psi(q^{32})\cdot 2q^9\varphi(q^{48})\psi(q^{72})
\\&\quad+4q^2\psi(q^{8})^2\cdot 2q^{36}\varphi(q^{48})\psi(q^{288})
+4q^2\psi(q^{8})^2\cdot 2q^{12}\psi(q^{96})\varphi(q^{144})
\\&\quad +4q^2\psi(q^{8})^2\cdot 4q^{12}\psi(q^{24})\psi(q^{72})
\equivndalign$$ and so
$$\alphalign&\frac 18\sum_{n=0}^{\infty}N(1,1,3,9;8n+6)q^{8n-8}
\\&=2q^{8}\varphi(q^{16})\psi(q^8)\psi(q^{72})\psi(q^{96})
+2\psi(q^8)\psi(q^{32})\varphi(q^{48})\psi(q^{72})
\\&\quad+q^{24}\psi(q^8)^2 \varphi(q^{48})\psi(q^{288})
+\psi(q^8)^2\psi(q^{96})\varphi(q^{144})
+2\psi(q^8)^2\psi(q^{24})\psi(q^{72}).\equivndalign$$
Replacing $q$ with
$q^{1/8}$ in the above we obtain
$$\alphalign
&\frac 18\sum_{n=0}^{\infty}N(1,1,3,9;8n+14)q^n
\\&=\frac 18\sum_{n=0}^{\infty}N(1,1,3,9;8n+6)q^{n-1}
\\&=2q\varphi(q^{2})\psi(q)\psi(q^{9})\psi(q^{12})
+2\psi(q)\psi(q^{4})\varphi(q^{6})\psi(q^{9})+q^3\psi(q)^2
\varphi(q^{6})\psi(q^{36})
\\&\quad+\psi(q)^2\psi(q^{12})\varphi(q^{18})
+2\psi(q)^2\psi(q^{3})\psi(q^{9}).
\equivndalign$$
Now applying (2.2) we get
$$\alphalign&\frac 18\sum_{n=0}^{\infty}N(1,1,3,9;8n+14)q^n
\\&=2\psi(q)^2\psi(q^3)\psi(q^9)
+\psi(q)^2\psi(q^3)\psi(q^9)+2\psi(q)^2\psi(q^3)\psi(q^9)
\\&=5\psi(q)^2\psi(q^3)\psi(q^9)
=5\sum_{n=0}^{\infty}t'(1,1,3,9;n)q^n =\frac
5{16}\sum_{n=0}^{\infty}t(1,1,3,9;n)q^n.\equivndalign$$ Comparing the
coefficients of $q^n$ we obtain
$$t(1,1,3,9;n)=\frac 25N(1,1,3,9;8n+14).$$
Now applying Lemma 2.2, (2.3) and (2.4) we deduce the result.
$\square$
\section*{3. Formulas for $t(1,1,4,4;n)$, $t(1,4,4,4;n)$, $t(1,2,2,4;n)$
and $t(1,2,4,4;n)$}
\pro{Lemma 3.1}
Let $a,b,c,d,n\in\Bbb N$. Then
$$\alphalign &t(a,b,c,d;n)\\&=N(a,b,c,d;8n+a+b+c+d)-N(a,b,c,4d;8n+a+b+c+d)
\\&\quad-N(a,b,4c,d;8n+a+b+c+d)+N(a,b,4c,4d;8n+a+b+c+d)
\\&\quad-N(a,4b,c,d;8n+a+b+c+d)+N(a,4b,c,4d;8n+a+b+c+d)
\\&\quad+N(a,4b,4c,d;8n+a+b+c+d)-N(a,4b,4c,4d;8n+a+b+c+d)
\\&\quad-N(4a,b,c,d;8n+a+b+c+d)+N(4a,b,c,4d;8n+a+b+c+d)
\\&\quad+N(4a,b,4c,d;8n+a+b+c+d)-N(4a,b,4c,4d;8n+a+b+c+d)
\\&\quad+N(4a,4b,c,d;8n+a+b+c+d)-N(4a,4b,c,4d;8n+a+b+c+d)
\\&\quad-N(4a,4b,4c,d;8n+a+b+c+d)+N(4a,4b,4c,4d;8n+a+b+c+d).\equivndalign$$
\equivndpro
Proof. It is clear that
$$\alphalign &t(a,b,c,d;n)\\&=\binomig|\{(x,y,z,w)\in
\Bbb Z^4\binomigm|n=a\frac{x(x-1)}2+
b\frac{y(y-1)}2+c\frac{z(z-1)}2+d\frac{w(w-1)}2\}\binomig|
\\&=\binomig|\{(x,y,z,w)\in \Bbb Z^4
\binomigm|8n+a+b+c+d\\&\quadq\quadq=a{(2x-1)^2}+b{(2y-1)^2}+c{(2z-1)^2}
+d{(2w-1)^2}\}\binomig|
\\&=\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{xyzw-1}\}\binomig|
\\&=\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{yzw-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{yzw-1}\}\binomig|
\\&=\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{zw-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+c{z^2}+d{w^2},
2\mid{zw-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{zw-1}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+4b{y^2}+c{z^2}+d{w^2},
2\mid{zw-1}\}\binomig|
\\&=\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+4c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+4c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+4c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+4b{y^2}+c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+4b{y^2}+4c{z^2}+d{w^2},
2\mid{w-1}\}\binomig|
\\&=\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+c{z^2}+d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+c{z^2}+4d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+4c{z^2}+d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+b{y^2}+4c{z^2}+4d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+c{z^2}+d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+c{z^2}+4d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+4c{z^2}+d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=a{x^2}+4b{y^2}+4c{z^2}+4d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+c{z^2}+d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+c{z^2}+4d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+4c{z^2}+d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+b{y^2}+4c{z^2}+4d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+4b{y^2}+c{z^2}+d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+4b{y^2}+c{z^2}+4d{w^2}\}\binomig|
\\&\quad-\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|n=4a{x^2}+4b{y^2}+4c{z^2}+d{w^2}\}\binomig|
\\&\quad+\binomig|\{(x,y,z,w)\in \Bbb Z^4\binomigm|
n=4a{x^2}+4b{y^2}+4c{z^2}+4d{w^2}\}\binomig|.
\equivndalign$$
Thus the result follows. $\square$
\par For general positive integer $n$, in a series of papers A. Alaca, S. Alaca, M.F. Lemire and
K.S. Williams (see [AALW1, AALW2, AALW4]) gave explicit formulas for
$N(a,b,c,d;n)$ in the cases $(a,b,c,d)=(1,1,4,4)$, $(1,1,16,16)$,
$(1,1,4,16)$, $(1,4,4,4)$, $(1,4,16,16)$, $(1,4,4,16)$, $(1,2,2,4)$,
$(1,2,2,16)$, $(1,2,16,16)$, $(1,2,4,16)$, $(1,2,4,8)$, $(1,2,4,4)$,
$(1,2,8,16)$, $(1,4,8,8)$ and $(1,8,8,16)$. Based on Lemma 3.1, we
need some special results in [AALW1, AALW2, AALW4] to prove our
formulas for $t(1,1,4,4;n)$, $t(1,4,4,4;n)$, $t(1,2,2,4;n)$ and
$t(1,2,4,4;n)$.
\pro{Lemma 3.2 ([AALW1,
Theorem 1.11])} Let $n\in\Bbb N$ with $n\equiv 2\pmod 4$. Then
$N(1,1,4,4;n)=4\sigma(n/2).$
\equivndpro
\pro{Lemma 3.3 ([AALW2, Theorems 4.6 and 4.8])} Let $n\in\Bbb N$ and
$n\equiv 2\pmod 8$. Then
$$N(1,1,16,16;n)=N(1,1,4,16;n)
=2\sigma\binomig(\frac n2\binomig)+2\ls 2{n/2}S_4(\frac n2).$$
\equivndpro
\pro{Theorem 3.1} Let $n\in\Bbb N$. Then
$$t(1,1,4,4;n)=2\binomig(\sigma(4n+5)+(-1)^nS_4(4n+5)\binomig).$$
\equivndpro
Proof. Since $x^2\not\equiv 2\pmod 4$ for $x\in\Bbb Z$, from Lemma 3.1 we
see that
$$\alphaligned &t(1,1,4,4;n)\\&=N(1,1,4,4;8n+10)-N(1,1,4,16;8n+10)
\\&\quad-N(1,1,16,4;8n+10)+N(1,1,16,16;8n+10)-N(1,4,4,4;8n+10)
\\&\quad+N(1,4,4,16;8n+10)+N(1,4,16,4;8n+10)-N(1,4,16,16;8n+10)
\\&\quad-N(4,1,4,4;8n+10)+N(4,1,4,16;8n+10)+N(4,1,16,4;8n+10)
\\&\quad-N(4,1,16,16;8n+10)+N(4,4,4,4;8n+10)-N(4,4,4,16;8n+10)
\\&\quad-N(4,4,16,4;8n+10)+N(4,4,16,16;8n+10)
\\&=N(1,1,4,4;8n+10)-2N(1,1,4,16;8n+10)+N(1,1,16,16;8n+10).
\equivndaligned$$
Now applying Lemmas 3.2 and 3.3 we obtain
$$\alphalign
t(1,1,4,4;n)&=4\sigma(4n+5)-2\Big(2\sigma(4n+5)+2\Ls2{4n+5}S_4(4n+5)\Big)
\\&\quad+2\sigma(4n+5)+2\Ls2{4n+5}S_4(4n+5)
\\&=2\Big(\sigma(4n+5)-\Ls2{4n+5}S_4(4n+5)\Big).\equivndalign$$
This yields the result. $\square$
\pro{Lemma 3.4 ([AALW1, Theorem 1.18])} Let $n\in\Bbb N$ and $n\equiv
1\pmod 4$. Then
$$N(1,4,4,4;n)= 2\sigma(n).$$
\equivndpro
\pro{Lemma 3.5 ([AALW2, Theorem 4.5])} Let $n\in\Bbb N$ and $n\equiv
1\pmod 4$. Then
$$N(1,4,16,16;n)=\frac12\sigma(n)+
\frac12(2+(-1)^{\frac{n-1}4})S_4(n).$$
\equivndpro
\pro{Lemma 3.6 ([AALW2, Theorem 4.7])} Let $n\in\Bbb N$ and $n\equiv
1\pmod 4$. Then
$$N(1,4,4,16;n)=\sigma(n)+S_4(n).$$
\equivndpro
\pro{Theorem 3.2} Let $n\in\Bbb N$. Then
$$ t(1,4,4,4;n)=\frac 12\Big(\sigma(8n+13)-3S_4(8n+13)\Big).$$
\equivndpro
Proof. Since $x^2\equiv 0,1,4\pmod 8$ for $x\in\Bbb Z$, using Lemma 3.1
we see that
$$\alphaligned &t(1,4,4,4;n)\\&=N(1,4,4,4;8n+13)-N(1,4,4,16;8n+13)
\\&\quad-N(1,4,16,4;8n+13)+N(1,4,16,16;8n+13)-N(1,16,4,4;8n+13)
\\&\quad+N(1,16,4,16;8n+13)+N(1,16,16,4;8n+13)-N(1,16,16,16;8n+13)
\\&\quad-N(4,4,4,4;8n+13)+N(4,4,4,16;8n+13)+N(4,4,16,4;8n+13)
\\&\quad-N(4,4,16,16;8n+13)+N(4,16,4,4;8n+13)-N(4,16,4,16;8n+13)
\\&\quad-N(4,16,16,4;8n+13)+N(4,16,16,16;8n+13)
\\&=N(1,4,4,4;8n+13)-3N(1,4,4,16;8n+13)+3N(1,4,16,16;8n+13).
\equivndaligned$$
Now applying Lemmas 3.4, 3.5 and 3.6 we obtain
$$\alphalign &t(1,4,4,4;n)\\&=2\sigma(8n+13)-3(\sigma(8n+13)+S_4(8n+13))
+\frac 32(\sigma(8n+13)+S_4(8n+13))
\\&=\frac12\Big(\sigma(8n+13)-3S_4(8n+13)\Big).\equivndalign$$
This proves the theorem. $\square$
\pro{Lemma 3.7 ([AALW1, Theorem 1.14])} Let $n\in\Bbb N$ with
$2\nmid n$. Then $$N(1,2,2,4;n)=2\sigma(n).$$
\equivndpro
\pro{Lemma 3.8 ([AALW2, Theorems 4.9, 4.11 and
4.13])} Let $n\in\Bbb N$ and $n\equiv 1\pmod 8$. Then
$$N(1,2,2,16;n)=N(1,8,8,16;n)=N(1,2,8,16;n)=\sigma(n)+S_4(n).$$
\equivndpro
\pro{Lemma 3.9 ([AALW2, Theorems 4.1 and 4.4])} Let $n\in\Bbb N$ and
$n\equiv 1\pmod 4$. Then
$$N(1,2,4,8;n)=N(1,4,8,8;n)=\sigma(n)+(-1)^{\frac{n-1}4}S_4(n).$$
\equivndpro
\pro{Theorem 3.3} Let $n\in\Bbb N$. Then
$$t(1,2,2,4;n)=\sigma(8n+9)-S_4(8n+9).$$\equivndpro
Proof. From Lemma 3.1 we have
$$\alphaligned &t(1,2,2,4;n)\\&=N(1,2,2,4;8n+9)-N(1,2,2,16;8n+9)
\\&\quad-N(1,2,4,8;8n+9)+N(1,2,8,16;8n+9)-N(1,2,4,8;8n+9)
\\&\quad+N(1,2,8,16;8n+9)+N(1,4,8,8;8n+9)-N(1,8,8,16;8n+9)
\\&\quad-N(4,2,2,4;8n+9)+N(4,2,2,16;8n+9)+N(4,2,8,4;8n+9)
\\&\quad-N(4,2,8,16;8n+9)+N(4,8,2,4;8n+9)-N(4,8,2,16;8n+9)
\\&\quad-N(4,8,8,4;8n+9)+N(4,8,8,16;8n+9)
\\&=N(1,2,2,4;8n+9)-N(1,2,2,16;8n+9)-2N(1,2,4,8;8n+9)
\\&\quad+2N(1,2,8,16;8n+9)+N(1,4,8,8;8n+9)-N(1,8,8,16;8n+9).
\equivndaligned$$ Now applying
Lemmas 3.7, 3.8 and 3.9 we obtain
$$\alphalign &t(1,2,2,4;n)
\\&=2\sigma(8n+9)-(\sigma(8n+9)+S_4(8n+9))-2(\sigma(8n+9)+S_4(8n+9))
\\&\quad+2(\sigma(8n+9)+S_4(8n+9))+\sigma(8n+9)+S_4(8n+9)-(\sigma(8n+9)+S_4(8n+9))
\\&=\sigma(8n+9)-S_4(8n+9),
\equivndalign$$
which completes the proof. $\square$
\pro{Lemma 3.10 ([AALW2, Theorems 4.17 and 4.18])} Let $n\in\Bbb N$
and $n\equiv 1,3\pmod8$. Then
$$N(1,2,4,16;n)=N(1,2,16,16;n)=S(n)+S_2(n).$$
\equivndpro
\pro{Lemma 3.11 ([AALW4, Theorem 5.4])} Let $n\in\Bbb N$ with
$2\nmid n$. Then
$$N(1,2,4,4;n)=
2S(n).$$
\equivndpro
\pro{Theorem 3.4} Let $n\in\Bbb N$. Then
$$t(1,2,4,4;n)=-\sum_{d\mid 8n+11}d\Ls 2d-S_2(8n+11).$$
\equivndpro
Proof. Since $x^2\equiv 0,1\pmod 4$ for $x\in\Bbb Z$, from Lemma 3.1 we
see that
$$\alphalign &t(1,2,4,4;n)\\&=N(1,2,4,4;8n+11)-N(1,2,4,16;8n+11)
\\&\quad-N(1,2,16,4;8n+11)+N(1,2,16,16;8n+11)-N(1,8,4,4;8n+11)
\\&\quad+N(1,8,4,16;8n+11)+N(1,8,16,4;8n+11)-N(1,8,16,16;8n+11)
\\&\quad-N(4,2,4,4;8n+11)+N(4,2,4,16;8n+11)+N(4,2,16,4;8n+11)
\\&\quad-N(4,2,16,16;8n+11)+N(4,8,4,4;8n+11)-N(4,8,4,16;8n+11)
\\&\quad-N(4,8,16,4;8n+11)+N(4,8,16,16;8n+11)
\\&=N(1,2,4,4;8n+11)-2N(1,2,4,16;8n+11)+N(1,2,16,16;8n+11).
\equivndalign$$
Now applying Lemmas 3.10 and 3.11 we obtain $$\alphalign
t(1,2,4,4;n)&=2S(8n+11)-2(S(8n+11)+S_2(8n+11))+(S(8n+11)+S_2(8n+11))
\\&=S(8n+11)-S_2(8n+11).
\equivndalign$$
Since
$$S(8n+11)=\sum_{d\mid 8n+11}\frac nd\Ls 2d=\sum_{d\mid 8n+11}d\Ls
2{(8n+11)/d}=-\sum_{d\mid 8n+11}d\Ls 2d,$$ from the above we
deduce the result. $\square$
\section*{4. Formulas for $t(1,3,3,9;n)$, $t(1,1,9,9;n)$, $t(1,9,9,9;n)$ and $t(1,1,1,9;n)$}
\par For $a,b,c,d,n\in\Bbb N$ let
$$N_0(a,b,c,d;n)=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
n=ax^2+by^2+cz^2+dw^2,\ 2\nmid xyzw\binomig\}\binomig|.$$ From the proof of
Lemma 3.1 we know that
$$t(a,b,c,d;n)=N_0(a,b,c,d;8n+a+b+c+d).\textag 4.1$$
\pro{Lemma 4.1} Let $n\in\Bbb N$ and $n+1=2^{\alphalpha}3^{\binometa}n_1$
with $(6,n_1)=1$. Then
$$t(1,1,3,3;n)=\cases 4\sigma(n_1)&\text{if $2\mid n$,}
\\2^{\alpha+4}\sigma(n_1)&\text{if $2\nmid n$.}
\equivndcases$$\equivndpro
Proof. By [BCH, Theorem 1.5],
$$t(1,1,3,3;n)=16t'(1,1,3,3;n)=\cases 4N(1,1,3,3;n+1)
&\text{if $2\mid n$,}
\\2(N(1,1,3,3;2n+2)-N(1,1,3,3;n+1))&\text{if $2\nmid n$.}
\equivndcases$$
Ramanujan (see [Be, pp. 114,223]) gave theta function identities that yield
the following conjecture of Liouville (see [D]):
$$N(1,1,3,3;n+1)=\cases 16\sigma(n_1)&\text{if $2\mid n$,}
\\4(2^{\alpha+1}-3)\sigma(n_1)&\text{if $2\nmid n$.}
\equivndcases$$
Since $2n+2=2^{\alpha+1}3^{\binometa}n_1$, combining all the above yields
the result. $\square$
\pro{Theorem 4.1} Let $n\in\Bbb N$ and $n+2=2^{\alpha}3^{\binometa}n_1$ with
$(6,n_1)=1$. Then
$$t(1,3,3,9;n)=\cases 0&\text{if $n\equiv 2,5\pmod 6$,}
\\ 16\sigma(n_1)&\text{if $n\equiv 1\pmod 6$,}
\\2^{\alpha+4}\sigma(n_1)&\text{if $n\equiv 4\pmod 6$,}
\\8\sigma(n_1)&\text{if $n\equiv 3\pmod 6$,}
\\2^{\alpha+3}\sigma(n_1)&\text{if $n\equiv 0\pmod 6$.}
\equivndcases$$
\equivndpro
Proof. From (4.1) we know that $t(1,3,3,9;n)=N_0(1,3,3,9;8n+16)$. If
$3\mid n-2$, then $8n+16\equiv 2\pmod 3$. Since $x^2\not\equiv 2\pmod 3$ for
any $x\in\Bbb Z$, we get $t(1,3,3,9;n)=N_0(1,3,3,9;8n+16)=0$. If
$3\mid n-1$, then $3\mid 8n+16$ and so
$$\alphalign t(1,3,3,9;n)
&=N_0(1,3,3,9;8n+16) \\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+16=(3x)^2+3y^2+3z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
\frac{8n+16}3=3x^2+y^2+z^2+3w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&=N_0(1,1,3,3;8(n-1)/3+8)=t(1,1,3,3;(n-1)/3).
\equivndalign$$
If $3\mid n$, since $x^2+y^2\equiv 8n+16\equiv 1\pmod 3$ implies $3\mid
x$ or $3\mid y$ we see that
$$\alphalign t(1,1,3,3;n+1)
&=N_0(1,1,3,3;8n+16)
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+16=(3x)^2+y^2+3z^2+3w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&\quad+\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+16=x^2+(3y)^2+3z^2+3w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&=2N_0(1,3,3,9;8n+16)=2t(1,3,3,9;n).\equivndalign$$
Now combining the above with Lemma 4.1 yields the result. $\square$
\par\quad\par
For $k\in\Bbb N$
and variable $q$ with $|q|<1$ define
$$\alphalign &E_k=E_k(q):=\prod_{n=1}^{\infty}(1-q^{kn}),
\\& qE_6^4=\sum_{n=1}^{\infty}c(n)q^n\quadtq{and}
\frac{E_2^{17}E_3}{E_1^7E_4^6E_6}=\sum_{n=0}^{\infty}a(n)q^n.
\equivndalign$$ From [KF, p.374] or [M, p.121] we know that
$$c(n)=\frac 13\sum\Sb x,y\in\Bbb Z
\\ n=x^2+3xy+3y^2\\ 3\mid x-2,\ 2\mid y-1\equivndSb
(-1)^xx.$$ Thus,
$$c(n)=\frac 13\sum\Sb x,y\in\Bbb Z\\n=x^2+3x(1+2y)+3(1+2y)^2
\\x\equiv 2\pmod 3\equivndSb(-1)^xx
=\frac 13\sum\Sb x,y\in\Bbb Z\\4n=x^2+3(x+2+4y)^2
\\x\equiv 2\pmod 3\equivndSb(-1)^xx$$ and so
$$c(n)=\frac 13\sum\Sb 4n=a^2+3b^2 \ (a,b\in\Bbb Z)
\\ a\equiv 2\pmod 3,b\equiv a+2\pmod 4\equivndSb(-1)^aa.\textag 4.2$$
\pro{Lemma
4.2 ([A1, Theorems 1.5 and 1.6])} For $n\in\Bbb N$ we have
$$ N(1,1,9,9;n)=\cases 4\sigma(n)-8\sigma(
n/2)&\text{if $n\equiv2,4\pmod 6$,}\\\frac 43\sigma(n)&\text{if $n\equiv5\pmod
6$,}\\8\sigma(n/9)-32\sigma( n/36)&\text{if $n\equiv0\pmod 6$}\equivndcases$$
and
$$ N(1,9,9,9;n)=\cases 8\sigma(n/9)&\text{if $n\equiv3\pmod 6$,}\\2\sigma(n)-4\sigma(
n/2)&\text{if $n\equiv4\pmod 6$,}\\8\sigma(n/9)-32\sigma(n/36)&\text{if
$n\equiv0\pmod 6$.}\equivndcases$$
\equivndpro
\pro{Lemma 4.3 ([A2, Theorems 2.5 and 2.10])} For $n\in\Bbb N$ with
$4\mid n$ we have
$$
N(1,1,36,36;n)=\cases \frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac
83c(n/4)&\text{if $n\equiv4\pmod {12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod {12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$}
\equivndcases$$
and
$$ N(1,4,36,36;n)=\cases \frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac 83c(n/4)&\text{if
$n\equiv4\pmod {12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod{12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$.}
\equivndcases$$\equivndpro
\pro{Lemma 4.4 ([A2, Theorem 2.4])} For $n\in\Bbb N$ with $4\mid n$
we have
$$ N(1,1,9,36;n)=\cases \frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac 83c(n/4)&\text{if $n\equiv4\pmod
{12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod {12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$.}
\equivndcases$$
\equivndpro
\pro{Lemma 4.5 ([A2, Theorem 2.8])} For $n\in\Bbb N$ with $4\mid n$
we have
$$ N(1,4,9,9;n)=\cases \frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac 83c(n/4)&\text{if $n\equiv4\pmod
{12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod {12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$.}
\equivndcases$$\equivndpro
\pro{Lemma 4.6 ([A2, Theorem 2.9])} For $n\in\Bbb N$ with $4\mid n$
we have
$$ N(1,4,9,36;n)=\cases \frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac 83c(n/4)
&\text{if $n\equiv4\pmod{12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod {12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$.}
\equivndcases$$\equivndpro
\pro{Lemma 4.7 ([A2, Theorem 2.15])} For $n\in\Bbb N$ with $4\mid
n$ we have
$$ N(4,4,9,9;n)=\cases
\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac83c(n/4)&\text{if $n\equiv4\pmod
{12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod{12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$.}\equivndcases$$
\pro{Lemma 4.8 ([A2, Theorem 2.16])} For $n\in\Bbb N$ with $4\mid
n$ we have
$$N(4,4,9,36;n)=\cases
\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)+\frac83c(n/4)&\text{if $n\equiv4\pmod
{12}$,}
\\\frac43\sigma(n/4)-\frac{16}3\sigma(n/16)&\text{if $n\equiv8\pmod {12}$,}
\\8\sigma(n/36)-32\sigma(n/144)&\text{if $n\equiv0\pmod {12}$.}
\equivndcases$$
\equivndpro
\pro{Theorem 4.2} Let $n\in\Bbb N.$ Then
$$\alphaligned t(1,1,9,9;n)=\cases \frac83\sigma(2n+5)&\text{if $n\equiv0\pmod 3$,}
\\{16}\sigma(\frac{2n+5}9)&\text{if $n\equiv2\pmod 9$,}
\\0&\text{if $n\equiv5,8\pmod 9$,}
\\\frac 83(\sigma(2n+5)-c(2n+5))&\text{if $n\equiv1\pmod 3$.}
\equivndcases\equivndaligned$$
\equivndpro
Proof. For $n\equiv 2\pmod 3$ we see that $3\mid 8n+20$ and so
$$\alphalign &t(1,1,9,9;n)\\&=N_0(1,1,9,9;8n+20)
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+20=(3x)^2+(3y)^2+9z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&=\cases 0&\text{if $9\nmid n-2$,}
\\N_0(1,1,1,1;\frac{8n+20}9)=t(1,1,1,1;\frac{n-2}9)=16\sigma(\frac{2n+5}9)
&\text{if $9\mid n-2$.}
\equivndcases\equivndalign$$
\par Now assume $n\equiv 0,1\pmod 3$.
By Lemma 3.1,
$$\alphalign &t(1,1,9,9;n)\\&=N(1,1,9,9;8n+20)-2N(1,1,9,36;8n+20)+N(1,1,36,36;8n+20)
\\&\quad-2N(1,4,9,9;8n+20)+4N(1,4,9,36;8n+20)-2N(1,4,36,36;8n+20)
\\&\quad+N(4,4,9,9;8n+20)-2N(4,4,9,36;8n+20)+N(1,1,9,9;2n+5).\equivndalign$$
For $n\equiv0\pmod3$ applying Lemmas 4.2-4.8 we see that
$$\alphalign
&t(1,1,9,9;n)\\&=4\sigma(8n+20)-8\sigma(\frac{8n+20}2)-2(\frac43\sigma(\frac{8n+20}4)
-\frac{16}3\sigma(\frac{8n+20}{16}))
\\&\quad+\frac43\sigma(\frac{8n+20}4)-\frac{16}3\sigma(\frac{8n+20}{16})-2(\frac43\sigma(\frac{8n+20}4)
-\frac{16}3\sigma(\frac{8n+20}{16}))
\\&\quad+4(\frac43\sigma(\frac{8n+20}4)-\frac{16}3\sigma(\frac{8n+20}{16}))-2(\frac43\sigma(\frac{8n+20}4)
-\frac{16}3\sigma(\frac{8n+20}{16}))
\\&\quad+\frac43\sigma(\frac{8n+20}4)-\frac{16}3\sigma(\frac{8n+20}{16})-2(\frac43\sigma(\frac{8n+20}4)
-\frac{16}3\sigma(\frac{8n+20}{16}))+\frac43\sigma(2n+5)
\\&=28\sigma(2n+5)-24\sigma(2n+5)-\frac83\sigma(2n+5)+\frac{32}3\sigma(\frac{2n+5}4)+\frac43\sigma(2n+5)
\\&=\frac83\sigma(2n+5).\equivndalign$$
For $n\equiv 1\pmod 3$, applying Lemmas 4.2-4.8 we find that
$$\alphalign &t(1,1,9,9;n)\\&=4\sigma(8n+20)-8\sigma(\frac{8n+20}2)-2(\frac43\sigma(\frac{8n+20}{4})
-\frac{16}3\sigma(\frac{8n+20}{16})+\frac83c(\frac{8n+20}4))
\\&\quad+\frac43\sigma(\frac{8n+20}{4})
-\frac{16}3\sigma(\frac{8n+20}{16})+\frac83c(\frac{8n+20}4)-2(\frac43\sigma(\frac{8n+20}{4})
-\frac{16}3\sigma(\frac{8n+20}{16})
\\&\quad+\frac83c(\frac{8n+20}4))
+4(\frac43\sigma(\frac{8n+20}{4})
-\frac{16}3\sigma(\frac{8n+20}{16})+\frac83c(\frac{8n+20}4))-2(\frac43\sigma(\frac{8n+20}{4})
\\&\quad-\frac{16}3\sigma(\frac{8n+20}{16})+\frac83c(\frac{8n+20}4))+\frac43\sigma(\frac{8n+20}{4})
-\frac{16}3\sigma(\frac{8n+20}{16})+\frac83c(\frac{8n+20}4)
\\&\quad-2(\frac43\sigma(\frac{8n+20}{4})
-\frac{16}3\sigma(\frac{8n+20}{16})+\frac83c(\frac{8n+20}4))+\frac43\sigma(2n+5)+\frac83c(2n+5)
\\&=28\sigma(2n+5)-24\sigma(2n+5)-\frac83\sigma(2n+5)-\frac{16}{3}c(2n+5)+\frac43\sigma(2n+5)
+\frac83c(2n+5)
\\&=\frac83(\sigma(2n+5)-c(2n+5)).\equivndalign$$
The proof is now complete. $\square$
\pro{Theorem 4.3} Let $n\in\Bbb N$. Then
$$t(1,9,9,9;n)=\cases 16\sigma(\frac{2n+7}9)&\text{if $n\equiv1\pmod9,$}
\\0&\text{if $n\equiv2,4,5,7,8\pmod9$},
\\\frac 43(\sigma(2n+7)-c(2n+7))&\text{if $n\equiv0\pmod 3$}.\equivndcases$$
\equivndpro
Proof. For $x\in\Bbb Z$ we see that $x(x-1)/2\equiv 0,1,3,6\pmod 9$.
Thus, $t(1,9,9,9;n)=0$ for $n\equiv2,4,5,7,8\pmod9$. Now we assume that
$n\equiv 0,1,3,6\pmod 9$. For $n\equiv 1\pmod 9$ we see that $9\mid 8n+28$ and
so
$$\alphalign t(1,9,9,9;n)&=N_0(1,9,9,9;8n+28)
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+28=(3x)^2+9y^2+9z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&=N_0\binomig(1,1,1,1;\frac{8n+28}9\binomig)=t\binomig(1,1,1,1;\frac{n-1}9\binomig)
=16\sigma\binomig(\frac{2n+7}9\binomig).
\equivndalign$$
For $n\equiv 0\pmod 3$ we see that
$$\alphalign t(1,1,9,9;n+1)
&=N_0(1,1,9,9;8n+28)
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+28=(3x)^2+y^2+9z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&\quad+\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+28=x^2+(3y)^2+9z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&=2N_0(1,9,9,9;8n+28)=2t(1,9,9,9;n).\equivndalign$$
Now applying Theorem 4.2 we deduce the result in this case.
$\square$
\pro{Theorem 4.4} Let $n\in\Bbb N$. Then
$$ t(1,1,1,9;n)=\cases 4\sigma(2n+3)+12\sigma(\frac{2n+3}9)\
&\text{if $n\equiv0\pmod 3$,}
\\8\sigma(2n+3)&\text{if $n\equiv1\pmod 3$,}
\\4(\sigma(2n+3)-c(2n+3))&\text{if $n\equiv2\pmod 3$.}\equivndcases$$
\equivndpro
Proof. For $n\equiv 0\pmod 3$ we see that $3\mid 8n+12$. If
$8n+12=x^2+y^2+z^2+w^2$ for $x,y,z,w\in\Bbb Z$, then either $x\equiv y\equiv
z\equiv w\equiv 0\pmod 3$ or $xyzw\equiv \pm 3\pmod 9$. Thus,
$$N_0(1,1,1,1;8n+12)=4N_0(1,1,1,9;8n+12)-3N_0(9,9,9,9;8n+12).$$
This together with (4.1) yields
$$t(1,1,1,1;n+1)=\cases 4t(1,1,1,9;n)&\text{if $n\equiv 0,6\pmod 9$,}
\\4t(1,1,1,9;n)-3t(1,1,1,1;\frac{n-3}9)&\text{if $n\equiv 3\pmod 9$.}
\equivndcases$$ Now combining the above with (1.2) yields the result in this
case.
\par Suppose $n\equiv 1\pmod3$. Then $8n+12\equiv 2\pmod 3$. If $8n+12
=x^2+y^2+z^2+9w^2$ for $x,y,z,w\in\Bbb Z$, then $3\mid xyz$ but
$9\nmid xyz$. Thus,
$$\alphalign t(1,1,1,9;n)&=N_0(1,1,1,9;8n+12)
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+12=(3x)^2+y^2+z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&\quad+\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+12=x^2+(3y)^2+z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&\quad+\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm| 8n+12=x^2+y^2+(3z)^2+9w^2,\
2\nmid xyzw\binomig\}\binomig|
\\&=3N_0(1,1,9,9;8n+12)
=3t(1,1,9,9;n-1).\equivndalign$$
This together with Theorem 4.2 yields
the result in this case.
\par For $n\equiv 2\pmod 3$ we see that $8n+12\equiv 1\pmod 3$ and so
$$\alphalign t(1,1,1,9;n)&=N_0(1,1,1,9;8n+12)
\\&=\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+12=(3x)^2+(3y)^2+z^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&\quad+\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm|
8n+12=x^2+(3y)^2+(3z)^2+9w^2,\ 2\nmid xyzw\binomig\}\binomig|
\\&\quad+\binomig|\binomig\{(x,y,z,w)\in\Bbb Z^4\binomigm| 8n+12=(3x)^2+y^2+(3z)^2+9w^2,\
2\nmid xyzw\binomig\}\binomig|
\\&=3N_0(1,9,9,9;8n+12)=3t(1,9,9,9;n-2).\equivndalign$$
Now combining the above with Theorem
4.3 yields the result in the case $n\equiv 2\pmod 3$. The proof is now
complete. $\square$
\par\quad\par In conclusion we pose the following
conjecture.
\pro{Conjecture 4.1} Suppose $n\in\Bbb N$ and
$8n+9=3^{\binometa}n_1$ with $3\nmid n_1$. Then
$$ t(1,1,3,4;n)=\frac 12\Big(3^{\binometa+1}\Ls 3{n_1}-1\Big)
\sum_{d\mid n_1}d\Ls 3d -\sum\Sb a,b\in\Bbb N,\ 2\nmid
a\\4(8n+9)=a^2+3b^2\equivndSb (-1)^{\frac{a-1}2}a.$$
\equivndpro
\par Conjecture 4.1 has been checked for $n\le 1000$.
\par\quad
\newline{\binomf Acknowledgement}
\newline The second author is supported by the National Natural Science
Foundation of China (grant No. 11371163).
\binomegin{thebibliography}{AALW3}
\binomibitem [ACH]{} C. Adiga, S. Cooper and J. H. Han, {\it
A general relation between sums of squares and sums of triangular
numbers}, Int. J. Number Theory. {\binomf 2}(2005), 175-182.
\binomibitem [A1]{} A. Alaca, {\it Representations by quaternary quadratic
forms whose coefficients are $1,3$ and $9$}, Acta Arith. {\binomf
136}(2009), 151-166.
\binomibitem [A2]{} A. Alaca, {\it Representations by quaternary quadratic
forms whose coefficients are $1,4,9$ and $36$}, J. Number Theory.
{\binomf 131}(2011), 2192-2218.
\binomibitem [AALW1]{} A. Alaca, S. Alaca, M.F. Lemire and
K.S. Williams, {\it Nineteen quaternary quadratic forms}, Acta
Arith. {\binomf 130} (2007), 277--310.
\binomibitem [AALW2]{} A. Alaca, S. Alaca, M.F. Lemire and
K.S. Williams, {\it Jacobi's identity and representations of
integers by certain quaternary quadratic forms}, Int. J. Modern
Math. {\binomf 2}(2007), 143-176.
\binomibitem [AALW3]{} A. Alaca, S. Alaca, M.F. Lemire and
K.S. Williams, {\it Theta function identities and representations by
certain quaternary quadratic forms}, Int. J. Number Theory. {\binomf
4}(2008), 219-239.
\binomibitem [AALW4]{} A. Alaca, S. Alaca, M.F. Lemire and
K.S. Williams, {\it The number of representations of a positive
integer by certain quaternary quadratic forms}, Int. J. Number
Theory. {\binomf 5}(2009), 13-40.
\binomibitem [BCH]{} N. D. Baruah, S. Cooper and M. Hirschhorn, {\it
Sums of squares and sums of triangular numbers induced by partitions
of 8}, Int. J. Number Theory. {\binomf 4}(2008), 525-538.
\binomibitem [Be] {} B.C. Berndt, {\it Ramanujan's Notebooks}, Part III,
Springer, New York, 1991.
\binomibitem [C]{} S. Cooper, {\it On the number of
representations of integers by certain quadratic forms II}, J.
Combin. Number Theory. {\binomf 1}(2009), 153-182.
\binomibitem [D]{} L.E. Dickson, {\it History of the Theory of Numbers},
Vol. III, Carnegie Institute of Washington,
Washington D.C., 1923. Reprinted by AMS Chelsea, 1999.
\binomibitem [KF]{} F. Klein, R. Fricke, {\it Vorlesungen $\ddot u$ber die
Theorie der elliptischen Modulfunktionen}, Vol. II, Teubner,
Leipzig, 1892.
\binomibitem [L] {} A.M. Legendre, {\it Trait\'e des Fonctions
Elliptiques}, Vol. 3, Paris, 1832.
\binomibitem [M] {} L.J. Mordell, {\it On Mr Ramanujan's
empirical expansions of modular functions}, Proc. Cambridge Philos.
Soc. {\binomf 19}(1917), 117-124.
\binomibitem [W1] {} K.S. Williams, {\it
$n=\Delta+\Delta+2(\Delta+\Delta)$}, Far East J. Math. Sci. {\binomf
11}(2003), 233-240.
\binomibitem [W2] {} K.S. Williams, {\it Number Theory in the Spirit of
Liouville}, Cambridge Univ. Press, New York, 2011.
\equivnd{thebibliography}
\equivnd{document} |
\begin{document}
\begin{frontmatter}
\title{Soundness-Preserving Composition of Synchronously and Asynchronously Interacting Workflow Net Components}
\author[label1]{Luca Bernardinello}
\author[label2]{Irina Lomazova}
\author[label1,label2]{Roman Nesterov}
\author[label2]{Lucia Pomello}
\affiliation[label1]{organization={University of Milano-Bicocca},
addressline={Viale Sarca 336 - Edificio U14},
city={Milan},
postcode={20126},
country={Italy}}
\affiliation[label2]{organization={HSE University},
addressline={11 Pokrovskiy Boulevard},
city={Moscow},
postcode={101000},
country={Russia}}
\begin{abstract}
In this paper, we propose a compositional approach to constructing correct formal models of information systems from correct models of interacting components.
Component behavior is represented using workflow nets --- a class of Petri nets.
Interactions among components are encoded in an additional interface net.
The proposed approach is used to model and compose synchronously and asynchronously interacting workflow nets.
Using Petri net morphisms and their properties, we prove that the composition of interacting workflow nets preserves the correctness of components and of an interface.
\end{abstract}
\begin{keyword}
Petri nets \sep workflow nets \sep interaction \sep soundness \sep morphisms \sep composition
\end{keyword}
\end{frontmatter}
\section{Introduction}
\label{sec:introduction}
Formal models are essential for the specification and analysis of a distributed information system behavior.
The precise semantics of such models helps to prove various important properties, which concern reliability and smooth operation of information systems.
\emph{Petri nets} \cite{Reisig13} are widely recognized as one of the most convenient formalisms for modeling and analyzing the behavior of complex distributed systems.
Petri net composition has been extensively studied in the literature.
Researchers considered various aspects, including \emph{architectural} concepts of compositional modeling and \emph{semantical} issues relating to compositional analysis of Petri net behavior.
The ubiquity of service-oriented and multi-agent architectures of information systems retains the relevance of further research on these aspects of Petri net composition.
Wolfgang Reisig, in his recent works \cite{Reisig2018,ReiComp20}, defined a general setting for compositional modeling of service-oriented information systems.
He addressed \emph{architectural} problems behind the composition of Petri net components in the context of algebraic properties.
The compositional analysis of \emph{semantical} aspects has been performed for different classes of Petri nets and behavioral properties.
Among works in this direction, we note the one \cite{Wolf09} by Christian Stahl and Karsten Wolf, who studied the compositional analysis of deadlock-freeness in open Petri nets.
Boundedness and deadlock-freeness are two components of \emph{soundness} --- the crucial correctness property of \emph{workflow} (WF) nets \cite{Aalstwf02}.
WF-nets form a class of Petri nets used to model the \emph{control-flow} of processes in information systems.
The method proposed by C.\,Stahl and K.\,Wolf does not consider the compositional proof of the absence of livelocks --- the third component of soundness.
Our study is focused on semantical aspects of compositional WF-net modeling, namely, the construction of sound WF-nets from sound models of interacting components.
The following example illustrates that unregulated interactions of sound WF-nets can easily violate soundness. Figure \ref{in_ex} shows two WF-net components $N_1$ and $N_2$.
They both are sound with respect to their initial and final states, where $s_1$ ($s_2$) is the initial state in $N_1$ ($N_2$), and $f_1$ ($f_2$) is the final state in $N_1$ ($N_2$).
\begin{figure}
\caption{Two sound WF-net components}
\label{in_ex}
\end{figure}
Let us suppose that $N_1$ and $N_2$ interact \emph{synchronously}, i.e., execute a simultaneous activity.
Let transitions $c$ and $g$ correspond to this activity.
Thus, we need to fuse transitions $c$ and $g$, preserving the original arcs.
Figure \ref{in_ex21} shows the result, where the merged transition is denoted by $(c, g)$.
This synchronization produces a deadlock since a marking with tokens in $p_2$ and $p_3$ is not always reachable.
Next, let us suppose that $N_1$ and $N_2$ interact \emph{asynchronously}, i.e., exchange messages through channels.
Let $N_1$ send messages via transition $d$, and let $N_2$ receive messages via transition $h$.
Then we add a place $m$ to model a channel between transitions $d$ and $h$.
We need to connect these transitions with the added place according to sending and receiving operations.
Figure \ref{in_ex22} shows the insertion of a place between transitions $d$ and $h$.
This asynchronous interaction leads to potential overflow in the added place $m$.
Therefore, this composition of $N_1$ and $N_2$ is unbounded.
\begin{figure}
\subcaption{synchronous}\label{in_ex21}
\subcaption{asynchronous}\label{in_ex22}
\caption{Interactions between the WF-net components from Fig.~\ref{in_ex}}
\end{figure}
Our study discusses theoretical backgrounds to justify a correct composition of interacting workflow nets.
We define an operation of composing synchronously and asynchronously interacting WF-nets.
This composition is defined on a syntactical level and does not guarantee to preserve soundness.
We use two types of components in compositional modeling: agent models and an interface that describes how agents interact.
Both agent and interface models are sound WF-nets.
An interface plays a significant role in formulating conditions for a semantically correct composition of sound WF-nets.
An interface represents the \emph{abstract} view of a complete system, where the behavior of an agent corresponds to a subnet describing requirements imposed on the structure of an agent.
The correspondence between an agent behavior and an interface subnet is defined through an abstraction/refinement relation based on $\alpha$-morphisms \cite{Bernardinello2013}.
We prove that replacing an interface subnet with the corresponding agent behavior preserves interface soundness.
Thus, the main contributions of our paper are:
\begin{enumerate}
\item Formal definition and semantical properties of asynchronous-synchronous composition of interacting WF-nets.
\item Structural and behavioral properties of place refinement and subnet abstraction in WF-nets based on $\alpha$-morphisms.
\item Proof of the theorem that component refinement in asynchronous-synchronous composition of WF-nets preserves soundness.
\end{enumerate}
The remainder of the paper proceeds as follows.
The next section provides basic definitions on Petri nets, workflow nets, and their behavior.
In Section~\ref{sec:comp}, we define an asynchronous-synchronous workflow net composition and study its properties.
In Section~\ref{sec:alpha}, using $\alpha$-morphisms, we define an abstraction/refinement relation on workflow nets and study the relevant properties of this relation.
Section~\ref{sec:main} describes how to preserve the properties of workflow net components in their asynchronous-synchronous composition through the use of the abstraction/refinement relation.
Section~\ref{sec:relw} gives a review of related works, and Section~\ref{sec:concl} concludes the paper.
\section{Preliminaries}\label{sec:prel}
This section provides the basic definitions on Petri nets used in the paper.
Let $A, B$ be two sets.
A function $f$ from $A$ to $B$ is denoted by $f \colon A \to B$, where $A$ is the \emph{domain} of $f$ (denoted by $\dom{f}$) and $B$ is the \emph{range} of $f$ (denoted by $\rng{f}$).
A \emph{restriction} of a function $f$ to a subset $A' \subseteq A$ is denoted by $f \vert_{A'} \colon A' \to B$.
A \emph{partial} function $g$ from $A$ to $B$ is a function from $A'$ to $B$, where $A' \subseteq A$.
A partial function is denoted by $g \colon A \nrightarrow B$.
When $g$ is not defined for $a \in A$, we write $g(a) = \perp$.
Let $\mathbb{N}$ denote the set of non-negative integers.
A \textit{multiset} $m$ over a set $S$ is a function $m \colon S \rightarrow \mathbb{N}$.
If $m(s)\geq 1$, we write $s \in m$.
If $m(s) \leq 1$ for all $s \in S$, then $m$ corresponds to a set $S' \subseteq S$.
Let $m_1, m_2$ be two multisets over the same set $S$.
Then $m_1 \subseteq m_2 \Leftrightarrow m_1(s)\leq m_2(s)$ for all $s \in S$.
Also, $m'=m_1+m_2 \Leftrightarrow m'(s)=m_1(s)+m_2(s)$, $m''=m_1-m_2 \Leftrightarrow m''(s)=\max(m_1(s)-m_2(s), 0)$ for all $s\in S$.
Let $A^+$ denote the set of all finite non-empty \emph{sequences} over $A$, and $A^* = A^+\cup \{\varepsilon\}$, where $\varepsilon$ is the empty sequence.
Then for $w \in A^*$ and $B \subseteq A$, $w \vert_{B}$ denotes the \emph{projection} of $w$ on $B$, i.e., $w\vert_{B}$ is the sub-sequence of $w$ built from elements in $B$.
For example, if $A = \{a, b, c\}$, $B=\{b\}$, and $w = aabbbcc \in A^*$, then $w\vert_{B} = bbb$.
A \emph{Petri net} is a triple $N=(P, T, F)$, where $P$ and $T$ are two disjoint sets of places and transitions, i.e.,
$P \cap T = \varnothing$, and $F \subseteq (P \times T) \cup (T \times P)$ is a flow relation.
Pictorially, places are shown by circles, transitions are shown by boxes, and $F$ is shown by arcs.
For $N_1$, shown in Fig. \ref{in_ex}, $P = \{s_1, p_1, p_2, f_1\}$, $T = \{a, b, c, d\}$ and $F=\{(s_1, a), (a, p_1), (p_1, b), (p_1, d), (d, p_2), (p_2, c), (c, p_1),$ $(b, f_1)\}$.
Let $N=(P, T, F)$ be a Petri net, and $X = P \cup T$.
The set $\pre{x} = \{y \in X \,\vert\, (y, x) \in F\}$ is called the \textit{preset} of $x \in X$.
The set $\post{x} = \{y \in X \,\vert\, (x, y) \in F\}$ is called the \textit{postset} of $x \in X$.
The set $\neighb{x} = \pre{x} \cup \post{x}$ is called the \textit{neighborhood} of $x \in X$.
$N$ is \emph{P-simple} iff $\forall p_1, p_2 \in P \colon \pre{p_1} = \pre{p_2}$ and $\post{p_1} = \post{p_2}$ implies $p_1 = p_2$.
In our study, we consider Petri nets, such that $\nexists x \in X \colon \pre{x} = \varnothing = \post{x}$ and $\forall t \in T \colon \abs{\pre{t}} \geq 1$ and $\abs{\post{t}} \geq 1$.
Self-loops are forbidden, i.e., $\forall x \in X \colon \pre{x} \cap \post{x} = \varnothing$.
Let $N=(P, T, F)$ be a Petri net, and $A \subseteq X$.
Then $\pre{A} = \bigcup_{x \in A}\pre{x}$, $\post{A} = \bigcup_{x \in A}\post{x}$, $\neighb{A} = \pre{A} \cup \post{A}$.
Let $N(A)$ denote a \textit{subnet} of $N$ \textit{generated by} $A$, i.e., $N(A) = (P\cap A, T \cap A, F \cap (A \times A))$.
The set $\inp{N(A)} = \{y \in A \,\vert\, (\exists z \in X \setminus A \colon (z, y) \in F) \text{ or } (\pre{y} = \varnothing)\}$ contains the \textit{input} elements,
and the set $\outp{N(A)} = \{y \in A \,\vert \,(\exists z \in X \setminus A \colon (y, z) \in F) \text{ or } (\post{y} = \varnothing)\}$ contains the \emph{output} elements of the subnet $N(A)$.
A \emph{marking} (state) in a Petri net $N=(P, T, F)$ is a multiset over $P$.
A marking $m$ is designated by putting $m(p)$ black tokens inside a place $p \in P$.
Transition $t \in T$ has a \emph{contact} at a marking $m$ if $\pre{t} \subseteq m$ and $\post{t} \cap m \neq \varnothing$.
A \emph{marked} Petri net is a quadruple $N=(P, T, F, m_0)$, where $(P, T, F)$ is a Petri net and $m_0$ is the \emph{initial} marking.
Further, the term ``marked'' can be omitted while referring to marked Petri nets.
A \emph{state machine} is a connected Petri net $N =(P, T, F)$, where $\forall t \in T\colon \abs{\pre{t}}=\abs{\post{t}}=1$.
A subnet of a Petri net $N = (P, T, F, m_0)$ identified by a subset of places $A \subseteq P$ and its neighborhood, i.e.,
$N(A \, \cup \, ({\neighb{A}}))$, is a \emph{sequential component} of $N$ iff it is a state machine and has a single token in the initial marking.
$N$ is \emph{covered} by sequential components if every place in $N$ belongs to at least one sequential component of $N$.
In this case, $N$ is said to be \emph{state machine decomposable} (SMD).
For instance, subnet $N(A \, \cup \, ({\neighb{A}}))$ identified by the set of places $A = \{s_1, p_1, p_2, f_1\}$ and by the corresponding set of transitions $\neighb{A} = \{a, b, c, d\}$ is a sequential component of the Petri net shown in Fig. \ref{in_ex22}.
However, this Petri net is not state machine decomposable, since there is no sequential component containing place $m$.
The behavior of a Petri net is defined according to the \emph{firing rule}, which specifies when a transition may fire and how a state in a Petri net changes.
A marking $m$ in $N=(P, T, F, m_0)$ \emph{enables} a transition $t \in T$, denoted $m\reach{t}$, if $\pre{t} \subseteq m$.
When $t$ \emph{fires}, $N$ evolves to a new marking $m' = m - \pre{t} + \post{t}$.
We write $m \reach{t} m'$.
A sequence $w \in T^*$ is a \emph{firing sequence} of $N =(P, T, F, m_0)$ iff $w=t_1t_2\dots t_n$ and $m_0\reach{t_1} m_1 \reach{t_2}\dots m_{n-1}\reach{t_n} m_n$. Then we write $m_0\reach{w}m_n$. The set of all firing sequences of $N$ is denoted by $F\!S(N)$.
A marking $m$ in $N =(P, T, F, m_0)$ is \emph{reachable} if $\exists w \in F\!S(N) \colon m_0\reach{w}m$.
The set of all markings in $N$ reachable from $m$ is denoted by $\reach{m}$.
$N$ is \emph{safe} iff $\forall p \in P$, $\forall m \in \reach{m_0} \colon m(p) \leq 1$.
Thus, a reachable marking in a safe Petri net is a set of places.
Petri nets covered by sequential components are free of contacts \cite{Rozenberg96}.
That is why Petri nets covered by sequential components are safe.
The \emph{concurrent} semantics of a Petri net is captured by its \emph{unfolding}.
Let $N = (P, T, F)$ be a Petri net, and $F^*$ be the reflexive transitive closure of $F$. Then $\forall x, y \in P \cup T \colon$
$x$ and $y$ are in \emph{causal} relation, denoted $x \leq y$, if $(x, y) \in F^*$; $x$ and $y$ are in \emph{conflict} relation, denoted $x \# y$, if $\exists t_x, t_y \in T$, such that $t_x \neq t_y$, $\pre{t_x} \cap \pre{t_y} \neq \varnothing$, and $t_x \leq x$, $t_y \leq y$.
\begin{definition}
A Petri net $O = (B, E, F)$ is an occurrence net iff:
\begin{enumerate}
\item $\forall b \in B \colon \abs{\pre{b}} \leq 1$.
\item $F^*$ is a partial order.
\item $\forall x \in B \cup E \colon \{y \in B \cup E \,\vert\, y \leq x\}$ is finite.
\item $\forall x, y \in B \cup E \colon x \# y \Rightarrow x \neq y$.
\end{enumerate}
\end{definition}
By definition, $O$ is acyclic.
Let $M\!in(O)$ denote the set of minimal nodes of $O$ w.r.t. $F^*$, i.e., the elements with the empty preset.
Since we consider nets having transitions with non-empty presets and postsets, $M\!in(O) \subseteq B$.
\begin{definition}
Let $N=(P, T, F, m_0)$ be a safe Petri net, $O=(B, E, F)$ be an occurrence net, and $\pi:B\cup E\to P\cup T$ be a map. A couple $(O, \pi)$ is a branching process of $N$ iff:
\begin{enumerate}
\item $\pi(B) \subseteq P$ and $\pi(E) \subseteq T$.
\item $\pi \vert_{M\!in(O)}$ is a bijection from $M\!in(O)$ to $m_0$.
\item $\forall t \in T\colon \pi \vert_{\pre{t}}$ is a bijection between $\pre{t}$ and $\pre{\pi(t)}$, and similarly for $\post{t}$ and $\post{\pi(t)}$.
\item $\forall t_1, t_2 \in T \colon$ if $\pre{t_1} = \pre{t_2}$ and $\pi(t_1) = \pi(t_2)$, then $t_1 = t_2$.
\end{enumerate}
\end{definition}
The \emph{unfolding} of $N$, denoted $\mathcal{U}(N)$, is the maximal branching process of $N$, such that any other branching process of $N$ is isomorphic to a subnet of $\mathcal{U}(N)$, where the map $\pi$ is restricted to the elements of this subnet.
The map associated with the unfolding is denoted $u$ and called \emph{folding}.
\emph{Workflow nets} form a subclass of Petri nets used for modeling processes and services.
They have unique input and output places.
We define a \emph{generalized} workflow net with the initial state $m_0$ (exactly corresponding to its initial marking) and the final state $m_f$ below.
\begin{definition}\label{GWF}
A generalized workflow (GWF) net $N = (P, T, F, m_0, m_f)$ is a Petri net $(P, T, F,$ $m_0)$ equipped with $m_f$, where:
\begin{enumerate}
\item $\forall p \in m_0 \colon \pre{p} = \varnothing$. \label{instwf}
\item $m_f \subseteq P$ such that $m_f \neq \varnothing$ and $\forall p \in m_f \colon \post{p} = \varnothing$.\label{outstwf}
\item $\forall x \in P \cup T \,\,\exists s \in m_0 \,\,\exists f \in m_f \colon (s, x) \in F^* \text{ and } (x, f) \in F^*$.\label{connwf}
\end{enumerate}
\end{definition}
Since SMD Petri nets are safe, GWF-nets covered by sequential components are safe as well.
The important correctness property of GWF-nets is \emph{soundness} \cite{Aalst11} formally defined below.
\begin{definition}\label{sound}
A GWF-net $N = (P, T, F, m_0, m_f)$ is sound iff:
\begin{enumerate}
\item $\forall m \in \reach{m_0} \colon m_f \in \reach{m}$.\label{sndprop}
\item $\forall m \in \reach{m_0} \colon m_f \subseteq m \Rightarrow m=m_f$.\label{sndclean}
\item $\forall t \in T \, \exists m \in \reach{m_0} \colon m\reach{t}$.\label{sndlive}
\end{enumerate}
\end{definition}
Soundness is threefold.
Firstly, the final state in a sound GWF-net is reachable from any reachable state (\emph{proper termination}).
Secondly, the final state in sound GWF-net cannot be contained in any other reachable state (\emph{clean termination}).
Finally, each transition in a sound GWF-net can fire.
\section{Asynchronous-Synchronous Composition of GWF-Nets}\label{sec:comp}
In this section, we develop an approach to modeling synchronous and asynchronous interactions among components in a system.
Components are represented using GWF-nets.
We introduce \emph{transition labels} and a corresponding \emph{AS-composition} which merges synchronous transitions and adds channels between asynchronously interacting transitions in component models.
Some basic properties of the AS-composition are also studied here.
\subsection{Labeled GWF-Nets}
We introduce two kinds of transition labels to model the \emph{asynchronous} and \emph{synchronous} interaction among system components.
Their behavior is modeled with the help of GWF-nets covered by sequential components.
When components interact asynchronously, they exchange messages using \emph{channels}.
Correspondingly, components can \emph{send} (\emph{receive}) messages \emph{to} (\emph{from}) channels.
Let $\mathfrak{C} = \{c_1, c_2, \dots, c_k \}$ denote the set of all channels.
Channels are represented by places.
The set $\Lambda$ of sending/receiving actions implemented over channels is defined as $\Lambda = \{c!, c? \, \vert \, c \in \mathfrak{C} \}$, where ``$c!$'' indicates sending a message to a channel $c$, and ``$c?$'' indicates receiving a message from a channel $c$.
Thus, some transitions in a GWF-net are labeled by asynchronous actions from $\Lambda$.
For actions in $\Lambda$, we define a function $\mathbf{ch}\colon \Lambda \to \mathfrak{C}$, which maps a sending/receiving action to a corresponding channel, i.e. $\mathbf{ch}(c!) = \mathbf{ch}(c?) = c$.
Given $\Lambda' \subseteq \Lambda$, $\mathbf{ch}(\Lambda') = \bigcup_{\lambda' \in \Lambda'} \mathbf{ch}(\lambda')$.
A GWF-net may also have transitions with \emph{complement} asynchronous labels (``$c!$'' is complement to ``$c?$'' and vice versa).
They are denoted using overline, i.e., $\overline{c!} = c?$ and $\overline{c?} = c!$.
Then we require that there exists a place labeled by ``$c$'' connecting all transitions labeled by ``$c!$'' to all transitions labeled by ``$c?$''.
Labeled places are necessary to establish the logical dependence between transitions with complement labels, i.e., receiving from a channel $c$ should be done after a message is sent to this channel.
However, other places can also connect transitions with complement labels.
Synchronous interactions among components result in merging transitions representing simultaneous actions.
In our work, simultaneous actions are modeled using identical transition labels.
Let $S = \{ s_1, s_2, \dots, s_n \}$ denote the set of synchronous actions.
Similar to the asynchronous interaction, some transitions in a GWF-net are labeled by synchronous actions from $S$.
We formalize these aspects of synchronous and asynchronous interactions among components in Definition \ref{lwf_def}, where a GWF-net is equipped with two transition labeling functions and a place labeling function.
Figure \ref{lgwf_ex} shows a labeled GWF-net, where labeled places are distinguished by the smaller size.
By convention, labels are put either inside or near nodes.
\begin{definition}\label{lwf_def}
Let $\mathfrak{C}$ be a set of channels, and $\Lambda = \{c!, c? \,\vert\, c \in \mathfrak{C}\}$ be a set of sending/receiving actions over $\mathfrak{C}$.
Let $S$ be a set of synchronous actions.
A labeled GWF-net (LGWF-net) $N = (P, T, F, m_0, m_f, h, \ell, k)$ is a GWF-net $(P, T,$ $F, m_0, m_f)$ with two transition labeling functions $h$, $\ell$ and a place labeling function $k$, where:
\begin{enumerate}
\item $h \colon T \nrightarrow \Lambda \text{ is a partial function}$.
\item $\ell \colon T \nrightarrow S$ is a partial function, $\dom{h} \cap \dom{\ell} = \varnothing$.
\item $k \colon P \nrightarrow \mathfrak{C}$ is a partial injective function, such that: \label{chanlab}
\begin{enumerate}
\item $\forall t_1,t_2 \in T \colon$ if
$h(t_1) = c!$ and $h(t_2)=c?$, then \\ $\exists p\in P \colon k(p) = c$ and $(t_1, p), (p, t_2) \in F$; \label{chcon}
\item $\forall p \in P \colon$ if
$k(p) = c$, then ($\pre{p} \neq \varnothing$ and $\forall t \in \pre{p} \colon h(t) = c!$) and \\ ($\post{p} \neq \varnothing$ and $\forall t \in \post{p} \colon h(t)=c?$).
\end{enumerate}
\end{enumerate}
\end{definition}
\begin{figure}
\caption{Labeled generalized workflow net}
\label{lgwf_ex}
\end{figure}
By Definition \ref{lwf_def}, it is easy to see that there is a \emph{unique} place labeled by ``$c$'' connecting only nonempty sets of transitions with complement labels ``$c!$'' and ``$c?$'' in an LGWF-net.
This place is called a channel.
Other unlabeled places can also connect transitions with complement labels.
For instance, in Fig.~\ref{lgwf_ex}, there is the unique place labeled by ``$h$'' with the single incoming arc from transition ``$h!$'' and the single outgoing arc to transition ``$h?$''.
However, there is no place labeled by ``$f$'' in this LGWF-net, since there are no sending transitions labeled by ``$f!$''.
We also note that the number of transitions with ``$c!$'' label is not less than the number of transitions with ``$c?$'' label in any firing sequence of an LGWF-net, for any place labeled by $c$.
In other words, the number of times one can receive a message from a channel cannot be greater than the number of times a message has been sent to this channel.
This follows from the fact that transition $c?$ can fire only after transition $c!$ if the latter is present in an LGWF-net.
There will be a unique labeled place $c$ that is an input place to transition $c?$ and an output place to transition $c!$.
Let $N^- = (P, T, F, m_0, m_f)$ denote the \emph{underlying} GWF-net obtained from an LGWF-net $N = (P, T, F, m_0, m_f, h, \ell, k)$ by removing labels from transitions and places.
Correspondingly, an LGWF-net $N$ is sound if its underlying GWF-net $N^-$ is sound.
\subsection{AS-Composition of LGWF-Nets}
Here we define an \emph{AS-composition} of LGWF-nets.
It captures synchronous and asynchronous interactions among system components according to transition labels.
The AS-composition of LGWF-nets yields a complete model of a distributed system.
The AS-composition is defined for \emph{structurally disjoint} LGWF-nets.
Intuitively, to compose LGWF-nets, it is necessary to:
\begin{itemize}
\item add and connect labeled places with transitions having complement asynchronous labels;
\item merge transitions with identical synchronous labels.
\end{itemize}
The formalization of the AS-composition is given in Definition \ref{comp} for the basic case of composing two LGWF-nets.
It is easy to see that both channel addition and transition synchronization do not lead to the violation of the structural requirements imposed by Definition \ref{GWF} for a GWF-net.
Thus, in this definition, we explicitly construct an LGWF-net by the AS-composition.
\begin{definition}\label{comp}
Let $N_i = (P_i, T_i, F_i, m_0^i, m_f^i, h_i, \ell_i, k_i)$ be an LGWF-net for $i=1,2$, such that $(P_1 \cup T_1) \cap (P_2 \cup T_2) = \varnothing$.
Let $P_i^u = P_i \setminus \dom{k_i}$ and $T_i^a = T_i \setminus \dom{\ell_i}$ for $i=1, 2$.
The AS-composition of $N_1$ and $N_2$, denoted $N_1\! \circledast \!N_2$, is the LGWF-net $(P, T, F, m_0, m_f, h, \ell, k)$, where:
\begin{enumerate}
\item $P = P_1^u \cup P_2^u \cup P_c$ where \\
$\abs{P_c} = \abs{C},$
$C= \{ c \in \mathbf{ch}(\rng{h}) \, \vert \, \exists t, t' \in T_1^a \cup T_2^a \colon \mathbf{ch}(h(t)) = c \land h(t) = \overline{h(t')}\}$.\label{opluspl}
\item $m_0 = m_0^1 \cup m_0^2$ and $m_f = m_f^1 \cup m_f^2$.\label{oplusinst}
\item $T = T_1^a \cup T_2^a \cup T_{sync}$ where \\
$T_{sync} = \{(t_1, t_2)\, \vert \, t_1 \in \dom{\ell_1}, t_2 \in \dom{\ell_2}, \ell_1(t_1) = \ell_2(t_2)\}$.
\item $F$ is defined by the following four cases:
\begin{enumerate}
\item $\forall p \in P_i^u , \forall t \in T_i^a$ for $i=1, 2$
\begin{itemize}
\item $(p, t) \in F \Leftrightarrow (p,t) \in F_i$ and
\item $(t, p) \in F \Leftrightarrow (t,p) \in F_i$.
\end{itemize}
\item $\forall p \in P_1^u, \forall t = (t_1, t_2) \in T_{sync}$
\begin{itemize}
\item $(p, t) \in F \Leftrightarrow (p, t_1) \in F_1$ and
\item $(t, p) \in F\Leftrightarrow (t_1, p) \in F_1$.
\end{itemize}
\item $\forall p \in P_2^u, \forall t = (t_1, t_2) \in T_{sync}$
\begin{itemize}
\item $(p, t) \in F \Leftrightarrow (p, t_2) \in F_2$ and
\item $(t, p) \in F\Leftrightarrow (t_2, p) \in F_2$.
\end{itemize}
\item $\forall p \in P_c , \forall t \in T_i^a$ for $i=1, 2$
\begin{itemize}
\item $(k(p) = c) \land (h_i(t) = c!) \Rightarrow (t,p) \in F$ and
\item $(k(p) = c) \land (h_i(t) = c?) \Rightarrow (p,t) \in F$.
\end{itemize}
\end{enumerate}
\item $h \colon T \nrightarrow \Lambda$, such that $\forall t \in T_{sync} \colon h(t) = \perp $ and $\forall t \in T_i^a \colon h(t) = h_i(t)$ for $i=1, 2$.
\item $\ell \colon T \nrightarrow S$, such that $\forall t = (t_1, t_2) \in T_{sync} \colon \ell(t) = \ell_1(t_1) = \ell_2(t_2)$ and \\
$\forall t_i \in T_i^a \colon \ell(t_i) = \perp$ for $i=1, 2$.
\item $k \colon P \nrightarrow C$, such that $k\vert_{P_c}$ is a bijection and $\forall p \notin P_c \colon k(p) = \perp$.\label{opluspllab}
\end{enumerate}
\end{definition}
Consider the example shown in Fig.~\ref{comp_ex}.
We compose two LGWF-nets $N_1$ and $N_2$ shown in Fig.\,\ref{comp_ex1}.
They exchange messages via two channels $x$ and $y$.
They also synchronize when transitions $b$ and $f$ fire.
This fact is given by the common synchronization label $s$ of transitions $b$ and $f$.
As a result, we need to introduce two labeled places $x$ and $y$ and connect them according to the sending/receiving labels of transitions.
In addition, we merge transitions $b$, $f$ obtaining a single transition $(b,f)$ in the AS-composition $N_1\! \circledast\! N_2$ shown in Fig.\,\ref{comp_ex2}.
Note also that after synchronizing transitions, the AS-composition can have places, whose neighborhoods coincide.
Such places \emph{can} be merged into a single one to make a net \emph{P-simple}.
Correspondingly, in Fig.\,\ref{comp_ex2}, we have merged the output places of the synchronized transitions $b$ and $f$.
\begin{figure}
\subcaption{interacting components}\label{comp_ex1}
\subcaption{$N_1 \circledast N_2$}\label{comp_ex2}
\caption{AS-composition of two LGWF-nets}
\label{comp_ex}
\end{figure}
The AS-composition of LGWF-nets enjoys several properties that are easy to verify.
Firstly, it is both a commutative and associative operation.
These algebraic properties directly follow from the construction rules.
Thus, it can be generalized to the case of composing more than two LGWF-nets.
Secondly, a reachable marking in an AS-composition of LGWF-nets can be decomposed into three ``sub-markings'': the reachable markings of component LGWF-nets together with a marking of labeled places.
This follows from the fact that we can project the AS-composition firing sequences on the transitions in component LGWF-nets.
Then we obtain the corresponding firing sequences of components (see Proposition \ref{markdec}, where we formalize this property).
\begin{proposition}\label{markdec}
Let $N_i = (P_i, T_i, F_i, m_0^i, m_f^i, h_i, \ell_i, k_i)$ be an LGWF-net for $i=1, 2$, and $N_1\!\circledast\!N_2 = (P, T, F, m_0, m_f, h, \ell, k)$ be the AS-composition of $N_1$ and $N_2$.
Then $\forall m \in \reach{m_0} \colon m = (m_1 \setminus \dom{k_1}) \cup (m_2 \setminus \dom{k_2}) \cup m_c$, where $m_1 \in \reach{m_0^1}$, $m_2 \in \reach{m_0^2}$ and $m_c \subseteq \dom{k}$.
\end{proposition}
However, the AS-composition of LGWF-nets studied above may not preserve behavioral and structural properties of component LGWF-nets.
For instance, if $N_1$ and $N_2$ are two sound LGWF-nets, their composition $N_1\! \circledast\! N_2$ might not be sound.
Consider the example provided in Fig.\,\ref{nsound}, where the system $N_1\! \circledast\! N_2$ is composed of two sound LGWF-nets.
$N_1\! \circledast\! N_2$ may reach a final marking $\{f_1, s_2\}$ different from the expected final marking $\{f_1, f_2\}$ if $N_1$ does not send a message to channel $d$.
This makes $N_1\! \circledast\! N_2$ lose soundness.
Moreover, it is no longer covered by sequential components.
\begin{figure}
\caption{AS-composition may not preserve component properties}
\label{nsound}
\end{figure}
The preservation of component properties in their AS-composition is the main problem we address in the paper.
For this purpose, instead of considering the AS-composition of LGWF-nets directly, we will analyze an underlying \emph{abstract interface}.
It is an LGWF-net that models how components interact.
Component models are mapped on corresponding subnets in an interface net via \emph{morphisms} discussed in the following section.
Then in Section \ref{sec:main}, we will apply these morphisms to achieve the preservation of component soundness in the AS-composition.
\section{Abstraction and Refinement in GWF-Nets Based on Morphisms}\label{sec:alpha}
This section describes a basic technique supporting abstraction and refinement in Petri nets based on $\alpha$-morphisms.
We study the properties of $\alpha$-morphisms relevant to GWF-nets.
These properties are further used to address the problem of preserving the soundness of LGWF-nets in their AS-composition.
\subsection{Place Refinement and $\alpha$-Morphisms}
The class of \emph{$\alpha$-morphisms} was introduced in \cite{Bernardinello2013} to support abstraction and refinement in Petri nets covered by sequential components.
An $\alpha$-morphism example is provided in Fig. \ref{alpha_ex}, where refinement of places is depicted by shaded subnets, i.e., the subnet $N_1(\omr{p_2})$ in $N_1$ refines the place $p_2$ in $N_2$.
Refinement of transitions is explicitly given by their names, i.e., two transitions $f_1$ and $f_2$ in $N_1$ refine the same transition $f$ in $N_2$.
In other words, refinement may lead to splitting transitions of an abstract net.
After providing the formal definition of $\alpha$-morphisms, we also discuss the general intuition behind them.
\begin{definition}\label{alpham}
Let $N_i = (P_i, T_i, F_i, m_0^i)$ be an SMD Petri net, $X_i = P_i \cup T_i$ for $i=1, 2$, where $X_1 \cap X_2 = \varnothing$.
An $\alpha$-morphism from $N_1$ to $N_2$ is a total surjective map $\varphi \colon X_1 \to X_2$, also denoted $\varphi \colon N_1 \to N_2$, where:
\begin{enumerate}
\item $\varphi(P_1) = P_2$.
\item $\varphi(m_0^1) = m_0^2$.\label{instate}
\item $\forall t_1 \in T_1 \colon$ if $\om{t_1} \in T_2$, then $\om{\pre{t_1}}=\pre{\om{t_1}}$ and $\om{\post{t_1}}=\post{\om{t_1}}$.\label{tTOt}
\item $\forall t_1 \in T_1 \colon$ if $\om{t_1} \in P_2$, then $\om{\neighb{t_1}}=\{\om{t_1}\}$.\label{tTOp}
\item $\forall p_2 \in P_2 \colon$
\begin{enumerate}
\item $N_1(\varphi^{-1}(p_2))$ is an acyclic net or $\omr{p_2} \subseteq P_1$.\label{acycsub}
\item $\forall p_1 \in \inp{N_1(\varphi^{-1}(p_2))} \colon \om{\pre{p_1}} \subseteq \pre{p_2}$ and if $\pre{p_2} \neq \varnothing$, then $\pre{p_1} \neq \varnothing$.\label{inP}
\item $\forall p_1 \in \outp{N_1(\varphi^{-1}(p_2))} \colon \om{\post{p_1}} = \post{p_2}$.\label{outP}
\item $\forall p_1 \in P_1 \cap \varphi^{-1}(p_2) \colon p_1 \notin \inp{N_1(\varphi^{-1}(p_2))} \Rightarrow \om{\pre{p_1}}=p_2 \text{ and }$\\ $p_1 \notin \outp{N_1(\varphi^{-1}(p_2))} \Rightarrow \om{\post{p_1}} = p_2$.\label{INsub}
\item $\forall p_1 \in P_1 \cap \varphi^{-1}(p_2)\colon$ there is a sequential component $N' = (P', T', F')$ in $N_1$, such that $p_1 \in P'$, $\varphi^{-1}(\neighb{p_2}) \subseteq T'$.\label{scomp}
\end{enumerate}
\end{enumerate}
\end{definition}
\begin{figure}
\caption{The $\alpha$-morphism $\varphi \colon N_1 \to N_2$}
\label{alpha_ex}
\end{figure}
According to the definition, $\alpha$-morphisms allow us to refine places in $N_2$ by replacing them with \emph{acyclic} subnets in $N_1$, where $N_2$ is called an abstract net and $N_1$ is its refinement.
If a transition in $N_1$ is mapped to a transition in $N_2$, then the neighborhood of the transition in $N_1$ should be mapped on the neighborhood of the corresponding transition in $N_2$
(by Definition \ref{alpham}.\ref{tTOt}).
If a transition in $N_1$ is mapped to a place in $N_2$, then the neighborhood of this transition should be mapped to the same place (by Definition \ref{alpham}.\ref{tTOp}).
The main motivation behind $\alpha$-morphisms is the possibility to ensure that the behavioral properties of an abstract model hold in its refinement as well.
Therefore, each output place in a subnet should have the same choices as its abstraction does (by Definition \ref{alpham}.\ref{outP}).
Input places do not need this constraint (by Definition \ref{alpham}.\ref{inP}).
A choice between them is made before, since there are no concurrent transitions in the neighborhood of a subnet (by Definition \ref{alpham}.\ref{scomp}).
Moreover, by Definition \ref{alpham}.\ref{INsub}, neighborhoods of places internal to a subnet are mapped to the same place as the subnet.
To sum up, requirements imposed by Definition \ref{alpham}.\ref{acycsub}-\ref{scomp} ensure the main intuition behind $\alpha$-morphisms.
If a subnet in $N_1$ refines a place in $N_2$, then this subnet should behave ``in the same way'' as the place in $N_2$ does.
More precisely, let $N_1(\omr{p_2})$ be a subnet in $N_1$ refining a place $p_2$ in $N_2$. Then the following holds:
\begin{enumerate}
\item No tokens are left in $N_1(\omr{p_2}) \cap P_1$ after firing an output transition in $\post{(\outp{N_1(\omr{p_2})})}$;
\item No transitions are enabled in $\pre{(\inp{N_1(\omr{p_2})})}$ whenever there is a token in $N_1(\omr{p_2}) \cap P_1$.
\end{enumerate}
\subsection{Properties Preserved and Reflected by $\alpha$-Morphisms}\label{ssec:prop}
Here we study properties \emph{preserved} and \emph{reflected} by $\alpha$-morphisms (see Fig.~\ref{propscheme}).
In \cite{Bernardinello2013} several properties of $\alpha$-morphisms have already been studied.
We will refer to some of the proven properties here and consider other properties of $\alpha$-morphisms relevant to generalized workflow nets.
\begin{figure}
\caption{Relation between properties of an abstract system and its refinement}
\label{propscheme}
\end{figure}
The following proposition states that the structure of GWF-nets is preserved by $\alpha$-morphisms.
\begin{proposition}\label{strpr}
Let $N_i=(P_i, T_i, F_i,$ $m_0^i)$ be an SMD Petri net, and $X_i = P_i \cup T_i$ for $i=1, 2$, such that there is an $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
If $N_1$ is a GWF-net, then $N_2$ is a GWF-net.
\end{proposition}
\begin{proof}
We show that $N_2$ satisfies the three structural conditions of GWF-nets, see Definition \ref{GWF}.
\textbf{1.} By Definition \ref{alpham}.\ref{instate}, $\om{m_0^1} = m_0^2$.
Suppose $\exists p_2 \in m_0^2 \colon \pre{p_2} \neq \varnothing$.
By Definition \ref{alpham}.\ref{inP}, $\forall p_1 \in \inp{N_1(\varphi^{-1}(p_2))} \colon$ if $\pre{p_2} \neq \varnothing$, then $\pre{p_1} \neq \varnothing$.
Take $p_1 \in m_0^1$, such that $\om{p_1} = p_2$.
Since $p_1 \in \inp{N_1(\varphi^{-1}(p_2))}$, then $\pre{p_1} \neq \varnothing$.
By Definition \ref{GWF}.\ref{instwf}, $\forall p \in m_0^1 \colon \pre{p} = \varnothing$.
Then, $\pre{p_2} = \varnothing$ and $\forall p \in m_0^2 \colon \pre{p} = \varnothing$.
\textbf{2.} By Definition \ref{GWF}.\ref{outstwf}, $m_f^1 \subseteq P_1$, such that $\post{(m_f^1)} = \varnothing$.
Denote $\om{m_f^1}$ by $m_f^2\!\subseteq\!P_2$.
Suppose $\exists p_2 \in m_f^2 \colon \post{p_2} \neq \varnothing$.
Take $p_1 \in m_f^1$, such that $\om{p_1} = p_2$.
By Definition \ref{alpham}.\ref{outP}, $\forall p_1 \in \outp{N_1(\varphi^{-1}(p_2))} \colon \om{\post{p_1}} = \post{p_2}$.
Since $p_1 \in \outp{N_1(\varphi^{-1}(p_2))}$, $\post{p_1} \neq \varnothing$.
But by Definition \ref{GWF}.\ref{outstwf}, $p_1 \in m_f^1$ and $\post{p_1} = \varnothing$.
Then, $\post{p_2} = \varnothing$ and $\forall p \in m_f^2 \colon \post{p} = \varnothing$.
\textbf{3.} Suppose $\exists x_2 \in X_2$, such that $\forall p \in m_0^2 \colon (p, x_2) \notin F_2^*$.
Since an $\alpha$-morphism is a surjective map, $\omr{x_2} \neq \varnothing$.
Thus, $\varphi^{-1}(x_2) = \{x_1^1, \dots, x_1^k\} \subseteq X_1$, where $k \geq 1$.
If $x_2 \in T_2$, then $\varphi^{-1}(x_2) \subseteq T_1$, and we take $x_1 \in \omr{x_2}$.
If $x_2 \in P_2$, then we take $x_1 \in \inp{N_1(\omr{x_2})}$.
By Definition \ref{GWF}.\ref{connwf}, $\exists s \in m_0^1 \colon (s,x_1) \in F_1^*$.
Then, $\om{\pre{x_1}} \subseteq \pre{x_2}$ or $\om{\pre{x_1}} = x_2$.
We follow backward the whole path from $s$ to $x_1$ in $N_1$ mapping it on $N_2$ with $\varphi$.
Thus, we obtain that $\exists x' \in X_2 \colon (x', x_2) \in F_2^*$ and $\om{s}=x'$, where $x' \in m_0^2$.
Suppose $\exists x_2 \in X_2$, such that $\forall p \in m_f^2 \colon (x_2, p) \notin F_2^*$.
Since an $\alpha$-morphism is a surjective map, $\omr{x_2} \neq \varnothing$.
Thus, $\varphi^{-1}(x_2) = \{x_1^1, \dots, x_1^k\} \subseteq X_1$, where $k \geq 1$.
If $x_2 \in T_2$, then $\varphi^{-1}(x_2) \subseteq T_1$, and we take $x_1 \in \omr{x_2}$.
If $x_2 \in P_2$, then we take $x_1 \in \outp{N_1(\omr{x_2})}$.
By Definition \ref{GWF}.\ref{connwf}, $\exists f \in m_f^1 \colon (x_1, f) \in F_1^*$.
Then, $\om{\post{x_1}} \subseteq \post{x_2}$ or $\om{\post{x_1}} = x_2$.
We follow the whole path forward from $x_1$ to $f$ in $N_1$ mapping it on $N_2$ with $\varphi$.
Thus, we obtain that $\exists x' \in X_2 \colon (x_2, x') \in F_2^*$ and $\om{f} = x'$, where $x' \in m_f^2$. \qed
\end{proof}
It follows from Proposition \ref{strpr} that $\om{m_f^1} = m_f^2$, i.e., final markings of GWF-nets are also preserved by $\alpha$-morphisms.
In the general case, the converse of Proposition \ref{strpr} is not valid.
Indeed, $\alpha$-morphisms do not reflect the initial markings of GWF-nets properly (see Fig.\,\ref{gwf_nonrefl}).
A refined net $N_1$ is \emph{well marked} w.r.t. $\varphi$ if each input place in a subnet in $N_1$, refining a marked place in an abstract net $N_2$, is marked as well.
Consider again the $\alpha$-morphism shown in Fig.~\ref{gwf_nonrefl}: the token in the shaded subnet must be placed into $p$ to make $N_1$ well marked w.r.t. $\varphi$.
In the following proposition, we prove that $\alpha$-morphisms reflect the structure of GWF-nets under the well-markedness of $N_1$.
\begin{proposition}\label{strref}
Let $N_i=(P_i, T_i, F_i,$ $m_0^i)$ be an SMD Petri net, and $X_i = P_i \cup T_i$ for $i=1, 2$, such that there is an $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
If $N_2$ is a GWF-net and $N_1$ is well marked w.r.t. $\varphi$, then $N_1$ is a GWF-net.
\end{proposition}
\begin{proof}
We show that $N_1$ satisfies the three structural conditions of GWF-nets, see Definition \ref{GWF}.
\textbf{1.} By Definition \ref{GWF}.\ref{instwf}, $\forall s_2 \in m_0^2 \colon \pre{s_2} = \varnothing$.
Since $N_1$ is well-marked w.r.t. $\varphi$, $m_0^1 = \{ \inp{N_1(\omr{s_2})} \,\vert\, s_2 \in m_0^2 \}$.
Take $s_2 \in m_0^2$ and the corresponding subnet $N_1(\omr{s_2})$.
Suppose $\exists p \in \inp{N_1(\omr{s_2})}$, such that $\pre{p} \neq \varnothing$.
Then $\om{p} = s_2$ and, by Definition \ref{alpham}.\ref{inP}, $\om{\pre{p}} \subseteq \pre{s_2} = \varnothing$ contradicting the total surjectivity of $\varphi$.
\textbf{2.} By Definition \ref{GWF}.\ref{outstwf}, $\forall f_2 \in m_f^2 \colon \post{f_2} = \varnothing$.
Take $f_2 \in m_f^2$ and the corresponding subnet $N_1(\omr{f_2})$.
Also take $p \in \outp{N_1(\omr{f_2})}$.
Then $\om{p} = f_2$.
By Definition~\ref{alpham}.\ref{outP}, $\om{\post{p}} = \post{f_2} = \varnothing$ contradicting the total surjectivity of $\varphi$.
Thus, we obtain the final marking of $N_1$, i.e., $m_f^1 = \{ \outp{N_1(\omr{f_2})} \,\vert\, f_2 \in m_f^2 \}$ and $\post{(m_f^1)} = \varnothing$.
\textbf{3.} Suppose $\exists x_1 \in X_1$, such that $\forall s_1 \in m_0^1 \colon (s_1, x_1) \notin F_1^*$.
If $(x_1, x_1) \notin F_1^*$, we follow the path from $x_1$ to the first node $x_1' \in X_1$ in $N_1$ backward, such that $\pre{x_1'} = \varnothing$.
Since $\forall t_1 \in T_1 \colon \abs{\pre{t_1}} \geq 1$, $x_1' \in P_1$.
If $x_1' \notin m_0^1$, then $N_1$ is not well-marked w.r.t. $\varphi$.
If $(x_1, x_1) \in F_1^*$, then, by Definition \ref{alpham}.\ref{acycsub}, there is a corresponding image cycle in $N_2$.
Take $x_2 \in X_2$, such that $\om{x_1} = x_2$.
By Definition \ref{GWF}.\ref{connwf}, $\exists s_2 \in m_0^2 \colon (s_2, x_2) \in F_2^*$.
Take $x_2' \in X_2$ belonging to this cycle, where at least one node in $\pre{x_2'}$ is not in the cycle.
By surjectivity of $\varphi$, $\exists x_1' \in X_1 \colon \om{x_1'} = x_2'$ belonging to the cycle $(x_1, x_1) \in F_1^*$.
If $x_2' \in T_2$, then $\omr{x_2'} \subseteq T_1$.
By Definition \ref{alpham}.\ref{tTOt}, the neighborhood of transitions is preserved by $\varphi$.
Then, $\forall t_1 \in \omr{x_2'} \colon \om{\pre{t_1}} = \pre{x_2'}$, i.e., there is a place in $\pre{\omr{x_2'}}$ which does not belong to the cycle $(x_1, x_1) \in F_1^*$.
If $x_2' \in P_2$, then take $\inp{N_1(\omr{x_2'})}$.
At least one place in $\inp{N_1(\omr{x_2'})}$ has an input transition which does not belong to the cycle $(x_1, x_1) \in F_1^*$, since there is a node in $\pre{x_2'}$ which is not in the image cycle in $N_2$.
We have shown that $\exists x \in \pre{x_1'}$, such that $x$ does not belong to the cycle $(x_1, x_1) \in F_1^*$.
Thus, either there is a path from $\widetilde{x}$ to $x$ with $\pre{\widetilde{x}} = \varnothing$, or there is another cycle $(\widetilde{x}, \widetilde{x}) \in F_1^*$.
Applying a similar reasoning, we prove that $\forall x_1 \in X_1 \, \exists f_1 \in m_f^1 \colon (x_1, f_1) \in F_1^*$.
The only difference is that we follow paths forward. \qed
\end{proof}
\begin{figure}
\begin{subfigure}{0.48\linewidth}
\centering
\caption{}
\label{gwf_nonrefl}
\end{subfigure}
\hfill
\begin{subfigure}{0.48\linewidth}
\centering
\caption{}
\label{sound_nonrefl}
\end{subfigure}
\caption{Two $\alpha$-morphisms with the same range}
\end{figure}
In Propositions \ref{strpr} and \ref{strref}, we have proven two structural properties of $\alpha$-morphisms.
Further, we study preservation and reflection of behavioral properties, i.e., whether reachable markings are preserved and reflected by $\alpha$-morphisms.
The following proposition, proven in \cite{Bernardinello2013}, states that $\alpha$-morphisms preserve reachable markings and transition firings.
\begin{proposition}[see \cite{Bernardinello2013}]
Let $N_i=(P_i, T_i, F_i,$ $m_0^i)$ be an SMD Petri net, and $X_i = P_i \cup T_i$ for $i=1, 2$, such that there is an $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
Let $m_1 \in \reach{m_0^1}$. Then $\om{m_1} \in \reach{m_0^2}$.
If $m_1 \reach{t}m_1'$, where $t \in T_1$, then:
\begin{enumerate}
\item $\om{t} \in T_2 \Rightarrow \om{m_1}\reach{\om{t}}\om{m_1'}$.
\item $\om{t} \in P_2 \Rightarrow \om{m_1} = \om{m_1'}$.
\end{enumerate}\label{markpres}
\end{proposition}
In the general case, $\alpha$-morphisms do not reflect both reachable markings and transition firings.
More precisely, having a reachable marking $m_2 \in \reach{m_0^2}$, such that $m_2\reach{t_2}$ for a transition $t_2$ in $N_2$, we cannot say that $\forall t_1 \in \omr{t_2}\, \exists m_1 = \omr{m_2} \in \reach{m_0^1} \colon m_1 \reach{t_1}$ in $N_1$.
Note that the reflection of reachable markings is a crucial property, since we seek to deduce the behavioral properties of a refined system from those of its abstraction.
It is necessary to check additional \emph{local} conditions based on the unfolding to achieve the reflection of reachable markings.
We briefly describe this technique first introduced in \cite{Bernardinello2013}.
Let $N_1$ and $N_2$ be two Petri nets related via the $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
Recall that $N_1$ is called a refinement, and $N_2$ is called an abstraction of $N_1$.
For every place $p_2$ in $N_2$, refined by a subnet in $N_1$, we construct a \emph{local} net, denoted by $S_2(p_2)$, by taking the neighborhood transitions of $p_2$ with artificial input and output places if necessary.
The same is done for the refined system $N_1$.
We construct the corresponding local net, denoted by $S_1(p_2)$, by taking the subnet in $N_1$ refining $p_2$ via $\varphi$, i.e., $N_1(\omr{p_2})$ and the transitions $\varphi^{-1} (\pre{p_2}) \cup \varphi^{-1} (\post{p_2})$ with artificial input and output places if necessary.
As a result, we have two local nets $S_1(p_2)$ and $S_2(p_2)$.
Since there is the $\alpha$-morphism $\varphi: N_1 \to N_2$, there is also the $\alpha$-morphism $\varphi^S \colon S_1(p_2) \to S_2(p_2)$ corresponding to the restriction of $\varphi$ to the places and transitions in $S_1(p_2)$.
Recall that the unfolding of a Petri net $N$, denoted by $\mathcal{U}(N)$, is the maximal branching process of $N$, such that any other branching process is isomorphic to a subnet in $\mathcal{U}(N)$. The nodes in $\mathcal{U}(N)$ are mapped to the nodes in $N$ via the \emph{folding} function $u$.
In Lemma \ref{unf}, taking the unfolding of $S_1(p_2)$, we prove that the associated folding function $u$ composed with the $\alpha$-morphisms $\varphi^S$ is also the $\alpha$-morphism under the soundness of $N_1$.
Note that since $S_1(p_2)$ is acyclic (by Definition \ref{alpham}.\ref{acycsub}), its unfolding is finite.
This helps us to assure that the ``final'' marking in a subnet in $N_1$, refining a place $p_2$ in the abstract model $N_2$, enables exactly the inverse image of transitions in $\post{p_2}$.
After providing Lemma \ref{unf}, we also discuss a specific example of checking these local conditions.
\begin{lemma}\label{unf}
Let $N_i=(P_i, T_i, F_i,$ $m_0^i)$ be an SMD Petri net, and $X_i = P_i \cup T_i$ for $i=1, 2$, such that there is an $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
Let $\mathcal{U}(S_1(p_2))$ be the unfolding of $S_1(p_2)$ with the folding function $u$, and $\varphi^S$ be an $\alpha$-morphism from $S_1(p_2)$ to $S_2(p_2)$, where $p_2 \in P_2$.
Let $N_1$ be a sound GWF-net. Then, the map from $\mathcal{U}(S_1(p_2))$ to $S_2(p_2)$ obtained as $\varphi^S \circ u$ is an $\alpha$-morphism.
\end{lemma}
\begin{proof}
Since $N_1$ is a GWF-net, $S_1(p_2)$ is also a GWF-net.
By Lemma 1 of \cite{Bernardinello2013}, when a transition in $\omr{\post{p_2}}$ fires, it empties the subnet $N_1(\omr{p_2})$.
Then $S_1(p_2)$ is sound, and, by Definition \ref{sound}.\ref{sndlive}, each transition in $S_1(p_2)$ will occur at least once.
Thus, the folding $u$ is a surjective function from $\unf{S_1(p_2)}$ to $S_1(p_2)$ and the composition $\varphi^S \circ u$ is the $\alpha$-morphism from $\unf{S_1(p_2)}$ to $S_2(p_2)$. \qed
\end{proof}
Figure \ref{locunf} provides a negative example of checking the local unfolding condition when $N_1$ is not sound.
We use the $\alpha$-morphism previously shown in Fig.\,\ref{sound_nonrefl}.
In this case, the local nets coincide with the original $N_1$ and $N_2$.
When we unfold $N_1$, there are no occurrences of transitions $y_1$ and $y_2$.
Thus, the composition of the corresponding folding function and the $\alpha$-morphism $\varphi \circ u$ is not an $\alpha$-morphism.
The ``final'' marking in the subnet $N_1(\omr{p_2})$, which refines $p_2$ in $N_2$, enables transitions $x_1$ and $x_2$ only, whereas, in $N_2$, transition $y$ is also enabled.
Therefore, transitions in the inverse image of $y$ in $N_2$ cannot be enabled by the final marking in the subnet $N_1(\omr{p_2})$.
\begin{figure}
\caption{A negative example of checking local conditions based on the unfolding}
\label{locunf}
\end{figure}
One should check the unfolding condition for all properly refined places in an abstract net.
A properly refined place is a place that is refined by a subnet rather than by a set of places.
Taking the above discussion into account, we obtain that $\alpha$-morphisms reflect reachable markings and transition firings under the soundness of a refinement (see Proposition \ref{markrefl}).
\begin{proposition}\label{markrefl}
Let $N_i=(P_i, T_i, F_i,$ $m_0^i)$ be an SMD Petri net, and $X_i = P_i \cup T_i$ for $i=1, 2$, such that there is an $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
Let $N_1$ be a sound GWF-net.
Then $\forall m_2 \in \reach{m_0^2} \, \exists m_1 \in \reach{m_0^1} \colon$ $ \varphi(m_1) = m_2$.
If $m_2\reach{t_2}$, then $\forall t_1 \in \varphi^{-1} (t_2) \, \exists m_1 = \varphi^{-1}(m_2) \in \reach{m_0^1} \colon $ $m_1\reach{t_1}$.
\end{proposition}
\begin{proof}
Follows from Lemma \ref{unf}. \qed
\end{proof}
The following theorem expresses the main result of this section.
We prove that $\alpha$-morphisms preserve soundness of GWF-nets.
\begin{theorem}\label{spres}
Let $N_i=(P_i, T_i, F_i,$ $m_0^i)$ be an SMD Petri net, and $X_i = P_i \cup T_i$ for $i=1, 2$, such that there is an $\alpha$-morphism $\varphi \colon N_1 \to N_2$.
If $N_1$ is a sound GWF-net, then $N_2$ is a sound GWF-net.
\end{theorem}
\begin{proof}
We show that $N_2$ satisfies the three behavioral conditions of a sound GWF-net, see Definition \ref{sound}.
\textbf{1.} By Definition \ref{sound}.\ref{sndprop}, for all $m_1 \in \reach{m_0^1} \colon m_f^1 \in \reach{m_1}$.
Then, $\exists w \in FS(N_1) \colon$ $m_1\reach{w}m_f^1$, i.e., $w=t_1t_2\dots t_n$ and $m_1\reach{t_1}m_1^1\dots m_1^{n-1}\reach{t_n}m_f^1$.
Using Proposition \ref{markpres}, it is possible to simulate $w$ in $N_2$.
By Proposition \ref{strpr}, $\om{m_f^1} = m_f^2$.
Suppose $\exists m_2 \in \reach{m_0^2} \colon m_f^2 \notin \reach{m_2}$.
By Proposition \ref{markrefl}, $\exists m_1' \in \reach{m_0^1} \colon \omr{m_2} = m_1'$.
By Definition \ref{sound}.\ref{sndprop}, $m_f^1 \in \reach{m_1'}$.
Thus, $\exists w' \in FS(N_1) \colon$ $m_1'\reach{w'}m_f^1$.
Using Proposition \ref{markpres}, it is again possible to simulate $w'$ in $N_2$.
Then, $m_f^2 \in \reach{m_2}$.
\textbf{2.} Suppose $\exists m_2' \in \reach{m_0^2} \colon m_2' \supseteq m_f^2$.
Then $m_2' = m_f^2 \cup P_2'$, where $P_2' \cap m_f^2 = \varnothing$.
By Proposition \ref{markrefl}, take $m_1 \in \reach{m_0^1}$, such that $\omr{m_1} = m_2'$ and $m_f^1 \nsubseteq m_1$.
By Definition \ref{sound}.\ref{sndprop}, $m_f^1 \in \reach{m_1}$ and $\exists w \in FS(N_1) \colon m_1 \reach{w} m_f^1$.
Using Proposition \ref{markpres}, it is possible to simulate $w$ in $N_2$.
By Proposition \ref{strpr}, $\om{m_f^1} = m_f^2$.
The only way to completely empty places in $P_2'$ is to consume at least one token from $m_f^2$.
Then, $\exists f_2 \in m_f^2 \colon \post{f_2} \neq \varnothing$ which contradicts Definition \ref{GWF}.\ref{outstwf}.
\textbf{3.} By Definition \ref{sound}.\ref{sndlive}, $\forall t_1 \in T_1 \, \exists m_1 \in \reach{m_0^1} \colon m_1 \reach{t_1}$.
Since $\varphi$ is a surjective map, $\forall t_2 \in T_2 \, \exists t_1 \in T_1 \colon \om{t_1} = t_2$.
By Proposition \ref{markpres}, $m_1 \reach{t_1} m_1' \Rightarrow \om{m_1}\reach{\om{t_1}}\om{m_1'}$.
Then, $\forall t_2 \in T_2 \, \exists m_2 \in \reach{m_0^2} \colon m_2 \reach{t_2}$. \qed
\end{proof}
However, the converse of Theorem \ref{spres} is not true in general.
Consider again the example shown in Fig.\,\ref{sound_nonrefl}, where $N_2$ is sound and $N_1$ is not sound, since transitions $y_1$ and $y_2$ cannot fire.
Thus, $\alpha$-morphisms do not reflect soundness, which follows from the fact that reachable markings are not reflected in the general case.
In our study, soundness reflection is a sought property of $\alpha$-morphisms.
We apply $\alpha$-morphisms to achieve the preservation of LGWF-net soundness in their AS-composition.
Component interactions are encoded in an \emph{abstract interface}, which also represents the abstract view of a complete system.
In the next section, we provide a technique when the soundness of an abstract interface implies the soundness of a refined system model, i.e., the associated $\alpha$-morphism reflects the soundness of an abstract interface.
\section{Preserving Soundness in the AS-Composition via Morphisms}\label{sec:main}
The AS-composition of LGWF-nets, discussed in Section \ref{sec:comp}, preserves the soundness of LGWF-nets through the use of an \emph{abstract interface}.
This model provides minimal detail on the local behavior of communicating components, focusing on their synchronous and asynchronous interactions.
An abstract interface is also referred to as an \emph{interface pattern} that is the AS-composition of corresponding abstract LGWF-nets.
Abstraction of components is implemented using $\alpha$-morphisms, discussed in the previous section, that we adjust to LGWF-nets.
We aim to deduce the soundness of a refined system model by verifying the soundness of an underlying interface pattern.
If $N_1$ and $N_2$ are two LGWF-nets, then an $\alpha$-morphism $\varphi \colon N_1 \to N_2$ should additionally respect transition labeling, i.e., a labeled transition in $N_1$ can only be mapped to a transition in $N_2$ with the same label.
Then a labeled transition in $N_1$ cannot be mapped to a place in $N_2$.
We formalize these restrictions on labeled transition mapping in the following definition.
\begin{definition}\label{alphahm}
Let $N_i = (P_i, T_i, F_i, m_0^i, m_f^i, h_i, \ell_i, k_i)$ be an SMD LGWF-net, and $X_i = P_i \cup T_i$ for $i=1, 2$.
An $\widehat{\alpha}$-morphism from $N_1$ to $N_2$ is a total surjective map $\varphi \colon X_1 \to X_2$, also denoted $\varphi \colon N_1 \to N_2$, where:
\begin{enumerate}
\item $\varphi(P_1) = P_2$, such that $\forall p_1 \in \dom{k_1} \colon k_2(\varphi(p_1)) = k_1(p_1)$.
\item $\om{m_0^1} = m_0^2$.
\item[2'.] $\om{m_f^1} = m_f^2$.
\item $\forall t_1 \in T_1 \colon$ if $\om{t_1} \in T_2$, then $\om{\pre{t_1}} = \pre{\om{t_1}}$ and $\om{\post{t_1}} = \post{\om{t_1}}$.
\item[3'.] $\forall t_1 \in \dom{h_1} \cup \dom{\ell_1} \colon \om{t_1} \in T_2$ and
\begin{enumerate}
\item if $t_1 \in \dom{h_1}$, then $h_2(\om{t_1}) = h_1(t_1)$;
\item if $t_1 \in \dom{\ell_1}$, then $\ell_2(\om{t_1}) = \ell_1(t_1)$.
\end{enumerate}
\item $\forall t_1 \in T_1 \colon$ if $\om{t_1} \in P_2$, then $\om{\neighb{t_1}}=\{\om{t_1}\}$.\label{tTOp}
\item $\forall p_2 \in P_2 \colon$
\begin{enumerate}
\item $N_1(\varphi^{-1}(p_2))$ is an acyclic net or $\omr{p_2} \subseteq P_1$.\label{acycsub}
\item $\forall p_1 \in \inp{N_1(\varphi^{-1}(p_2))} \colon \om{\pre{p_1}} \subseteq \pre{p_2}$ and if $\pre{p_2} \neq \varnothing$, then $\pre{p_1} \neq \varnothing$.\label{inP}
\item $\forall p_1 \in \outp{N_1(\varphi^{-1}(p_2))} \colon \om{\post{p_1}} = \post{p_2}$.\label{outP}
\item $\forall p_1 \in P_1 \cap \varphi^{-1}(p_2) \colon p_1 \notin \inp{N_1(\varphi^{-1}(p_2))} \Rightarrow \om{\pre{p_1}}=p_2 \text{ and }$\\ $p_1 \notin \outp{N_1(\varphi^{-1}(p_2))} \Rightarrow \om{\post{p_1}} = p_2$.\label{INsub}
\item $\forall p_1 \in P_1 \cap \varphi^{-1}(p_2)\colon$ there is a sequential component $N' = (P', T',$$ F')$ of $N_1$, such that $p_1 \in P'$, $\varphi^{-1}(\neighb{p_2}) \subseteq T'$.\label{scomp}
\end{enumerate}
\end{enumerate}
\end{definition}
Thus, an $\widehat{\alpha}$-morphism is an $\alpha$-morphism (see Definition~\ref{alpham}) that also satisfies conditions 2' and 3' of Definition \ref{alphahm}.
When two LGWF-nets are related by an $\widehat{\alpha}$-morphism, their underlying GWF-nets are related by an $\alpha$-morphism.
That is why $\widehat{\alpha}$-morphisms inherit the properties of $\alpha$-morphisms, discussed in Section \ref{ssec:prop}. We use them to achieve soundness preservation in the AS-composition of LGWF-nets.
Moreover, it also follows from Definition \ref{alphahm} that labeled places in LGWF-nets are both preserved and reflected by $\widehat{\alpha}$-morphisms.
In other words, an image of a labeled place in $N_1$ is a labeled place in $N_2$, as well as an inverse image of a labeled place in $N_2$, is a labeled place in $N_1$.
Thus, there is a bijection between the sets of labeled places in two LGWF-nets related by an $\widehat{\alpha}$-morphism.
We next discuss our approach to ensuring that the AS-composition of sound LGWF-nets yields a sound LGWF-net.
Given two sound LGWF-nets $R_1$ and $R_2$, we aim to be sure that $R_1\! \circledast\!R_2$ is sound.
It is possible to compose $R_1$ and $R_2$ using Definition \ref{comp}, but their composition may not be sound, as shown in the previous section.
A technique described below is applied to achieve soundness of $R_1\! \circledast \!R_2$ by construction.
We start with abstracting $R_1$ and $R_2$ preserving labeled transitions.
Abstractions of LGWF-nets can be constructed by applying finite sequences of transformations, as we discussed in \cite{Pnse-20}.
Thus, we obtain two abstract LGWF-nets $N_1$ and $N_2$, such that there is an $\widehat{\alpha}$-morphism $\varphi_i \colon R_i \to N_i$ with $i=1,2$.
According to Theorem \ref{spres}, $N_1$ and $N_2$ are sound.
These abstract models $N_1$ and $N_2$ are then composed by adding the same channels and synchronizing transitions with the same labels, as in $R_1$ and $R_2$.
Correspondingly, $N_1\! \circledast\! N_2$ is an interface pattern that describes interactions between LGWF-nets $R_1$ and $R_2$.
We \emph{verify} the soundness and structural properties of $N_1\! \circledast \!N_2$.
Given the sound interface pattern $N_1\! \circledast\! N_2$ and two $\widehat{\alpha}$-morphisms $\varphi_i \colon R_i \to N_i$ with $i=1, 2$, we construct two new LGWF-nets $R_1\! \circledast\! N_2$ and $N_1\! \circledast \!R_2$ representing two \emph{intermediate} refinements of the same abstract interface.
It is easy to see that these refinements of $N_1\! \circledast \!N_2$ preserve $\widehat{\alpha}$-morphisms, i.e., there is an $\widehat{\alpha}$-morphism from $R_1\! \circledast \!N_2$ to $N_1\! \circledast \!N_2$ as well as from $N_1\! \circledast \!R_2$ to $N_1\! \circledast\! N_2$.
For instance, an $\widehat{\alpha}$-morphism from $R_1\! \circledast \!N_2$ to $N_1 \!\circledast \!N_2$ is constructed from the original $\widehat{\alpha}$-morphism $\varphi_1 \colon R_1 \to N_1$ together with an identity mapping of asynchronously labeled transitions in $N_2$ and a corresponding mapping of synchronized transitions that can be refined in $R_1$.
Symmetrically, it is possible to show the construction of an $\widehat{\alpha}$-morphism from $N_1\! \circledast \!R_2$ to $N_1\! \circledast \!N_2$.
In Proposition \ref{irmor}, we additionally claim that an $\widehat{\alpha}$-morphism from an intermediate refinement $R_1 \circledast N_2$ to an interface pattern $N_1 \circledast N_2$ \emph{reflects} the connections among asynchronously labeled transitions with channels --- labeled places.
This reflection follows from the fact that labeled places are both preserved and reflected by $\widehat{\alpha}$-morphisms.
Further, we will use this property in the proof of the main theorem.
\begin{proposition}\label{irmor}
Let $R_1, N_1, N_2$ be three LGWF-nets, such that there is an $\widehat{\alpha}$-morphism $\varphi_1 \colon R_1 \to N_1$.
Let $N_1 \!\circledast \!N_2 = (P, T, F, m_0, m_f, h, \ell, k)$ and $R_1\! \circledast \!N_2 = (P', T', F',$ $m_0', m_f', h', \ell', k')$.
Then there is an $\widehat{\alpha}$-morphism $\varphi_1' \colon (R_1 \!\circledast \!N_2) \to (N_1\! \circledast \!N_2)$, where $\forall p \in \dom{k}$ and $\forall t \in T \colon$
\begin{enumerate}
\item if $(p, t) \in F$, then $\{\omr{p}\} \times \omr{t} \subseteq F'$;
\item if $(t, p) \in F$, then $\omr{t} \times \{\omr{p}\} \subseteq F'$.
\end{enumerate}
\end{proposition}
Let $N_1 \circledast N_2$ shown in Fig.\,\ref{comp_ex2} represent an interface pattern.
We refine it with two component LGWF-nets $R_1$ and $R_2$, as shown in Fig.\,\ref{intref}.
The corresponding $\widehat{\alpha}$-morphisms are indicated by the shaded ovals.
The $\alpha$-morphism between the underlying GWF-nets $R_2^-$ and $N_2^-$ is provided in Fig. \ref{alpha_ex}, where $N_1$ corresponds to $R_2^-$, and $N_2$ corresponds to $N_2^-$.
\begin{figure}
\begin{subfigure}{0.48\linewidth}
\centering
\caption{AS-composition $R_1 \circledast N_2$}
\end{subfigure}
\hfill
\begin{subfigure}{0.48\linewidth}
\centering
\caption{AS-composition $N_1 \circledast R_2$}
\end{subfigure}
\caption{Two intermediate refinements of $N_1 \circledast N_2$ from Fig.~\ref{comp_ex2}}
\label{intref}
\end{figure}
Theorem \ref{mainth} expresses the main result of our study.
We prove that an $\widehat{\alpha}$-morphism from an intermediate refinement $R_1 \circledast N_2$ (symmetrically, from $N_1 \circledast R_2$) to an interface pattern $N_1 \circledast N_2$ \emph{reflects} its soundness.
In proving this fact, we use the properties of $\alpha$-morphisms discussed in Section \ref{sec:alpha}, the characterization of reachable markings in the AS-composition given in Proposition \ref{markdec}, and the property considered in Proposition \ref{irmor}.
\begin{theorem}\label{mainth}
Let $R_1, N_1, N_2$ be three sound LGWF-nets, such that there is an $\widehat{\alpha}$-morphism $\varphi_1 \colon$ $R_1\!\to\!N_1$.
If $N_1 \circledast N_2$ is sound, then $R_1 \circledast N_2$ is sound.
\end{theorem}
\begin{proof}
By Proposition \ref{irmor}, there is an $\widehat{\alpha}$-morphism $\varphi_1' \colon (R_1\! \circledast\! N_2) \to (N_1\! \circledast\! N_2)$.
We first fix a notation used in the proof.
Let $N_i = (P_i, T_i, F_i, m_0^i, m_f^i, h_i, \ell_i,$ $k_i)$, where $i=1, 2$, and $R_1 = (\underline{P}_1, \underline{T}_1, \underline{F}_1, \underline{m}_0^1,$ $\underline{m}_f^1, \underline{h}_1, \underline{\ell}_1, \underline{k}_1)$.
Also, let $N_1\! \circledast\! N_2 = (P, T, F, m_0, m_f, h, \ell, k)$, and $R_1\! \circledast\! N_2 = (P', T', F', m_0', m_f', h', \ell', k')$.
We show that $R_1\! \circledast\! N_2$ satisfies the three behavioral conditions of a sound LGWF-net imposed by Definition \ref{sound}.
\textbf{1.} Take $m' \in \reach{m_0'}$.
By Proposition \ref{markdec} for $R_1 \circledast N_2$, $m' = (\underline{m}_1 \setminus \dom{\underline{k}_1}) \cup (m_2 \setminus \dom{k_2}) \cup m_c$, where $\underline{m}_1 \in \reach{\underline{m}_0^1}$, $m_2 \in \reach{m_0^2}$ and $m_c \in \dom{k'}$.
By Proposition \ref{markpres} for $\varphi_1'$, $\varphi_1'(m') = m \in \reach{m_0}$.
By Proposition \ref{markdec} for $N_1 \circledast N_2$, $m = (m_1 \setminus \dom{k_1}) \cup (m_2 \setminus \dom{k_2}) \cup m_c$, where $m_2 \setminus \dom{k_2}$, $m_c$ are the same as in $m'$, and $m_1 = \varphi_1(\underline{m}_1)$ (by Proposition \ref{markpres} for $\varphi_1$).
Since $N_1\! \circledast\! N_2$ is sound, $\exists w \in F\!S(N_1\! \circledast\! N_2) \colon m\reach{w}m_f$.
By Definition \ref{comp}, recall that $T = T_1^a \cup T_2^a \cup T_{sync}$ in $N_1\! \circledast\! N_2$, where $T_{sync} = \{(t_1, t_2) \, \vert \, t_1 \in \dom{\ell_1}, t_2 \in \dom{\ell_2}, \text{ and } \ell_1(t_1) = \ell_2(t_2) \}$ and $T_i^a = T_i \setminus \dom{\ell_i}$ with $i=1, 2$.
Using interleaving semantics for Petri nets, we can write $w = w_2^1v$, such that $v=\varepsilon$ or $v=t_1^1w_s^1w_2^2t_1^2\dots$, where $w_2^i \in (T_2^a)^*$, $t_1^i \in T_1^a$ and $w_s^i \in T_{sync}^*$ with $i \geq 1$.
Firstly, each sub-sequence $w_2^i$ can be obviously simulated on the LGWF-net $N_2$ in $R_1\! \circledast N_2$, since $\varphi_1'$ reflects connections with labeled places (by Proposition \ref{irmor}).
Secondly, since $R_1$ is sound, $\varphi_1$ reflects reachable markings and transitions firings (by Proposition \ref{markrefl}).
Thus, there is a reachable marking $\underline{m}_1^i$ in $R_1$, belonging to $\varphi_1^{-1}(m_1^i)$ for some $m_1^i \in \reach{m_0^1}$ in $N_1$.
If $m_1^i \reach{t_1^i}$ in $N_1$, then $\underline{m}_1^i$ enables all transitions in $\varphi_1^{-1}(t_1^i)$ in $R_1$ as well.
Moreover, these transitions are also enabled in $R_1\! \circledast\! N_2$, since $\varphi_1'$ reflects connections to labeled places (by Proposition \ref{irmor}).
Finally, since $N_1\! \circledast\! N_2$ is sound, $\exists m \in \reach{m_0} \colon m \reach{(t_1, t_2)}$ for all $(t_1, t_2)$ in $w_s^i$.
By Proposition \ref{markdec}, $m = m_1 \cup m_2$, where $m_1 \in \reach{m_0^1}$ and $m_2 \in \reach{m_0^2}$ (here $m_c = \varnothing$, since transitions in $T_{sync}$ are not connected with labeled places).
Moreover, $m_1 \reach{t_1}$ and $m_2 \reach{t_2}$.
By Proposition \ref{markrefl} for $\varphi_1$, there is a reachable marking $\underline{m}_1'$ in $R_1$, such that $\underline{m}_1' = \varphi_1^{-1}(m_1)$ and $\forall \underline{t}_1 \in \varphi_1^{-1}(t_1) \colon \underline{m}_1' \reach{\underline{t}_1}$.
Correspondingly, a reachable marking $\underline{m}_1' \cup m_2$ in $R_1\! \circledast\! N_2$ enables synchronized transitions $(\underline{t}_1, t_2)$ for all $\underline{t}_1 \in \varphi_1^{-1}(t_1)$.
Hence, we reflect the complete firing sequence $w \in F\!S(N_1\! \circledast\! N_2)$ on $R_1 \circledast N_2$ reaching its final marking $m_f'$.
\textbf{2.} Suppose by contradiction $\exists m' \in \reach{m_0'} \colon m' \supseteq m_f'$ and $m' \neq m_f'$.
By Definition~\ref{comp}.2, $m_f' = \underline{m}_f^1 \cup m_f^2$.
Thus, $m' = \underline{m}_f^1 \cup m_f^2 \cup m_3$.
By Proposition \ref{markpres} for $\varphi_1'$, we have that $\varphi_1'(m') \in \reach{m_0}$.
Then, $\varphi_1'(m') = \varphi_1'(\underline{m}_f^1) \cup \varphi_1'(m_f^2) \cup \varphi_1'(m_3) = \varphi_1(\underline{m}_f^1) \cup m_f^2 \cup m_3 = m_f^1 \cup m_f^2 \cup m_3 = m_f \cup m_3$.
This reachable marking $m_f \cup m_3$ strictly covers the final marking $m_f$ in $N_1\! \circledast\! N_2$, which contradicts the assumption of its soundness.
\textbf{3.} We show that $\forall t' \in T'\,\exists m' \in \reach{m_0'} \colon m' \reach{t'}$.
By Proposition \ref{markdec}, $m' = (\underline{m}_1 \setminus \dom{\underline{k}_1}) \cup (m_2 \setminus \dom{k_2}) \cup m_c$, where $\underline{m}_1 \in \reach{\underline{m}_0^1}$, $m_2 \in \reach{m_0^2}$ and $m_c \in \dom{k'}$.
By Definition \ref{comp}.3, $\forall t' \in T' \colon t' \in\underline{T}_1^a$ or $t' \in T_2^a$ or $t' \in T_{sync}$.
If $t' \in T_2^a$, then $\exists m \in \reach{m_0} \colon m\reach{t'}$, since $N_1\! \circledast\! N_2$ is sound.
By Proposition \ref{irmor}, $(m_2 \setminus \dom{k_2}) \cup m_c$ in $R_1\! \circledast\! N_2$ also enables $t'$.
If $t' \in \underline{T}_1^a$, then there are two cases.
If $\varphi_1'(t') \in P$, then $t'$ is not connected to labeled places.
Since $R_1$ is sound, $\underline{m}_1$ enables $t'$.
If $\varphi_1'(t') \in T$, then take $t \in T$, such that $\varphi_1'(t') = t$ (by the surjectivity of $\varphi_1'$).
Since $N_1\! \circledast\! N_2$ is sound, $\exists m \in \reach{m_0} \colon m \reach{t}$.
By Proposition \ref{markrefl} and \ref{irmor}, the reachable marking $\underline{m}_1 \cup m_c$ in $R_1\! \circledast\! N_2$ (being the inverse image of $m$ under $\varphi_1'$) enables $t'$.
As for the case when $t' \in T_{sync}$, we have already considered it above when proving reachability of the final marking in $R_1\! \circledast\! N_2$. \qed
\end{proof}
Having two $\widehat{\alpha}$-morphisms from the intermediate refinements $R_1\! \circledast\!N_2$ and $N_1\!\circledast\!R_2$ to the same abstract interface $N_1 \!\circledast\! N_2$, we can compose $R_1 \!\circledast \!N_2$ and $N_1\! \circledast\! R_2$ using the composition defined in~\cite{Bernardinello2013}.
It is required to (a) substitute subnets in $R_1\!\circledast\!N_2$ and $N_1\! \circledast\!R_2$ for the corresponding places in $N_1\!\circledast\!N_2$;
(b)~replace transitions in $N_1\!\circledast\!N_2$ with their inverse images merging those with identical images.
As a result, we obtain $N$ and two $\widehat{\alpha}$-morphisms from $N$ to $R_1\!\circledast\!N_2$ and $N_1\!\circledast\!R_2$, such that the diagram shown in Fig.\,\ref{diagram1} commutes, i.e., $\varphi_1' \circ \varphi_1'' = \varphi_2' \circ \varphi_2''$, where $\varphi_1' \colon (R_1\!\circledast\! N_2) \to (N_1 \!\circledast\! N_2)$, $\varphi_2' \colon (N_1 \!\circledast\! R_2) \to (N_1 \!\circledast\! N_2)$, $\varphi_1'' \colon N \to (R_1 \!\circledast\! N_2)$, and $\varphi_2'' \colon N \to (N_1 \!\circledast\! R_2)$.
\begin{figure}
\centering
\caption{Composition of $R_1 \circledast N_2$ and $N_1 \circledast R_2$ based on $\widehat{\alpha}$-morphisms: the net $N$, isomorphic to $R_1 \circledast R_2$}
\label{comp_res}
\end{figure}
\begin{figure}
\centering
\caption{Commutative diagram of $\widehat{\alpha}$-morphisms}
\label{diagram1}
\end{figure}
Another way is to construct intermediate refinements again by refining $N_2$ in $R_1\! \circledast\! N_2$ ($N_1$ in $N_1\! \circledast \!R_2$).
As a result, we obtain $R_1 \circledast R_2$, isomorphic to the previously constructed composition $N$ up to renaming of synchronized transitions.
According to Proposition \ref{irmor}, there are two $\widehat{\alpha}$-morphisms from $R_1 \circledast R_2$ to $R_1 \circledast N_2$ as well as to $N_1 \circledast R_2$.
According to Theorem \ref{mainth}, since $R_1 \circledast N_2$ ($N_1 \circledast R_2$) is sound, $R_1 \!\circledast\! R_2$ is also sound.
Therefore, we have also shown that it is possible to simultaneously refine $N_1$ and $N_2$ in the sound abstract interface with sound LGWF-nets $R_1$ and $R_2$ (see Corollary~\ref{cor:main}).
In Fig.\,\ref{comp_res}, we show the result of composing, by means of $\widehat{\alpha}$-morphisms, intermediate refinements $R_1 \circledast N_2$ and $N_1 \circledast R_2$ provided in Fig.\,\ref{intref}.
This composition corresponds to the direct AS-composition of $R_1$ and $R_2$.
\begin{corollary}\label{cor:main}
Let $R_1, R_2, N_1, N_2$ be four sound LGWF-nets, such that there is an $\widehat{\alpha}$-morphism $\varphi_i \colon R_i \to N_i$ for $i=1, 2$.
If $N_1\!\circledast\! N_2$ is sound, then $R_1\! \circledast \!R_2$ is sound.
\end{corollary}
\section{Related Works}\label{sec:relw}
There is a considerable amount of literature devoted to the compositional modeling of Petri nets, including, among the others, \cite{Kotov78,BoxCalc,Reisig13,Valk2003}.
The recent works \cite{Reisig2018,ReiComp20,Her2021} by Wolfgang Reisig are devoted to a systematic study of compositional modeling principles applicable to various formalisms and notations, including (Colored) Petri nets, BPMN (Business Process Modeling and Notation) process models, and UML (Unified Modeling Language) diagrams.
Several works studied whether a composition of \emph{open} Petri nets preserves semantical properties of components.
Paolo Baldan et al.~\cite{Baldan01} introduced a class of open Petri nets and analyzed the categorical framework behind open Petri net composition constructed via place and transition fusion.
Kees van Hee et al.~\cite{vanHee2010,vanHee2011} considered a soundness-preserving refinement of places in open WF-nets with sound (composition of) WF-nets.
A class of superposed automata nets (SA-nets) was introduced by Fiorella De Cindio et al.~in \cite{SANets82}.
SA-nets were among the first formalisms to model systems with synchronously communicating sequential components via transition fusion.
Serge Haddad et al.~\cite{Haddad13} defined the semantics of \emph{input/output} (I/O) Petri nets and their composition constructed through the insertion of asynchronous channels.
The authors studied channel properties related to message consumption and interaction termination.
It was shown that these properties are decidable and preserved by an asynchronous composition of I/O-Petri nets.
Younes Soussi and G\'{e}rard Memmi \cite{Sous91,SousMem1} considered the problem of liveness preservation in a composition of Petri nets through an intermediate model of communication medium.
Their approach is based on global and rigid structural constraints.
Christian Stahl and Karsten Wolf \cite{Wolf09} applied \emph{operating guidelines} for compositional verification of deadlock-freeness in the composition of open Petri nets.
Their work also considered a problem to decide if one can replace a component in a composition preserving its semantical properties.
Inheritance of behavioral properties of Petri nets is also achieved with the help of morphisms~--- structural graph mappings.
The composition of Petri nets via morphisms was a subject of many works, including, for example, \cite{Winskel67,monoids90,Nielsen92,Bednarczyk03,Padberg2003,Fabre06,Nhat,Desel2010}.
We note that morphisms provide a natural and rigid framework to explore properties of Petri net composition.
In our study, the soundness preservation in the AS-composition of LGWF-nets is achieved with the help of a restriction of $\alpha$-morphisms, originally defined by Luca Bernardinello et al. in \cite{Bernardinello2013}.
They allow us to abstract subnets and refine places in Petri nets. In addition, $\alpha$-morphisms preserve and reflect reachable markings and induce the bisimulation between related models.
Several works have discussed architectural and semantical aspects of compositional approaches to WF-net modeling.
Juliane Siegeris and Armin Zimmermann \cite{WFRes} considered specific patterns of WF-net interactions preserving relaxed soundness of components admitting executions that may not terminate in a final state.
The work \cite{Lomazova13} by Irina Lomazova and Ivan Romanov addressed the problem of preserving service correctness in the context of resources produced and consumed by interacting services.
The earlier work \cite{Lomazova10} by I.\,Lomazova also proposed an approach to soundness-preserving re-engineering of hierarchical WF-nets with a two-level structure.
Yudith Cardinale et al., in the survey \cite{Cardinale13}, discussed a variety of approaches to compositional modeling of web services.
The authors stressed that there is a lack of service execution techniques based on different classes of Petri nets.
In particular, Victor Pankratius and Wolffried Stucky \cite{Pankratius05}~considered the composition of WF-nets representing web service behavior with the help of adapted relational algebra operations.
The main difference in our work is that the AS-composition of labeled GWF-nets leaves asynchronous channels and synchronous transitions open for other components to connect.
Apart from that, refinement of LGWF-nets is defined at the level of a complete net rather than specific places and transitions.
Refinement preserves the soundness of LGWF-net components and an interface, which describes interactions at the abstract level.
In our earlier paper \cite{ATAED-18}, we discussed a restricted case of modeling semantically correct asynchronous communication of workflow nets.
Results presented in this paper naturally extend the previous ones and provide the formal backgrounds for constructing sound workflow nets from sound models of interacting components.
\section{Conclusions}\label{sec:concl}
This paper has studied the theoretical backgrounds for a correct composition of interacting workflow net components.
We have developed an approach to model asynchronous and synchronous interactions among workflow nets using two kinds of transition labels.
Correspondingly, we have defined an asynchronous-synchronous composition (AS-composition).
AS-composition may not preserve the soundness of interacting components.
To solve this problem, we use an interface that describes how WF-nets interact.
An interface net represents an abstract view of a complete system.
There is a subnet in an interface corresponding to component behavior.
The correspondence between an interface and components is established with the help of $\alpha$-morphisms.
The structural and behavioral properties of the abstraction/refinement relation based on $\alpha$-morphisms have helped us to prove that refining subnets in an interface with sound WF-nets preserves the interface soundness.
We identify two main advantages of the proposed compositional approach.
Firstly, the problem of constructing a correct composition of interacting workflow net components is solved in the abstract model.
Refinement of abstract places requires checking structural constraints and only local behavioral constraints for properly refined places.
Sound models of abstract interfaces can be reused with different component refinements.
Secondly, AS-composition leaves asynchronous channels and synchronous transitions of workflow net components open for others to interact.
Our future research will be focused on the following aspects.
It is planned to relax constraints of $\alpha$-morphisms to enable the abstraction of cyclic subnets.
Thus, an acyclic abstract interface will define a broader class of sound WF-net compositions.
We also plan to systematically identify typical interface patterns that a system architect can use to organize smooth interactions among components in large-scale distributed systems.
Our earlier works \cite{TMPA-17,Macspro-19} studied patterns of asynchronous interactions within a restricted case of two interacting components.
\end{document} |
\begin{document}
\title[Local regularity estimates]{Local regularity estimates for general discrete dynamic programming equations}
\author[Arroyo]{\'Angel Arroyo}
\address{MOMAT Research Group, Interdisciplinary Mathematics Institute, Department of Applied Mathematics and Mathematical Analysis, Universidad Complutense de Madrid, 28040 Madrid, Spain}
\email{[email protected]}
\author[Blanc]{Pablo Blanc}
\address{Department of Mathematics and Statistics, University of Jyv\"askyl\"a, PO~Box~35, FI-40014 Jyv\"askyl\"a, Finland}
\email{[email protected]}
\author[Parviainen]{Mikko Parviainen}
\address{Department of Mathematics and Statistics, University of Jyv\"askyl\"a, PO~Box~35, FI-40014 Jyv\"askyl\"a, Finland}
\email{[email protected]}
\date{\today}
\keywords{ABP-estimate, elliptic non-divergence form partial differential equation with bounded and measurable coefficients, dynamic programming principle, Harnack's inequality, local H\"older estimate, p-Laplacian, Pucci extremal operator, tug-of-war with noise}
\subjclass[2010]{35B65, 35J15, 35J92, 91A50}
\maketitle
\begin{abstract}
We obtain an analytic proof for asymptotic H\"older estimate and Harnack's inequality for solutions to a discrete dynamic programming equation. The results also generalize to functions satisfying Pucci-type inequalities for discrete extremal operators. Thus the results cover a quite general class of equations.
\end{abstract}
\section{Introduction}
Recently a quite general method for regularity of stochastic processes was devised in \cite{arroyobp}. It is shown that expectation of a discrete stochastic process or equivalently a function satisfying the dynamic programming principle (DPP)
\begin{align}
\label{eq:dpp-intro}
u (x) =\alpha \int_{\mathbb{R}^N} u(x+\varepsilon z) \,d\nu_x(z)+\frac{\beta}{\abs{B_\varepsilon}}\int_{B_\varepsilon(x)} u(y)\,dy
+\varepsilon^2 f(x),
\end{align}
where $f$ is a Borel measurable bounded function and $\nu_x$ is a symmetric probability measure with rather mild conditions, is asymptotically H\"older regular. Moreover, the result generalizes to Pucci-type extremal operators and conditions of the form
\begin{align}
\label{eq:pucci-extremals}
\mathcal L_\varepsilon^+ u\ge -\abs{f},\quad \mathcal L_\varepsilon^- u\le \abs{f},
\end{align}
where $\mathcal L_\varepsilon^+, \mathcal L_\varepsilon^-$ are Pucci-type extremal operators related to operators of the form (\ref{eq:dpp-intro}) as in Definition~\ref{def:pucci}. As a consequence, the results immediately cover for example tug-of-war type stochastic games, which have been an object of a recent interest.
The proof in \cite{arroyobp} uses probabilistic interpretation. In the PDE setting the closest counterpart would be Krylov-Safonov regularity method \cite{krylovs79}. It gives H\"older regularity of solutions and Harnack's inequality for
elliptic equations with merely bounded and measurable coefficients. The next natural question, and the aim of this paper, is to try to obtain an analytic proof. In the PDE setting the closest counterpart would be Trudinger's analytic proof of the Krylov-Safonov regularity result in \cite{trudinger80}.
The H\"older estimate is obtained in Theorem \ref{Holder} (stated here in normalized balls for convenience) and it applies to (\ref{eq:dpp-intro}) by selecting $\rho=\sup\abs{f}$:
\begin{theorem*}
There exists $\varepsilon_0>0$ such that if $u$ satisfies $\mathcal{L}_\varepsilon^+ u\ge -\rho$ and $\mathcal{L}_\varepsilon^- u\le \rho$ in $B_{2}$ where $\varepsilon<\varepsilon_0$, we have for suitable constants
\[
|u(x)-u(z)|\leq C\left(\sup_{B_{2}}|u|+\rho\right)\Big(|x-z|^\gamma+\varepsilon^\gamma\Big)
\]
for every $x, z\in B_1$.
\end{theorem*}
After establishing a H\"older regularity estimate, it is natural to ask in the spirit of Krylov, Safonov and Trudinger for Harnack's inequality. To the best of our knowledge, this was not known before in our context. The regularity techniques in PDEs or in the nonlocal setting utilize, heuristically speaking, the fact that there is information available in all scales. Concretely, a rescaling argument is used in those contexts in arbitrary small cubes. In our case, discreteness sets limitations, and these limitations have some crucial effects. Indeed, the standard formulation of Harnack's inequality does not hold in our setting as we show by a counter example.
Instead, we establish an asymptotic Harnack's inequality in Theorem \ref{Harnack}:
\begin{theorem*}
There exists $\varepsilon_0>0$ such that if $u$ satisfies $\mathcal{L}_\varepsilon^+ u\ge -\rho$ and $\mathcal{L}_\varepsilon^- u\le \rho$ in $B_{7}$ where $\varepsilon<\varepsilon_0$, we have for suitable constants
\begin{equation*}
\sup_{B_1}u
\leq
C\left(\inf_{B_1}u+\rho+\varepsilon^{2\lambda}\sup_{B_3}u\right).
\end{equation*}
\end{theorem*}
Both the asymptotic H\"older estimate and Harnack's inequality are stable when passing to a limit with the scale $\varepsilon$, and we recover the standard H\"older estimate and Harnack's inequality in the limit.
The key point in the proof is to establish the De Giorgi type oscillation estimate that roughly states the following (here written for the zero right hand side and suitable scaling for simplicity):
Under certain assumptions if $u$ is a (sub)solution to (\ref{eq:dpp-intro}) with $u\leq 1$ in a suitable bigger ball and
\[
|B_{R}\cap \{u\leq 0\}|\geq \theta |B_R|,
\]
for some $\theta>0$, then there exists $\eta>0$ such that
\[
\sup_{B_R} u \leq 1-\eta.
\]
This is established in Lemma~\ref{DeGiorgi}. Then we can obtain asymptotic H\"older continuity by a finite iteration combined with a rough estimate in the scales below $\varepsilon$.
It is not straightforward to interpret the probabilistic proof in \cite{arroyobp} into analytic form to obtain the proof of Lemma~\ref{DeGiorgi}. Instead, we need to devise an iteration for the level sets
\[
A=\{u\geq K^k\} \quad \text{ and }\quad B=\{u\geq K^{k-1}\}.
\]
It seems difficult to produce an estimate between the measures of $A$ and $B$ by using the standard version of the Calder\'on-Zygmund decomposition. The equation (\ref{eq:dpp-intro}) is not infinitesimal, but if we simply drop all the cubes smaller than of scale $\varepsilon$ in the decompositions, we have no control on the size of the error. To treat this, we use an additional condition for selecting additional cubes of scale $\varepsilon$. On the other hand, additional cubes should belong to the set $B$ above, so there are two competing objectives. Different nonlocal analytic arguments, Alexandrov-Bakelman-Pucci (ABP) type estimates, and suitable cut-off levels will be used.
Unfortunately, but necessarily, the additional condition produces an error term in the estimate between measures of $A$ and $B$. Nonetheless, we can accomplish the level set measure estimate in Lemma \ref{measure bound} which is sufficient to get the De Giorgi oscillation lemma.
The H\"older estimate and Harnack's inequality are key results in the theory of non-divergence form elliptic partial differential equations with bounded and measurable coefficients. They were first obtained by Krylov and Safonov in \cite{krylovs79, krylovs80} by stochastic arguments. Later, an analytic proof for strong solutions was established by Trudinger in \cite{trudinger80}, see also \cite[Section 9]{gilbargt01}. In the case of viscosity solutions for fully nonlinear elliptic equations, the ABP estimate and Harnack's inequality were obtained by Caffarelli \cite{caffarelli89}, also covered in \cite[{Chapters 3 and 4}]{caffarellic95}. For nonlocal equations, such results have been considered more recently for example in \cite{caffarellis09} or \cite{caffarellitu20}. In the case of fully discrete difference equations, we refer the reader to \cite{kuot90}.
There is a classical well-known connection between the Brownian motion and the Laplace equation. The dynamic programming principle (\ref{eq:dpp-intro}) is partly motivated by the connection of stochastic processes with the $p$-Laplace equation and other nonlinear PDEs. Our results cover (see \cite{arroyobp} for details) in particular a stochastic two player game called the tug-of-war game with noise. The tug-of-war game and its connection with the infinity Laplacian was discovered in \cite{peresssw09}. For the tug-of-war games with noise and their connection to $p$-Laplacian, see for example \cite{peress08}, \cite{manfredipr12}, \cite{blancr19} and \cite{lewicka20}.
There are several regularity methods devised for tug-of-war games with noise: in the early papers a global approach based on translation invariance was used. Interior a priori estimates were obtained in \cite{luirops13} and \cite{luirop18}. However, none of these methods seem to directly apply in the general setup of this paper. In this setup, we refer to probabilistic approaches in \cite{arroyobp} and with additional distortion bounds in \cite{arroyop20}.
\tableofcontents
\section{Preliminaries}
\label{preliminaries}
Let $\Lambda\geq 1$, $\varepsilon>0$, $\beta\in (0,1]$ and $\alpha=1-\beta$.
Constants may depend on $\Lambda$, $\alpha$, $\beta$ and the dimension $N$.
Further dependencies are specified later.
Throughout the article $\Omega \subset \mathbb{R}^N$ denotes a bounded domain, and $B_r(x)=\{y\in\mathbb{R}^N:|x-y|<r\}$ as well as $B_r=B_r(0)$.
We use $\mathbb{N}$ to denote the set of positive integers.
We define an extended domain as follows
\begin{equation*}
\widetilde\Omega_{\Lambda\varepsilon}
:\,=
\set{x\in\mathbb{R}^N}{\operatorname{dist}(x,\Omega)<\Lambda\varepsilon}.
\end{equation*}
We further denote
\[
\int u(x)\,dx=\int_{\mathbb{R}^N} u(x)\,dx
\quad
\text{ and }
\quad
\vint_A u(x)\,dx=\frac{1}{|A|}\int_{A} u(x)\,dx.
\]
Moreover,
\[
\|f\|_{L^N(\Omega)}=\left(\int_\Omega |f(x)|^N\,dx\right)^{1/N}
\]
and
\[
\|f\|_{L^\infty(\Omega)}=\sup_\Omega |f|.
\]
When no confusion arises we just simply denote $\|\cdot\|_N$ and $\|\cdot\|_\infty$, respectively.
For $x=(x_1,\ldots,x_N)\in\mathbb{R}^N$ and $r>0$, we define $Q_r(x)$ the open cube of side-length $r$ and center $x$ with faces parallel to the coordinate hyperplanes. In other words,
\begin{equation*}
Q_r(x)
:\,=
\{y\in\mathbb{R}^N\,:\,|y_i-x_i|<r/2,\ i=1,\ldots,N\}.
\end{equation*}
In addition, if $Q=Q_r(x)$ and $\ell>0$, we denote $\ell Q=Q_{\ell r}(x)$.
Let $\mathcal{M}(B_\Lambda)$ denote the set of symmetric unit Radon measures with support in $B_\Lambda$ and $\nu:\mathbb{R}^N\to \mathcal{M}(B_\Lambda)$
such that
\begin{equation}\label{measurable-nu}
x\longmapsto\int u(x+z) \,d\nu_x(z)
\end{equation}
defines a Borel measurable function for every Borel measurable $u:\mathbb{R}^N\to \mathbb{R}$.
By symmetric, we mean
\begin{align*}
\nu_x(E)=\nu_x(-E)
\end{align*}
for every measurable set $E\subset\mathbb{R}^N$.
It is worth remarking that the hypothesis \eqref{measurable-nu} on Borel measurability holds, for example, when the $\nu_x$'s are the pushforward of a given probability measure $\mu$ in $\mathbb{R}^N$. More precisely, if there exists a Borel measurable function $h:\mathbb{R}^N\times \mathbb{R}^N\to B_\Lambda$ such that
\[
\nu_x=h(x,\cdot)\#\mu
\]
for each $x$, then
\[
\begin{split}
v(x)
&=\int u(x+z) \,d\nu_x(z)\\
&=\int u(x+h(x,y)) \,d\mu(y)
\end{split}
\]
is measurable by Fubini's theorem.
We consider here solutions to the Dynamic Programming Principle (DPP) given by
\begin{align*}
u (x) =\alpha \int u(x+\varepsilon v) \,d\nu_x(v)+\beta\vint_{B_\varepsilon(x)} u(y)\,dy+\varepsilon^2 f(x).
\end{align*}
\begin{definition}
\label{def:solutions}
We say that a bounded Borel measurable function $u$ is a subsolution to the DPP if it satisfies
\[
u (x)\leq\alpha \int u(x+\varepsilon z) \,d\nu_x(z)+\beta\vint_{B_\varepsilon(x)} u(y)\,dy+\varepsilon^2 f(x)
\]
in $\Omega$.
Analogously, we say that $u$ is a supersolution if the reverse inequality holds.
If the equality holds, we say that it is a solution to the DPP.
\end{definition}
If we rearrange the terms in the DPP, we may alternatively use a notation that is closer to the difference methods.
\begin{definition}
Given a Borel measurable bounded function $u:\mathbb{R}^N\to \mathbb{R}$, we define $\mathcal{L}_\varepsilon u:\mathbb{R}^N\to \mathbb{R}$ as
\[
\mathcal{L}_\varepsilon u(x)=\frac{1}{\varepsilon^2}\left(\alpha \int u(x+\varepsilon z) \,d\nu_x(z)+\beta\vint_{B_\varepsilon(x)} u(y)\,dy-u(x)\right).
\]
With this notation, $u$ is a subsolution (supersolution) if and only if $\mathcal{L}_\varepsilon u+f \geq 0$ ($\leq 0$).
\end{definition}
By defining
\begin{align}
\label{eq:delta}
\delta u(x,y):\,=u(x+y)+u(x-y)-2u(x),
\end{align}
and recalling the symmetry condition on $\nu_x$ we can rewrite
\begin{equation*}
\mathcal{L}_\varepsilon u(x)=\frac{1}{2\varepsilon^2}\left(\alpha \int \delta u(x,\varepsilon z) \,d\nu_x(z)+\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\right).
\end{equation*}
Our theorems actually hold for functions merely satisfying Pucci-type inequalities.
\begin{definition}
\label{def:pucci}
Let $u:\mathbb{R}^N\to\mathbb{R}$ be a bounded Borel measurable function. We define the extremal Pucci type operators
\begin{equation}\label{L-eps+}
\begin{split}
\mathcal{L}_\varepsilon^+ u(x)
:\,=
~&
\frac{1}{2\varepsilon^2}\bigg(\alpha \sup_{\nu\in \mathcal{M}(B_\Lambda)} \int \delta u(x,\varepsilon z) \,d\nu(z) +\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\bigg)
\\
=
~&
\frac{1}{2\varepsilon^2}\bigg(\alpha \sup_{z\in B_\Lambda} \delta u(x,\varepsilon z) +\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\bigg)
\end{split}
\end{equation}
and
\begin{equation}\label{L-eps-}
\begin{split}
\mathcal{L}_\varepsilon^- u(x)
:\,=
~&
\frac{1}{2\varepsilon^2}\bigg(\alpha \inf_{\nu\in \mathcal{M}(B_\Lambda)} \int \delta u(x,\varepsilon z) \,d\nu(z) +\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\bigg)
\\
=
~&
\frac{1}{2\varepsilon^2}\bigg(\alpha \inf_{z\in B_\Lambda} \delta u(x,\varepsilon z) +\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\bigg),
\end{split}
\end{equation}
where $\delta u(x,\varepsilon y)=u(x+\varepsilon y)+u(x-\varepsilon y)-2u(x)$ for every $y\in B_\Lambda$.
\end{definition}
More generally we can consider functions that satisfy
\begin{equation*}
\mathcal{L}_\varepsilon^+ u\ge -\rho,\quad \mathcal{L}_\varepsilon^- u\le \rho.
\end{equation*}
If we write out the notation above, $\mathcal{L}_\varepsilon^- u\le \rho$ reads as
\begin{align*}
u (x)
&
\geq
\alpha\inf_{\nu\in \mathcal{M}(B_\Lambda)} \int u(x+\varepsilon v) \,d\nu (v)+\beta\vint_{B_\varepsilon(x)} u(y)\,dy-\varepsilon^2\rho.
\end{align*}
\sloppy
Observe that the natural counterpart for the Pucci operator
$P^+(D^2u)=\sup_{I \leq A\leq \Lambda I}{\rm tr}(AD^2u)$
is given by
\begin{align}
\label{eq:about-extremal-operators}
P_\varepsilon^+ u(x)
:\,=
\frac{1}{2\varepsilon^2}\sup_{I\leq A\leq \Lambda I}\vint_{B_1} \delta u(x,\varepsilon A y)\,dy.
\end{align}
Our operator is extremal in the sense that we have $\mathcal{L}_{\varepsilon}^+ u\geq P_\varepsilon^+ u$ for $\beta=\frac{1}{\Lambda^N}.$
In many places we consider $u$ defined in the whole $\mathbb{R}^N$ but only for expository reasons: we need always have the function defined in a larger set than where the equation is given so that the integrands in the operators are defined; this we always assume.
The existence of solutions to the DPP can be seen by Perron's method.
For the uniqueness in \cite{arroyobp} we employed the connection to a stochastic process.
Here we give a pure analytic proof of the uniqueness.
\begin{lemma}[Existence and uniqueness]
There exists a unique solution to the DPP with given boundary values.
\end{lemma}
\begin{proof}
As stated, the existence can be proved by Perron's method.
Then, there is a maximal solution that we denote $u$.
Suppose that there is another solution $v$.
We have $v\leq u$ and our goal is to show that equality holds.
We define
\[
M=\sup_{x\in\Omega}u(x)-v(x)
\]
and assume, for the sake of contradiction, that $M>0$.
We define
\[
A
=\frac{|\{y\in B_\varepsilon(x): \pi_1(y)>\pi_1(x)+\varepsilon/2\}|}{\abs{B_\varepsilon}}
=\frac{|\{y\in B_1: \pi_1(y)>1/2\}|}{\abs{B_1}}
\]
where $\pi_1$ stands for the projection in the first coordinate.
Given $\delta>0$ we consider $x_0\in \Omega$ such that $u(x_0)-v(x_0)>M-\delta$.
We have
\[
\begin{split}
M-\delta
&<u(x_0)-v(x_0)\\
&=\alpha \int u(x_0+\varepsilon z)-v(x_0+\varepsilon z) \,d\nu_{x_0}(z)+\beta\vint_{B_\varepsilon(x_0)} u(y)-v(y)\,dy\\
&<\alpha M +\beta (1-A) M+\beta A \vint_{\{y\in B_\varepsilon(x_0): \pi_1(y)>\pi_1(x_0)+\varepsilon/2\}} u(y)-v(y)\,dy.
\end{split}
\]
Simplifying we obtain
\[
M-\frac{\delta}{\beta A}
<\vint_{\{y\in B_\varepsilon(x_0): \pi_1(y)>\pi_1(x_0)+\varepsilon/2\}} u(y)-v(y)\,dy.
\]
Then, there exists $x_1\in \{y\in B_\varepsilon(x_0): \pi_1(y)>\pi_1(x_0)+\varepsilon/2\}$ such that
\[
M-\frac{\delta}{\beta A}<u(x_1)-v(x_1).
\]
Inductively, given $x_{k-1}\in \Omega$ we construct $x_k$ such that $M-\frac{\delta}{(\beta A)^k}<u(x_k)-v(x_k)$ and $\pi_1(x_k)>\pi_1(x_0)+k\varepsilon/2$.
Since $\Omega$ is bounded and the first coordinate increases by at least $\varepsilon/2$ in every step, there exists a first $n$ such that $x_n\not\in \Omega$.
Observe that $n\leq n_0=\frac{\diam(\Omega)}{\varepsilon/2}$, therefore for $\delta$ small enough such that $M-\frac{\delta}{(\beta A)^{n_0}}>0$ we have reached a contradiction.
In fact, we have
\[
0
< M-\frac{\delta}{(\beta A)^{n_0}}
\leq M-\frac{\delta}{(\beta A)^{n}}
\leq u(x_n)-v(x_n)
\]
and $u(x_n)=v(x_n)$ since $x_n\not\in\Omega$.
\end{proof}
\subsection{Examples and connection to PDEs}
In this section, we recall some examples from \cite{arroyobp} alongside other ones, all of which are covered by our results.
First, we comment about passage to the limit with the step size $\varepsilon$ where the connection to PDEs arises.
We consider $\phi\in C^2(\Omegaega)$, and use the second order Taylor's expansion of $\phi$ to obtain
\begin{equation*}
\lim_{\varepsilon\to 0}\mathcal{L}_\varepsilon\phi(x)
=
\mathrm{Tr}\{D^2\phi(x)\, A(x)\},
\end{equation*}
where
\begin{equation*}
A(x)
:\,=
\frac{\alpha}{2}\int z\otimes z\,d\nu_x(z)+\frac{\beta}{2(N+2)}\, I.
\end{equation*}
Above $a\otimes b$ stands for the tensor product of vectors $a,b\in\mathbb{R}^N$, that is, the matrix with entries $(a_ib_j)_{ij}$.
See Example 2.3 in \cite{arroyobp} for the details.
We have obtained a linear second order partial differential operator.
Furthermore, for $\beta\in(0,1]$, the operator is uniformly elliptic: given $\xi\in\mathbb{R}^N\setminus\{0\}$,
we can estimate
\begin{equation*}
\frac{\beta}{2(N+2)}
\leq
\frac{\langle A(x) \xi,\xi\rangle}{|\xi|^2}
\leq
\frac{\alpha\Lambda^2}{2}+\frac{\beta}{2(N+2)}.
\end{equation*}
Roughly speaking, in the DPP (\ref{eq:dpp-intro}), the fact that $\beta$ is strictly positive corresponds to the concept of uniform ellipticity in PDEs. In stochastic terms, there is always certain level of diffusion to each direction.
It also holds, using Theorem~\ref{Holder} (cf.\ \cite[Theorem 4.9]{manfredipr12}), that under suitable regularity assumptions, the solutions $u_{\varepsilon}$ to the DPP converge to a viscosity solution $v\in C(\Omega)$ of
\begin{align*}
\mathrm{Tr}\{D^2 v(x)\,A(x)\}=f(x),
\end{align*}
as $\varepsilon\to 0$. This is obtained through the asymptotic Arzel\`a-Ascoli theorem \cite[Lemma 4.2]{manfredipr12}.
Moreover, by passing to the limit under suitable uniqueness considerations we obtain that
the results in this paper imply the corresponding regularity for the solutions to the limiting PDEs.
That is we obtain that the limit functions are H\"older continuous and verify the classical Harnack inequality, see
Remark \ref{harnack:limit}.
The extremal inequalities (\ref{eq:pucci-extremals}) cover a wide class of discrete operators, comparable to the uniformly elliptic operators in PDEs covered by the Pucci extremal operators, see for example \cite{caffarellic95}.
Also recall (\ref{eq:about-extremal-operators}) where we commented on this connection.
\begin{example}
Our result applies to solutions of the nonlinear DPP given by
\[
u(x)={\bf a}lpha \sup_{\nu\in B_\mathcal{L}ambda} \frac{u(x+\varepsilon \nu)+u(x-\varepsilon \nu)}{2}
+\beta\vint_{B_1} u(x+\varepsilon y)\,dy.
\]
In \cite{brustadlm20} a control problem associated to the nonlinear example is presented and, in the limit as $\varepsilon \to 0$, a local PDE involving the dominative $p$-Laplacian operator arises.
Heuristically, the above DPP can be understood by considering a value $u$ at $x$, which can be computed by summing up different outcomes with corresponding probabilities: either a maximizing controller who gets to choose $\nu$ wins (probability $\alpha$), or a random step occurs (with probability $\beta$) within a ball of radius $\varepsilon$. If the controller wins, the position moves to $x+\varepsilon \nu$ (with probability $1/2$) or to $x-\varepsilon \nu$ (with probability $1/2$).
\end{example}
\begin{example}
Motivation for this article partly arises from tug-of-war games.
In particular, the tug-of-war with noise associated to the DPP
\begin{align}
\label{eq:p-dpp}
u(x)=\frac{\alpha}{2}\left( \sup_{B_\varepsilon(x)} u + \inf_{B_\varepsilon(x)} u\right)+\beta \vint_{B_\varepsilon(x)} u(z)\,dz + \varepsilon^2 f(x).
\end{align}
was introduced in \cite{manfredipr12}.
This can be rewritten as
\begin{align*}
\frac{1}{2\varepsilon^2}\left(\alpha\left( \sup_{B_\varepsilon(x)} u + \inf_{B_\varepsilon(x)} u -2u(x)\right)+\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\right)+f(x)=0.
\end{align*}
Since
\[
\sup_{B_\varepsilon(x)} u + \inf_{B_\varepsilon(x)} u \leq \sup_{z\in B_1}\big( u(x+\varepsilon z) +u(x-\varepsilon z)\big)
\]
we have $0\leq f+ \mathcal{L}^+_\varepsilon u$ and similarly $0\ge f+ \mathcal{L}^-_\varepsilon u$.
Therefore solutions to \eqref{eq:p-dpp} satisfy (\ref{eq:pucci-extremals}), and our results apply to these functions.
As a limit one obtains the $p$-Laplacian problem with $2<p<\infty$.
See Example 2.4 in \cite{arroyobp} for other DPPs related to the $p$-Laplacian.
\end{example}
\begin{example}
Consider a stochastic process where a particle jumps to a point in an ellipsoid $\varepsilon E_x$ uniformly at random ($B_1\subset E_x\subset B_\Lambda$), see \cite{arroyop20}.
Such a process is associated to the DPP
\[
u(x)
=
\vint_{E_x} u(x+\varepsilon y)\,dy.
\]
That DPP is covered by our results, see Example 2.7 in \cite{arroyobp}.
Such mean value property has been studied in connection with smooth solutions to PDEs in \cite{puccit76} by Pucci and Talenti.
\end{example}
\begin{example}
Also Isaacs type dynamic programming principle
\[
u(x)=\alpha \sup_{V \in \mathcal V}\inf_{\nu \in V} \frac{u(x+\varepsilon \nu)+u(x-\varepsilon \nu)}{2}
+\beta\vint_{B_1} u(x+\varepsilon y)\,dy,
\]
with $\mathcal V \subset\mathcal P(B_\Lambda)$, a subset of the power set, and $\beta>0$ can be mentioned as an example.
In particular, if we consider
\[
\mathcal V =\{\pi\cap B_\Lambda: \text{$\pi$ is a hyperplane of dimension $k$}\}
\]
we obtain
$$
\lambda_k (D^2u) +C \Delta u = f,
$$
as a limiting PDE, where
$$
\lambda_k (D^2 u) = \inf_{\dim (V) = k} \sup_{v \in V} \langle D^2u \, v, v \rangle
$$
is the $k$-th eigenvalue of $D^2u$, see also \cite{blancr19b}.
\end{example}
The applicability of the results in this article is by no means limited to these examples, but rather they apply to many kinds of fully nonlinear uniformly elliptic PDEs.
\section{Measure estimates}
One of the key ingredients in the proof of H\"older regularity is the measure estimate Lemma~\ref{first}. To prove it, we need an $\varepsilon$-ABP estimate Theorem~\ref{eps-ABP}, an estimate for the difference between $u$ and its concave envelope Corollary~\ref{estimate Q}, as well as a suitable barrier function, Lemma~\ref{barrier}.
\subsection{The $\varepsilon$-ABP estimate}
Next we recall a version of the ABP estimate. The discrete nature of our setting forces us to consider non-continuous subsolutions of the DPP, so the corresponding concave envelope $\Gamma$ might not be $C^{1,1}$ as in the classical setting. Moreover, in this setting it is not easy to use the change of variables formula for the integral to prove the ABP.
In our previous work \cite{arroyobp}, the ABP estimate (Theorem~\ref{eps-ABP} below) is adapted to the discrete {$\varepsilon$-setting} following an argument by Caffarelli and Silvestre (\cite{caffarellis09}) for nonlocal equations. The idea is to use a covering argument on the contact set (where $u$ coincides with $\Gamma$) to estimate the oscillation of $\Gamma$. It is also interesting to note that one can recover the classical ABP estimate by taking limits as $\varepsilon\to 0$.
However, the $\varepsilon$-ABP estimate as stated in \cite{arroyobp} turns out to be insufficient to establish the preliminary measure estimates needed in our proof of H\"older regularity. To deal with this inconvenience, and since the $\varepsilon$-ABP exhibits certain independence of the behavior of $u$ outside the contact set, we need to complement the $\varepsilon$-ABP estimate with an estimate (in measure) of the difference between the subsolution $u$ and its concave envelope $\Gamma$ (Lemma~\ref{estimate B_eps}) in a neighborhood of any contact point.
Given $\varepsilon>0$, we denote by $\mathcal{Q}_\varepsilon(\mathbb{R}^N)$ a grid of open cubes of diameter $\varepsilon/4$ covering $\mathbb{R}^N$ up to a set of measure zero. Take
\begin{equation*}
\mathcal{Q}_\varepsilon(\mathbb{R}^N)
:\,=
\set{Q=Q_{\frac{\varepsilon}{4\sqrt{N}}}(x)}{x\in\frac{\varepsilon}{4\sqrt{N}}\,\mathbb{Z}^N}.
\end{equation*}
In addition, if $A\subset\mathbb{R}^N$ we write
\begin{equation*}
\mathcal{Q}_\varepsilon(A)
:\,=
\set{Q\in\mathcal{Q}_\varepsilon(\mathbb{R}^N)}{\overline Q\cap A\neq\emptyset}.
\end{equation*}
In order to obtain the measure estimates, given a bounded Borel measurable function $u$ satisfying the conditions in Theorem~\ref{eps-ABP}, we define the concave envelope of $u^+=\max\{u,0\}$ in $B_{2\sqrt{N}+\Lambda\varepsilon}$ as the function
\begin{equation*}
\Gamma(x)
:\,=
\begin{cases}
\inf\set{\ell(x)}{\text{for all hyperplanes } \ell\geq u^+ \text{ in } B_{2\sqrt{N}+\Lambda\varepsilon}} & \text{ if } |x|<2\sqrt{N}+\Lambda\varepsilon,
\\
0 & \text{ if } |x|\geq 2\sqrt{N}+\Lambda\varepsilon.
\end{cases}
\end{equation*}
Moreover, we define the superdifferential of $\Gamma$ at $x$ as the set of vectors
\begin{equation*}
\nabla\Gamma(x)
:\,=\set{\xi\in\mathbb{R}^N}{\Gamma(z)\leq\Gamma(x)+\prodin{\xi}{z-x}\ \text{ for all }\ |z|<2\sqrt{N}+\Lambda\varepsilon}.
\end{equation*}
Since $\Gamma$ is concave, $\nabla\Gamma(x)\neq\emptyset$ for every $|x|<2\sqrt{N}+\Lambda\varepsilon$.
In addition, we define the contact set $K_u\subset\overline B_{2\sqrt{N}}$ as the set of points where $u$ and $\Gamma$ `agree':
\begin{equation*}
K_u
:\,=
\set{|x|\leq 2\sqrt{N}}{\limsup_{y\to x}u(y)=\Gamma(x)}.
\end{equation*}
We remark that the set $K_u$ is compact.
Indeed, $K_u$ is bounded and since $u\leq\Gamma$, the set of points where the equality is attained is given by $\limsup_{y\to x}u(y)-\Gamma(x)\geq 0$ and it is closed because $\limsup_{y\to x}u(y)-\Gamma(x)$ is upper semicontinuous.
Now we are in conditions of stating the $\varepsilon$-ABP estimate, whose proof can be found in \cite[Theorem 4.1]{arroyobp} (see also Remark 7.4 in the same reference).
\begin{theorem}[$\varepsilon$-ABP estimate]\label{eps-ABP}
Let $f\in C(\overline B_{2\sqrt{N}})$ and suppose that $u$ is a bounded Borel measurable function satisfying
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^+u+f\geq 0 & \text{ in } B_{2\sqrt{N}},
\\
u\leq 0 & \text{ in } \mathbb{R}^N\setminus B_{2\sqrt{N}},
\end{cases}
\end{equation*}
where $\mathcal{L}_\varepsilon^+u$ was defined in (\ref{L-eps+}).
Then
\begin{equation*}
\sup_{B_{2\sqrt{N}}}u
\leq
C\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_u)}(\sup_Qf^+)^N|Q|\bigg)^{1/N},
\end{equation*}
where $C>0$ is a constant independent of $\varepsilon$.
\end{theorem}
All relevant information of $u$ in the proof of the $\varepsilon$-ABP estimate turns out to be transferred to its concave envelope $\Gamma$ in the contact set $K_u$, while the behavior of $u$ outside $K_u$ does not play any role in the estimate. Therefore, in order to control the behavior of $u$ in $B_{2\sqrt{N}}$, in the next result we show that $u$ stays sufficiently close to its concave envelope in a large enough portion of the $\varepsilon$-neighborhood of any contact point $x_0\in K_u$. It is also worth remarking that the result can be regarded as a refinement of Lemma 4.4 in \cite{arroyobp}, the main difference being the possible discontinuities that $u$ might present.
\begin{lemma}\label{estimate B_eps}
Under the assumptions of Theorem~\ref{eps-ABP}, let $x_0\in K_u$. Then for every $C>0$ large enough there exists $c>0$ such that
\begin{equation*}
|B_{\varepsilon/4}(x_0)\cap\{\Gamma-u\leq Cf(x_0)\varepsilon^2\}|
\geq
c\varepsilon^N.
\end{equation*}
\end{lemma}
\begin{proof}
By the definition of the set $K_u$, given $x_0\in K_u$ there exists a sequence $\{x_n\}_n$ of points in $\overline B_{2\sqrt{N}}$ converging to $x_0$ such that
\begin{equation*}
\Gamma(x_0)
=
\lim_{n\to\infty}u(x_n).
\end{equation*}
Recall the notation $\delta u(x_n,y):\,=u(x_n+y)+u(x_n-y)-2u(x_n)$. Then, since $u\leq\Gamma$,
\begin{equation*}
\begin{split}
\delta u(x_n,y)
\leq
~&
\delta\Gamma(x_n,y)+2[\Gamma(x_n)-u(x_n)]
\\
\leq
~&
2[\Gamma(x_n)-u(x_n)],
\end{split}
\end{equation*}
for every $y$, where the concavity of $\Gamma$ has been used in the second inequality. In particular,
\begin{equation*}
\sup_{z\in B_\Lambda}\delta u(x_n,\varepsilon z)
\leq
2[\Gamma(x_n)-u(x_n)]
\longrightarrow
0
\end{equation*}
as $n\to\infty$.
On the other hand,
\begin{equation*}
\begin{split}
\frac{1}{2}\vint_{B_1}\delta u(x_n,\varepsilon y)\,dy
=
~&
\vint_{B_\varepsilon}(u(x_n+y)-u(x_n))\,dy
\\
=
~&
\vint_{B_\varepsilon}(u(x_0+y)-\Gamma(x_0))\,dy
\\
~&
+\Gamma(x_0)-u(x_n)+\vint_{B_\varepsilon}(u(x_n+y)-u(x_0+y))\,dy,
\end{split}
\end{equation*}
and taking limits
\begin{equation*}
\lim_{n\to\infty}\frac{1}{2}\vint_{B_1}\delta u(x_n,\varepsilon y)\,dy
=
\vint_{B_\varepsilon}(u(x_0+y)-\Gamma(x_0))\,dy.
\end{equation*}
Replacing in the expression for $\mathcal{L}_\varepsilon^+u(x_n)$ we get
\begin{equation*}
\varepsilon^2\liminf_{n\to\infty}\mathcal{L}_\varepsilon^+u(x_n)
\leq
\beta\vint_{B_\varepsilon}(u(x_0+y)-\Gamma(x_0))\,dy.
\end{equation*}
Since $\mathcal{L}_\varepsilon^+u+f\geq 0$ by assumption with continuous $f$, we obtain
\begin{equation*}
\begin{split}
\frac{f(x_0)\varepsilon^2}{\beta}
\geq
~&
\vint_{B_\varepsilon}(\Gamma(x_0)-u(x_0+y))\,dy
\\
=
~&
\vint_{B_\varepsilon}(\Gamma(x_0)-u(x_0+y)+\prodin{\xi}{y})\,dy,
\end{split}
\end{equation*}
for every vector $\xi\in\mathbb{R}^N$, where the equality holds because of the symmetry of $B_\varepsilon$. Since $\nabla\Gamma(x_0)\neq\emptyset$ by the concavity of $\Gamma$, we can fix $\xi\in\nabla\Gamma(x_0)$.
Next we split $B_\varepsilon$ into two sets: $B_\varepsilon\cap\{\Phi\leq Cf(x_0)\varepsilon^2\}$ and $B_\varepsilon\cap\{\Phi>Cf(x_0)\varepsilon^2\}$, where we have denoted
\begin{equation*}
\Phi(y)
:\,=
\Gamma(x_0)-u(x_0+y)+\prodin{\xi}{y}
\end{equation*}
for every $y\in B_\varepsilon$ for simplicity, and we study the integral of $\Phi$ over both subsets.
First, since $u\leq\Gamma$ and $\xi\in\nabla\Gamma(x_0)$ we have that
\begin{equation*}
\Phi(y)
\geq
\Gamma(x_0)-\Gamma(x_0+y)+\prodin{\xi}{y}
\geq
0
\end{equation*}
for every $y\in B_\varepsilon$, so we can estimate
\begin{equation*}
\begin{split}
\int_{B_\varepsilon\cap\{\Phi\leq Cf(x_0)\varepsilon^2\}}\Phi(y)\,dy
\geq
0.
\end{split}
\end{equation*}
On the other hand,
\begin{equation*}
\int_{B_\varepsilon\cap\{\Phi>Cf(x_0)\varepsilon^2\}}\Phi(y)\,dy
>
|B_\varepsilon\cap\{\Phi>Cf(x_0)\varepsilon^2\}|Cf(x_0)\varepsilon^2.
\end{equation*}
Summarizing, we have proven that
\begin{equation*}
\frac{f(x_0)\varepsilon^2}{\beta}
>
\frac{|B_\varepsilon\cap\{\Phi>Cf(x_0)\varepsilon^2\}|}{|B_\varepsilon|}Cf(x_0)\varepsilon^2,
\end{equation*}
so
\begin{equation*}
|B_{\varepsilon/4}\cap\{\Phi>Cf(x_0)\varepsilon^2\}|
\leq
|B_\varepsilon\cap\{\Phi>Cf(x_0)\varepsilon^2\}|
<
\frac{|B_\varepsilon|}{C\beta}
=
\frac{4^N}{C\beta}|B_{\varepsilon/4}|.
\end{equation*}
Therefore,
\begin{equation*}
|B_{\varepsilon/4}\cap\{\Phi\leq Cf(x_0)\varepsilon^2\}|
\geq
|B_{\varepsilon/4}|\left(1-\frac{4^N}{C\beta}\right)
=
c\varepsilon^N.
\end{equation*}
Finally, replacing $\Phi$, and since $\Gamma(x_0+y)\leq\Gamma(x_0)+\prodin{\xi}{y}$ for every $y\in B_{\varepsilon/4}$ and $\xi\in\nabla\Gamma(x_0)$, we can estimate
\begin{equation*}
\begin{split}
c\varepsilon^N
\leq
~&
|\set{y\in B_{\varepsilon/4}}{\Gamma(x_0)-u(x_0+y)+\prodin{\xi}{y}\leq Cf(x_0)\varepsilon^2}|
\\
\leq
~&
|\set{y\in B_{\varepsilon/4}}{\Gamma(x_0+y)-u(x_0+y)\leq Cf(x_0)\varepsilon^2}|
\\
=
~&
\big|B_{\varepsilon/4}(x_0) \cap \{\Gamma-u\leq Cf(x_0)\varepsilon^2\}\big|,
\end{split}
\end{equation*}
so the proof is finished.
\end{proof}
We obtain the same estimate in each cube $Q\in\mathcal{Q}_\varepsilon(K_u)$ immediately as a corollary of the previous lemma.
\begin{corollary}\label{estimate Q}
Under the assumptions of Theorem~\ref{eps-ABP}, there exists $c>0$ such that
\begin{equation*}
\big|3\sqrt{N}\,Q \cap \{\Gamma-u\leq C(\sup_Qf)\varepsilon^2\}\big|
\geq
c|Q|
\end{equation*}
for each $Q\in\mathcal{Q}_\varepsilon(K_u)$.
\end{corollary}
\begin{proof}
Let $Q\in\mathcal{Q}_\varepsilon(K_u)$. Then there is $x_0\in \overline{Q}\cap K_u$. On the other hand, since $\diam Q=\varepsilon/4$, if we denote by $x_Q$ the center of $Q$, we get that $|x_Q-x_0|\leq\diam Q/2$ and
\begin{equation*}
B_{\varepsilon/4}(x_0)
=
B_{\diam Q}(x_0)
\subset
B_{\frac{3}{2}\diam Q}(x_Q)
\subset
3\sqrt{N}\,Q.
\end{equation*}
Hence, by Lemma~\ref{estimate B_eps}, using this inclusion and recalling that $\varepsilon^N=(4\sqrt{N})^N|Q|$ we complete the proof.
\end{proof}
\subsection{A barrier function for $\mathcal{L}_\varepsilon^-$}
Another ingredient needed in the proof of the measure estimate Lemma~\ref{first} is a construction of a barrier for the minimal Pucci-type operator defined in \eqref{L-eps-}. To that end, we prove the following technical inequality for real numbers.
\begin{lemma}
Let $\sigma>0$. If $a,b>0$ and $c\in\mathbb{R}$ such that $|c|<a+b$ then
\begin{multline}\label{ineq:abc}
(a+b+c)^{-\sigma}+(a+b-c)^{-\sigma}-2a^{-\sigma}
\\
\geq
2\sigma a^{-\sigma-1}\left[-b+\frac{\sigma+1}{2}\left(1-(\sigma+2)\frac{b}{a}\right)\frac{c^2}{a}\right].
\end{multline}
\end{lemma}
\begin{proof}
The inequality
\begin{equation*}
(t+h)^{-\sigma}+(t-h)^{-\sigma}-2t^{-\sigma}
\geq
\sigma(\sigma+1)t^{-\sigma-2}h^2
\end{equation*}
holds for every $0<|h|<t$.
This can be seen by considering the Taylor expansion in $h$ of the left-hand side with an error term of order 4, and dropping the error term from the lower bound since it is positive.
Then replacing $t=a+b$ and $h=c$ we obtain that
\begin{equation*}
(a+b+c)^{-\sigma}+(a+b-c)^{-\sigma}
\geq
2(a+b)^{-\sigma}+\sigma(\sigma+1)(a+b)^{-\sigma-2}c^2.
\end{equation*}
Moreover, by using convexity we can estimate
\begin{equation*}
(a+b)^{-\sigma}
\geq
a^{-\sigma}-\sigma a^{-\sigma-1}b
=
a^{-\sigma}\left(1-\sigma\frac{b}{a}\right),
\end{equation*}
and similarly
\begin{equation*}
(a+b)^{-\sigma-2}
\geq
a^{-\sigma-2}\left(1-(\sigma+2)\frac{b}{a}\right).
\end{equation*}
Using these inequalities and rearranging terms we get
\begin{multline*}
(a+b+c)^{-\sigma}+(a+b-c)^{-\sigma}-2a^{-\sigma}
\\
\begin{split}
\geq
~&
2a^{-\sigma}\left(1-\sigma\frac{b}{a}\right)+\sigma(\sigma+1)a^{-\sigma-2}\left(1-(\sigma+2)\frac{b}{a}\right)c^2-2a^{-\sigma}
\\
=
~&
2\sigma a^{-\sigma-1}\left[-b+\frac{\sigma+1}{2}\left(1-(\sigma+2)\frac{b}{a}\right)\frac{c^2}{a}\right],
\end{split}
\end{multline*}
and the proof is concluded.
\end{proof}
Next we construct a suitable barrier function. The importance of this function, which will be clarified later, lies in the fact that, when added to a subsolution $u$, its shape ensures that the contact set is localized in a fixed neighborhood of the origin. Recall the notation $\mathcal{L}_\varepsilon^-$ from \eqref{L-eps-}.
\begin{lemma}\label{barrier}
There exists a smooth function $\Psi:\mathbb{R}^N\to\mathbb{R}$ and $\varepsilon_0>0$ such that
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-\Psi+\psi\geq 0 & \text{ in } \mathbb{R}^N,
\\
\Psi\geq 2 & \text{ in } Q_3,
\\
\Psi\leq 0 & \text{ in } \mathbb{R}^N\setminus B_{2\sqrt{N}},
\end{cases}
\end{equation*}
for every $0<\varepsilon\leq\varepsilon_0$, where $\psi:\mathbb{R}^N\to\mathbb{R}$ is a smooth function such that
\begin{equation*}
\psi\leq\psi(0) \text{ in } \mathbb{R}^N
\qquad\text{ and }\qquad
\psi\leq 0 \text{ in } \mathbb{R}^N\setminus B_{1/4}.
{\bf e}nd{equation*}
\end{lemma}
\begin{proof}
The proof is constructive. Let $\sigma>0$ to be fixed later and define
\begin{equation*}
\Psi(x)
=
A(1+|x|^2)^{-\sigma}-B
{\bf e}nd{equation*}
for each $x\in\mathbb{R}^N$, where $A,B>0$ are chosen such that
\begin{equation*}
\Psi(x)
=
\begin{cases}
2 & \text{ if } |x|=\frac{3}{2}\sqrt{N},
\\
0 & \text{ if } |x|=2\sqrt{N}.
\end{cases}
\end{equation*}
Then $\Psi\leq 0$ in $\mathbb{R}^N\setminus B_{2\sqrt{N}}$ and $\Psi\geq 2$ in $Q_3\subset B_{\frac{3}{2}\sqrt{N}}$. We show that $\Psi$ satisfies the remaining condition for a suitable choice of the exponent $\sigma$ independently of $\varepsilon$.
Since $\Psi$ is radial, we can assume without loss of generality that $x=(|x|,0,\ldots,0)$. Then
\begin{equation*}
\Psi(x+\varepsilon y)
=
A(1+|x+\varepsilon y|^2)^{-\sigma}-B
=
A(1+|x|^2+\varepsilon^2|y|^2+2\varepsilon|x|y_1)^{-\sigma}-B
\end{equation*}
for every $y\in\mathbb{R}^N$. Thus, recalling \eqref{ineq:abc} with $a=1+|x|^2$, $b=\varepsilon^2|y|^2$ and $c=2\varepsilon|x|y_1$ we obtain that
\begin{equation*}
\begin{split}
\delta\Psi(x,\varepsilon y)
&
=
\Psi(x+\varepsilon y)+\Psi(x-\varepsilon y)-2\Psi(x)
\\
&
\geq
2\varepsilon^2A\sigma (1+|x|^2)^{-\sigma-1}\left[-|y|^2+2(\sigma+1)\left(1-(\sigma+2)\frac{\varepsilon^2|y|^2}{1+|x|^2}\right)\frac{|x|^2}{1+|x|^2}y_1^2\right]
\\
&
\geq
2\varepsilon^2A\sigma (1+|x|^2)^{-\sigma-1}\left[-\Lambda^2+2(\sigma+1)(1-(\sigma+2)\Lambda^2\varepsilon^2)\frac{|x|^2}{1+|x|^2}y_1^2\right]
\end{split}
\end{equation*}
for every $|y|<\Lambda$.
Fix $\varepsilon_0=\varepsilon_0(\Lambda,\sigma)$ such that
\begin{equation*}
\varepsilon_0
\leq
\frac{1}{\Lambda\sqrt{2(\sigma+2)}},
\end{equation*}
so
\begin{equation*}
\delta\Psi(x,\varepsilon y)
\geq
2\varepsilon^2A\sigma (1+|x|^2)^{-\sigma-1}\left[-\Lambda^2+(\sigma+1)\frac{|x|^2}{1+|x|^2}y_1^2\right]
\end{equation*}
for every $|y|<\Lambda$ and $0<\varepsilon\leq\varepsilon_0$.
In consequence we can estimate
\begin{equation*}
\inf_{z\in B_\Lambda}\delta\Psi(x,\varepsilon z)
\geq
2\varepsilon^2A\sigma (1+|x|^2)^{-\sigma-1}\left[-\Lambda^2\right]
\end{equation*}
and
\begin{equation*}
\vint_{B_1}\delta\Psi(x,\varepsilon y)\,dy
\geq
2\varepsilon^2A\sigma (1+|x|^2)^{-\sigma-1}\left[-\Lambda^2+\frac{\sigma+1}{N+2}\cdot\frac{|x|^2}{1+|x|^2}\right],
\end{equation*}
where we have used that $\vint_{B_1}y_1^2\,dy=\frac{1}{N+2}$.
Replacing these inequalities in the definition of $\mathcal{L}_\varepsilon^-\Psi(x)$, \eqref{L-eps-}, we obtain
\begin{equation*}
\mathcal{L}_\varepsilon^-\Psi(x)
\geq
A\sigma (1+|x|^2)^{-\sigma-1}\left[-\Lambda^2+\beta\frac{\sigma+1}{N+2}\cdot\frac{|x|^2}{1+|x|^2}\right]
=\,:
-\psi(x)
\end{equation*}
for every $x\in\mathbb{R}^N$ and $0<\varepsilon\leq\varepsilon_0$. It is easy to check that $\psi(x)\leq\psi(0)=A\sigma\Lambda^2$ for every $x\in\mathbb{R}^N$. Moreover
\begin{equation*}
\psi(x)
\leq
A\sigma (1+|x|^2)^{-\sigma-1}\left[\Lambda^2-\frac{\beta(\sigma+1)}{17(N+2)}\right]
\end{equation*}
for every $|x|\geq 1/4$. Choosing large enough $\sigma=\sigma(N,\Lambda,\beta)>0$ we get that $\psi(x)\leq 0$ for every $|x|\geq 1/4$ and the proof is finished.
\end{proof}
\subsection{Estimate for the distribution function of $u$}
In the next lemma we adapt \cite[Lemma 10.1]{caffarellis09} to pass from a pointwise estimate to an estimate in measure. This is done by combining the estimate for the difference between $u$ and $\Gamma$ near the contact set with the $\varepsilon$-ABP estimate.
\begin{lemma}
\label{first}
There exist $\varepsilon_0,\rho>0$, $M\geq 1$ and $0<\mu<1$ such that if $u$ is a bounded measurable function satisfying
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-u\leq\rho & \text{ in } B_{2\sqrt{N}},
\\
u\geq 0 & \text{ in } \mathbb{R}^N,
\end{cases}
\end{equation*}
for some $0<\varepsilon\leq\varepsilon_0$ and
\begin{equation*}
\inf_{Q_3}u
\leq
1,
\end{equation*}
then
\begin{equation*}
|\{u> M\}\cap Q_1|
\le
\mu.
\end{equation*}
\end{lemma}
\begin{proof}
The idea of the proof is as follows: first we use the auxiliary functions $\Psi$ and $\psi$ from Lemma~\ref{barrier} to define a new function
$$
v=\Psi-u,
$$
which satisfies the assumptions in Theorem~\ref{eps-ABP} ($\varepsilon$-ABP estimate) with $f=\psi+\rho$. Then we use the $\varepsilon$-ABP together with the pointwise estimate $\inf_{Q_3}u\leq 1$ and the negativity of $\psi$ outside $B_{1/4}$ to obtain a lower bound for the measure of the union of all cubes $Q\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})$. Combining this with the estimate of the difference between $v$ and its concave envelope at each cube $Q$ (Corollary~\ref{estimate Q}) we can deduce the desired measure estimate for $u$.
Let $v=\Psi-u$ where $\Psi$ is the function from Lemma~\ref{barrier}. Since $u\geq 0$ and $\Psi\leq 0$ in $\mathbb{R}^N\setminus B_{2\sqrt{N}}$, then $v\leq 0$ in $\mathbb{R}^N\setminus B_{2\sqrt{N}}$. On the other hand,
\begin{equation*}
\sup_{Q_3}v
\geq
\inf_{Q_3}\Psi-\inf_{Q_3}u
\geq
1.
\end{equation*}
Similarly, since $\delta v(x,\varepsilon y)=\delta\Psi(x,\varepsilon y)-\delta u(x,\varepsilon y)$, then
\begin{equation*}
\sup_{z\in B_\Lambda}\delta v(x,\varepsilon z)
\geq
\inf_{z\in B_\Lambda}\delta \Psi(x,\varepsilon z)-\inf_{z\in B_\Lambda}\delta u(x,\varepsilon z)
\end{equation*}
so we have that
\begin{equation*}
\mathcal{L}_\varepsilon^+v(x)
\geq
\mathcal{L}_\varepsilon^-\Psi(x)-\mathcal{L}_\varepsilon^-u(x)
\geq
-\psi(x)-\rho.
\end{equation*}
Summarizing, $v=\Psi-u$ satisfies $\sup_{Q_3}v\geq 1$ and
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^+v+\psi+\rho\geq 0 & \text{ in } B_{2\sqrt{N}},
\\
v\leq 0 & \text{ in } \mathbb{R}^N\setminus B_{2\sqrt{N}}.
\end{cases}
\end{equation*}
Moreover, since $\psi$ is continuous, we are under the hypothesis of the $\varepsilon$-ABP estimate in Theorem~\ref{eps-ABP}, and thus the following estimate holds,
\begin{equation*}
\sup_{B_{2\sqrt{N}}}v
\leq
C_1\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}(\sup_Q\psi^++\rho)^N|Q|\bigg)^{1/N},
\end{equation*}
where $C_1>0$. Then, since $Q_3\subset B_{2\sqrt{N}}$ and $\sup_{Q_3}v\geq 1$, we obtain
\begin{equation*}
\begin{split}
\frac{1}{C_1}
\leq
~&
\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}(\sup_Q\psi^++\rho)^N|Q|\bigg)^{1/N}
\\
\leq
~&
\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}(\sup_Q\psi^+)^N|Q|\bigg)^{1/N}
+
\rho\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}|Q|\bigg)^{1/N},
\end{split}
\end{equation*}
where the second inequality follows immediately from Minkowski's inequality. Since $K_v\subset B_{2\sqrt{N}}$ and $\diam Q=\varepsilon/4$ for each $Q\in\mathcal{Q}_\varepsilon(K_v)$ then
\begin{equation*}
\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}|Q|
\leq
|B_{2\sqrt{N}+\varepsilon/4}|
\leq
C_2^N,
\end{equation*}
for every $0<\varepsilon\leq\varepsilon_0$.
Replacing in the previous estimate and rearranging terms we get
\begin{equation*}
\frac{1}{C_1}-C_2\rho
\leq
\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}(\sup_Q\psi^+)^N|Q|\bigg)^{1/N}.
\end{equation*}
Choosing small enough $\rho>0$
we have that
\begin{equation*}
\frac{1}{(2C_1)^N}
\leq
\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}(\sup_Q\psi^+)^N|Q|.
\end{equation*}
Next we observe that by Lemma~\ref{barrier}, $\psi\leq 0$ in $\mathbb{R}^N\setminus B_{1/4}$, so $\psi^+\equiv 0$ for each $Q\in\mathcal{Q}_\varepsilon(K_v)$ such that $Q\cap B_{1/4}=\emptyset$, while we estimate $\sup_Q\psi^+\leq\psi(0)$ when $Q\cap B_{1/4}\neq\emptyset$. Thus
\begin{equation*}
\frac{1}{(2C_1\psi(0))^N}
\leq
\sum_{Q\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})}|Q|,
\end{equation*}
and recalling Corollary~\ref{estimate Q}, we obtain the following inequality,
\begin{equation*}
\frac{c}{(2C_1\psi(0))^N}
\leq
\sum_{Q\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})}\big|3\sqrt{N}\,Q \cap \{\Gamma-v\leq C(\sup_Q\psi^++\rho)\varepsilon^2\}\big|.
\end{equation*}
Notice that $3\sqrt{N}\,Q\subset B_{1/2}\subset Q_1$ for each $Q\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})$ and every $0<\varepsilon\leq\varepsilon_0$ with $\varepsilon_0>0$ sufficiently small, so
\begin{equation*}
3\sqrt{N}\,Q \cap \{\Gamma-v\leq C(\sup_Q\psi^++\rho)\varepsilon^2\}
\subset
Q_1 \cap \{\Gamma-v\leq C(\psi(0)+\rho)\varepsilon^2\}
\end{equation*}
for each $Q\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})$, where the fact that $\sup_Q\psi^+\leq\psi(0)$ has been used again here.
Furthermore, if $\ell=\ell(N)\in\mathbb{N}$ is the unique odd integer such that $\ell-2<3\sqrt{N}\leq\ell$, then each cube $Q\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})$ is contained in at most $\ell^N$ cubes of the form $3\sqrt{N}\,Q'$ with $Q'\in\mathcal{Q}_\varepsilon(K_v\cap B_{1/4})$, and in consequence
\begin{equation*}
\frac{c}{(2C_1\psi(0))^N}
\leq
\ell^N\big|Q_1 \cap \{\Gamma-v\leq C(\psi(0)+\rho)\varepsilon^2\}\big|.
\end{equation*}
Finally, since $\Gamma\geq 0$, $v=\Psi-u\leq\Psi(0)-u$ and $\varepsilon\leq\varepsilon_0$,
\begin{equation*}
\frac{c}{(2C_1\psi(0)\ell)^N}
\leq
\big|Q_1 \cap \{u\leq \Psi(0)+C(\psi(0)+\rho)\varepsilon_0^2\}\big|.
\end{equation*}
Then let $M:\,=\Psi(0)+C(\psi(0)+\rho)\varepsilon_0^2$ and $1-\mu:\,=c(2C_1\psi(0)\ell)^{-N}$, so that we get
\begin{align*}
1-\mu
\leq
\big|Q_1 \cap \{u\leq M\}\big|,
\end{align*}
which immediately implies the claim.
\end{proof}
\section{De Giorgi oscillation estimate}
A key intermediate result towards the oscillation estimate (Lemma \ref{DeGiorgi}), H\"older regularity (Theorem \ref{Holder}) and Harnack's inequality is a power decay estimate for $|\{u>t\}\cap Q_1|$. This will be Lemma~\ref{measure bound}. It is based on the measure
estimates Lemma~\ref{first} and Lemma~\ref{second}, as well as a discrete version of the Calder\'on-Zygmund decomposition, Lemma~\ref{CZ} below.
\subsection{Calder\'on-Zygmund decomposition}
The discrete nature of the DPP does not allow us to apply the rescaling argument to arbitrarily small dyadic cubes. To be more precise, since all the previous estimates require certain bound $\varepsilon_0>0$ for the scale-size in the DPP, and since the extremal Pucci-type operators $\mathcal{L}_\varepsilon^\pm$ rescale as $\mathcal{L}_{2^\ell\varepsilon}^\pm$ in each dyadic cube of generation $\ell$, the rescaling argument will only work on those dyadic cubes of generation $\ell\in\mathbb{N}$ satisfying $2^\ell\varepsilon<\varepsilon_0$. For that reason, the dyadic splitting in the Calder\'on-Zygmund decomposition has to be stopped at generation $L$, and in consequence the Calder\'on-Zygmund decomposition lemma has to be adapted. We need an additional criterion for selecting cubes in order to control the error caused by stopping the process at generation $L$. We use the idea from \cite{arroyobp}.
We use the following notation: $\mathcal D_\ell$ is the family of dyadic open subcubes of $Q_1$ of generation $\ell\in\mathbb{N}$, where $\mathcal D_0=\{Q_1\}$, $\mathcal D_1$ is the family of $2^N$ dyadic cubes obtained by dividing $Q_1$, and so on. Given $\ell\in\mathbb{N}$ and $Q\in\mathcal D_\ell$ we define $\mathrm{pre}(Q)\in\mathcal D_{\ell-1}$ as the unique dyadic cube in $\mathcal D_{\ell-1}$ containing $Q$.
\begin{lemma}[Calder\'on-Zygmund]\label{CZ}
Let $A\subset B\subset Q_1$ be measurable sets, $\delta_1,\delta_2\in (0,1)$ and $L\in\mathbb{N}$. Suppose that the following assumptions hold:
\begin{enumerate}
\item $|A|\leq\delta_1$;
\item \label{item:includedB}if $Q\in\mathcal D_\ell$ for some $\ell\leq L$ satisfies $|A\cap Q|>\delta_1|Q|$ then $\mathrm{pre}(Q)\subset B$;
\item \label{item:includedB2} if $Q\in\mathcal D_L$ satisfies $|A\cap Q|>\delta_2|Q|$ then $Q\subset B$;
\end{enumerate}
Then,
\begin{align*}
|A|
\leq
\delta_1|B|+\delta_2.
\end{align*}
\end{lemma}
\begin{proof}
We will construct a collection of open cubes $\mathcal Q_B$, containing subcubes from generations $\mathcal D_0,\mathcal D_1,\dots,\mathcal D_L$.
The cubes will be pairwise disjoint and will be contained in $B$.
Recall that by assumption
$
|Q_1 \cap A|\leq \delta_1 |Q_1|.
$
Then we split $Q_1$ into $2^N$ dyadic cubes $\mathcal D_1$. For those dyadic cubes $Q\in \mathcal D_1$ that satisfy
\begin{align}
\label{eq:treshold}
|A\cap Q|>\delta_1|Q|,
\end{align}
we select $\mathrm{pre}(Q)$ into $\mathcal Q_B$.
Those cubes are included in $B$ because of assumption (\ref{item:includedB}).
For other dyadic cubes that do not satisfy \eqref{eq:treshold} and are not contained in any cube already included in $\mathcal Q_B$,
we keep splitting, and again repeat the selection according to \eqref{eq:treshold}. We repeat splitting $L\in \mathbb N$ times. At the level $L$, in addition to the previous process, we also select those cubes $Q\in \mathcal D_L$ (not the predecessors) into $\mathcal Q_B$ for which
\begin{align}
\label{eq:treshold2}
|A\cap Q|> \delta_2 |Q|,
\end{align}
and are not contained in any cube already included in $\mathcal Q_B$.
Those cubes are included in $B$ because of assumption (\ref{item:includedB2}).
Observe that for $\mathrm{pre}(Q)$ selected according to \eqref{eq:treshold} into $\mathcal Q_B$, it holds that
\begin{align*}
|A\cap \mathrm{pre}(Q)|\le \delta_1|\mathrm{pre}(Q)|
\end{align*}
since otherwise we would have stopped splitting already at the earlier round. We also have $|A\cap Q|\le \delta_1|Q|$ for cubes $Q$ selected according to \eqref{eq:treshold2} into $\mathcal Q_B$, since their predecessors were not selected according to \eqref{eq:treshold}. Summing up, for all the cubes $Q\in \mathcal Q_B$, it holds that
\begin{align}
\label{eq:meas-bound}
|A\cap Q|\le \delta_1|Q|.
\end{align}
Next we define $\mathcal G_L$ as a family of cubes of $\mathcal D_L$ that are not included in any of the cubes in $\mathcal Q_B$.
It immediately holds a.e.\ that
\[
A\subset Q_1=\bigcup_{Q\in\mathcal Q_B} Q \cup \bigcup_{Q\in\mathcal G_L} Q.
\]
By this, using \eqref{eq:meas-bound} for every $Q\in \mathcal Q_B$, as well as observing that $|A\cap Q|\leq \delta_2|Q|$ by \eqref{eq:treshold2} for every $Q\in \mathcal G_L$, we get
\[
\begin{split}
|A|
&=\sum_{Q\in\mathcal Q_B} |A\cap Q| + \sum_{Q\in\mathcal G_L} |A\cap Q|\\
&\leq\sum_{Q\in\mathcal Q_B} \delta_1|Q| + \sum_{Q\in\mathcal G_L} \delta_2|Q|\\
&\leq \delta_1 |B|+\delta_2 .
\end{split}
\]
In the last inequality, we used that the cubes in $\mathcal Q_B$ are included in $B$, as well as the fact that they are disjoint by construction.
\end{proof}
As we have already pointed out, we use the estimate from Lemma~\ref{first} to show that the condition (\ref{item:includedB}) in the Calder\'on-Zygmund lemma is satisfied. To ensure that the remaining condition is satisfied for the dyadic cubes in $\mathcal{D}_L$ not considered before stopping the dyadic decomposition, we prove the following result, which makes use of the equation. Here $\varepsilon$ is ``relatively large''.
\begin{lemma}
\label{second}
Let $0<\varepsilon_0<1$ and $\rho>0$. Suppose that $u$ is a bounded measurable function satisfying
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-u\leq\rho & \text{ in } Q_{10\sqrt{N}},
\\
u\geq 0 & \text{ in } \mathbb{R}^N,
\end{cases}
\end{equation*}
for some $\frac{\varepsilon_0}{2}\leq\varepsilon\leq\varepsilon_0$. There exists a constant $c=c(\varepsilon_0,\rho)>0$ such that if
\begin{equation*}
|\{u> K\}\cap Q_1|
>
\frac{c}{K}
{\bf e}nd{equation*}
holds for some $K>0$, then
\begin{equation*}
u> 1 \quad \text{ in }Q_1.
{\bf e}nd{equation*}
{\bf e}nd{lemma}
\begin{proof}
By the definition of the minimal Pucci-type operator $\mathcal{L}_\varepsilon^-$ and since $\mathcal{L}_\varepsilon^- u(x)\leq \rho$ for every $x\in Q_{10\sqrt{N}}$ by assumption, rearranging terms we have
\[
\begin{split}
u (x)
&
\geq
\alpha\inf_{\nu\in \mathcal{M}(B_\Lambda)} \int u(x+\varepsilon v) \,d\nu (v)+\beta\vint_{B_\varepsilon(x)} u(y)\,dy-\varepsilon^2\rho
\\
&\geq
\beta\vint_{B_\varepsilon(x)} u(y)\,dy-\varepsilon^2\rho,
\end{split}
\]
where in the second inequality we have used that $u\geq 0$ to estimate the $\alpha$-term by zero.
Then, by considering $f=\frac{\chi_{B_1}}{|B_1|}$, we can rewrite this inequality as
\begin{equation*}
u (x)
\geq
\frac{\beta}{\varepsilon^N}\int f\Big(\frac{y-x}{\varepsilon}\Big) u(y)\,dy-\varepsilon^2\rho,
{\bf e}nd{equation*}
which holds for every $x\in Q_{10\sqrt{N}}$, and in particular for every $|x|<5\sqrt{N}$. Next observe that if $|x|+\varepsilon<5\sqrt{N}$, then $y\in Q_{10\sqrt{N}}$ for every $y\in B_\varepsilon(x)$, and thus applying twice the previous inequality we can estimate by using change of variables
\begin{equation*}
\begin{split}
u (x)
\geq
~&
\frac{\beta}{\varepsilon^N}\int f\Big(\frac{y-x}{\varepsilon}\Big) \left(\frac{\beta}{\varepsilon^N}\int f\Big(\frac{z-y}{\varepsilon}\Big) u(z)\,dz-\varepsilon^2\rho\right)\,dy-\varepsilon^2\rho
\\
=
~&
\frac{\beta^2}{\varepsilon^N}\int \left(\frac{1}{\varepsilon^N}\int f\Big(\frac{y-x}{\varepsilon}\Big) f\Big(\frac{z-y}{\varepsilon}\Big) \,dy\right)u(z)\,dz-(1+\beta)\varepsilon^2\rho
\\
=
~&
\frac{\beta^2}{\varepsilon^N}\int (f*f)\Big(\frac{z-x}{\varepsilon}\Big)u(z)\,dz-(1+\beta)\varepsilon^2\rho,
{\bf e}nd{split}
{\bf e}nd{equation*}
which holds for every $|x|<5\sqrt{N}-\varepsilon$.
Let $n\in\mathbb{N}$ to be fixed later and assume that $|x|+(n-1)\varepsilon<5\sqrt{N}$. By iterating this argument $n$ times we obtain
\begin{equation}\label{inequality}
\begin{split}
u(x)
\geq
~&
\frac{\beta^n}{\varepsilon^N}\int f^{*n}\Big(\frac{y-x}{\varepsilon}\Big) u(y)\,dy-(1+\beta+\beta^2+\cdots+\beta^{n-1})\varepsilon^2\rho
\\
\geq
~&
\frac{\beta^n}{\varepsilon^N}\int f^{*n}\Big(\frac{y-x}{\varepsilon}\Big) u(y)\,dy-\frac{\varepsilon^2\rho}{1-\beta}
{\bf e}nd{split}
{\bf e}nd{equation}
for every $|x|<5\sqrt{N}-(n-1)\varepsilon$, where $f^{*n}$ denotes the convolution of $f$ with itself $n$ times. Observe that $f^{*n}$ is a radial decreasing function and $f^{*n}>0$ in $B_n$. Thus, since $\varepsilon\geq\frac{\varepsilon_0}{2}$ by assumption,
\begin{equation*}
f^{*n}\Big(\frac{y-x}{\varepsilon}\Big)
\geq
f^{*n}\Big(\frac{2(y-x)}{\varepsilon_0}\Big),
{\bf e}nd{equation*}
which is strictly positive whenever $|y-x|<\frac{n\varepsilon_0}{2}$.
Now fix $n\in\mathbb{N}$ such that $|x|<5\sqrt{N}-(n-1)\varepsilon_0$ for every $x\in Q_1$ and $|y-x|<\frac{n\varepsilon_0}{2}$ for every $x,y\in Q_1$, that is
$n\in\mathbb{N}$ such that
\begin{equation*}
2\sqrt{N}
<
n\varepsilon_0
<
\frac{9}{2}\sqrt{N}+\varepsilon_0.
{\bf e}nd{equation*}
Then
\begin{equation*}
f^{*n}\Big(\frac{y-x}{\varepsilon}\Big)
\geq
f^{*n}\Big(\frac{2\sqrt{N}e_1}{\varepsilon_0}\Big)
=\,:
C
>
0
{\bf e}nd{equation*}
for every $x,y\in Q_1$. In this way $Q_1$ is contained in the support of $y\mapsto f^{*n}\big(\frac{y-x}{\varepsilon}\big)$ for every $x\in Q_1$, so recalling that $u\geq 0$ we can estimate
\begin{equation*}
\begin{split}
\int f^{*n}\Big(\frac{y-x}{\varepsilon}\Big) u(y)\,dy
\geq
~&
\int_{Q_1} f^{*n}\Big(\frac{y-x}{\varepsilon}\Big) u(y)\,dy
\\
\geq
~&
C\int_{Q_1}u(y)\,dy
\\
\geq
~&
C\int_{\{u> K\}\cap Q_1}u(y)\,dy
\\
>
~&
C|\{u> K\}\cap Q_1|\,K
{\bf e}nd{split}
{\bf e}nd{equation*}
for each $K>0$.
Replacing this in {\bf e}qref{inequality} and recalling that $\varepsilon\leq\varepsilon_0$ we get
\begin{equation*}
\begin{split}
u(x)
>
~&
C\frac{\beta^n}{\varepsilon^N}|\{u> K\}\cap Q_1|\,K-\frac{\varepsilon^2\rho}{1-\beta}
\\
\geq
~&
C\frac{\beta^n}{\varepsilon_0^N}|\{u> K\}\cap Q_1|\,K-\frac{\varepsilon_0^2\rho}{1-\beta}
{\bf e}nd{split}
{\bf e}nd{equation*}
for each $K>0$ and every $x\in Q_1$.
Finally, let us fix $c=\frac{\varepsilon_0^N}{C\beta^n}\big(1+\frac{\varepsilon_0^2\rho}{1-\beta}\big)$. By assumption, $|\{u> K\}\cap Q_1|\,K>c$ holds for some $K>0$, so
\begin{equation*}
\begin{split}
u(x)
>
C\frac{\beta^n}{\varepsilon_0^N}c-\frac{\varepsilon_0^2\rho}{1-\beta}
=
1
{\bf e}nd{split}
{\bf e}nd{equation*}
for every $x\in Q_1$ and the proof is finished.
\end{proof}
\subsection{Power decay estimate}
The power decay estimate (Lemma~\ref{measure bound}) is obtained by deriving an estimate between the superlevel sets of $u$ and then iterating the estimate. In order to obtain the estimate between the superlevel sets, we use a discrete version of the Calder\'on-Zygmund decomposition (Lemma~\ref{CZ}) together with the preliminary measure estimates from Lemma~\ref{first} and Lemma~\ref{second}.
\begin{lemma}
\label{lem:main}
There exist $\varepsilon_0,\rho,c>0$, $M\geq 1$ and $0<\mu<1$ such that if $u$ is a bounded measurable function satisfying
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-u\leq\rho & \text{ in } Q_{10\sqrt{N}},
\\
u\geq 0 & \text{ in } \mathbb{R}^N,
{\bf e}nd{cases}
{\bf e}nd{equation*}
for some $0<\varepsilon\leq\varepsilon_0$ and
\begin{equation*}
\inf_{Q_3}u
\leq
1,
{\bf e}nd{equation*}
then
\begin{equation*}
|\{u> K^k\}\cap Q_1|
\leq
\frac{c}{(1-\mu)K}+\mu^k,
{\bf e}nd{equation*}
holds for every $K\ge M$ and $k\in\mathbb{N}$.
{\bf e}nd{lemma}
\begin{proof}
The values of $M$, $\mu$, $\varepsilon_0$ and $\rho$ are already given by Lemma~\ref{first}, while $c$ has been fixed in Lemma~\ref{second}.
For $k=1$, by Lemma~\ref{first}, we have
\[
|\{u> K\}\cap Q_1|\le |\{u> M\}\cap Q_1|\le\mu\le \frac{c}{K}+\mu.
\]
Now we proceed by induction.
We consider
\[
A:=A_{k}:=\{u> K^k\}\cap Q_1 \quad \text{ and } B:=A_{k-1}:=\{u> K^{k-1}\}\cap Q_1.
\]
We have $A\subset B\subset Q_1$ and $|A|\le \mu$.
We apply Lemma~\ref{CZ} for $\delta_1=\mu$, $\delta_2=\frac{c}{K}$ and $L\in\mathbb{N}$ such that $2^L\varepsilon<\varepsilon_0\leq 2^{L+1}\varepsilon$.
We have to check in two cases that certain dyadic cubes are included in $B$.
Observe that since $|A|\le \mu$, the first assumption in Lemma~\ref{CZ} is satisfied. Next we check that the remaining conditions in Lemma~\ref{CZ} are also satisfied. Given any cube $Q\in\mathcal{D}_{\bf e}ll$ for some ${\bf e}ll\leq L$, we define $\tilde u:Q_1\to\mathbb{R}$ as a rescaled version of $u$ restricted to $Q$, that is
\begin{equation}\label{tilde u}
\tilde u(y)
=
\frac{1}{K^{k-1}}\,u(x_0+2^{-{\bf e}ll}y)
{\bf e}nd{equation}
for every $y\in Q$, where $x_0$ stands for the center of $Q$. Then
\begin{equation*}
|\{\tilde u> K\}\cap Q_1|
=
2^{N{\bf e}ll}|\{u> K^k\}\cap Q|
=
\frac{|A\cap Q|}{|Q|}.
{\bf e}nd{equation*}
Let us suppose that $Q$ is a cube in $\mathcal{D}_{\bf e}ll$ for some ${\bf e}ll\leq L$ satisfying
\begin{align}
\label{eq:meas-assump}
|A\cap Q|>\mu |Q|.
{\bf e}nd{align}
We have to check that $\mathrm{pre}(Q)\subset B$. Let us suppose on the contrary that the inclusion does not hold, that is that there exists $\tilde x\in\mathrm{pre}(Q)$ such that $u(\tilde x)\le K^{k-1}$. By {\bf e}qref{tilde u} we have that
\begin{equation*}
\delta\tilde u(y,\tilde\varepsilon z)
=
\frac{1}{K^{k-1}}\,\delta u(x_0+2^{-{\bf e}ll}y,\varepsilon z),
{\bf e}nd{equation*}
where $\tilde\varepsilon=2^{\bf e}ll\varepsilon\leq 2^L\varepsilon<\varepsilon_0$, and $\delta\tilde u(y,\tilde\varepsilon z)$ is defined according to (\ref{eq:delta}). Replacing this in the definition of $\mathcal{L}_\varepsilon^-$ in {\bf e}qref{L-eps-}, and since $\mathcal{L}_\varepsilon^-u\leq\rho$ by assumption, we obtain
\begin{equation*}
\mathcal{L}_{\tilde\varepsilon}^-\tilde u(y)
=
\frac{1}{2^{2{\bf e}ll}K^{k-1}}\,\mathcal{L}_\varepsilon^-u(x_0+2^{-{\bf e}ll}y)
\leq
\frac{\rho}{2^{2{\bf e}ll}K^{k-1}}
\leq
\rho.
{\bf e}nd{equation*}
where we have used that $K\geq M\geq1$. Moreover $\tilde u\geq 0$ and $\inf_{Q_3}\tilde u\leq 1$ since $u(\tilde x)\le K^{k-1}$ by the counter assumption.
Hence, the rescaled function $\tilde u$ satisfies the assumptions in Lemma~\ref{first}, and thus
\begin{equation*}
\frac{|A\cap Q|}{|Q|}
=
|\{\tilde u> K\}\cap Q_1|
\le
\mu,
{\bf e}nd{equation*}
which contradicts (\ref{eq:meas-assump}). Thus $\mathrm{pre}(Q)\subset B$ and the second condition in Lemma~\ref{CZ} is satisfied.
Suppose now that $Q\in\mathcal{D}_L$ is a dyadic cube satisfying
\[
|A\cap Q|>\frac{c}{K}|Q|.
\]
Then
\begin{equation*}
|\{\tilde u> K\}\cap Q_1|
=
\frac{|A\cap Q|}{|Q|}
>
\frac{c}{K},
{\bf e}nd{equation*}
and by Lemma~\ref{second} we have that $\tilde u\geq 1$ in $Q_1$. Recalling {\bf e}qref{tilde u} we get that $u\geq K^{k-1}$ in $Q$, and thus $Q\subset B$ as desired.
Finally, the assumptions in Lemma~\ref{CZ} are satisfied, so we can conclude that
\begin{equation*}
|A|
\leq
\frac{c}{K}+\mu|B|,
{\bf e}nd{equation*}
so the result follows by induction.
We get
\begin{equation*}
|\{u> K^k\}\cap Q_1|
\leq
\frac{c}{K}(1+\mu+\cdots+\mu^{k-1})+\mu^k
\leq
\frac{c}{(1-\mu)K}+\mu^k
{\bf e}nd{equation*}
as desired.
{\bf e}nd{proof}
Next we show that a convenient choice of the constants in the previous result immediately leads to the desired power decay estimate for $|\{u\geq t\}\cap Q_1|$.
\begin{lemma}
\label{measure bound}
Let $u$ be a function satisfying the conditions from Lemma~\ref{lem:main}. There exist $a>0$ and $d\geq 1$ such that
\[
|\{u> t\}\cap Q_1|\leq d e^{-\sqrt{\frac{\log t }{a}}}
\]
for every $t\ge 1$.
{\bf e}nd{lemma}
\begin{proof}
Let $M\geq 1$ and $\mu\in(0,1)$ be the constants from Lemma~\ref{lem:main}. Let us fix $a=\frac{1}{\log\frac{1}{\mu}}>0$. Then given $t\geq 1$ we choose $K=K(t)=e^{\sqrt{\log(t)/a}}\geq 1$, so $t=K^{a\log K}$. We distinguish two cases.
First, if $K=K(t)\geq M$, recalling Lemma~\ref{lem:main} we have that the estimate
\begin{equation*}
|\{u>K^k\}\cap Q_1|
\leq
\frac{c}{(1-\mu)K}+\mu^k
{\bf e}nd{equation*}
holds for every $k\in\mathbb{N}$. In particular, if we fix $k=\lfloor a\log K\rfloor$ we get that
\begin{equation*}
K^k
\leq
K^{a\log(K)}
=
t
{\bf e}nd{equation*}
and
\begin{equation*}
\mu^k
<
\mu^{a\log(K)-1}
=
\frac{1}{K\mu}.
{\bf e}nd{equation*}
Using these inequalities together with the estimate from Lemma~\ref{lem:main} we obtain
\begin{equation*}
\begin{split}
|\{u> t\}\cap Q_1|
\leq
~&
|\{u> K^k\}\cap Q_1|
\\
\leq
~&
\frac{c}{(1-\mu) K}+\mu^k
\\
\leq
~&
\left(\frac{c}{1-\mu}+\frac{1}{\mu}\right)\frac{1}{K}
\\
=
~&
\left(\frac{c}{1-\mu}+\frac{1}{\mu}\right)e^{-\sqrt{\frac{\log t}{a}}},
{\bf e}nd{split}
{\bf e}nd{equation*}
where in the last equality we have used the definition of $K=K(t)$.
On the other hand, if $K(t)<M$ then we can roughly estimate
\begin{equation*}
\begin{split}
|\{u>t\}\cap Q_1|
\leq
1
<
\frac{M}{K(t)}
=
Me^{-\sqrt{\frac{\log t}{a}}}.
{\bf e}nd{split}
{\bf e}nd{equation*}
Finally, choosing $d=\max\{M,\frac{c}{1-\mu}+\frac{1}{\mu}\}\geq 1$, the result follows for every $t\geq 1$.
{\bf e}nd{proof}
We prove here the De Giorgi oscillation lemma. The lemma follows from the measure estimate in a straightforward manner. Harnack's inequality requires an additional argument that we postpone to the next section.
\begin{lemma}[De Giorgi oscillation lemma]
\label{DeGiorgi}
Given $\theta\in (0,1)$, there exist $\varepsilon_0,\rho>0$ and ${\bf e}ta={\bf e}ta(\theta)\in (0,1)$ such that if $u$ satisfies
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-u\leq {\bf e}ta\rho & \text{ in } Q_{10\sqrt{N}},
\\
u\geq 0 & \text{ in } \mathbb{R}^N,
{\bf e}nd{cases}
{\bf e}nd{equation*}
for some $0<\varepsilon<\varepsilon_0$
and
\[
|Q_{1}\cap \{u> 1\}|\geq \theta,
\]
then
\[
\inf_{Q_3} u \geq {\bf e}ta.
\]
{\bf e}nd{lemma}
\begin{proof}
We take $\varepsilon_0,\rho>0$ given by Lemma~\ref{lem:main}.
Let $m=\displaystyle\inf_{Q_3}u$ for simplicity and define $\tilde u$ the rescaled version of $u$ given by
\begin{equation*}
\tilde u(x)
=
\frac{u(x)}{m}
{\bf e}nd{equation*}
for every $x\in\mathbb{R}^N$. Then $\displaystyle\inf_{Q_3}\tilde u\leq 1$ and, by assumption,
\begin{equation*}
|\{\tilde u>\frac{1}{m}\}\cap Q_1|
=
|\{u>1\}\cap Q_1|
\geq
\theta.
{\bf e}nd{equation*}
Now suppose that $\mathcal{L}_\varepsilon^-u\leq{\bf e}ta\rho$ where $0<{\bf e}ta\leq m$ is a constant to be chosen later. Then
\begin{equation*}
\mathcal{L}_\varepsilon^-\tilde u
=
\frac{\mathcal{L}_\varepsilon^- u}{m}
\leq
\frac{{\bf e}ta\rho}{m}
\leq
\rho,
{\bf e}nd{equation*}
and recalling Lemma~\ref{measure bound} with $\tilde u$ and $t=\frac{1}{m}\geq 1$ (observe that in the case $m\geq 1$ we immediately get the result) we obtain
\begin{equation*}
\theta
\leq
|\{\tilde u>\frac{1}{m}\}\cap Q_1|
\leq
de^{-\sqrt{\frac{\log\frac{1}{m}}{a}}}.
{\bf e}nd{equation*}
Rearranging terms we get
\begin{equation*}
\inf_{Q_3}u
=
m
\geq
e^{-a\left(\log\frac{d}{\theta}\right)^2},
{\bf e}nd{equation*}
so choosing ${\bf e}ta={\bf e}ta(\theta)=e^{-a\left(\log\frac{d}{\theta}\right)^2}\in(0,1)$ we finish the proof.
{\bf e}nd{proof}
Now we are in a position to state the H\"older estimate. The proof after obtaining the De Giorgi oscillation estimate is exactly as in \cite{arroyobp}.
The statement of the De Giorgi oscillation lemma here is different from the one there. For the sake of completeness we prove that the statement here implies the one in \cite{arroyobp}.
\begin{lemma}
\label{DeGiorgi-old}
There exist $k>1$ and $C,\varepsilon_0>0$ such that
for every $R>0$ and $\varepsilon<\varepsilon_0R$, if $\mathcal{L}_\varepsilon^+u\geq-\rho$ in $B_{kR}$ with $u\leq M$ in $B_{kR}$ and
\[
|B_{R}\cap \{u\leq m\}|\geq \theta |B_R|,
\]
for some $\rho>0$, $\theta\in (0,1)$ and $m,M\in\mathbb{R}$, then there exist ${\bf e}ta={\bf e}ta(\theta)>0$ such that
\[
\sup_{B_R} u \leq (1-{\bf e}ta)M+{\bf e}ta m+ C R^2\rho .
\]
{\bf e}nd{lemma}
\begin{proof}
We can assume that $M>m$, given $\gamma>0$ we define
\[
\tilde u(x)=\frac{M-u(2Rx)}{M-m}+\gamma
\]
in $B_{k/2}$.
For $k=10N$ since $Q_{10\sqrt N}\subset B_{k/2}$ we get that $\tilde u$ is defined in $Q_{10\sqrt N}$.
Since $u \leq M$ we get $\tilde u\geq 0$.
Also, since $u\leq m$ implies $\tilde u>1$ we get
\[
|Q_{1}\cap \{\tilde u> 1\}|
\geq |B_{1/2}\cap \{\tilde u> 1\}|
\geq \frac{|B_{R}\cap \{u\leq m\}|}{|B_R|}
\geq \theta.
\]
For $\tilde \varepsilon =\frac{\varepsilon}{2R}<\varepsilon_0$, since $\mathcal{L}_\varepsilon^+u\geq-\rho$, we get $\mathcal{L}_{\tilde\varepsilon}^-\tilde u\leq \frac{4 R^2 \rho}{M-m}$.
Therefore, Lemma~\ref{DeGiorgi} implies that there exists $\tilde \rho>0$ and $\tilde{\bf e}ta=\tilde{\bf e}ta(\theta)\in (0,1)$ such that if $\frac{4 R^2 \rho}{M-m}<\tilde \rho\tilde{\bf e}ta$ we get
\[
\inf_{Q_3} \tilde u \geq \tilde{\bf e}ta.
\]
Then,
\[
\sup_{Q_{6R}} u
\leq M(1-\tilde\eta+\gamma) + m(\tilde\eta-\gamma).
\]
Since $B_R\subset Q_{6R}$ and this holds for every $\gamma>0$, we get
\[
\sup_{B_R} u
\leq M(1-\tilde{\bf e}ta) + m \tilde{\bf e}ta.
\]
Finally we take ${\bf e}ta=\tilde {\bf e}ta$ and $C=\frac{4}{\tilde \rho}$.
Thus, if $\frac{4 R^2 \rho}{M-m}<\tilde \rho \tilde {\bf e}ta$ the result immediately follows from above. And if $4 R^2 \rho\geq \tilde \rho\tilde{\bf e}ta(M-m)$ we have
\[
\begin{split}
\sup_{B_R} u
&\leq M\\
&= (1-\tilde{\bf e}ta)M+\tilde{\bf e}ta m+ \tilde{\bf e}ta(M-m)\\
&\leq (1-\tilde{\bf e}ta)M+\tilde{\bf e}ta m+ \frac{4 R^2 \rho}{\tilde \rho}\\
&= (1-{\bf e}ta)M+{\bf e}ta m+ C R^2\rho. \qedhere
{\bf e}nd{split}
\]
{\bf e}nd{proof}
As we already mentioned, the H\"older estimate follows as in \cite{arroyobp}.
\begin{theorem}
\label{Holder}
There exists $\varepsilon_0>0$ such that if $u$ satisfies $\mathcal{L}_\varepsilon^+ u\ge -\rho$ and $\mathcal{L}_\varepsilon^- u\le \rho$ in $B_{R}$ where $\varepsilon<\varepsilon_0R$, there exist $C,\gamma>0$ such that
\[
|u(x)-u(z)|\leq \frac{C}{R^\gamma}\left(\sup_{B_{R}}|u|+R^2\rho\right)\Big(|x-z|^\gamma+\varepsilon^\gamma\Big)
\]
for every $x, z\in B_{R/2}$.
{\bf e}nd{theorem}
\section{Harnack's inequality}
In this section we obtain an `asymptotic Harnack's inequality'.
First, we prove Lemma~\ref{lemma:apuja} that gives sufficient conditions to obtain the result.
One of the conditions of the lemma follows from Theorem~\ref{Holder} so then our task is to prove the other condition.
Before proceeding to the proof of the asymptotic Harnack we observe that the classical Harnack's inequality does not hold.
\begin{example}
\label{example}
Fix $\varepsilon\in(0,1)$.
We consider $\Omega=B_2\subset \mathbb{R}^N$ and $A=\{(x,0,\dots,0)\in \Omega: x\in \varepsilon\mathbb{N}\}$.
We define $\nu:\Omega\to\mathcal{M}(B_1)$ as
\begin{equation*}
\begin{split}
&
\nu_x(E)
=
\frac{|E\cap B_1|}{|B_1|}
\qquad\text{ for } x\notin A,
\\
&
\nu_x
=
\frac{\delta_{e_1}+\delta_{-e_1}}{2}
\qquad\text{ for } x\in A,
{\bf e}nd{split}
{\bf e}nd{equation*}
where $e_1=(1,0,\dots,0)$.
Now we construct a solution to the DPP $\mathcal{L}_\varepsilon u=0$ in $\Omega$; we assume $\alpha>0$.
We define
\begin{equation*}
u(x)
=
\begin{cases}
a_k & \text{ if } x=(k\varepsilon,0,\ldots,0), \ k\in\mathbb{N}, \\
1 & \text{ otherwise,}
{\bf e}nd{cases}
{\bf e}nd{equation*}
where $a_1=a>0$ is arbitrary and the rest of the $a_k$'s are fixed so that $\mathcal{L}_\varepsilon u(k\varepsilon,0,\ldots,0)=0$ for each $k\in\mathbb{N}$.
Observe that if $x\notin A$ then $\delta u(x,\varepsilon y)=0$ a.e. $y\in B_1$ and thus
\begin{equation*}
\mathcal{L}_\varepsilon u(x)
=
\frac{1}{2\varepsilon^2}\vint_{B_1}\delta u(x,\varepsilon y)\, dy
=
0.
{\bf e}nd{equation*}
Otherwise, for $x=(k\varepsilon,0,\ldots,0)$ we get
\begin{equation*}
\begin{split}
\mathcal{L}_\varepsilon u(x)
=
~&
\frac{1}{2\varepsilon^2}\left({\bf a}lpha\,\delta u(x,\varepsilon e_1)+\beta\vint_{B_1} \delta u(x,\varepsilon y)\,dy\right)
\\
=
~&
\frac{1}{\varepsilon^2}\left({\bf a}lpha\,\frac{a_{k+1}+a_{k-1}}{2}+\beta-a_k\right).
{\bf e}nd{split}
{\bf e}nd{equation*}
Thus for the DPP to hold we must have
\begin{equation*}
a_k
=
1-{\bf a}lpha+{\bf a}lpha\,\frac{a_{k-1}+a_{k+1}}{2}
{\bf e}nd{equation*}
for $k\in\mathbb{N}$ where we are denoting $a_0=1$.
Clearly this determines the values of the whole sequence, we explicitly calculate it.
Let $\varphi$ and $\bar \varphi$ be the solutions to the equation $x=\frac{{\bf a}lpha}{2}(1+x^2)$, that is
\begin{equation*}
\varphi
=
\frac{1+\sqrt{1-{\bf a}lpha^2}}{{\bf a}lpha}
\quad\text{and}\quad
\bar\varphi
=
\frac{1-\sqrt{1-{\bf a}lpha^2}}{{\bf a}lpha}.
{\bf e}nd{equation*}
Then
\[
a_k=1+a\frac{\varphi^k-\bar\varphi^k}{\varphi-\bar\varphi}.
\]
Observe that $\inf_{B_1}u=1$ but $\sup_{B_1}u\geq a_1= a$, so the Harnack inequality does not hold.
{\bf e}nd{example}
Let us observe that this does not contradict the H\"older estimate since $\sup_{B_{2}}|u|$ is large compared to $a$.
We begin the proof of the asymptotic Harnack inequality with the following lemma that gives sufficient conditions to obtain the result.
The lemma is a modification of Lemma 4.1 and Theorem 5.2 in \cite{luirops13}.
Our result, however, differs from the one there since, as observed above, in the present setting the classical Harnack's inequality does not hold.
The condition (ii) in Lemma 5.1 of \cite{luirops13} requires an estimate at level $\varepsilon$ that we do not require here.
Indeed, Example~\ref{example} shows that this condition does not necessarily hold in our setting.
\begin{lemma}\label{lemma:apuja}
Assume that $u$ is a positive function defined in $B_3\subset\mathbb{R}^N$ and there are $C\geq 1$, $\rho\geq0$ and $\varepsilon>0$ such that
\begin{enumerate}
\item \label{item:for-harnack} for some $\kappa,\lambda>0$,
\[
\inf_{B_r(x)}u\leq C\left(r^{-\lambda}\inf_{B_1}u+\rho\right)
\]
for every ${|x|\leq 2}$ and $r\in (\kappa\varepsilon, 1)$,
\item for some $\gamma>0$,
\label{item:holder}
\begin{align*}
{\rm osc}\, (u,B_r(x))\leq C\left(\frac{r}{R}\right)^{\gamma} \left(\sup_{B_R(x)} u +R^2\rho\right)
{\bf e}nd{align*}
for every $|x|\leq 2$, $R\leq 1$ and $\varepsilon<r\leq\delta R$ with $\varepsilon\kappa<R\delta$ where $\delta=(2^{1+2\lambda}C)^{-1/\gamma}$.
{\bf e}nd{enumerate}
Then
\begin{equation*}
\sup_{B_1}u
\leq
\tilde C\left(\inf_{B_1}u+\rho+\varepsilon^{2\lambda}\sup_{B_3}u\right)
{\bf e}nd{equation*}
where $\tilde C=\tilde C(\kappa,\lambda,\gamma, C)=(2^{1+2\lambda}C)^{2\lambda/\gamma}\max(C2^{2+2\lambda},(2\kappa)^{2\lambda})$.
{\bf e}nd{lemma}
\begin{proof}
We define $R_k=2^{1-k}$ and $M_k=4C(2^{-k}\delta)^{-2\lambda}$ for each $k=1,\ldots,k_0$, where $k_0=k_0(\varepsilon)\in\mathbb{N}$ is fixed so that
\begin{equation*}
2^{-(k_0+1)}
\leq
\frac{\kappa\varepsilon}{2\delta}
<
2^{-k_0}.
{\bf e}nd{equation*}
Then
\begin{equation*}
\varepsilon^{2\lambda}
\geq
\left(\frac{\delta}{2\kappa}\right)^{2\lambda}\frac{M_1}{M_{k_0}}
{\bf e}nd{equation*}
and $\delta R_k\geq \delta R_{k_0}>\kappa\varepsilon$.
We assume, for the sake of contradiction, that
\begin{equation*}
\sup_{B_{1}}u
>
\tilde C\left(\inf_{B_1}u+\rho+\varepsilon^{2\lambda}\sup_{B_3}u\right)
{\bf e}nd{equation*}
with
\begin{equation*}
\tilde C
=
\max\left\{M_1,\left(\frac{2\kappa}{\delta}\right)^{2\lambda}\right\}.
{\bf e}nd{equation*}
We get
\begin{equation*}
\sup_{B_1}u
>
M_1\left(\frac{1}{M_{k_0}}\sup_{B_3}u+\inf_{B_1}u+\rho\right).
{\bf e}nd{equation*}
We define $x_1=0$ and $x_2\in B_{R_1}(x_1)=B_1(0)$ such that
\[
u(x_2)>M_1\left(\frac{1}{M_{k_0}} \sup_{B_3}u + \inf_{B_1}u+\rho\right).
\]
We claim that we can construct a sequence $x_{k+1}\in B_{R_k}(x_k)$ such that
\[
u(x_{k+1})>M_k\left(\frac{1}{M_{k_0}} \sup_{B_3}u + \inf_{B_1}u+\rho\right).
\]
for $k=1,\dots,k_0$.
We proceed to prove this by induction: we fix $k$ and assume the hypothesis holds for the smaller values.
Since $\delta< 1$ we have $B_{\delta R_k}(x_k)\subset B_{R_k}(x_k)$.
Observe that $|x_k|\leq R_1+\cdots+R_{k-1}\leq 2$ and $1>\delta R_k>\kappa\varepsilon$.
Then, by hypothesis (\ref{item:holder}) we get
\[
\begin{split}
\sup_{B_{R_{k}}(x_k)} u
&\geq C^{-1} \delta^{-\gamma} \left(\sup_{B_{\delta R_k}(x_k)}u - \inf_{B_{\delta R_k}(x_k)}u \right)-R_k^2\rho\\
&\geq C^{-1} \delta^{-\gamma} \left(u(x_k) - \inf_{B_{\delta R_k}(x_k)}u -C \delta^{\gamma}\rho\right).\\
{\bf e}nd{split}
\]
Applying hypothesis (\ref{item:for-harnack}) in $B_{\delta R_k}(x_k)$, we get
\[
\begin{split}
\inf_{B_{\delta R_k}(x_k)}u+C \delta^{\gamma}\rho
&\leq C(\delta R_k)^{-\lambda}\inf_{B_1}u+C\rho+C \delta^{\gamma}\rho\\
&< 2C(\delta R_k)^{-2\lambda}\inf_{B_1}u+\frac{M_{k-1}}{2}\rho\\
&= \frac{M_{k-1}}{2}\left(\inf_{B_1}u+\rho\right)\\
&< u(x_k)/2,\\
{\bf e}nd{split}
\]
where we have used that $C(1+\delta^\gamma)\leq 2C\leq M_1/2\leq M_{k-1}/2$
and the inductive hypothesis.
Combining the last two inequalities we get
\[
\begin{split}
\sup_{B_{R_{k}}(x_k)} u
&> C^{-1} \delta^{-\gamma} \left(u(x_k) - u(x_k)/2\right)\\
&=C^{-1} \delta^{-\gamma} u(x_k)/2\\
&>C^{-1} \delta^{-\gamma} M_{k-1}/2\left(\frac{1}{M_{k_0}} \sup_{B_3}u + \inf_{B_1}u+\rho\right)\\
&=M_k\left(\frac{1}{M_{k_0}} \sup_{B_3}u + \inf_{B_1}u+\rho\right),
{\bf e}nd{split}
\]
where the last equality holds by the choice of $\delta$.
Then, we can choose $x_{k+1}\in B_{R_k}(x_k)$ such that
\[
u(x_{k+1})>M_k\left(\frac{1}{M_{k_0}} \sup_{B_3}u + \inf_{B_1}u+\rho\right).
\]
Therefore we get
\[
u(x_{k_0+1})
>
\sup_{B_3}u + M_{k_0}\left( \inf_{B_1}u+\rho\right),
\]
which is a contradiction since $x_{k_0+1}\in B_2$.
{\bf e}nd{proof}
So, now our task is to prove that solutions to the DPP satisfy the hypothesis of the previous lemma. We start working towards condition (\ref{item:for-harnack}).
\begin{theorem}\label{thm.cond2}
There exists $C,\sigma,\varepsilon_0>0$ such that if $u$ is a bounded measurable function satisfying
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-u\leq 0 & \text{ in } B_7,
\\
u\geq 0 & \text{ in } \mathbb{R}^N,
{\bf e}nd{cases}
{\bf e}nd{equation*}
for some $0<\varepsilon\leq\varepsilon_0$, then
\begin{equation*}
\inf_{B_r(z)}u
\leq
Cr^{-2\sigma}\inf_{B_1}u
{\bf e}nd{equation*}
for every $z\in B_2$ and $r\in(\kappa\varepsilon,1)$, where $\kappa=\Lambda\sqrt{2(\sigma+2)}$.
{\bf e}nd{theorem}
\begin{proof}
Let $\Omega=B_4(z)\setminus\overline{B_r(z)}$. Our aim is to construct a subsolution $\Psi$ in the $\Lambda\varepsilon$-neighborhood of $\Omega$, i.e.\ in $\widetilde\Omega=B_{4+\Lambda\varepsilon}(z)\setminus \overline{B_{r-\Lambda\varepsilon}(z)}$, such that $\Psi\leq u$ in $\widetilde\Omega$.
Let $\mathbb{P}si:\mathbb{R}^N\setminus\{0\}\to\mathbb{R}$ be the smooth function defined by
\begin{equation*}
\mathbb{P}si(x)
=
A|x-z|^{-2\sigma}-B
{\bf e}nd{equation*}
for certain $A,B,\sigma>0$, which is a radially decreasing function. The constants $A$ and $B$ are fixed in such a way that $\mathbb{P}si\leq u$ in $\widetilde\Omegaega\setminus\Omegaega$, that is both in $\overline{B_r(z)}\setminus\overline{B_{r-\mathcal{L}ambda\varepsilon}(z)}$ and $B_{4+\mathcal{L}ambda\varepsilon}(z)\setminus B_4(z)$. More precisely, requiring
\begin{equation*}
\mathbb{P}si\big|_{\partial B_{r-\mathcal{L}ambda\varepsilon}(z)}
=
\inf_{B_r(z)}u
\qquad\text{ and }\qquad
\mathbb{P}si\big|_{\partial B_4(z)}
=
0,
{\bf e}nd{equation*}
and since $\mathbb{P}si$ is radially decreasing, we obtain that $\mathbb{P}si\leq u$ in $\widetilde\Omegaega\setminus\Omegaega$. Therefore these conditions determine $A$ and $B$ so that
\begin{equation*}
\mathbb{P}si(x)
=
\frac{|x-z|^{-2\sigma}-4^{-2\sigma}}{(r-\mathcal{L}ambda \varepsilon)^{-2\sigma}-4^{-2\sigma}}\inf_{B_r}u.
{\bf e}nd{equation*}
Let us assume for the moment that $z=0$ and $x=(|x|,0\ldots,0)$. Similarly as in the proof of Lemma~\ref{barrier}, using {\bf e}qref{ineq:abc} we can estimate
\begin{equation*}
\delta\mathbb{P}si(x,\varepsilon y)
\geq
2\varepsilon^2 A\sigma|x|^{-2\sigma-2}\left[-\mathcal{L}ambda^2+2(\sigma+1)\left(1-(\sigma+2)\frac{\mathcal{L}ambda^2\varepsilon^2}{r^2}\right)y_1^2\right]
{\bf e}nd{equation*}
for every $|x|>r>\mathcal{L}ambda\varepsilon$ and $|y|<\mathcal{L}ambda$ (so that $|x+\varepsilon y|>0$ and thus $\delta\mathbb{P}si(x,\varepsilon y)$ is well defined). Moreover, since $r\in(\kappa\varepsilon,1)$ we get
\begin{equation*}
1-(\sigma+2)\frac{\mathcal{L}ambda^2\varepsilon^2}{r^2}
\geq
1-(\sigma+2)\frac{\mathcal{L}ambda^2}{\kappa^2}
=
\frac{1}{2},
{\bf e}nd{equation*}
where the equality holds for
\begin{equation*}
\kappa
=
\mathcal{L}ambda\sqrt{2(\sigma+2)}
\geq
2\mathcal{L}ambda.
{\bf e}nd{equation*}
This also sets out an upper bound for $\varepsilon$: the inequality $\kappa\varepsilon<1$ is satisfied for every $0<\varepsilon\leq\varepsilon_0$ with $\varepsilon_0<\frac{1}{\mathcal{L}ambda\sqrt{2(\sigma+2)}}$. Then
\begin{equation*}
\delta\mathbb{P}si(x,\varepsilon y)
\geq
2\varepsilon^2 A\sigma|x|^{-2\sigma-2}\left[-\mathcal{L}ambda^2+(\sigma+1)y_1^2\right]
{\bf e}nd{equation*}
for every $|x|>r>\mathcal{L}ambda\varepsilon$ and $|y|<\mathcal{L}ambda$. Hence
\begin{equation*}
\inf_{z\in B_\mathcal{L}ambda}\delta\mathbb{P}si(x,\varepsilon z)
\geq
2\varepsilon^2 A\sigma|x|^{-2\sigma-2}\left[-\mathcal{L}ambda^2\right]
{\bf e}nd{equation*}
and
\begin{equation*}
\vint_{B_1}\delta\mathbb{P}si(x,\varepsilon y)\,dy
\geq
2\varepsilon^2 A\sigma|x|^{-2\sigma-2}\left[-\mathcal{L}ambda^2+\frac{\sigma+1}{N+2}\right],
{\bf e}nd{equation*}
so
\begin{equation*}
\mathcal{L}_\varepsilon^-\mathbb{P}si(x)
\geq
A\sigma|x|^{-2\sigma-2}\left[-\mathcal{L}ambda^2+\beta\frac{\sigma+1}{N+2}\right]
=\,:
-\psi(x)
{\bf e}nd{equation*}
for every $|x|>r>\mathcal{L}ambda\varepsilon$. Choosing large enough $\sigma$ depending on $N$, $\beta$ and $\mathcal{L}ambda$ we get that $\psi\leq 0$ for every $|x|>\mathcal{L}ambda\varepsilon$.
Summarizing, since $\Omegaega=B_4(z)\setminus\overline{B_r(z)}$ with $r>\kappa\varepsilon\geq 2\mathcal{L}ambda\varepsilon$, we obtain
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-\mathbb{P}si\geq-\psi
& \text{ in } \Omegaega,
\\
\mathbb{P}si\leq u & \text{ in } \widetilde\Omegaega\setminus\Omegaega.
{\bf e}nd{cases}
{\bf e}nd{equation*}
In what follows we recall the $\varepsilon$-ABP estimate to show that the inequality $\mathbb{P}si\leq u$ is satisfied also in $\Omegaega$. But before, as in the proof of Lemma~\ref{first}, we define $v=\mathbb{P}si-u$ and since by assumption $\mathcal{L}_\varepsilon^-u\leq 0$ in $\Omegaega=B_4(z)\setminus \overline{B_r(z)}\subset B_7$, we have
\begin{equation*}
\mathcal{L}_\varepsilon^+v
\geq
\mathcal{L}_\varepsilon^-\mathbb{P}si-\mathcal{L}_\varepsilon^-u
\geq
-\psi
{\bf e}nd{equation*}
in $\Omegaega$. Thus
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^+v+\psi\geq 0 & \text{ in } \Omegaega,
\\
v\leq 0 & \text{ in } \widetilde\Omegaega\setminus\Omegaega.
{\bf e}nd{cases}
{\bf e}nd{equation*}
By the $\varepsilon$-ABP estimate (see Theorem~4.1 together with Remark~7.4 both from \cite{arroyobp}),
\begin{equation*}
\sup_\Omegaega v
\leq
\sup_{\widetilde\Omegaega\setminus\Omegaega}v
+
C\bigg(\sum_{Q\in\mathcal{Q}_\varepsilon(K_v)}\Big(\sup_Q\psi^+\Big)^N|Q|\bigg)^{1/N},
{\bf e}nd{equation*}
where $K_v\subset\Omegaega$ stands for the contact set of $v$ in $\Omegaega$ and $\mathcal{Q}_\varepsilon(K_v)$ is a family of disjoint cubes $Q$ of diameter $\varepsilon/4$ such that $\overline Q\cap K_v\neq{\bf e}mptyset$, so that $Q\subset\widetilde\Omegaega$. Since $v\leq 0$ in $\widetilde\Omegaega\setminus\Omegaega$ and $\psi\leq0$, we obtain that $v\leq 0$ in $\Omegaega$, that is, $\mathbb{P}si\leq u$ in $\Omegaega$. In consequence,
\begin{equation*}
\begin{split}
\inf_{B_1}u
\geq
\inf_{B_1}\mathbb{P}si
=
~&
\frac{3^{-2\sigma}-4^{-2\sigma}}{(r-\mathcal{L}ambda\varepsilon)^{-2\sigma}-4^{-2\sigma}}\inf_{B_r(z)}u
\\
\geq
~&
(3^{-2\sigma}-4^{-2\sigma})(r-\mathcal{L}ambda\varepsilon)^{2\sigma}\inf_{B_r(z)}u
\\
\geq
~&
(3^{-2\sigma}-4^{-2\sigma})\left(\frac{r}{2}\right)^{2\sigma}\inf_{B_r(z)}u
{\bf e}nd{split}
{\bf e}nd{equation*}
for every $z\in B_2$, where we have used $r>\kappa\varepsilon\geq2\mathcal{L}ambda\varepsilon$ so that $r-\mathcal{L}ambda\varepsilon>\frac{r}{2}$, so the proof is finished. \qedhere
{\bf e}nd{proof}
Now we prove that condition (\ref{item:for-harnack}) in Lemma~\ref{lemma:apuja} holds in the desired setting.
\begin{corollary}\label{coro:cond2}
There exists $C,\sigma,\varepsilon_0>0$ such that if $\rho\geq 0$ and $u$ is a bounded measurable function satisfying
\begin{equation*}
\begin{cases}
\mathcal{L}_\varepsilon^-u\leq\rho & \text{ in } B_7,
\\
u\geq 0 & \text{ in } \mathbb{R}^N,
{\bf e}nd{cases}
{\bf e}nd{equation*}
for some $0<\varepsilon\leq\varepsilon_0$, then
\begin{equation*}
\inf_{B_r(z)}u
\leq
C\Big(r^{-2\sigma}\inf_{B_1}u+\rho\Big)
{\bf e}nd{equation*}
for every $z\in B_2$ and $r\in(\kappa\varepsilon,1)$, where $\kappa=\Lambda\sqrt{2(\sigma+2)}$.
{\bf e}nd{corollary}
\begin{proof}
We consider $\tilde u(x)=u(x)-A\rho|x|^2$, where $A>0$ is a constant to be fixed later. Then
\begin{equation*}
\delta\tilde u(x,\varepsilon y)
=
\delta u(x,\varepsilon y)-2\varepsilon^2A\rho|y|^2
\leq
\delta u(x,\varepsilon y),
{\bf e}nd{equation*}
so
\begin{equation*}
\inf_{z\in B_\mathcal{L}ambda}\delta\tilde u(x,\varepsilon z)
\leq
\inf_{z\in B_\mathcal{L}ambda}\delta u(x,\varepsilon z)
{\bf e}nd{equation*}
and
\begin{equation*}
\begin{split}
\vint_{B_1}\delta\tilde u(x,\varepsilon y)\,dy
=
~&
\vint_{B_1}\delta u(x,\varepsilon y)\,dy
-
2\varepsilon^2A\rho\,\frac{N}{N+2},
{\bf e}nd{split}
{\bf e}nd{equation*}
where we have used that $\vint_{B_1}|y|^2\,dy=\frac{N}{N+2}$. Therefore,
\begin{equation*}
\mathcal{L}_\varepsilon^-\tilde u
\leq
\mathcal{L}_\varepsilon^-u-A\rho\beta \,\frac{N}{N+2}
\leq
\left(1-A\beta \,\frac{N}{N+2}\right)\rho
\leq
0,
{\bf e}nd{equation*}
where the last inequality holds for a sufficiently large choice of $A$.
Therefore we can apply Theorem~\ref{thm.cond2} to $\tilde u$. Observe first that since $r\in(\kappa\varepsilon,1)$ and $z\in B_2$ then $B_r(z)\subset B_3$. Thus $\tilde u\geq u-9A\rho$ in $B_r(z)$ and
\begin{equation*}
\inf_{B_r(z)}u-9A\rho
\leq
\inf_{B_r(z)}\tilde u
\leq
Cr^{-2\sigma}\inf_{B_1}\tilde u
\leq
Cr^{-2\sigma}\inf_{B_1}u
{\bf e}nd{equation*}
and the result follows.
{\bf e}nd{proof}
Now we are ready to state the main result of the section.
\begin{theorem}
\label{Harnack}
There exist $C,\lambda,\varepsilon_0>0$ such that if $u\geq 0$ in $\mathbb{R}^N$ is a bounded and measurable function satisfying $\mathcal{L}^+_\varepsilon u\geq-\rho$ and $\mathcal{L}^-_\varepsilon u\leq\rho$ in $B_7$ for some $0<\varepsilon<\varepsilon_0$, then
\begin{equation*}
\sup_{B_1}u
\leq
C\left(\inf_{B_1}u+\rho+\varepsilon^{2\lambda}\sup_{B_3}u\right).
{\bf e}nd{equation*}
{\bf e}nd{theorem}
\begin{proof}
By Corollary~\ref{coro:cond2} we have that $u$ satisfies condition (\ref{item:for-harnack}) in Lemma~\ref{lemma:apuja} for $\lambda=2\sigma$.
We deduce condition (\ref{item:holder}) by taking infimum over $x,z\in B_r$ in the inequality given by Theorem~\ref{Holder}.
We use $\varepsilon<r$ to bound $\varepsilon^\gamma<r^\gamma$.
In this way, we obtained the inequality for every $r<R/2$ and $\varepsilon<\varepsilon_0 R$.
We need it to hold for every $r\leq \delta R$ and $\varepsilon<\frac{\delta}{\kappa}R$.
Therefore we have proved the result if $\delta<1/2$ and $\frac{\delta}{\kappa}<\varepsilon_0$.
That is we have obtained the result as long as $\delta$ is small enough.
Recall that $\delta=(2^{1+2\lambda}C)^{-1/\gamma}$.
Then, it is enough to take $\gamma>0$ small enough.
We can do this since $\varepsilon_0$, $C$, $\kappa$ and $\lambda$ only depend on $\mathcal{L}ambda$, ${\bf a}lpha$, $\beta$ and the dimension $N$, and not on $\gamma$.
Also if Theorem~\ref{Holder} holds for a certain $\gamma>0$ it also holds with the same constants for every smaller $\gamma>0$.
{\bf e}nd{proof}
\begin{remark}
\label{harnack:limit}
Let $\{u_\varepsilon\,:\,0<\varepsilon<\varepsilon_0\}$ be a family of nonnegative measurable solutions to the DDP with $f=0$. In view of Theorem~\ref{Holder} together with the asymptotic Arzel\'a-Ascoli theorem \cite[Lemma 4.2]{manfredipr12}, we can assume that $u_\varepsilon\to u$ uniformly in $B_2$ as $\varepsilon\to 0$. Then by taking the limit in the asymptotic Harnack inequality
\begin{equation*}
\sup_{B_1}u_\varepsilon
\leq
C\left(\inf_{B_1}u_\varepsilon+\varepsilon^{2\lambda}\sup_{B_3}u_\varepsilon\right),
{\bf e}nd{equation*}
we obtain the classical inequality for the limit, that is
\begin{equation*}
\sup_{B_1}u
\leq
C\inf_{B_1}u.
{\bf e}nd{equation*}
Similarly if $\{u_\varepsilon\,:\,0<\varepsilon<\varepsilon_0\}$ is a uniformly convergent family of nonnegative measurable functions such that $\mathcal{L}_\varepsilon^+ u_{\varepsilon}\ge -\rho$ and $\mathcal{L}_\varepsilon^- u_{\varepsilon} \le \rho$, then for the limit we get
\begin{align*}
\sup_{B_1}u
\leq
C(\inf_{B_1}u+\rho).
{\bf e}nd{align*}
{\bf e}nd{remark}
\def\cprime{$'$}
\begin{thebibliography}{PSSW09}
\bibitem[ABP]{arroyobp}
{\'A}.~Arroyo, P.~Blanc, and M.~Parviainen.
\newblock H\"older regularity for stochastic processes with bounded and
measurable increments.
\newblock {{\bf e}m Ann.\ Inst.\ H.\ Poincar\'e Anal.\ Non Lin\'eaire} (2022), published online first,
\newblock {{\bf e}m https://doi.org/10.4171/aihpc/41}.
\bibitem[AP20]{arroyop20}
\'{A}. Arroyo and M.~Parviainen.
\newblock Asymptotic {H}\"{o}lder regularity for the ellipsoid process.
\newblock {{\bf e}m ESAIM Control Optim. Calc. Var.}, 26: Paper No. 112, 31 pages, 2020.
\bibitem[BR19]{blancr19}
P.~Blanc and J.~D. Rossi.
\newblock {{\bf e}m Game Theory and Partial Differential Equations}.
\newblock De Gruyter, 2019.
\bibitem[BR19b]{blancr19b}
P.~Blanc and J.~D. Rossi.
\newblock Games for eigenvalues of the {H}essian and concave/convex envelopes.
\newblock {{\bf e}m J. Math. Pures Appl. (9)}, 127:192--215, 2019.
\bibitem[BLM20]{brustadlm20}
K.~K. Brustad, P.~Lindqvist, and J.~J. Manfredi.
\newblock A discrete stochastic interpretation of the dominative
$p$-{L}aplacian.
\newblock {{\bf e}m Differential Integral Equations}, 33(9-10): 465--488, 2020.
\bibitem[Caf89]{caffarelli89}
L.~A. Caffarelli.
\newblock Interior a priori estimates for solutions of fully nonlinear
equations.
\newblock {{\bf e}m Ann. of Math. (2)}, 130(1):189--213, 1989.
\bibitem[CC95]{caffarellic95}
L.\ Caffarelli and X.~Cabr{\'e}.
\newblock {{\bf e}m Fully nonlinear elliptic equations}, volume~43 of {{\bf e}m American
Mathematical Society Colloquium Publications}.
\newblock American Mathematical Society, Providence, RI, 1995.
\bibitem[CS09]{caffarellis09}
L.\ Caffarelli and L.~Silvestre.
\newblock Regularity theory for fully nonlinear integro-differential equations.
\newblock {{\bf e}m Comm. Pure Appl. Math.}, 62(5):597--638, 2009.
\bibitem[CTU20]{caffarellitu20}
L.\ Caffarelli, R.\ Teymurazyan, and J.~M. Urbano.
\newblock Fully nonlinear integro-differential equations with deforming
kernels.
\newblock {{\bf e}m Communications in Partial Differential Equations}, pages 1--25,
2020.
\bibitem[GT01]{gilbargt01}
D.~Gilbarg and N.~S. Trudinger.
\newblock {{\bf e}m Elliptic partial differential equations of second order}.
\newblock Classics in Mathematics. Springer-Verlag, Berlin, 2001.
\newblock Reprint of the 1998 edition.
\bibitem[KS79]{krylovs79}
N.~V. Krylov and M.~V. Safonov.
\newblock An estimate for the probability of a diffusion process hitting a set
of positive measure.
\newblock {{\bf e}m Dokl. Akad. Nauk SSSR}, 245(1):18--20, 1979.
\bibitem[KS80]{krylovs80}
N.~V. Krylov and M.~V. Safonov.
\newblock A property of the solutions of parabolic equations with measurable
coefficients.
\newblock {{\bf e}m Izv. Akad. Nauk SSSR Ser. Mat.}, 44(1):161--175, 239, 1980.
\bibitem[KT90]{kuot90}
H.~J. Kuo and N.~S. Trudinger.
\newblock Linear elliptic difference inequalities with random coefficients.
\newblock {{\bf e}m Math. Comp.}, 55(191):37--53, 1990.
\bibitem[Lew20]{lewicka20}
M.~Lewicka.
\newblock {{\bf e}m A Course on Tug-of-War Games with Random Noise}.
\newblock Universitext. Springer-Verlag, Berlin, 2020.
\newblock Introduction and Basic Constructions.
\bibitem[LP18]{luirop18}
H.~Luiro and M.~Parviainen.
\newblock Regularity for nonlinear stochastic games.
\newblock {{\bf e}m Ann.\ Inst.\ H.\ Poincar{\'e} Anal.\ Non Lin{\'e}aire},
35(6):1435--1456, 2018.
\bibitem[LPS13]{luirops13}
H.\ Luiro, M.\ Parviainen, and E.~Saksman.
\newblock Harnack's inequality for $p$-harmonic functions via stochastic games.
\newblock {{\bf e}m Comm.\ Partial Differential Equations}, 38(11):1985--2003, 2013.
\bibitem[MPR12]{manfredipr12}
J.J. Manfredi, M.~Parviainen, and J.D. Rossi.
\newblock On the definition and properties of p-harmonious functions.
\newblock {{\bf e}m Ann. Scuola Norm. Sup. Pisa Cl. Sci.}, 11(2):215--241, 2012.
\bibitem[PS08]{peress08}
Y.~Peres and S.~Sheffield.
\newblock Tug-of-war with noise: a game-theoretic view of the
{$p$}-{L}aplacian.
\newblock {{\bf e}m Duke Math. J.}, 145(1):91--120, 2008.
\bibitem[PSSW09]{peresssw09}
Y.~Peres, O.~Schramm, S.~Sheffield, and D.~B. Wilson.
\newblock Tug-of-war and the infinity {L}aplacian.
\newblock {{\bf e}m J. Amer. Math. Soc.}, 22(1):167--210, 2009.
\bibitem[PT76]{puccit76}
C.~Pucci and G.~Talenti.
\newblock Elliptic (second-order) partial differential equations with
measurable coefficients and approximating integral equations.
\newblock {{\bf e}m Advances in Math.}, 19(1):48--105, 1976.
\bibitem[Tru80]{trudinger80}
N.~S. Trudinger.
\newblock Local estimates for subsolutions and supersolutions of general second
order elliptic quasilinear equations.
\newblock {{\bf e}m Invent. Math.}, 61(1):67--79, 1980.
{\bf e}nd{thebibliography}
{\bf e}nd{document} |
\begin{document}
\title{Porous numbers}
\textbf{Abstract}\\
The concept of porous numbers is presented. A number $k$ which is not a multiple of 10 is called {\it porous} if every number $m$ with sum of digits = $k$ and $k$ a divisor of both $m$ and digit reversal of $m$ has a zero in its digits. It is proved that 11, 37, 74, 101 and 121 are the only porous numbers smaller than 1000.
\section{Introduction}
A number $k$ which is not a multiple of 10 is called {\it porous} if every number $m$ that fulfills these 3 requirements:
\begin{enumerate}
\item $k \; \vert \; m$
\item $k \; \vert \; rev(m)$
\item the sum of the digits of $m = k$
\end{enumerate}
must have at least one digit which is a zero. rev($m$) is the number $m$ reversed (e.g.\ $m = 123$, then rev($m$) $= 321$).
The numbers are called porous because when you ``open'' them with any $m$, there are always voids in $m$.\\
\section{List of non-porous numbers}
By definition if $k$ is a multiple of 10 it is non-porous. In fact, for any $m$, rev($m$) has a number different from 0 at the end and hence is not divisible by $k$ and no $m$ exists at all that could fulfill the 3 requirements.\\
For all other numbers a computer program was executed to find an $m$ not containing any zeros and fulfilling the 3 requirements. For $k$ smaller than 200, the results of the sequence A333666 \cite{A333666} were taken if $m$ did not contain any zero. If they had a zero, the search was extended until an $m$ without a zero was found. For $k$ larger than 200 most of the $m$ were found by concatenating palindromes of length up to 10. For example: $p_1 = 6498946$ is a palindrome which fulfills $202 \; \vert \; p_1$ and since it is a palindrome it also fulfills $202 \; \vert \; rev(p_1)$. The sum of the digits of $p_1$ is 46. $p_2 = 25452$ is a second palindrome that also fulfills the same requirements. The sum of the digits of $p_2$ is 18. Since $202 = 4 \times 46 + 18$, we concatenate 4 times $p_1$ with $p_2$ and the result 649894664989466498946649894625452 is the $m$ we were looking for.\\
The search was automated and for most numbers an $m$ was quickly found except for multiples of 37, 101 and 121 which still had to be treated ``manually''. A file containing an $m$ for all non-porous numbers for $k$ up to 1000 was produced \cite{A337832}.
\section{List of porous numbers}
A brute force method to prove that a number is porous is to test, for all possible numbers $m$ that have a sum of digits $k$ and that do not contain a zero, whether both $m$ and rev($m$) are divisible by $k$. For $k = 11$ there are just 1021 numbers to be tested (1021 = A104144(19), there is an offset of 8 in this sequence \cite{A104144}). For $k = 37$ we get already 66 billion candidates, which is still doable, but at the latest for $k = 74$ the number of possibilities of $8.85 \times 10^{21}$ is exceeding normal computing powers. Instead analytical reasoning will bring the proofs.
\subsection{Proof that 11 is a porous number}
Let "$m_{s-1} ... m_3 m_2 m_1 m_0$" be a number $m$ with $s$ digits that fulfills for $k=11$ the three requirements listed in the introduction. We define:\\
A = $m_0 + m_2 + m_4 + $ ... ,
B = $m_1 + m_3 + m_5 + $ ... \\
A divisibility rule for 11 requires that the alternating sum of the digits must be divisible by 11. Hence:
\begin{equation}
A - B = j \cdot 11
\label{eq:1}
\end{equation}
Since the sum of the digits is 11, we have
\begin{equation}
A + B = 11
\label{eq:2}
\end{equation}
Adding eq.~\ref{eq:1} and \ref{eq:2} yields
$$ 2 A = (j + 1) \cdot 11 $$ therefore $A$ must be 0 or 11. If $A$ is 11, then $B$ is 0. This means either $A$ or $B$ must be zero and $m$ must contain a zero. Hence 11 is a porous number.
\subsection{Proof that 37 is a porous number}
Let "$m_{s-1} ... m_3 m_2 m_1 m_0$" be a number $m$ with $s$ digits that fulfills for $k=37$ the three requirements listed in the introduction. $m_{s-1}$ is not allowed to be zero. We define:\\
A = $m_0 + m_3 + m_6 + $ ... ,
B = $m_1 + m_4 + m_7 + $ ... and
C = $m_2 + m_5 + m_8 + $ ...\\
When $10^i$ ($i \ge 0)$ is divided by 37, the remainder is either 1, 10 or 26. We define:
$$
\delta_i = \left\{
\begin{array}{rl}
1 & \mbox{if } mod(i,3) = 0 \\
10 & \mbox{if } mod(i,3) = 1 \\
26 & \mbox{if } mod(i,3) = 2 \\
\end{array}
\right.
$$
With this definition the powers of 10 can be written as:\\
$10^i = \alpha_i \cdot 37 + \delta_i$\\
And $m$ can be written as:
$$m = \sum_{i=0}^{s-1} m_i 10^i = \sum_{i=0}^{s-1} m_i (\alpha_i \cdot 37 + \delta_i) $$\\
Since $37 \; \vert \; m$ it follows
\begin{equation}
\sum_{i=0}^{s-1} m_i \delta_i = l_1 \cdot 37
\end{equation}
Hence we get:
\begin{equation}
A + 10 B + 26 C = l_1 \cdot 37
\label{eq:37}
\end{equation}
Eq.~\ref{eq:37} written for the reversed number of $m$ reads:
$$
\begin{array}{rl}
A + 10 C + 26 B = j \cdot 37 & \mbox{if } mod(s,3) = 0 \mbox{, e.g. m has 13 digits} \\
B + 10 A + 26 C = j \cdot 37 & \mbox{if } mod(s,3) = 1 \\
26 A + 10 B + C = j \cdot 37 & \mbox{if } mod(s,3) = 2 \\
\end{array}
$$
For all three cases of $s$ it is easy to demonstrate that A, B and C must be multiples of 37. We show it here only for the second case with $mod(s,3) = 1$. Subtracting the equation for the reversed number from the equation for the original number gives
$$9 (B - A) = (l_1 - j) \cdot 37 $$
Therefore $B = A + l_2 \cdot 37$. Since $C = 37 - A - B$ we get $C = - 2 A + (1 - l_2) \cdot 37$. Inserting the expressions for $B$ and $C$ into eq.~\ref{eq:37} gives:
$$ 41 A = 37 (26 - l_1 - 16 l_2) $$
which is only possible if $A$ is a multiple of 37. Then $B$ must also be a multiple of 37 and finally also $C$. The only possibility to distribute 37 over three numbers which are all multiples of 37 is to allocate the whole 37 to one of the three and the other two must be zero. Therefore all $m$ must have at least 8 zero digits (see e.g.~A333666(37) = 1,009,009,009,009 \cite{A333666}). \\
\subsection{Proof that 74 is a porous number}
This proof is very similar to the proof for $k = 37$. This time \\
$10^i = \alpha_i \cdot 74 + \gamma_i + \delta_i$\\
with
$$
\gamma_i = \left\{
\begin{array}{rl}
-37 & \mbox{if } i = 0 \\
0 & \mbox{else } \\
\end{array}
\right.
$$
and
$$
\delta_i = \left\{
\begin{array}{rl}
38 & \mbox{if } mod(i,3) = 0 \\
10 & \mbox{if } mod(i,3) = 1 \\
26 & \mbox{if } mod(i,3) = 2 \\
\end{array}
\right.
$$
Now $m$ can be written as:
$$m = \sum_{i=0}^{s-1} m_i 10^i = \sum_{i=0}^{s-1} m_i (\alpha_i \cdot 74 + \gamma_i + \delta_i) $$\\
Since $74 \; \vert \; m$ it follows
\begin{equation}
\sum_{i=0}^{s-1} m_i (\gamma_i + \delta_i) = l_1 \cdot 74
\end{equation}
Multiplying the individual terms gives
$$ 38 A + 10 B + 26 C - 37 m_0 = l_1 \cdot 74$$
$m_0$ must be an even number because $74 \; \vert \; m$ and therefore $37 m_0$ is a multiple of 74.\\
Hence we get:
$$ 38 A + 10 B + 26 C = l_2 \cdot 74$$
or
\begin{equation}
19 A + 5 B + 13 C = l_2 \cdot 37
\label{eq:74}
\end{equation}
Eq.~\ref{eq:74} written for the reversed number of $m$ reads:
$$
\begin{array}{rl}
19 A + 5 C + 13 B = j \cdot 37 & \mbox{if } mod(s,3) = 0 \mbox{, e.g. m has 16 digits} \\
19 B + 5 A + 13 C = j \cdot 37 & \mbox{if } mod(s,3) = 1 \\
13 A + 5 B + 19 C = j \cdot 37 & \mbox{if } mod(s,3) = 2 \\
\end{array}
$$
For all three cases of $s$ it is straightforward to demonstrate that $A$, $B$ and $C$ must be multiples of 37. Since the sum of the three is 74, at least one of them must be zero and therefore $m$ must have digits with zeros.
\subsection{Proof that 101 is a porous number}
For this proof we will exploit the fact that $101 \; \vert \; m$ if the ``alternating sum of blocks of two'' of $m$ is divisible by 101.
As above, $m =$ "$m_{s-1} ... m_3 m_2 m_1 m_0$" is a number consisting of $s$ digits that fulfills for $k=101$ the three requirements listed in the introduction. We define:\\
A = $m_0 - m_2 + m_4 - m_6 + $ ... ,
B = $m_1 - m_3 + m_5 - m_7 + $ ... \\
With this definition, the alternating sum of blocks of two, which we will call {\it alterdigitsum2,} is $A + 10 B$\\
From the divisibility rule for 101 it follows:
\begin{equation}
A + 10 B = l_1 \cdot 101
\label{eq:101_1}
\end{equation}
Depending on the number of digits $s$, the alterdigitsum2 of $rev(m)$ will be
$$
\begin{array}{rl}
- 10 A - B & \mbox{if } mod(s,4) = 0 \mbox{, e.g. m has 12 digits} \\
A - 10 B & \mbox{if } mod(s,4) = 1 \\
10 A + B & \mbox{if } mod(s,4) = 2 \\
-A + 10 B & \mbox{if } mod(s,4) = 3 \\
\end{array}
$$
If $s$ is odd then alterdigitsum2(rev(m)) = $ \pm (A - 10 B)$\\
From the divisibility rule for 101 it follows:
\begin{equation}
A - 10 B = l_2 \cdot 101
\label{eq:101_2}
\end{equation}
Adding eqs.~\ref{eq:101_1} and \ref{eq:101_2} yields:
$$ 2 A = (l_1 + l_2) \cdot 101 $$
which is only possible if A is a multiple of 101. And if $A$ is a multiple of 101 also $B$ must be a multiple of 101.\\
If $s$ is even then alterdigitsum2(rev(m)) = $ \pm (10 A + B)$\\
From the divisibility rule for 101 it follows:
\begin{equation}
10 A + B = l_3 \cdot 101
\label{eq:101_3}
\end{equation}
Subtracting eq.~\ref{eq:101_1} from 10 times eq.~\ref{eq:101_3} yields:
$$ 99 A = (10 l_3 - l_1) \cdot 101 $$
which is only possible if A is a multiple of 101. And if $A$ is a multiple of 101 also $B$ must be a multiple of 101.\\
What are the options then? $A$ and $B$ cannot be both zero because the sum of all digits, 101, is an odd number which means that if $A$ is even then $B$ must be odd and vice versa.
Hence either $A$ or $B$ is $\pm 101$. But then all non-zero digits must be in the positions $j, j+4, j+8, ...$ with $j$ a number from 0 to 3 such that the $m_j$ can add up to 101. All other digits must be zero and therefore 101 is a porous number.\\
\subsection{Proof that 121 is a porous number}
As above, $m =$ "$m_{s-1} ... m_3 m_2 m_1 m_0$" is a number consisting of $s$ digits that fulfills for $k=121$ the three requirements listed in the introduction.
$s \ge 14$ because even if we fill 13 digits with a ``9'' the sum of digits is only 117. We define:\\
A = $m_0 + m_2 + m_4 + $ ... ,
B = $m_1 + m_3 + m_5 + $ ... \\
Since $121 \; \vert \; m$ also $11 \; \vert \; m$ and a divisibility rule for number 11 requires that the alternating sum of the digits must be divisible by 11. Hence:
\begin{equation}
A - B = j \cdot 11
\label{eq:121_1}
\end{equation}
Since the sum of all digits is 121 we also know:
\begin{equation}
A + B = 121 = 11 \cdot 11
\label{eq:121_2}
\end{equation}
Adding eqs.~\ref{eq:121_1} and \ref{eq:121_2} gives:
\begin{equation}
2 A = (j + 11) \cdot 11
\end{equation}
which means $A$ must be a multiple of 11, i.e. $j_1 \cdot 11$ and therefore $B = (11 - j_1) \cdot 11$. \\
Both $A$ and $B$ must be multiples of 11 and $j$ must be an odd number. \\
Next we exploit the fact that the multiplicative order of 10 modulo 121 equals 22:\\
$10^i = \alpha_i \cdot 121 + \beta_i \cdot 11 - (-1)^{i+1}$\\
with $\beta = (0, 1, 9, 3, 7, 5, 5, 7, 3, 9, 1, 0, 10, 2, 8, 4, 6, 6, 4, 8, 2, 10)$. After 22 numbers the sequence repeats, i.e. $\beta_i$ = $\beta_{i + 22}$.\\
Now $m$ can be written as:
$$m = \sum_{i=0}^{s-1} m_i 10^i = \sum_{i=0}^{s-1} m_i (\alpha_i \cdot 121 + \beta_i \cdot 11 - (-1)^{i+1}) $$\\
Since $121 \; \vert \; m$ it follows
\begin{equation}
\sum_{i=0}^{s-1} m_i ( \beta_i \cdot 11 - (-1)^{i+1}) = l_1 \cdot 121
\label{eq:121_4}
\end{equation}
Note, $\sum_{i=0}^{s-1} -(-1)^{i+1} m_i$ is the alternating sum of the digits, i.~e.~$A - B$. The first part of the sum is rewritten as:\\
$11 \cdot \left [ \vec{A} \cdot \beta_A + \vec{B} \cdot \beta_B \right ]$ \\
where $\vec{A} \cdot \beta_A = m_0 \beta_0 + m_2 \beta_2 + m_4 \beta_4 + ...$ and $\vec{B} \cdot \beta_B = m_1 \beta_1 + m_3 \beta_3 + m_5 \beta_5 + ...$\\
With this notation we can rewrite eq.~\ref{eq:121_4} as
\begin{equation}
11 \cdot \left [ \vec{A} \cdot \beta_A + \vec{B} \cdot \beta_B \right ] + A - B = l_1 \cdot 121
\label{eq:121_5}
\end{equation}
\subsubsection{Number of digits of $m$ is odd}
Now we formulate eq.~\ref{eq:121_5} for the reversed number $m$. We only need to replace $\beta_0$ with $\beta_{s-1}$, $\beta_1$ with $\beta_{s-2}$ and so forth. The result is:
\begin{equation}
11 \cdot \left [ \vec{A} \cdot \bar{\beta_A} + \vec{B} \cdot \bar{\beta_B} \right ] + A - B = l_2 \cdot 121
\label{eq:121_rev_odd}
\end{equation}
with $\bar{\beta_A} = (\beta_{s-1}, \beta_{s-3}, \beta_{s-5}, ...)$\\
Adding eqs.~\ref{eq:121_5} and \ref{eq:121_rev_odd} yields:
\begin{equation}
11 \cdot \left [ \vec{A} \cdot (\beta_A + \bar{\beta_A}) + \vec{B} \cdot (\beta_B + \bar{\beta_B}) \right ] + 2(A - B) = (l_1 + l_2) \cdot 121
\label{eq:121_betas}
\end{equation}
Now we have to evaluate eq.~\ref{eq:121_betas} for s = 15, 17, ... until 35. Afterwards, starting from $s = 37$, the results are repeating because $\beta_i$ = $\beta_{i + 22}$. We notice that all elements of $\beta_A + \bar{\beta_A}$ have the same remainder $r$ when divided by 11 and $\beta_B + \bar{\beta_B}$ all have the same remainder $11 - r$ when divided by 11. In Table~\ref{table:1} the remainders~$r$ are printed for the number of digits $s$ before they repeat: \\
\begin{table}[h!]
\centering
\begin{tabular}[h]{|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
s & 15 & 17 & 19 & 21 & 23 & 25 & 27 & 29 & 31 & 33 & 35 \\
\hline
$\beta_A + \bar{\beta_A}$ & 8 & 6 & 4 & 2 & 0 & 9 & 7 & 5 & 3 & 1 & 10 \\
$\beta_B + \bar{\beta_B}$ & 3 & 5 & 7 & 9 & 0 & 2 & 4 & 6 & 8 & 10 & 1 \\
\hline
\end{tabular}
\caption{Remainders of all elements of $\beta_A + \bar{\beta_A}$ and $\beta_B + \bar{\beta_B}$ divided by 11}
\label{table:1}
\end{table}
With this information eq.~\ref{eq:121_betas} can be written as:
$$ 11 \cdot \left [ r \cdot A + (11 - r ) \cdot B + l_3 \cdot 11 \right] + 2(A - B) = (l_1 + l_2) \cdot 121 $$
Since $A$ and $B$ are both multiples of 11 this means that $11 \cdot \left [ r \cdot A + (11 - r ) \cdot B + l_3 \cdot 11 \right ]$ is a multiple of 121 and therefore
$$ 2(A - B) = l_4 \cdot 121 $$
Since $A - B = j \cdot 11$ this equation can only be fulfilled if $j$ itself is a multiple of 11.
\subsubsection{Number of digits of $m$ is even}
If $s$ is even, the alternating digit sum of rev($m$) has the opposite sign and therefore we get instead of eq.~\ref{eq:121_rev_odd} this equation:
\begin{equation}
11 \cdot \left [ \vec{A} \cdot \bar{\beta_A} + \vec{B} \cdot \bar{\beta_B} \right ] + B - A = l_5 \cdot 121
\label{eq:121_rev_even}
\end{equation}
In this case we subtract eq.~\ref{eq:121_rev_even} from eq.~\ref{eq:121_5}. This yields:
\begin{equation}
11 \cdot \left [ \vec{A} \cdot (\beta_A - \bar{\beta_A}) + \vec{B} \cdot (\beta_B - \bar{\beta_B}) \right ] + 2(A - B) = (l_1 - l_5) \cdot 121
\label{eq:121_betas_even}
\end{equation}
Now we have to evaluate eq.~\ref{eq:121_betas_even} for s = 14, 16, ... 34. Also in this case all elements of $\beta_A - \bar{\beta_A}$ have the same remainder $r$ when divided by 11 and $\beta_B - \bar{\beta_B}$ all have the same remainder $11 - r$ when divided by 11. In Table~\ref{table:2} the remainders~$r$ are printed for the number of digits $s$ before they repeat: \\
\begin{table}[h!]
\centering
\begin{tabular}[h]{|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
s & 14 & 16 & 18 & 20 & 22 & 24 & 26 & 28 & 30 & 32 & 34 \\
\hline
$\beta_A - \bar{\beta_A}$ & 9 & 7 & 5 & 3 & 1 & 10 & 8 & 6 & 4 & 2 & 0\\
$\beta_B - \bar{\beta_B}$ & 2 & 4 & 6 & 8 & 10 & 1 & 3 & 5 & 7 & 9 & 0 \\
\hline
\end{tabular}
\caption{Remainders of all elements of $\beta_A - \bar{\beta_A}$ and $\beta_B - \bar{\beta_B}$ divided by 11}
\label{table:2}
\end{table}
With this information eq.~\ref{eq:121_betas_even} can be written as:
$$ 11 \cdot \left [ r \cdot A + (11 - r ) \cdot B + l_6 \cdot 11 \right] + 2(A - B) = (l_1 - l_5) \cdot 121 $$
Since $A$ and $B$ are both multiples of 11 this means that $11 \cdot \left [ r \cdot A + (11 - r ) \cdot B + l_6 \cdot 11 \right ]$ is a multiple of 121 and therefore
$$ 2(A - B) = l_7 \cdot 121 $$
Since $A - B = j \cdot 11$ this equation can only be fulfilled if $j$ itself is a multiple of 11.
\subsubsection{Final conclusion for $k = 121$}
For both $s$ even and odd, it was shown that $j$ must be a multiple of 11.
Since $j$ is odd (hence not zero) and a multiple of 11, and since $|A - B| \le A + B = 121$, we need to have $A - B = \pm 121$, i.e. either $A$ or $B$ must be 0 and the other must be 121. Hence 121 is a porous number. \\
\section{Outlook}
The steadily increasing number of possibilities to construct an $m$ for a given $k$ suggests that the 5 numbers which were proven in the previous chapter might be the only porous numbers. But a mathematical proof for this conjecture seems a big challenge.
\end{document} |
\begin{document}
\title{Control of the geometric phase and pseudo-spin dynamics on coupled
Bose-Einstein condensates}
\author{E. I. Duzzioni$^{1}$, L. Sanz$^{1\text{,}2}$, S. S. Mizrahi$^{1}$ and M. H. Y.
Moussa$^{3}$}
\affiliation{$^{1}$Departamento de F\'{\i}sica, Universidade Federal de S\~{a}o Carlos,
13565-905, S\~{a}o Carlos, SP, Brazil}
\affiliation{$^{2}$Instituto de F\'{\i}sica, Universidade Federal de Uberl\^{a}ndia, Caixa
Postal 593, 38400-902, Uberl\^{a}ndia, Minas Gerais, Brazil}
\affiliation{$^{3}$Instituto de F\'{\i}sica de S\~{a}o Carlos, Universidade de S\~{a}o
Paulo, Caixa Postal 369, 13560-970, S\~{a}o Carlos, S\~{a}o Paulo, Brazil}
\begin{abstract}
We describe the behavior of two coupled Bose-Einstein condensates in
time-dependent (TD) trap potentials and TD Rabi (or tunneling) frequency,
using the two-mode approach. Starting from Bloch states, we succeed to get
analytical solutions for the TD Schr\"{o}dinger equation and present a
detailed analysis of the relative and geometric phases acquired by the wave
function of the condensates, as well as their population imbalance. We also
establish a connection between the geometric phases and constants of motion
which characterize the dynamics of the system. Besides analyzing the effects of
temporality on condensates that differ by hyperfine degrees of freedom
(internal Josephson effect), we also present a brief discussion of a
single-species condensate in a double-well potential (external Josephson effect).
\end{abstract}
\pacs{03.65.Vf, 03.75.Kk, 03.75.Lm, 03.75.Mn}
\maketitle
\section{Introduction}
In recent years, concepts which had been restricted to the foundations of quantum
mechanics have been considerably enlarged by spreading out to different
domains of physics. With the introduction of measures such as separability
\cite{Peres,Horodecki,Simon} and concurrence \cite{Wootters,Werner}, and
the\ wider understanding that entanglement, and so nonlocality, is at the core
of many-body phenomena as quantum-phase transition \cite{Latorre},
superconductivity \cite{BCS} and Bose-Einstein condensation \cite{Pan05}, we
are considerably far from the time when entanglement and nonlocality were
confined to fundamental aspects of quantum mechanics. On the other hand, the
experimental techniques developed over the last decades for manipulating
atom-field interaction have enabled the building of macroscopic atomic
ensembles and the experimental verification of fundamental concepts in
macroscopic scales \cite{Korbicz}.\ It is worth mentioning the rapid growth of
quantum information theory which has conferred to its basic ingredients ---
the phenomena of superposition of states and decoherence, entanglements and
nonlocality --- a great deal of advances towards the accomplishments of
quantum logical devices.
Among the standard tools to generate and detect multipartite entanglements,
experiments in Bose-Einstein condensates (BECs) in dilute gases have deepened
our incursion towards the quantum nature of macroscopic systems. In
particular, experiments with a trapped gas of $^{87}$Rb atoms with two
different hyperfine sublevels prompt the engineering of a Josephson-like
coupling between two condensates by a laser-induced Raman
transition~\cite{Myatt97,Matthews98}. Such \textquotedblleft
internal\textquotedblright\ Josephson effect \cite{LeggettRMP} mediates
intraspecies collisions apart from interspecies ones. These atom-atom
interactions empowered the investigation of the dynamics of the relative phase
of coupled condensates \cite{Hall98b} and Rabi oscillations \cite{Matthews99}
on macroscopic systems. Moreover, precise measurements of scattering lengths
has also been accomplished \cite{Hall98a}, aiming to quantify properly the
non-linear dynamics associated with collisions. In the two-mode approximation,
the coupled condensates have been employed to investigate entanglement
dynamics~\cite{Hines03,Sanz03,Pan05} and the possibility to prepare, control
and detect macroscopic superposition
states~\cite{CiracGBEC98,Gordon99,Dunningham01}. Beyond these achievements, the
analysis of the Josephson effect in this two-mode exactly soluble model may
provide a clue for the examination of macroscopic coupling arising in less
tractable form of the general theory of BECs \cite{LeggettRMP}.
Whereas in real experiments the trap potential may be considered to be a
time-independent function, excepting for small fluctuations, time-varying
scattering lengths are usually produced through Feshbach resonances while, as
pointed out in Ref. \cite{LeggettRMP}, the amplitude and phase of the laser
field may vary in time. In this connection, the present paper is devoted to
the TD version of the two-mode Hamiltonian (TMH), where the effective
frequencies of the trap potential for both atomic species are TD functions, as
well as the Rabi frequency and the scattering lengths. A similar approach was
employed in Refs.~\cite{Vedral,Chen04} where, however, only the phase of the
external field inducing the Raman transition~was assumed to be a TD slowly
varying function. Instead, our treatment considers time dependence of all
Hamiltonian parameters, focusing on two particular subjects: the analysis of
the geometric phase acquired by the wave function of the whole system and the
control of the dynamics of pseudo-spin states governed by the TD
TMH. Starting from Bloch states, whose preparation is achieved by applying a
laser pulse~to atoms condensed in a single hyperfine level \cite{Hall98a}, we
demonstrate that its evolution, visualized as a vector on the Bloch sphere,
can be used to control the geometric phase and the population imbalance
following from the whole wave function of the condensates. Our treatment also
permits a detailed analysis of the relative phase between the condensed states.
In Ref.\cite{Milburn}, the authors studied the dynamics of a strongly driven
two-coupled BECs in two spatially localized modes of a double-well potential,
where the tunneling coupling between the two modes is periodically modulated.
In our work we also study the TD TMH associated to the \textquotedblleft
external\textquotedblright\ Josephson effect, analyzing its differences with
relation of the \textquotedblleft internal\textquotedblright\ Josephson effect.
Similarly to the above mentioned fundamental phenomena, the geometric phase
has overtaken its striking rule on fundamental physics to widening our
understanding of phenomena as quantum Hall effect \cite{Baily,Bruno,Kats} and
for the implementation of fault-tolerant quantum gates \cite{Zanardi}. After
its discovery by Berry on adiabatic processes \cite{Berry98}, it has been
generalized to nonadiabatic~\cite{Aharonovl87}, noncyclic~\cite{Samuel88} and
nonunitary~\cite{Tong04,Piza} quantum evolutions. Recently, it has been
investigated in different areas of physics, ranging from BECs \cite{Vedral}
and cavity quantum electrodynamics \cite{Carollo,Duzzioni} to condensed matter
\cite{Bliokh} and quantum information theory \cite{Zanardi}. In particular,
the Berry phase of mesoscopic spin in Bose-Einstein condensates, induced by a
TD slowly varying driven field, has been investigated under the TMH
\cite{Vedral,Chen04}. In our treatment, the evolution of the geometric phase
of this mesoscopic system is evaluated in a more general scenario, where all
the Hamiltonian parameters are assumed to be TD.
The paper is organized as follows. In Sec.~II we introduce and solve the
Schr\"{o}dinger equation associated to the TD TMH, presenting the evolution
operator. The dynamics of BECs for initial Bloch states is analyzed in Sec.
III, where we show that they remain as Bloch states apart from a global phase
factor accounting for the elastic collision terms. The geometric phase
acquired by the state vector of the system is presented in Sec. IV and a
detailed analysis of its time evolution is found in Sec. V for different
regimes of the parameters. In Sec. VI we take the problem of a TMH from a
different perspective, considering the external Josephson effect instead of
the internal one. Finally, Sec. VII is devoted to our concluding remarks.
\section{The time-dependent TMH}
Under the two-mode approximation, where the quantum field operators $\Psi
_{a}=\varphi_{a}\left( \mathbf{r},t\right) a$ and $\Psi_{b}=\varphi
_{b}(\mathbf{r},t)b$ are restricted to the fundamental states $\varphi_{\ell
}\left( \mathbf{r},t\right) $ ($\ell=a,b$) \cite{CiracGBEC98,Milburn1997},
the coupled Bose-Einstein condensates are described by the TD Hamiltonian
($\hbar=1$)
\begin{align}
H\left( t\right) & =\sum_{\ell=a,b}\left[ \omega_{\ell}\left( t\right)
\ell^{\dagger}\ell+\gamma_{\ell}\left( t\right) \ell^{\dagger}\ell^{\dagger
}\ell\ell\right] +\gamma_{ab}\left( t\right) a^{\dagger}ab^{\dagger
}b\nonumber\\
& -g\left( t\right) \left( e^{-i\delta\left( t\right) }a^{\dag
}b+e^{i\delta\left( t\right) }ab^{\dag}\right) \text{,} \label{1}
\end{align}
where $a$ and $b$ are standard bosonic annihilation operators, associated with
condensation in hyperfine levels $\left\vert 2,1\right\rangle $ and
$\left\vert 1,-1\right\rangle $, respectively
\cite{CiracGBEC98,Gordon99,Villain99}. The phase $\delta\left( t\right) $ is
associated to the detuning $\Delta(t)$ from the Raman resonance between the
atomic transition $\left\vert 2,1\right\rangle $ $\leftrightarrow\left\vert
1,-1\right\rangle $, which may be a TD function (by varying the laser
frequency), through the expression $\delta\left( t\right) =\int_{t_{0}}
^{t}\Delta(\tau)\,d\tau+\delta_{0}$. The TD trap frequencies $\omega
_{\ell}$, the interspecies and intraspecies collision parameters $\gamma_{ab}$
and $\gamma_{\ell}$, and the Rabi frequency $g$, follow from
\begin{subequations}
\label{2}
\begin{align}
\omega_{\ell}\left( t\right) & =\int d^{3}{\mathbf{r}}\varphi_{\ell}
^{\ast}\left( \mathbf{r},t\right) \left[ -\frac{1}{2m}\nabla^{2}+V_{\ell
}\left( \mathbf{r},t\right) \right] \varphi_{\ell}\left( \mathbf{r}
,t\right) \text{,}\label{2a}\\
\gamma_{\ell}\left( t\right) & =\frac{4\pi A_{\ell}\left( t\right) }
{2m}\int d^{3}{\mathbf{r}}\left\vert \varphi_{\ell}\left( \mathbf{r}
,t\right) \right\vert ^{4}\text{,}\label{2f}\\
\gamma_{ab}\left( t\right) & =\frac{4\pi A_{ab}\left( t\right) }{m}\int
d^{3}{\mathbf{r}}\left\vert \varphi_{a}\left( \mathbf{r},t\right)
\varphi_{b}\left( \mathbf{r},t\right) \right\vert ^{2}\text{,}\label{2g}\\
g\left( t\right) & =\frac{\Omega\left( t\right) }{2}\int d^{3}
{\mathbf{r}}\varphi_{a}^{\ast}\left( \mathbf{r},t\right) \varphi_{b}\left(
\mathbf{r},t\right) , \label{2d}
\end{align}
where $m$ is the atomic mass. We assume that the time dependence of the trap
potential $V_{\ell}\left( \mathbf{r},t\right) $ is generated by
adiabatically varying the trapping magnetic field. Such adiabatic
variation of the trapping field has been assumed to ensure the validity of the
two-mode approximation. The time-varying scattering lengths $A_{ab}(t)$ and
$A_{\ell}(t)$, are accomplished via Feshbach resonances, by tuning a bias
magnetic field \cite{Vogels}. Finally, as mentioned above, in real experiments
with atomic BECs the Rabi frequency may be a time-varying function since the
amplitude and phase of the pumping fields may vary in time \cite{LeggettRMP}.
Except for the Josephson-like coupling, the Fock states are eigenstates of all
the terms in Hamiltonian (\ref{1}). Thus, in order to get rid of this TD
coupling in (\ref{1}), we consider a transformation with the unitary operator
\end{subequations}
\begin{equation}
V(t)=\exp\left[ \frac{r(t)}{2}\left( \operatorname*{e}\nolimits^{i\phi
(t)}ab^{\dagger}-\operatorname*{e}\nolimits^{-i\phi(t)}a^{\dagger}b\right)
\right] \text{,} \label{3}
\end{equation}
(analogous to that defined in Ref.~\cite{Chen04}) to obtain the transformed Hamiltonian
\begin{equation}
\mathcal{H}(t)=V^{\dagger}HV-iV^{\dagger}\partial_{t}V=\sum_{\ell
=a,b}\widetilde{\omega}_{\ell}(t)n_{\ell}+\mathcal{H}_{el}(t)+\mathcal{H}
_{inel}(t)\text{,} \label{4}
\end{equation}
where $n_{\ell}=\ell^{\dagger}\ell$ is the number operator associated to each
condensate having effective frequency
\begin{equation}
\widetilde{\omega}_{\ell}\left( t\right) =\omega_{\ell}\left( t\right)
+\left( 2\delta_{\ell b}-1\right) g\left( t\right) \cos\left[ \phi\left(
t\right) -\delta\left( t\right) \right] \tan\left[ r\left( t\right)
/2\right] \text{.} \label{5}
\end{equation}
In the framework associated to the transformation (\ref{3}), the system ends
up with an inelastic collision term apart from the elastic one already present
in (\ref{1}). The Hamiltonians accounting for such interactions, also weighted
by the TD function $\Lambda(t)=\gamma_{a}(t)+\gamma_{b}(t)-\gamma_{ab}(t)$,
are given by
\begin{subequations}
\label{6}
\begin{align}
\mathcal{H}_{el}(t) & =\left\{ \gamma_{a}(t)\cos^{2}\left[ r(t)/2\right]
+\gamma_{b}(t)\sin^{2}\left[ r(t)/2\right] -\frac{\Lambda(t)}{4}\sin
^{2}\left[ r(t)\right] \right\} \left( a^{\dagger}\right) ^{2}
a^{2}\nonumber\\
& +\left\{ \gamma_{a}(t)\sin^{2}\left[ r(t)/2\right] +\gamma_{b}
(t)\cos^{2}\left[ r(t)/2\right] -\frac{\Lambda(t)}{4}\sin^{2}\left[
r(t)\right] \right\} \left( b^{\dagger}\right) ^{2}b^{2}\nonumber\\
& +\left\{ \gamma_{ab}(t)+\Lambda(t)\sin^{2}\left[ r(t)\right] \right\}
a^{\dagger}ab^{\dagger}b\text{,}\label{6a}\\
\mathcal{H}_{inel}(t) & =\left\{ \frac{\left[ \gamma_{b}(t)-\gamma
_{a}(t)\right] }{2}\sin\left[ r(t)\right] -\frac{\Lambda(t)}{4}\sin\left[
2r(t)\right] \right\} \operatorname*{e}\nolimits^{-i\phi(t)}\left(
a^{\dagger}\right) ^{2}ab\nonumber\\
& +\left\{ \frac{\left[ \gamma_{b}(t)-\gamma_{a}(t)\right] }{2}\sin\left[
r(t)\right] +\frac{\Lambda(t)}{4}\sin\left[ 2r(t)\right] \right\}
\operatorname*{e}\nolimits^{-i\phi(t)}a^{\dagger}b^{\dag}b^{2}\nonumber\\
& +\frac{\Lambda(t)}{4}\sin^{2}\left[ r(t)\right] \left( \operatorname*{e}
\nolimits^{-i\phi(t)}a^{\dagger}b\right) ^{2}+\mathrm{h{.c.}}\text{.}
\label{6b}
\end{align}
The form of Hamiltonian (\ref{4}) is established provided that the TD
parameters $r(t)$ and $\phi(t)$ satisfy the coupled differential equations
\end{subequations}
\begin{subequations}
\label{7}
\begin{align}
\overset{.}{r}(t) & =2g(t)\sin\left[ \phi\left( t\right) -\delta\left(
t\right) \right] \mathrm{,}\label{7a}\\
\overset{.}{\phi}(t) & =\omega(t)+2g(t)\cot\left[ r(t)\right] \cos\left[
\phi\left( t\right) -\delta\left( t\right) \right] \text{{,}} \label{7b}
\end{align}
where
\end{subequations}
\begin{equation}
\omega\left( t\right) =\omega_{a}\left( t\right) -\omega_{b}\left(
t\right) \text{.} \label{8}
\end{equation}
The expression (\ref{8}) represents an effective frequency for the system
composed by the two-mode condensate, which plays an important role in the
solutions of the characteristic equations (\ref{7}). In the Appendix we
present a comprehensive analysis of the analytical solutions of the Eqs.
(\ref{7}) for the on- and off-resonant regimes which are defined by comparing
the effective frequency $\omega(t)$ with the detuning between the laser field
and Raman transition $\Delta(t)$. The on-resonant regime, where $\Delta
(t)=\omega\left( t\right) $, implies that the detuning from Raman transition
must equal the effective frequency of the two-mode condensate. Otherwise, we
have the off-resonant regime, where $\Delta(t)=\omega(t)-\varpi$, $\varpi$
being some constant.
Similar coupled differential equations were obtained by Smerzi \textit{et al}.
\cite{Smerzi 1997} in a semi-classical treatment of the double-well problem,
and by Chen \textit{et al}. \cite{Chen04} in a full quantum approach of the
BECs in the two-mode approximation. In both references all the parameters in
their Hamiltonians are constants, except for $\delta\left( t\right) $ which,
in Ref. \cite{Chen04}, is an adiabatically time-varying parameter.
After the experiments by Hall and co-workers with $^{87}$Rb \cite{Hall98a},
where the scattering lengths satisfy the relation $A_{a}:A_{ab}:A_{b}
=1.03:1:0.97$, and consequently $\gamma_{a}\simeq\gamma_{b}\simeq\gamma
_{ab}/2$ (assuming spatial Gaussian function), a number of papers have driven
attention to this particular case \cite{Liliana,Chen04} whose Schr\"{o}dinger
equation is exactly soluble. However, if the proportion $1.03:1:0.97$ is
broken, the system is not exactly integrable, but admits approximated
solutions as shown in Refs. \cite{Chen04,Vedral}. In our paper we are
concerned with specific solutions of the characteristic equations (\ref{7}),
under the rotating-wave approximation, which turn negligible the contribution
of the inelastic interactions compared to the elastic one. This is done by
substituting the solutions for $r(t)$ and $\phi(t)$, obtained in the Appendix,
into Eqs. (\ref{5}) and (\ref{6}), and rewriting Hamiltonian (\ref{4}) in the
interaction picture. Thus, after a time average of the TD parameters appearing
in this Hamiltonian, we analyze the conditions leading to the effective
interaction
\begin{equation}
\mathcal{H}_{eff}(t)\simeq\sum\nolimits_{\ell=a,b}\widetilde{\omega}_{\ell
}(\tau)n_{\ell}+\mathcal{H}_{el}(\tau)\text{.} \label{9}
\end{equation}
By adopting this procedure, where the inelastic interactions become
negligible, we get the evolution operator $\mathcal{U}\left( t,t_{0}\right)
=\exp\left( -i\int_{t_{0}}^{t}\mathcal{H}_{eff}(\tau)d\tau\right) $ and,
consequently, a prepared state $\left\vert \psi\left( t_{0}\right)
\right\rangle $ evolves according to Hamiltonian (\ref{1}) as
\begin{equation}
\left\vert \psi\left( t\right) \right\rangle =V\left( t\right)
\mathcal{U}\left( t,t_{0}\right) V^{\dagger}\left( t_{0}\right) \left\vert
\psi\left( t_{0}\right) \right\rangle \text{.} \label{10}
\end{equation}
\section{Dynamics of BECs for initial Bloch states}
Following Arecchi \textit{et al.}~\cite{Arecchi72} and Dowling \textit{et
al.}~\cite{Dowling94}, we recall that the Bloch states (BS), also called
atomic coherent states, spanned in the Dicke basis $\left\vert
j,m\right\rangle $, where $j=N/2$ and $\left\vert m\right\vert \leq j$, are
obtained through a specific rotation on the reference state $\left\vert
j,j\right\rangle $,
\begin{equation}
\left\vert \alpha,\beta\right\rangle =e^{\frac{\alpha}{2}\left( e^{i\beta
}\hat{J}_{-}-e^{-i\beta}\hat{J}_{+}\right) }\left\vert j,j\right\rangle
\text{,} \label{15}
\end{equation}
where $N$ is the number of condensed particles, $J_{+}$, $J_{-}$ (together
with $J_{z}$), are the generators of the $su(2)$ algebra, and $\left(
\alpha,\beta\right) $ are the polar and the azimuthal angles, respectively,
defined on the Bloch sphere. The Heisenberg angular-momentum uncertainty
relation for the BS reduces to
\begin{equation}
\langle\left( \Delta J_{x^{\prime}}\right) ^{2}\rangle\langle\left( \Delta
J_{y^{\prime}}\right) ^{2}\rangle=\frac{1}{4}\left\vert \langle J_{z^{\prime
}}\rangle\right\vert ^{2}\text{,} \label{16}
\end{equation}
with the mean values being calculated in a rotated coordinate system
$x^{\prime},y^{\prime},z^{\prime}$, where $z^{\prime}$ is an axis in the
($\alpha$,$\beta$) direction through the center of the Bloch sphere.
Therefore, the Bloch vector is defined as the unit vector
\begin{equation}
\mathbf{n}=\left( \sin{\alpha}\cos{\beta}\text{,}\sin{\alpha}\sin{\beta
}\text{,}\cos{\alpha}\right) \text{,} \label{17}
\end{equation}
in $z^{\prime}$-axis. Using the Schwinger relations
\begin{subequations}
\label{17l}
\begin{align}
J_{x} & =\frac{1}{2}\left( a^{\dagger}b+ab^{\dagger}\right) \text{,}
\label{17la}\\
J_{y} & =\frac{1}{2i}\left( a^{\dagger}b-ab^{\dagger}\right)
\text{,}\label{l7lb}\\
J_{z} & =\frac{1}{2}\left( a^{\dagger}a-b^{\dagger}b\right) \text{.}
\label{17lc}
\end{align}
with $J_{\pm}=J_{x}\pm iJ_{y}$ and the basis states $\left\{ \left\vert
j,m\right\rangle =\left\vert N/2,\left( N_{a}-N_{b}\right) /2\right\rangle
\right\} \equiv\left\{ \left\vert N_{a}\right\rangle \left\vert
N_{b}\right\rangle \equiv\left\vert N_{a},N_{b}\right\rangle \right\} $,
where $N_{a}$ and $N_{b}$ ($N=N_{a}+N_{b}$) stand for the number of atoms in
the condensates, such that $\left\vert j,j\right\rangle \Longleftrightarrow
\left\vert N,0\right\rangle $, it is straightforward to check that the BS can
be defined through bosonic operators as
\end{subequations}
\begin{equation}
\left\vert \alpha,\beta\right\rangle =\frac{1}{\sqrt{N!}}\left[ \cos\left(
\frac{\alpha}{2}\right) a^{\dagger}+\sin\left( \frac{\alpha}{2}\right)
\operatorname*{e}\nolimits^{i\beta}b^{\dagger}\right] ^{N}\left\vert
0,0\right\rangle \text{.} \label{18}
\end{equation}
This state has a well-defined relative phase $\beta$ between the two bosonic modes.
Now, it is evident from relations (\ref{17l}) that the unitary transformation
$V\left( t\right) $ turns to be exactly the rotation operator $e^{\frac
{\alpha}{2}\left( e^{i\beta}\hat{J}_{-}-e^{-i\beta}\hat{J}_{+}\right) }$, if
one considers $r\left( t\right) =\alpha$ and $\phi\left( t\right) =\beta$.
Therefore, it is easy to demonstrate through Eq. (\ref{10}) that an initial BS
$\left\vert \psi\left( t_{0}\right) \right\rangle =$ $\left\vert \alpha
_{0},\beta_{0}\right\rangle $ $=$ $\left\vert r_{0},\phi_{0}\right\rangle $
evolves to another BS
\begin{align}
\left\vert {\psi}(t)\right\rangle & =\frac{e^{-iN\varphi_{N}(t)}}{\sqrt{N!}
}\left[ \cos\left( \frac{r(t)}{2}\right) a^{\dagger}+\sin\left(
\frac{r(t)}{2}\right) e^{i\phi\left( t\right) }b^{\dagger}\right]
^{N}\left\vert 0,0\right\rangle \text{,}\nonumber\\
& =e^{-iN\varphi_{N}(t)}\left\vert r(t),\phi\left( t\right) \right\rangle
\label{19}
\end{align}
apart from the global phase factor $e^{-iN\varphi_{N}(t)}$, where
\begin{align}
\varphi_{N}(t) & =\int_{t_{0}}^{t}\left\{ \widetilde{\omega}_{a}
(\tau)+(N-1)\left[ \gamma_{a}(\tau)\cos^{2}\left[ r(\tau)/2\right] \right.
\right. \nonumber\\
& +\left. \left. \gamma_{b}(\tau)\sin^{2}\left[ r(\tau)/2\right]
-\frac{\Lambda(\tau)}{4}\sin^{2}\left[ r(\tau)\right] \right] \right\}
d\tau\text{.} \label{19a}
\end{align}
The relative phase between the condensates $\phi(t)$ is associated to the mean
values $\left\langle J_{x}(t)\right\rangle =N\sin\left[ r(t)\right]
\cos\left[ \phi(t)\right] /2$ and $\left\langle J_{y}(t)\right\rangle
=N\sin\left[ r(t)\right] \sin\left[ \phi(t)\right] /2$, whereas $r(t)$ is
related to the population imbalance, $\Delta N(t)=\left\langle N_{a}
-N_{b}\right\rangle =2\left\langle J_{z}(t)\right\rangle $, through the
relation
\begin{equation}
\Delta N(t)=N\cos\left[ r(t)\right] \text{.} \label{20}
\end{equation}
We stress that according to the evolution operator, Eq. (\ref{10}), the
evolved state (\ref{19}) remains a BS (apart from a global phase factor),
since $\alpha_{0}=r_{0}$ and $\beta_{0}=\phi_{0}$. Note that the collision
parameters are restricted to the global phase $\operatorname{e}^{-iN\varphi
_{N}(t)}$, being irrelevant to the analysis, developed below, of the
nonadiabatic geometric phases acquired by the state vector. On the other hand,
collisions become relevant when considering other initial states as the
product of Glauber's coherent states $\left\vert \alpha_{0}\right\rangle
\left\vert \beta_{0}\right\rangle $ instead of the BS \cite{Liliana}.
\section{Geometric phases of the BS}
To study the evolution of the geometric phase in the two-mode BECs we use the
kinematic approach developed by Mukunda and Simon \cite{Mukunda1993}, where
the geometric phase $\phi_{G}$ is obtained as the difference between the total
phase $\phi_{T}(t)=\arg(\left\langle \psi(t_{0})\right\vert \left.
\psi(t)\right\rangle )$ and the dynamical phase $\phi_{D}(t)=-i\int_{t_{0}
}^{t}\left\langle \psi(\tau)\right\vert \frac{\partial}{\partial\tau
}\left\vert \psi(\tau)\right\rangle d\tau$, resulting in
\begin{equation}
\phi_{G}(t)=\arg(\left\langle \psi(t_{0})\right\vert \left. \psi
(t)\right\rangle )+i\int_{t_{0}}^{t}\left\langle \psi(\tau)\right\vert
\frac{\partial}{\partial\tau}\left\vert \psi(\tau)\right\rangle d\tau.
\label{23}
\end{equation}
The expressions for $\phi_{G}(t)$ and $\phi_{D}(t)$ as function of the system
parameters are, respectively,
\begin{align}
\phi_{G}(t) & =N\arg\{\cos\left( r_{0}/2\right) \cos\left[ r(t)/2\right]
+\operatorname{e}^{i\left[ \phi(t)-\phi_{0}\right] }\sin\left(
r_{0}/2\right) \sin\left[ r(t)/2\right] \}\nonumber\\
& -\frac{N}{2}\int_{t_{0}}^{t}\overset{.}{\phi}(\tau)\left\{ 1-\cos\left[
r(\tau)\right] \right\} d\tau, \label{24}
\end{align}
and
\begin{equation}
\phi_{D}(t)=-N\left[ \varphi_{N}(t)-\varphi_{N}(t_{0})\right] +\frac{N}
{2}\int_{t_{0}}^{t}\overset{.}{\phi}(\tau)\left\{ 1-\cos\left[
r(\tau)\right] \right\} d\tau, \label{25}
\end{equation}
where $\varphi_{N}(t)$ was given in Eq. (\ref{19a}). In
Ref.\cite{Balakrishnan}, the authors also obtain Eq.(\ref{24}), through the
Gross-Pitaevskii equation, for the geometric phase acquired by the wave
function of a BEC in the double-well problem, under the two-mode approximation.
The time evolution of the BS can be followed on the Bloch sphere
through the vector $\mathbf{n}\left( t\right) =(\sin[r(t)]\cos[\phi(t)]$,
$\sin[r(t)]\sin[\phi(t)]$, $\cos[r(t)])$, for the different solutions $r(t)$
and $\phi(t)$ presented in the Appendix. To follow such evolution and,
consequently, to analyze its geometric phase $\phi_{G}(t)$, we must estimate
the integrals in Eq.(\ref{2}), i.e., the typical values for trap frequencies,
Josephson-like coupling, intraspecies, and interspecies collision rates. For
the sake of simplicity we model the effective frequency for both atomic
species as harmonic trap potentials where the TD distribution for each
condensate $\varphi_{\ell}\left( \mathbf{r},t\right) $ can be approximated
by a stationary Gaussian function such that
\begin{equation}
\varphi_{\ell}\left( \mathbf{r}\right) =\left( \frac{1}{2\pi x_{\ell}^{2}
}\right) ^{3/4}e^{-\mathbf{r}^{2}/4x_{\ell}^{2}}\text{,} \label{26}
\end{equation}
where $x_{\ell}=\sqrt{\hbar/2m\omega_{\ell}}$ stands for the position
uncertainty in each harmonic oscillator ground state \cite{Milburn}. With this
assumption the integrals in Eq.(\ref{2}) are immediately estimated using
typical physical parameters of the experiments with $^{87}$Rb atoms
\cite{Hall98a,Albiez2005,Gordon1998}: $m=1.4\times10^{-25}$ Kg, $\omega_{\ell
}\sim10^{1-2}$ Hz, $A_{\ell}\sim5$ nm, and $\Omega\sim10^{3}$ Hz. To
obtain some insight into the pseudo-spin dynamics under the TD Hamiltonian
parameters, we consider the trap and Rabi frequencies as harmonic functions,
oscillating around the typical constant values, as follows
\begin{subequations}
\label{27}
\begin{align}
\omega_{\ell}\left( t\right) & =\omega_{\ell0}+\widetilde{\omega}_{\ell
}\sin{\left( \chi_{\ell}t+\xi_{\ell}\right) ,}\label{27a}\\
g(t) & =g_{0}+\widetilde{g}\sin(\mu t)\text{,} \label{27b}
\end{align}
with the parameters $\omega_{\ell0}$, $\widetilde{\omega}_{\ell}$, $\chi
_{\ell}$, $\xi_{\ell}$, $g_{0}$, $\widetilde{g}$, and $\mu$ being constant.
Since the elastic collisions contribute only to a global phase factor, they
will be assumed as the standard constant parameters in the literature
\end{subequations}
\begin{subequations}
\label{28}
\begin{align}
\gamma_{\ell} & =\frac{4\pi A_{\ell}}{2m}\text{,}\label{28a}\\
\gamma_{ab} & =\frac{4\pi A_{ab}}{m}\text{.} \label{28b}
\end{align}
\section{Nonadiabatic Geometric Phases and Pseudo-Spin Dynamics in BECs}
In this section we present a detailed study of the geometric phase acquired by
the whole wave function of the two-mode condensates. To this end, we plot the
time evolution of the geometric phase (\ref{24}), and analyze its
behavior through the evolution of Bloch vector (a map of the wave function of
the BECs on Bloch sphere). This procedure allows for a better understanding
and visualization of the concept of geometric phase for open trajectories
introduced by Samuel and Bhandari \cite{Samuel88}. In particular, we are
interested in the dependence of the geometric phase on the constants of motion
coming from the solutions of the characteristic equations (\ref{7}) and also
on the time dependence of the Hamiltonian parameters. In spite of the general
solutions presented in the Appendix for these equations, we have assumed, for
the analysis developed below, $N=1$, $\delta_{0}=0$, and $g_{0}=625\pi$ Hz.
\subsection{Solutions for $r$ constant}
Before analyzing the geometric phases for the on- and off-resonant solutions,
it is instructive to present their evolutions for the simple case where the
parameter $r$ is kept constant while $\phi\left( t\right) $ obeys
Eq.(\ref{A9}) (since $g=0$). In this case, the expression for the geometric
phase coming from Eq. (\ref{24}), becomes
\end{subequations}
\begin{equation}
\phi_{G}(t)=N\left\{ \arg\left\{ \cos^{2}\left( r/2\right) +e^{-i\left[
\phi(t)-\phi_{0}\right] }\sin^{2}\left( r/2\right) \right\} -\frac{\left(
1-\cos r\right) }{2}\left[ \phi(t)-\phi_{0}\right] \right\} \text{.}
\label{fg1}
\end{equation}
In Fig.1 the absolute value for $\phi_{G}(t)$ is plotted against the
dimensionless $\tau=\omega_{a0}t$, assuming typical values $\omega
_{a0}=2\omega_{b0}=62.5\pi$ Hz. For $r=\pi/2$, with the Bloch vector standing
on the equatorial plane, and $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}
=0$, the geometric phase evolves by jumps, as indicated by the thick solid
line. These jumps occur every time the relative phase $\phi$ connecting the
final to the initial Bloch vectors equals $\left( 2n+1\right) \pi$, $n$
being an integer. The jump discontinuities occur because there are an infinite
number of small geodesic-lengths connecting the vectors extremities, rendering
the geometric phase undefined \cite{Polavieja}. On the other hand, we observe
that before jumping to $\phi_{G}(\tau)=\pi$, i.e., for $\tau<2\pi$, the
geometric phase remains null since the small geodesic-length connecting the
extremities equals the Bloch-vector trajectory itself. As soon as the Bloch
vector acquires a relative phase larger than $\pi$, the small geodesic-length
connecting the extremities completes a loop over the equator, making the
acquired geometric phase proportional to $nN\pi$, where $n$, as defined above,
turns out to be the winding number, i.e., the number of loops around the
$z$-axis of the sphere.
The same interpretation given above for the geometric phase holds for the two
other curves obtained for $r=\pi/2.1$, except that the jump discontinuities
are substituted by high-slope curves around the points where $\phi=(2n+1)\pi$.
Moreover, the net effect coming from the TD parameters of the Hamiltonian is
to delay or advance the sequential increments of the relative phase $\phi$
and, consequently, of the geometric phase, as observed from Fig.1. The solid
line, obtained for $r=\pi/2.1$ and $\widetilde{\omega}_{a}=\widetilde{\omega
}_{b}=0$, shows that the increments of the geometric phase, besides being
smaller, present the same rate of variation when compared to the case
$r=\pi/2$. When the trap frequencies are oscillating functions, with
$\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=\omega_{a0}/4$, $\chi_{a}
=\chi_{b}=\omega_{a0}/2$, $\xi_{a}=0$, and $\xi_{b}=\pi/2$, the
time-dependence shown by the dashed line, results in the oscillations of the
time intervals between the increments of the geometric phase.
To better visualize the above discussion about geometric phases acquired in
open trajectories, in Fig.2 we plot the evolution of the Bloch vectors for the
cases $r=\pi/2$ and $r=\pi/2.1$, with $\widetilde{\omega}_{a}=\widetilde
{\omega}_{b}=0$, considering the same time interval $\tau=4\pi$. The black and
grey vectors indicate the coincident positions of the initial and final Bloch
vectors, for the cases $r=\pi/2.1$ and $r=\pi/2$, respectively, after a
complete rotation around the sphere whose directions are indicated by the
arrows. The trajectories described by the black and grey vectors are indicated
by solid and dashed curves, respectively. Evidently, the geometric phase
acquired during the evolution of the Bloch vector in the case $r=\pi/2.1$ (the
solid angle comprehended by the semi-hemisphere above the solid
circumference), is smaller than that for the case $r=\pi/2$ (the solid angle
corresponding to the north hemisphere, equal to $2\pi$).
The solution with $r$ constant means steady population imbalance $\Delta N$,
whereas the relative phase $\phi(t)$, another parameter examined by
experimentalists and necessary to completely define the BS, is a linear
function of time when $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=0$ or an
oscillating function when $\widetilde{\omega}_{a}=\widetilde{\omega}
_{b}=\omega_{a0}/4$. Note that the dynamics of the population imbalance and
the relative phase may be followed through the projection of the Bloch vector
trajectory on $z$-axis and $x$-$y$ plane, respectively.
\subsection{On-resonant solution}
As indicated in the Appendix, through the constant of motion $\mathcal{C}
=\sin\left[ r(t)\right] \cos\left[ \phi(t)-\delta(t)\right] $ we obtain
the solution of the characteristic equations (\ref{7}) in the on-resonant
regime where $\Delta(t)=\omega(t)$. All the possible trajectories in the
portrait space $r(t)$ $\times$ $\left( \phi(t)-\delta(t)\right) $, are
restrained to the level curves obtained as projection of the surface plotted
in Fig.3, which follows from $\mathcal{C}$.
To better understand the geometric phase acquired by the state vector
$\left\vert \Psi(t)\right\rangle $ in the on-resonant solutions, we consider
two different cases, $\Delta=0$ and $\Delta\neq0$, and analyze its dependence
on the constant $\mathcal{C}$, through the relation
\begin{align}
\phi_{G}(t) & =N\arg\left\{ \cos\left( r_{0}/2\right) \cos\left[
r\left( t\right) /2\right] +e^{-i\left[ \phi(t)-\phi_{0}\right] }
\sin\left( r_{0}/2\right) \sin\left[ r\left( t\right) /2\right] \right\}
\nonumber\\
& -\frac{N}{2}
{\displaystyle\int\limits_{t_{0}}^{t}}
dt^{\prime}\left\{ \omega(t^{\prime})\left\{ 1-\cos\left[ r\left(
t^{\prime}\right) \right] \right\} +\frac{2\mathcal{C}g(t^{\prime}
)\cos\left[ r\left( t^{\prime}\right) \right] }{1+\cos\left[ r\left(
t^{\prime}\right) \right] }\right\} \text{.} \label{fg2}
\end{align}
\subsubsection{The case $\Delta=0$}
In Fig.4 we plot the evolution of the geometric phase against $\tau=g_{0}t$,
considering $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=\widetilde{g}=0$.
The thick solid line on the abscissa axis corresponds to the choice $r_{0}
=\pi$ and $\phi_{0}=\pi/2$, leading to $\mathcal{C}=0$, under which the
geometric phase is null or undefined as indicated by the open dots over the
abscissa axis. Note that for $r_{0}=\pi$ we get, at $t=0$, an undetermined
equation (\ref{A4b}) for $\phi(t)$. To circumvent such indetermination we
impose on Eq.(\ref{A3}) the constraint $\phi(t)-\delta(t)=(2n+1)\pi/2$ over
any time interval, to determine $\phi(t)$ independently of Eq.(\ref{A4b}).
Since for $\Delta=0$ it follows that $\delta(t)=\delta_{0}$, implying that
$\phi(t)=\delta_{0}+(2n+1)\pi/2$, the geometric phase for the case
$\mathcal{C}=\Delta=0$ simplifies to $\phi_{G}(t)=N\arg\left\{ \cos\left[
\left( r\left( t\right) -r_{0}\right) /2\right] \right\} $ and,
consequently, $\phi_{G}(t)$ is null for $\left\vert r\left( t\right)
-r_{0}\right\vert \leq\pi$ and undefined for $\left\vert r\left( t\right)
-r_{0}\right\vert =\pi$. Still in Fig. 4, the solid and dashed lines,
associated to the pairs $(r_{0}$, $\phi_{0})=(\pi/5$, $\pi/4)$ and $\left(
\pi/4\text{, }3\pi/10\right) $, respectively, correspond to the same constant
$\mathcal{C}\simeq0.41$. These curves exhibit similar behaviors due to the
fact that, with the same constant $\mathcal{C}$, they present the same
trajectory on the portrait space of Fig.3, despite starting from different
initial conditions. The dotted and dashed-dotted lines, associated to the
pairs $\left( \pi/3\text{, }0\right) $ and $\left( \pi/3\text{, }
\pi\right) $, and corresponding to the constants $\mathcal{C}\simeq0.87$ and
$\mathcal{C}\simeq-0.87$, respectively, are symmetric around the abscissa axis
$\tau$. Such a symmetry reflection of the geometric phase, $\phi
_{G}\rightarrow-\phi_{G}$, is a consequence of the change $\phi_{0}
\rightarrow\phi_{0}\pm\pi$, implying that $\mathcal{C}\rightarrow
-\mathcal{C}$. It is worth noting that the larger the absolute value of
$\mathcal{C}$, the smaller the acquired geometric phase, and
\textit{vice-versa}.
In Fig.5 we plot the evolution of the Bloch vectors coming from the
on-resonant solution with the initial conditions $\left( \pi\text{, }
\pi/2\right) $ and $\left( \pi/3\text{, }0\right) $ corresponding to
$\mathcal{C}=0$ and $\mathcal{C}\simeq0.87$, whose initial and final positions
are represented by the black and grey vectors, respectively. As in Fig.2, we
consider the evolution of both vectors during the same time interval $\tau
=\pi$. Through the solid line trajectory described by the black vector, which
oscillates between the north and south poles, it is straightforward to
conclude that the geometric phase is null during the whole time evolution,
except when the vector reaches the north pole, where the geometric phase
becomes undetermined. As the dashed trajectory of the grey vector is not
restricted to a meridian, as in the case $\mathcal{C}=0$, the geometric phase
acquired is evidently non-null.
\subsubsection{The case $\Delta\neq0$}
As we are interested in the dependence of the geometric phase on constant
$\mathcal{C}$ and, now, on the effective frequency of the two-mode condensate
$\Delta(t)=\omega\left( t\right) $, we consider all the parameters of the
Hamiltonian being time-independent, $\widetilde{\omega}_{a}=\widetilde{\omega
}_{b}=\widetilde{g}=0$, except for $\delta(t)=\delta_{0}+
{\displaystyle\int\nolimits_{t_{0}}^{t}}
\omega(t^{\prime})dt^{\prime}$. In Fig.6 we plot the geometric phase against
$\tau=g_{0}t$ for different initial conditions ($r_{0}$, $\phi_{0}$) and
effective frequencies $\Delta$. As indicated by the thick solid line
associated to the initial conditions $\left( \pi\text{, }\pi/2\right) $,
corresponding to $\mathcal{C}=0$, with $\omega_{a}=2\omega_{b}=g_{0}/10$, the
geometric phase is not null, in contrast to the case $\Delta=0$. The
discontinuity exhibited by this curve follows from the $\arg$ function, whose
characteristic jumps occur whenever $\tau_{n}=\left[ \left( 2n-1\right)
\pi g_{0}\right] /2\omega$, $n$ being a positive integer. As indicated by the
solid and dashed lines, the property of symmetry reflection of the geometric
phase in the abscissa axis still follows when changing, simultaneously,
$\phi_{0}\rightarrow\phi_{0}\pm\pi$, and $\omega$ to $-\omega$. In fact, the
solid line corresponds to the initial condition $\left( \pi/3\text{,
}0\right) $ with $\mathcal{C}\simeq0.87$ and $\omega_{a}=2\omega_{b}
=g_{0}/10$, while the dashed line corresponds to $\left( \pi/3\text{, }
\pi\right) $, with $\mathcal{C}\simeq-0.87$ and $\omega_{b}=2\omega_{a}
=g_{0}/10$. When the change $\phi_{0}\rightarrow\phi_{0}\pm\pi$ is not
accompanied by $\omega\rightarrow-\omega$, such a symmetry is not
accomplished as indicated by the dotted line corresponding to the initial
conditions $\left( \pi/3\text{, }\pi\right) $ with $\mathcal{C}\simeq-0.87$
and, now, $\omega_{a}=2\omega_{b}=g_{0}/10$.
To visualize the acquisition of the geometric phase we again return to the
evolution of the Bloch vector. As observed from Fig.7, the solid trajectory
described by the vector associated to $\mathcal{C}=0$, whose coincident
initial and final positions are indicated by the black vector, leads to a
finite solid angle and, consequently, a finite geometric phase (during the
time evolution $\tau=\pi$\ considered for both cases presented). The control
of this solid angle may be accomplished through the parameter $\Delta$ --- the
larger $\Delta$ the larger the solid angle and \textit{vice-versa} --- as
demonstrated experimentally through the polarization vector of a photon
undergoing a Mach-Zehnder interferometer \cite{Kwiat}. The evolution of the
grey vector, associated to $\mathcal{C}\simeq0.87$, exhibits a dashed
trajectory where the initial and final positions are slightly different. As
the time evolution proceeds, such trajectory leads to a geometric phase which
easily exceeds that of the case $\mathcal{C}=0$, as indicated in Fig.6.
The population imbalance $\Delta N$ for the cases $\Delta=0$ and $\Delta\neq0$
is a time-oscillating function strictly dependent on the shape of Rabi
frequency $g(t)$, thus exhibiting a strong connection between the\ dynamics of
population inversion in two-level systems and population imbalance of BECs. In
fact, when the Hamiltonian (\ref{1}) is written through the quasi-spin operators
in Eqs.(\ref{17}), we obtain, apart from the collision terms, a driven
interaction. On the other hand, the relative phase $\phi(t)$ depends also on
the detuning $\delta(t)$ besides $g(t)$, as shown by Eqs.(\ref{A4}).
\subsection{Off-resonant solution}
To analyze the off-resonant solution, where the detuning $\Delta
(t)=\omega(t)-\varpi$ is controlled by adjusting the parameter $\varpi$, we
impose a constant Rabi frequency $g(t)=g_{0}$ which implies a constant of
motion $\mathcal{C}=\eta\sin\left[ r(t)\right] \cos\left[ \phi
(t)-\delta(t)\right] -\cos\left[ r(t)\right] $. Similarly to the
on-resonant case, all the possible trajectories for the off-resonant $r(t)$
and $\phi(t)$ are restrained to the level curves of the surface following from
$\mathcal{C}$, presented in Fig.8. When $\eta=$ $2g_{0}/\varpi\gg1$ it is
verified that the constant $\mathcal{C}$ reduces to that of the on-resonant
solutions, unless for the multiplicative factor $\eta$, and the surface
presented in Fig.8 also reduces to that of Fig.3. However, for $\eta\ll1$ we
obtain an approximately constant value of $r$. Finally, when $\eta\sim1$, we
obtain the surface whose level curves encapsulate all the possible
trajectories in the portrait space $r(t)$ $\times$ $\phi(t)-\delta(t)$, as
shown in Fig.8.
To study the effect of $\eta$ on the off-resonant geometric phase, given
by
\begin{align}
\phi_{G}(t) & =N\arg\left\{ \cos\left( r_{0}/2\right) \cos\left[
r\left( t\right) /2\right] +e^{-i\left[ \phi(t)-\phi_{0}\right] }
\sin\left( r_{0}/2\right) \sin\left[ r\left( t\right) /2\right] \right\}
\nonumber\\
& -\frac{N}{2}
{\displaystyle\int\limits_{t_{0}}^{t}}
dt^{\prime}\left\{ \omega(t^{\prime})\left\{ 1-\cos\left[ r\left(
t^{\prime}\right) \right] \right\} +\varpi\cos\left[ r\left( t^{\prime
}\right) \right] \frac{\mathcal{C}+\cos\left[ r\left( t^{\prime}\right) \right]
}{1+\cos\left[ r\left( t^{\prime}\right) \right] }\right\} \text{,}
\label{fg3}
\end{align}
we plot in Fig.9 the evolution of $\phi_{G}(t)$ for different values of $\eta
$, all starting from the point ($\pi/3$, $0)$, with $\omega_{a0}=2\omega
_{b0}=g_{0}/10$ and $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=0$. The
dotted line, following from the case where $\eta=40$, indicates a similar
behavior to the corresponding case of the on-resonant solution (represented by
the solid line in Fig. 6). For $\eta=0.1$, the solid line shows that the
geometric phase is a strongly oscillating function, a behavior that is better
visualized through the evolution of the corresponding vector in Bloch sphere,
Fig.10. Finally, when $\varpi\sim g_{0}$, as for $\eta=2$, the geometric phase
shows discontinuities, as indicated by the thick solid line, which turns out
to be a signature of the off-resonant solution. We note that the property of
reflection exhibited by the geometric phase coming from the on-resonant
solution is also present here when substituting, simultaneously, $\phi
_{0}\rightarrow\phi_{0}\pm\pi$, $\omega\rightarrow-\omega$, and $\eta
\rightarrow-\eta$.
In Fig.10 we present the evolution of the Bloch vectors in the time interval
$\tau\simeq9\pi/10$, for the cases $\eta=0.1$ and $\eta=2$, both starting from
the common point $\left( \pi/3\text{, }0\right) $. The black vector for
$\eta=0.1$, presents a behavior limited to the north hemisphere, described by
the solid trajectory, which exhibits periodic up and down motions on the
parallels, responsible for the oscillations of the geometric phase shown in
Fig.9. The grey vector for $\eta=2$, by its turn, indicates a rather
complicated dashed trajectory which descends to the south hemisphere and goes
back to the north.
Besides depending on the Rabi frequency, as in the on-resonant case, the
population imbalance for the off-resonant solution also depends on the
detuning $\varpi$ which makes its mean value not null. The relative phase
depends on $\delta(t)$, $g(t)$, and the detuning $\varpi$.
\subsection{Time-Dependent Effects on the Geometric Phases}
The behavior of the geometric phase when both, the trap and Rabi frequencies
are TD harmonic functions, as described by Eq.(\ref{27}), is analyzed in
Fig.11, where we plot $\phi_{G}(\tau)$ against $\tau=g_{0}t$, considering the
same initial conditions $(\pi/3$, $0)$ for all the curves. Starting with the
resonant solution with $\Delta=0$, we obtain the dotted curve for the
parameters $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=0$ and
$\widetilde{g}=\mu=g_{0}$, to be compared with the dotted curve of Fig.4. We
observe that both dotted curves are very close to each other, with the
increasing rate of the geometric phase being modulated by the TD Rabi
frequency in Fig.11. The solid line corresponds to the on-resonant solution
with $\Delta\neq0$, for the parameters $\omega_{a0}=2\omega_{b0}=g_{0}/10$,
$\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=0$, and $\widetilde{g}
=\mu=g_{0}$. This curve is to be compared with the solid line in Fig.6,
showing again that the increasing rate of $\phi_{G}$ can be controlled through
the TD Rabi frequency. The dashed line, also corresponding to $\Delta\neq0$,
with $\omega_{a0}=2\omega_{b0}=g_{0}$, $\widetilde{\omega}_{a}=\widetilde
{\omega}_{b}=0$, and $\widetilde{g}=\mu=g_{0}$, shows that the increasing rate
of the geometric phase may also be controlled through the trap frequencies.
Finally, the thick solid line, corresponding to the off-resonant solution,
with $\omega_{a0}=2\omega_{b0}=g_{0}$, $\widetilde{\omega}_{a}=\widetilde
{\omega}_{b}=\chi_{a}=\chi_{b}=g_{0}/2$, $\xi_{a}=0$, $\xi_{b}=-\pi/2$, and
$\widetilde{g}=0$, to be compared with the thick solid line of Fig.9, also
indicates the important role played by the time dependence of the trap
frequency in the geometric phase. We finally observe that, evidently, these TD
effects have direct implications on the population imbalance and relative
phase between the condensates.
\section{Geometric Phases and the External Josephson Effect}
In this section we analyze the two-mode condensates from a different
perspective: as a single atomic species trapped in a symmetric or asymmetric
double-well potential. As the internal Josephson effect is here substituted by
the tunneling interaction, the laser pumping becomes unnecessary and we impose
that $\Delta=0$, such that $\delta=0$. Moreover, in the external Josephson
effect the interspecies collision rate corresponds to a second order correction
compared to the intraspecies collision rates, justifying the assumption that
$\gamma_{ab}\simeq0$ \cite{Milburn1997,Ketterle}.
The Hamiltonian (\ref{1}) applied to this different physical situation leads,
under the above restrictions, to a similar transformed interaction (\ref{4})
and characteristic equations (\ref{7}). Therefore, the different solutions of
the coupled differential equations apply directly to the external Josephson
effect, with the on- and off-resonant processes describing the symmetric
($\omega=0$) and asymmetric ($\omega\neq0$) wells solutions, respectively.
\subsection{Symmetric wells}
From the above discussion we readily verify that the solutions for $r(t)$ and
$\phi(t)$, coming from the symmetric wells, are given by Eqs.(\ref{A4}) and
(\ref{A4c}) with the constant of motion $\mathcal{C}=\sin\left[ r(t)\right]
\cos\left[ \phi(t)\right] $. We observe that the TD tunneling rate $g(t)$ is
accomplished by modulating the amplitude of the counter propagating classical
fields that generate the barrier. Similarly to the internal Josephson effect,
all the possible trajectories for $r(t)$ and $\phi(t)$ follow from the level
curves of the surface plotted in Fig.8, assuming $\delta=0$. The same level
curves were obtained, by numerical methods, in Ref.\cite{Balakrishnan}. The
geometric phases acquired by the evolution of the state vector of the BECs are
given by Fig.4 and the Bloch-vector trajectories by Fig.5, both obtained from
the on-resonant solution of the internal Josephson effect with $\delta=0$.
\subsection{Asymmetric wells}
The solutions of the characteristic equations for the asymmetric wells follow
by imposing constant values for the effective frequency $\omega=\varpi$ (since
$\Delta=0$) and the tunneling rate $g=g_{0}$. We thus obtain the solutions
(\ref{A6}) with $\eta$ replaced by $\widetilde{\eta}=2g_{0}/\omega$ and
$\delta=0$. The constant of motion becomes $\mathcal{C}=\widetilde{\eta}
\cos\left[ \phi(t)\right] \sin\left[ r(t)\right] -\cos\left[ r(t)\right]
$. The phase-space portrait $r(t)\times\phi(t)$ is given by Fig.8 (with
$\delta=0$), whose level curves indicate all the possible trajectories for
$r(t)$ and $\phi(t)$. The same level curves were obtained by numerical methods
in Ref.\cite{Balakrishnan}. As an example of the geometric phase acquired by
the evolution of the state vector in the asymmetric wells, we take the dotted
line curve of Fig.9, corresponding to the case $\omega=\varpi=g_{0}/20$. (The
other two curves in Fig.9 do not satisfy the condition $\omega=\varpi$.) The
trajectory of the Bloch vector associated to $\omega=g_{0}/20$ is
approximately given by the dashed curve in Fig.7.
\section{Concluding Remarks}
In the present work we analyze the dynamics of two interacting condensates,
with a full TD Hamiltonian. Starting from the Hamiltonian (\ref{1}) under the
two-mode approximation, an effective interaction (\ref{9}) is established
under the RWA, provided that the polar $r(t)$ and azimuthal $\phi(t)$ angles,
which define a Bloch state, satisfy coupled differential equations. This
procedure enables us to define a detuning $\Delta(t)$ from the Raman resonance
between the atomic transition, together with an effective frequency for the
condensates $\omega\left( t\right) $, as in Eq. (\ref{8}). Thus, two
different solutions arise for the differential equations coupling the
parameters\ $r(t)$ and $\phi(t)$, the on-resonant solution, where
$\Delta(t)=\omega\left( t\right) $, and the off-resonant solution where
$\Delta(t)=\omega\left( t\right) +\varpi$, $\varpi$ being a constant. After
solving analytically the coupled equations for both regimes, we present a
detailed analysis of the geometric phases acquired by the Bloch state of the
system, also discussing the relative phase and population imbalance.
A main result of our work is the connection between geometric phases and
constant of motions of the interacting condensates which are identified
through the analytical solutions of the coupled differential equations for
$r(t)$ and $\phi(t)$. For each on- or off-resonant solution we assign a
constant of motion which determines the dynamical behavior of the state of the
system and, consequently, its geometric phase. We also note that these
constants of motions follow from level curves in the portrait space which are
also obtained analytically.
To better visualize the time-evolution of the geometric and relative phases
acquired by the state vector, together with population imbalance, we also
analyze the trajectory of the state vector mapped on Bloch sphere. Finally, we
present a brief discussion of the TD effects of the trap and Rabi frequencies
in the geometric phases together with the connection between its evolution in
both cases of internal and external Josephson coupling.
As in this work we studied only the evolution of an initial Bloch state, the
collision parameters were restricted to the global phase of the evolved state,
which remains a Bloch state under the two-mode approximation. Therefore,
collisions terms, which are also assumed as TD parameters, do not play a
decisive role in our present analysis. It is worth considering distinct
initial states to analyze the effects of the collision parameters in the
dynamics of the geometric phase and population imbalance.
\textbf{Acknowledgments}
We wish to express thanks for the support from CNPq and FAPESP, Brazilian agencies.
\section{Appendix}
\subsection{Analytical solutions of the characteristic equations (\ref{7})}
In this Appendix we present some specific solutions of the characteristic
equations (\ref{7}), following a more detailed treatment in \cite{Salomon}. We
investigate two different regimes of the laser field amplification, the
on-resonant and off-resonant regimes, which are defined comparing the
effective frequency of the two-mode condensate, $\omega(t)$, with the detuning
between the laser field and the Raman transition $\Delta(t)$. As mentioned
above, in the on-resonant regime, where $\Delta(t)=\omega\left( t\right) $,
the rate of time variation of the laser field equals the effective frequency
of the two-mode condensate. Otherwise, we have the off-resonant regime.
\subsection{On-resonant process}
Defining $\chi(t)\equiv\phi(t)-\delta(t)$, the characteristic equations
(\ref{7}) become
\begin{subequations}
\label{A1}
\begin{align}
\overset{.}{r}(t) & =2g(t)\sin\left[ \chi(t)\right] \text{,}\label{A1a}\\
\overset{.}{\chi}(t) & =\omega(t)-\Delta(t)+2g(t)\cos\left[ \chi(t)\right]
\cot\left[ r(t)\right] \text{{,}} \label{A1b}
\end{align}
such that, in the on-resonant regime we are left with the first-order
differential equation
\end{subequations}
\begin{equation}
\frac{dr}{d\chi}=\tan\chi\tan r\mathrm{.} \label{A2}
\end{equation}
After integrating Eq.(\ref{A2}) we obtain the constant of motion
\begin{equation}
\sin\left[ r(t)\right] \cos\left[ \phi(t)-\delta(t)\right] =\mathcal{C},
\label{A3}
\end{equation}
with $\mathcal{C}$ depending on the initial values $r_{0}$, $\phi_{0}$, and
$\delta_{0}$. Thus, the resonant solutions of Eqs.(\ref{A1}), are given by
\begin{subequations}
\label{A4}
\begin{align}
\cos\left[ r(t)\right] & =\sqrt{1-\mathcal{C}^{2}}\sin\left[
u(t,t_{0})+\arcsin\left( \frac{\cos r_{0}}{\sqrt{1-\mathcal{C}^{2}}}\right)
\right] ,\label{A4a}\\
\phi(t) & =\delta(t)+\arccos\left\{ \frac{\mathcal{C}}{\sin\left[
r(t)\right] }\right\} , \label{A4b}
\end{align}
where
\end{subequations}
\begin{equation}
u(t,t_{0})=-2\int_{t_{0}}^{t}g(\tau)d\tau\mathrm{.} \label{A4c}
\end{equation}
\subsection{Off-resonant process}
Considering the off-resonant regime, where $\Delta(t)=\omega(t)-\varpi$,
$\varpi$ being a constant, Eqs. (\ref{A1}) can again be solved by quadrature
as far as we assume the Rabi frequency $g$ to be also a constant $g_{0}$. In
this regime, defining $\eta=$ $2g_{0}/\varpi$, Eq.(\ref{A3}) is replaced by
\begin{equation}
\eta\cos\left[ \phi(t)-\delta(t)\right] \sin\left[ r(t)\right]
-\cos\left[ r(t)\right] =\mathcal{C}\mathrm{,} \label{A5}
\end{equation}
which again depends on the initial values $r_{0}$, $\phi_{0}$, $\delta_{0}$.
The solutions for this case are given by
\begin{subequations}
\label{A6}
\begin{align}
\cos\left[ r(t)\right] & =\frac{\eta\sqrt{1+\eta^{2}-\mathcal{C}^{2}}
}{1+\eta^{2}}\sin\left\{ -\varpi\sqrt{1+\eta^{2}}\left( t-t_{0}\right)
\right. \nonumber\\
& \left. +\arcsin\left[ \frac{\left( 1+\eta^{2}\right) \cos
r_{0}+\mathcal{C}}{\eta\sqrt{1+\eta^{2}-\mathcal{C}^{2}}}\right] \right\}
-\frac{\mathcal{C}}{1+\eta^{2}}\mathrm{,}\label{A6a}\\
\phi(t) & =\delta(t)+\arccos\left\{ \frac{\mathcal{C}+\cos\left[
r(t)\right] }{\eta\sin\left[ r(t)\right] }\right\} \mathrm{.} \label{A6b}
\end{align}
\subsection{A constant solution for $r$}
Another solution for $r(t)$ and $\phi(t)$ arises when we impose that $r$
remains constant in time. Through this solution, given by
\end{subequations}
\begin{align}
r(t) & =r_{0},\label{A8a}\\
\phi(t) & =\delta(t)+n\pi=\phi_{0}+\int_{t_{0}}^{t}\left[ \omega
(\tau)+2(-)^{n}g\left( \tau\right) \cot\left( r_{0}\right) \right]
d\tau\text{, } \label{A8b}
\end{align}
with $r_{0}\neq n\pi$ and $n$ being an integer, the state vector of the system
acquires only a relative phase $\phi(t)$. This formal solution for $\phi(t)$ can
be even simplified noting that the physical implementation of this regime
requires, necessarily, that $g\simeq0$. In fact, for the population imbalance
to be null, due to the constant value for $r$, the Rabi frequency must also be
null. Therefore, Eq.(\ref{A8b}) simplifies to
\begin{equation}
\phi(t)=\phi_{0}+\int_{t_{0}}^{t}\omega(\tau)d\tau\text{.} \label{A9}
\end{equation}
Fig. 1 Absolute value of geometric phase $\phi_{G}(t)$ against the
dimensionless $\tau=\omega_{a0}t$, for $r$ constant, with $\omega_{a0}
=2\omega_{b0}=62.5\pi$ Hz.
Fig. 2 Evolution of the Bloch vectors coming from constant solutions of $r$ in
the time interval $\tau=4\pi$. The grey and black vectors correspond to the
initial conditions $(\pi/2,0)$ and $(\pi/2.1,0)$, respectively, with
$\omega_{a0}=2\omega_{b0}=62.5\pi$ Hz and $\widetilde{\omega}_{a}
=\widetilde{\omega}_{b}=0$.
Fig. 3 The portrait space $r(t)$ $\times$ $\phi(t)-\delta(t)$ obtained as
projection of the surface which follows from the on-resonant constant of
motion $\mathcal{C}=\sin\left[ r(t)\right] \cos\left[ \phi(t)-\delta
(t)\right] $.
Fig. 4 Evolution of the geometric phase $\phi_{G}(t)$ against $\tau=g_{0}t$,
for on-resonant solutions of the characteristic equations (\ref{7}), with
$\Delta=0$ and $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=\widetilde{g}=0$.
Fig. 5 Evolution of the Bloch vectors coming from the on-resonant solutions of
Eqs.(\ref{7}) in the time interval $\tau=\pi$. The black and grey vectors
correspond to the initial conditions $\left( \pi\text{, }\pi/2\right) $ and
$\left( \pi/3\text{, }0\right) $, with $\Delta=0$ and $\widetilde{\omega
}_{a}=\widetilde{\omega}_{b}=\widetilde{g}=0$.
Fig. 6 Evolution of the geometric phase $\phi_{G}(t)$ against $\tau=g_{0}t$,
for on-resonant solutions of the characteristic equations (\ref{7}), with
$\Delta\neq0$ and $\widetilde{\omega}_{a}=\widetilde{\omega}_{b}=\widetilde
{g}=0$.
Fig. 7 Evolution of the Bloch vectors coming from the on-resonant solutions of
Eqs.(\ref{7}) in the time interval $\tau=\pi$. The black and grey vectors
correspond to the initial conditions $\left( \pi\text{, }\pi/2\right) $ and
$\left( \pi/3\text{, }0\right) $, with $\Delta\neq0$ and $\widetilde{\omega
}_{a}=\widetilde{\omega}_{b}=\widetilde{g}=0$.
Fig. 8 The portrait space $r(t)$ $\times$ $\phi(t)-\delta(t)$ obtained as
projection of the surface which follows from the off-resonant constant of
motion $\mathcal{C}=\eta\sin\left[ r(t)\right] \cos\left[ \phi
(t)-\delta(t)\right] -\cos\left[ r(t)\right] $.
Fig. 9 Evolution of geometric phase $\phi_{G}(t)$ against $\tau=g_{0}t$, for
off-resonant solutions of the characteristic equations (\ref{7}) and different
values of $\eta$, all starting from the point ($\pi/3$, $0)$, with
$\omega_{a0}=2\omega_{b0}=g_{0}/10$ and $\widetilde{\omega}_{a}=\widetilde
{\omega}_{b}=0$.
Fig. 10 Evolution of the Bloch vectors coming from the off-resonant solutions
of Eqs.(\ref{7}) in the time interval $\tau\simeq9\pi/10$. The black and grey
vectors, corresponding to $\eta=0.1$ and $\eta=2$, respectively, both start
from the common point $\left( \pi/3\text{, }0\right) $, with $\omega
_{a0}=2\omega_{b0}=g_{0}/10$ and $\widetilde{\omega}_{a}=\widetilde{\omega
}_{b}=0$.
Fig. 11 Evolution of geometric phase $\phi_{G}(\tau)$ against $\tau=g_{0}t$,
for on- and off-resonant solutions of the characteristic equations (\ref{7}),
considering the same initial conditions $(\pi/3$, $0)$ for all the curves.
\end{document} |
\begin{document}
\title{A new characterization of $q_{\omega}$-compact algebras}
\begin{abstract}
In this note, we give a new characterization for an algebra to be $q_{\omega}$-compact in terms of {\em super-product operations} on the lattice of congruences of the relative free algebra.
\end{abstract}
{\bf AMS Subject Classification} Primary 03C99, Secondary 08A99 and 14A99.\\
{\bf Keywords} algebraic structures; equations; algebraic set; radical ideal;
$q_{\omega}$-compactness; filter-power; geometric equivalence; relatively free algebra; quasi-identity; quasi-variety.
\section{Introduction}
In this article, our notations are the same as \cite{DMR1}, \cite{DMR2}, \cite{DMR3}, \cite{DMR4} and \cite{ModSH}. The reader should review these references for a complete account of the universal algebraic geometry. However, a brief review of fundamental notions will be given in the next section.
Let $\mathcal{L}$ be an algebraic language, $A$ be an algebra of type $\mathcal{L}$ and $S$ be a system of equation in the language $\mathcal{L}$. Recall that an equation $p\approx q$ is a logical consequence of $S$ with respect to $A$, if any solution of $S$ in $A$ is also a solution of $p\approx q$. The radical $\mathrm{Rad}_A(S)$ is the set of all logical consequences of $S$ with respect to $A$. This radical is clearly a congruence of the term algebra $T_{\mathcal{L}}(X)$ and in fact it is the largest subset of the term algebra which is equivalent to $S$ with respect to $A$. Generally, this logical system of equations with respect to $A$ does not obey the ordinary compactness of the first order logic. We say that an algebra $A$ is $q_{\omega}$-compact, if for any system $S$ and any consequence $p\approx q$, there exists a finite subset $S_0\subseteq S$ with the property that $p\approx q$ is a consequence of $S_0$ with respect to $A$. This property of being $q_{\omega}$-compact is equivalent to
$$
\mathrm{Rad}_A(S)=\bigcup_{S_0}\mathrm{Rad}_A(S_0),
$$
where $S_0$ varies in the set of all finite subsets of $S$. If we look at the map $\mathrm{Rad}_A$ as a closure operator on the lattice of systems of equations in the language $\mathcal{L}$, then we see that $A$ is $q_{\omega}$-compact if and only if $\mathrm{Rad}_A$ is algebraic. The class of $q_{\omega}$-compact algebras is very important and it contains many elements. For example, all equationally noetherian algebras belong to this class. In \cite{DMR3}, some equivalent conditions for $q_{\omega}$-compactness are given. Another equivalent condition is obtained in \cite{MR} in terms of {\em geometric equivalence}. It is proved that (the proof is implicit in \cite{MR}) an algebra $A$ is $q_{\omega}$-compact if and only if $A$ is geometrically equivalent to any of its filter-powers. We will discuss geometric equivalence in the next section. We will use this fact of \cite{MR} to obtain a new characterization of $q_{\omega}$-compact algebras. Although our main result will be formulated in an arbitrary variety of algebras, in this introduction, we give a simple description of this result for the case of the variety of all algebras of type $\mathcal{L}$.
Roughly speaking, a {\em super-product operation} is a map $C$ which takes a set $K$ of congruences of the term algebra and returns a new congruence $C(K)$ such that for all $\theta\in K$, we have $\theta\subseteq C(K)$. For an algebra $B$ define a map $T_B$ which takes a system $S$ of equations and returns
$$
T_B(S)=\{ \mathrm{Rad}_B(S_0):\ S_0\subseteq S,\ |S_0|<\infty\}.
$$
Suppose for all algebra $B$ we have $C\circ T_B\leq \mathrm{Rad}_B$. We prove that an algebra $A$ is $q_{\omega}$-compact if and only if $C\circ T_A=\mathrm{Rad}_A$.
\section{Main result}
Suppose $\mathcal{L}$ is an algebraic language. All algebras we are dealing with, are of type $\mathcal{L}$. Let $\mathbf{V}$ be a variety of algebras. For any $n\geq 1$, we denote the relative free algebra of $\mathbf{V}$, generated by the finite set $X=\{ x_1, \ldots, x_n\}$, by $F_{\mathbf{V}}(n)$. Clearly, we can assume that an arbitrary element $(p, q)\in F_{\mathbf{V}}(n)^2$ is an equation in the variety $\mathbf{V}$ and we can denote it by $p\approx q$. We introduce the following list of notations:\\
1- $\mathrm{P}(F_{\mathbf{V}}(n)^2)$ is the set of all systems of equations in the variety $\mathbf{V}$.\\
2- $\mathrm{Con}(F_{\mathbf{V}}(n))$ is the set of all congruences of $F_{\mathbf{V}}(n)$.\\
3- $\Sigma(\mathbf{V})=\bigcup_{n=1}^{\infty} P(F_{\mathbf{V}}(n)^2)$.\\
4- $\mathrm{Con}(\mathbf{V})= \bigcup_{n=1}^{\infty}\mathrm{Con}(F_{\mathbf{V}}(n))$.\\
5- $\mathrm{PCon}(\mathbf{V})= \bigcup_{n=1}^{\infty}\mathrm{P}(\mathrm{Con}(F_{\mathbf{V}}(n)))$.\\
6- $q_{\omega}(\mathbf{V})$ is the set of all $q_{\omega}$-compact elements of $\mathbf{V}$.\\
\newline
Note that, we have $\mathrm{Con}(\mathbf{V})\subseteq \Sigma(\mathbf{V})$. For any algebra $B\in \mathbf{V}$, the map $\mathrm{Rad}_B:\Sigma(\mathbf{V})\to \Sigma(\mathbf{V})$ is a closure operator and $B$ is $q_{\omega}$-compact, if and only if this operator is algebraic. Define a map
$$
T_B:\Sigma(\mathbf{V})\to \mathrm{PCon}(\mathbf{V})
$$
by
$$
T_B(S)=\{ \mathrm{Rad}_B(S_0):\ S_0\subseteq S,\ |S_0|<\infty\}.
$$
\begin{definition}
A map $C:\mathrm{PCon}(\mathbf{V})\to \mathrm{Con}(\mathbf{V})$ is called a super-product operation, if for any $K\in \mathrm{PCon}(\mathbf{V})$ and $\theta\in K$, we have $\theta\subseteq C(K)$.
\end{definition}
There are many examples of such operations; the ordinary product of normal subgroups in the varieties of groups is the simplest one. For another example, we can look at the map $C(K)=\mathrm{Rad}_B(\bigcup_{\theta\in K}\theta)$, for a given fixed $B\in \mathbf{V}$. We are now ready to present our main result.
\begin{theorem}
Let $C$ be a super-product operation such that for any $B\in\mathbf{V}$, we have $C\circ T_B\leq \mathrm{Rad}_B$. Then
$$
q_{\omega}(\mathbf{V})=\{ A\in \mathbf{V}:\ C\circ T_A=\mathrm{Rad}_A\}.
$$
\end{theorem}
To prove the theorem, we first give a proof for the following claim. Note that it is implicitly proved in \cite{MR} for the case of groups. \\
{\em An algebra is $q_{\omega}$-compact if and only if it is geometrically equivalent to any of its filter-powers.}\\
Let $A\in \mathbf{V}$ be a $q_{\omega}$-compact algebra and $I$ be a set of indices. Let $F\subseteq P(I)$ be a filter and $B=A^I/F$ be the corresponding filter-power. We know that the quasi-varieties generated by $A$ and $B$ are the same. So, these algebras have the same sets of quasi-identities. Now, suppose that $S_0$ is a finite system of equations and $p\approx q$ is another equation. Consider the following quasi-identity
$$
\forall \overline{x} (S_0(\overline{x})\to p(\overline{x})\approx q(\overline{x})).
$$
This quasi-identity is true in $A$, if and only if it is true in $B$. This shows that $\mathrm{Rad}_A(S_0)=\mathrm{Rad}_B(S_0)$.
Now, for an arbitrary system $S$, we have
\begin{eqnarray*}
\mathrm{Rad}_A(S)&=&\bigcup_{S_0}\mathrm{Rad}_A(S_0)\\
&=&\bigcup_{S_0}\mathrm{Rad}_B(S_0)\\
&\subseteq&\mathrm{Rad}_B(S).
\end{eqnarray*}
Note that in the above equalities, $S_0$ ranges in the set of finite subsets of $S$. Clearly, we have $\mathrm{Rad}_B(S)\subseteq \mathrm{Rad}_A(S)$, since $A\leq B$. This shows that $A$ and $B$ are geometrically equivalent. To prove the converse, we need to define some notions. Let $\mathfrak{X}$ be a prevariety, i.e. a class of algebras closed under product and subalgebra. For any $n\geq 1$, let $F_{\mathfrak{X}}(n)$ be the free element of $\mathfrak{X}$ generated by $n$ elements. Note that if $\mathbf{V}=var(\mathfrak{X})$, then $F_{\mathfrak{X}}(n)=F_{\mathbf{V}}(n)$. A congruence $R$ in $F_{\mathfrak{X}}(n)$ is called an $\mathfrak{X}$-radical, if $F_{\mathfrak{X}}(n)/R\in \mathfrak{X}$. For any $S\subseteq F_{\mathfrak{X}}(n)^2$, the least $\mathfrak{X}$-radical containing $S$ is denoted by $\mathrm{Rad}_{\mathfrak{X}}(S)$.
\begin{lemma}
For an algebra $A$ and any system $S$, we have
$$
\mathrm{Rad}_A(S)=\mathrm{Rad}_{pvar(A)}(S),
$$
where $pvar(A)$ is the prevariety generated by $A$.
\end{lemma}
\begin{proof}
Since $F_{\mathfrak{X}}(n)/\mathrm{Rad}_A(S)$ is a coordinate algebra over $A$, so it embeds in a direct power of $A$ and hence it is an element of $pvar(A)$. This shows that
$$
\mathrm{Rad}_{pvar(A)}(S)\subseteq \mathrm{Rad}_A(S).
$$
Now, suppose $(p,q)$ does not belong to $\mathrm{Rad}_{pvar(A)}(S)$. So, there exists $B\in pvar(A)$ and a homomorphism $\varphi:F_{\mathfrak{X}}(n)\to B$ such that $S\subseteq \ker \varphi$ and $\varphi(p)\neq \varphi(q)$. But, $B$ is separated by $A$, hence there is a homomorphism $\psi:B\to A$ such that
$\psi(\varphi(p))\neq \psi(\varphi(q))$. This shows that $(p, q)$ does not belong to $\ker (\psi\circ \varphi)$. Therefore, it is not in $\mathrm{Rad}_A(S)$.
\end{proof}
Note that, since $pvar(A)$ is not axiomatizable in general, so we can not give a deductive description of elements of $\mathrm{Rad}_A(S)$. But, for $\mathrm{Rad}_{var(A)}(S)$ and $\mathrm{Rad}_{qvar(A)}(S)$ this is possible, because the variety and quasi-variety generated by $A$ are axiomatizable. More precisely, we have:\\
1- Let $\mathrm{Id}(A)$ be the set of all identities of $A$. Then $\mathrm{Rad}_{var(A)}(S)$ is the set of all logical consequences of $S$ and $\mathrm{Id}(A)$.\\
2- Let $\mathrm{Q}(A)$ be the set of all quasi-identities of $A$. Then $\mathrm{Rad}_{qvar(A)}(S)$ is the set of all logical consequences of $S$ and $\mathrm{Q}(A)$.\\
We can now, prove the converse of the claim. Suppose $A$ is not $q_{\omega}$-compact. We show that
$$
pvar(A)_{\omega}\neq qvar(A)_{\omega}.
$$
Recall that for an arbitrary class $\mathfrak{X}$, the notation $\mathfrak{X}_{\omega}$ denotes the class of finitely generated elements of $\mathfrak{X}$. Suppose in contrary we have the equality
$$
pvar(A)_{\omega}= qvar(A)_{\omega}.
$$
Assume that $S$ is an arbitrary system and $(p, q)\in \mathrm{Rad}_A(S)$. Hence, the infinite quasi-identity
$$
\forall \overline{x}(S(\overline{x})\to p(\overline{x})\approx q(\overline{x}))
$$
is true in $A$. So, it is also true in $pvar(A)$. As a result, every element from $qvar(A)_{\omega}$ satisfies this infinite quasi-identity. Let $F_A(n)=F_{var(A)}(n)$. We have $F_A(n)\in qvar(A)_{\omega}$ and hence $\mathrm{Rad}_{qvar(A)}(S)$ depends only on $qvar(A)_{\omega}$. In other words, $(p, q)\in \mathrm{Rad}_{qvar(A)}(S)$, so $p\approx q$ is a logical consequence of the set of $S+\mathrm{Q}(A)$. By the compactness theorem of the first order logic, there exists a finite subset $S_0\subseteq S$ such that $p\approx q$ is a logical consequence of $S_0+\mathrm{Q}(A)$. This shows that $(p, q)\in\mathrm{Rad}_{qvar(A)}(S_0)$. But $\mathrm{Rad}_{qvar(A)}(S_0)\subseteq \mathrm{Rad}_A(S_0)$. Hence $(p, q)\in \mathrm{Rad}_A(S_0)$, violating our assumption of non-$q_{\omega}$-compactness of $A$. We now showed that
$$
pvar(A)_{\omega}\neq qvar(A)_{\omega}.
$$
By the algebraic characterizations of the classes $pvar(A)$ and $qvar(A)$, we have
$$
SP(A)_{\omega}\neq SPP_u(A)_{\omega},
$$
where $P_u$ is the ultra-product operation. This shows that there is an ultra-power $B$ of $A$ such that
$$
SP(A)_{\omega}\neq SP(B)_{\omega}.
$$
In other words the classes $pvar(A)_{\omega}$ and $pvar(B)_{\omega}$ are different. We claim that $A$ and $B$ are not geometrically equivalent. Suppose this is not the case. Let $A_1\in pvar(A)_{\omega}$. Then $A_1$ is a coordinate algebra over $A$, i.e. there is a system $S$ such that
$$
A_1=\frac{F_{\mathbf{V}}(n)}{\mathrm{Rad}_A(S)}.
$$
Since $\mathrm{Rad}_A(S)=\mathrm{Rad}_B(S)$, so
$$
A_1=\frac{F_{\mathbf{V}}(n)}{\mathrm{Rad}_B(S)},
$$
and hence $A_1$ is a coordinate algebra over $B$. This argument shows that
$$
pvar(A)_{\omega}=pvar(B)_{\omega},
$$
which is a contradiction. Therefore $A$ and $B$ are not geometrically equivalent and this completes the proof of the claim. We can now complete the proof of the theorem. Assume that $C\circ T_A=\mathrm{Rad}_A$. We show that $A$ is geometrically equivalent to any of its filter-powers. So, let $B=A^I/F$ be a filter-power of $A$. Note that we already proved that for a finite system $S_0$, the radicals $\mathrm{Rad}_A(S_0)$ and $\mathrm{Rad}_B(S_0)$ are the same. Suppose that $S$ is an arbitrary system of equations. We have
\begin{eqnarray*}
\mathrm{Rad}_A(S)&=&C(T_A(S))\\
&=&C(\{ \mathrm{Rad}_A(S_0):\ S_0\subseteq S, |S_0|<\infty\})\\
&=&C(\{ \mathrm{Rad}_B(S_0):\ S_0\subseteq S, |S_0|<\infty\})\\
&\subseteq& \mathrm{Rad}_B(S).
\end{eqnarray*}
So we have $\mathrm{Rad}_A(S)=\mathrm{Rad}_B(S)$ and hence $A$ and $B$ are geometrically equivalent. This shows that $A$ is $q_{\omega}$-compact. Conversely, let $A$ be $q_{\omega}$-compact. For any system $S$, we have
\begin{eqnarray*}
\mathrm{Rad}_A(S)&=&\bigcup_{S_0}\mathrm{Rad}_A(S_0)\\
&=&\bigvee\{ \mathrm{Rad}_A(S_0): S_0\subseteq S, |S_0|<\infty\}\\
&=&\bigvee T_A(S),
\end{eqnarray*}
where $\bigvee$ denotes the least upper bound. By our assumption, $C(T_A(S))\subseteq \mathrm{Rad}_A(S)$, so $C(T_A(S))\subseteq \bigvee T_A(S)$. On the other hand, for any finite $S_0\subseteq S$, we have $\mathrm{Rad}_A(S_0)\subseteq C(T_A(S))$. This shows that
$$
C(T_A(S))=\bigvee T_A(S),
$$
and hence $C\circ T_A=\mathrm{Rad}_A$. The proof is now completed.
\end{document}
\begin{document}
\title{Decreasing flow uncertainty in Bayesian inverse problems through Lagrangian drifter control}
\newcommand{\slugmaster}{
\slugger{juq}{xxxx}{xx}{x}{x--x}}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\footnotetext[2]{Institute for Computational Engineering and Sciences, University of Texas at Austin, TX, USA}
\footnotetext[3]{Mathematics Department, University of North Carolina at Chapel Hill, NC, USA}
\renewcommand{\thefootnote}{\arabic{footnote}}
\begin{abstract}
Commonplace in oceanography is the collection of ocean drifter positions.
Ocean drifters are devices that sit on the surface of the ocean and move
with the flow, transmitting their position via GPS to stations on land.
Using drifter data, it is possible to obtain a posterior on the underlying
flow. This problem, however, is highly underdetermined. Through controlling
an ocean drifter, we attempt to improve our knowledge of the underlying
flow. We do this by instructing the drifter to explore parts of the flow
currently uncharted, thereby obtaining fresh observations. The efficacy of
a control is determined by its effect on the variance of the posterior
distribution. A smaller variance is interpreted as a better understanding
of the flow. We show a systematic reduction in variance can be achieved by
utilising controls that allow the drifter to navigate new or `interesting'
flow structures, a good example of which are eddies.
\end{abstract}
\pagestyle{myheadings}
\thispagestyle{plain}
\markboth{D. McDougall and C.~K.~R.~T. Jones}{Decreasing flow uncertainty}
\section{Introduction}
\label{sec:intro}
The context of the problem we address in this paper is that of reconstructing a
flow field from Lagrangian observations. This is an identical twin experiment
in which a true flow field is unknown but from which Lagrangian type
observations are extracted. It is assumed that little is known about the
functional form of the flow field except that it is barotropic, incompressible and
either steady or with simple known time dependence. Note that, since it is
incompressible and two-dimensional (barotropic), the field is given by a stream
function $\psi (x,y,t)$. The objective is then to reconstruct an estimate of
this stream function from Lagrangian observations, along with an associated
uncertainty.
The question addressed is whether the uncertainty of the reconstruction can be
reduced by strategic observations using Lagrangian type instruments. The
measuring devices are assumed to be controllable and their position can be
registered at appropriate time intervals. Since the control is known, being
prescribed by the operator, it is reasonable to believe that information can be
garnered from the position observations. The issue is whether we can improve
the information content in these observations by controlling the instruments to
move into specific flow regimes.
Estimating ocean flows has a long history. First, a comparison of model
forecast errors in a barotropic open ocean model can be found in
\cite{Robinson1981}, with emphasis on how forecasts are sensitive to boundary
information. Application of the Kalman filter with Lagrangian observations can
be seen as early as 1982 \cite{Barbieri1982, Miller1986, Parrish1985,
Carter1989}. For a variational least-squares approach to eddy estimation, the
reader is directed to \cite{Robinson1985}. A standard mathematical framework
for incorporating Lagrangian observations appeared in 2003
\cite{Kuznetsov2003}. Finally, \cite{Robel2011} exposes a novel approach to
ocean current observations involving the treatment of sea turtles as Lagrangian
observers. A good overview of some operational ocean apparatus can be found in
\cite{Rudnick2004}.
The underlying philosophy of our approach is that mesoscale ocean flow fields
are dominated by coherent features, such as jets and eddies. If the instrument
can be controlled to move into and through these structures then the
information gained should be richer in terms of capturing the key properties of
the flow field.
Of course, there is some circularity inherent in this approach; we want to get
the key features of the flow field but need to know them in order to control
the vehicle toward them. We first take a ``proof of concept'' approach and see
if we follow a simple strategy that we happen to know takes into another eddy,
as opposed to one that doesn't, then the uncertainty is reduced. We then
postulate a good way of developing a control based purely on local information.
The idea is to use the known local information of the flow field, from
reconstructing the flow using observations up to a certain point in time, to
form a control that takes the instrument away from the eddy it is currently
stuck in.
To obtain the flow reconstruction, one needs to solve a Bayesian inverse
problem \cite{Kalnay2002}. There are numerous ways to solve Bayesian inverse
problems, with the core methods being Kalman filtering and smoothing
\cite{Kalman1960,Kalman,Sorenson1960,Evensen2006,Houtekamer1998, Anderson2003};
variational methods \cite{Lorenc2000,Bengtsson1975,Lewis1985,
Lorenc1986,LeDimet1986,Talagrand1987,Courtier1994,Lawless2005,Lawless2005a};
particle filtering \cite{Doucet2001,VanLeeuwen2010}; and sampling methods
\cite{Cotter2012,Cotter2009,Cotter2010,Lee2011,Apte2008a,Apte2007,Apte2008,
Herbei2009,Kaipio2000,Mosegaard1995,Roberts1997,Roberts1998,Roberts2001,
Beskos2009,Metropolis1953,Hastings1970,Atchade2005,Atchade2006}. The resulting
solution to a Bayesian inverse problem is a probability distribution, called
the posterior distribution, over some quantity of interest from which one can
compute estimates with associated uncertainties. Bayesian inverse problems
enable well-informed predictions.
The paper is organised in the following manner. The second section sets up the
Bayesian inverse problem and specifies all the assumptions in the prior and
likelihood distributions. The third section applies two flow-independent
(na\"ive) controls, a zonal (East-West) control, and a bidirectional
(North-Easterly) control. These are applied to both the perturbed and
unperturbed flows. We measure performance of the addition of each by looking
at the posterior variance on the velocity field and show two main results.
When the fluid flow drifter is trapped in a recirculation regime, the magnitude
of the control is the main player in pushing the drifter out of the eddy. We
show that, for the unperturbed flow, when the control magnitude is large enough
a significant reduction in the posterior variance is achieved. In the
perturbed flow, we show robustness of the posterior variance with respect to
the perturbation parameter. More specifically, its structure as a function of
control magnitude is carried over from the time-independent flow model.
Moreover, we observe an additional, and separate, decrease in posterior
variance as a function of control magnitude corresponding to the purely
time-dependent part of the flow. The fourth section examines the use of an a
posteriori control, a control calculated using information from a previous
Bayesian inversion done with no control present. Here the control magnitude
corresponds geometrically to the distance between the drifter and a hyperbolic
fixed point of an eddy transport barrier in the flow. As the control magnitude
increases, the drifter gets closer to a hyperbolic fixed point of the drifter
evolution equation and, for the unperturbed flow, a substantial decrease in
posterior variance is observed. Hyperbolic fixed points of the drifter
equations join transport barriers in the flow and act as a boundary to
observations. Observing near these points outweighs the negative effects
produced by polluting the observations with a large control size relative to
the size of the flow. This gives a novel geometric correspondence between the
control utilised here and the structure of the posterior variance as a function
of control magnitude. The fifth section concludes the paper.
\section{Setup}
\label{sec:setup}
We begin by prescribing the stream function of the flow field the drifters will
move in. We will call this flow field the `truth' and later we try to
reconstruct it from noisy observations. The truth flow we will use is an
explicit solution to the barotropic vorticity equations
\cite{Pierrehumbert1991},
\begin{equation*}
\psi(x, y, t) = -cy + A \sin(2 \pi k x) \sin(2 \pi y) +
\varepsilon \psi_1(x, y, t),
\label{eqn:strfn}
\end{equation*}
on the two dimensional torus $(x, y) \in \mathbb{T}^2$, where the perturbation
we will use is given by
\begin{equation*}
\psi_1(x, y, t) = \sin (2 \pi x - \pi t) \sin (4 \pi y).
\end{equation*}
The corresponding flow equation is as follows
\begin{equation}
\diffp{\mathbf{v}}{t} = \varepsilon \partial_t \nabla^{\perp} \psi_1,
\quad t > 0
\label{eqn:model}
\end{equation}
We will explore two cases. The first case is when $\varepsilon = 0$ and the
underlying flow is steady. The second case is when $\varepsilon \neq 0$ and
the time-dependent perturbation smears the underlying flow in the
$x$-direction. Drifters placed in the flow $\mathbf{v}$ will obey
\begin{equation*}
\diff{\mathbf{x}}{t} = \mathbf{v}(\mathbf{x}, t) + \mathbf{f}(\mathbf{x}, t).
\label{eqn:control}
\end{equation*}
The function $\mathbf{f}$ is called the \textit{control}, the choice of which
requires explicit diction. We consider two cases of control: a)
flow-independent; and b) a posteriori. Flow-independent controls, are controls
that do not systematically utilise information regarding the underlying flow,
$\mathbf{v}$. A posteriori controls harness information from a previous
Bayesian update. Our soup-to-nuts methodology for assessing the efficacy for
each case of control is as follows. First, drifter dynamics are obtained by
solving
\begin{align}
\diff{\mathbf{x}}{t} &= \mathbf{v}(\mathbf{x}, t),
\quad 0 < t \leq t_{K/2} \\
\diff{\mathbf{x}}{t} &= \mathbf{v}(\mathbf{x}, t) + \mathbf{f}(\mathbf{x}),
\quad t_{K/2} < t \leq t_K,
\label{eqn:fullmodel}
\end{align}
where $\mathbf{v}$ solves \eqref{eqn:model}. Then observations of the drifter
locations $\mathbf{x}$ are collected into an observation vector for both the
controlled and uncontrolled parts
\begin{align}
\mathbf{y}^1_{k} &= \mathbf{x}(t_k) + \eta_k,
\quad \eta_k \sim \mathcal{N}(0, \sigma^2 I_2),
\quad k = 1, \ldots, K, \notag \\
\leadsto \, \mathbf{y} &= \mathcal{G}(\mathbf{v}_0) + \eta,
\quad \eta \sim \mathcal{N}(0, \sigma^2 I_{2K}),
\label{eqn:forward}
\end{align}
where $\mathbf{v}_0$ is the initial condition of the model \eqref{eqn:model}.
The map $\mathcal{G}$ is called the \textit{forward operator} and maps the
object we wish to infer to the space in which observations are taken.
Flow-independent controls $\mathbf{f}$ are independent of $\mathbf{y}^1$. We
will utilise two such controls: a time-independent zonal control
$\mathbf{f}(\zeta, 0)$; and a time-independent bi-directional control
$\mathbf{f}(\zeta, \zeta)$. The a posteriori control we execute is one that
forces drifter paths to be non-transverse to streamlines of the underlying
flow. Namely, $\mathbf{f}(\mathbf{x}) = - \zeta \nabla \mathbb{E}(\psi_0 |
\mathbf{y}^1)$, where $\psi_0(\mathbf{x}) = \psi(\mathbf{x}, 0)$. Our aim is to
understand the effect of the control magnitude $\zeta$ and the resulting
drifter path on the posterior distribution over the initial condition of the
model $\mathbb{P}(\mathbf{v}_0 | \mathbf{y}^1, \mathbf{y}^2)$.
Encompassing our beliefs about how the initial condition, $\mathbf{v}_0$,
should look into a prior probability measure, $\mu_0$, it is possible to
express the posterior distribution in terms of the prior and the data using
Bayes's theorem. Bayes's theorem posed in an infinite dimensional space says
that the posterior probability measure on $\mathbf{v}_0$, $\mu^{\mathbf{y}}$,
is absolutely continuous with respect to the prior probability measure
\cite{Stuart2010}. Furthermore, the Radon-Nikodym derivative between them is
given by the likelihood distribution of the data,
\begin{equation*}
\diff{\mu^{\mathbf{y}}}{\mu_0}(\mathbf{v}_0) =
\frac{1}{Z(\mathbf{y})} \exp \left( - \frac{1}{2 \sigma^2} \|
\mathcal{G}(\mathbf{v}_0) - \mathbf{y} \|^2 \right),
\end{equation*}
where the operator $\mathcal{G}$ is exactly the forward operator as described
in \eqref{eqn:forward} and $Z(\mathbf{y})$ is a normalising constant. We
utilise a Gaussian prior measure on the flow initial condition, $\mu_0 \sim
\mathcal{N}(0, (- \Delta)^{-\alpha})$. For our purposes, we choose $\alpha = 3$
so that draws from the prior are almost surely in the Sobolev space
$H^1(\mathbb{T}^2)$ \cite{Stuart2010,Bogachev1998}. The posterior is a high
dimensional non-Gaussian distribution requiring careful probing by use of a
suitable numerical method. The reader is referred to \cite{Stuart2010} for a
full and detailed treatment of Bayesian inverse problems on function spaces.
To solve the above Bayesian inverse problem, we use a Markov chain Monte Carlo
(MCMC) method. MCMC methods are a class of computational techniques for
drawing samples from an unknown target distribution. Throughout this paper, we
have chosen to sample the posterior using a random walk Metropolis-Hastings
method on function space \cite{Cotter2011,Cotter2012,Bogachev1998}. Using this
approach, one can draw samples from the posterior distribution, obtaining its
shape exactly. This is of use when the posterior distribution is not a
Gaussian and cannot be uniquely determined by its first and second moments.
The application of MCMC methods to solve Bayesian inverse problems is
widespread. For examples of their use, see
\cite{Cotter2009,Cotter2010,Lee2011,Apte2008a,
Apte2007,Apte2008,Herbei2008,Herbei2009,McKeague2005,Michalak2003,Kaipio2000,
Kaipio2007,Mosegaard1995}. The theory above is all done in an infinite
dimensional setting. Numerically and operationally, a finite dimensional
approximation is made. In the case of the Karhunen-Lo\`eve expansion this
approximation is done by truncation. A choice must be made in where to
truncate, and this choice coincides with a modelling assumption---there are no
frequencies of order larger than the truncation wavenumber. If it is feasible
that solutions to the inverse problem do in fact admit higher-order
frequencies, it is necessary to rethink this assumption. Throughout this paper
the data and initial conditions are known and the truncation is chosen to be
much larger than necessary to mitigate the effects of poor modelling
assumptions. Practically, the true solution to one's problem is unknown. In
this scenario, care and diligence are necessary in choosing appropriate prior
assumptions.
\section{Results: flow-independent control}
\label{sec:results-crude}
Flow-independent controls concern the influence of an ocean drifter without
using knowledge of the underlying flow. They are constructed in such a way as
to be independent of $\mathbf{y}^1$.
\subsection{Zonal control}
\label{sec:res-hor}
Figure~\ref{fig:ind-xd-norms} shows the variance of the horizontal component of
the flow as a function of control magnitude in the max norm, the $L^1$ norm and
$L^2$ norm. The horizontal axis denotes the strength of the control. The
vertical black dotted line corresponds to a critical value for the magnitude.
Values of $\zeta$ less than this correspond to controls not strong enough to
force the drifter out of the eddy. Conversely, values bigger correspond to
controls that push the drifter out of the eddy.
Experiments were done for $\zeta = 0, 0.25, 0.5, \ldots, 2.75, 3$. The case
$\zeta = 1.75$ was the first experiment in which we observed the drifter
leaving the recirculation regime. The black line shows the maximum value of the
variance over the domain $[0, 1] \times [0, 0.5]$. The magenta line and cyan
line show the $L^2$ norm and $L^1$ norm, respectively. The minimum value of the
variance is small enough to be difficult to see on the plot but remains
consistently small, so it has been omitted for clarity reasons. There are some
notable points to make here. Firstly, above the critical value (where the
drifter leaves the eddy) we see that the size of the variance decreases in all
of our chosen norms. We have learned more about the flow around the truth by
forcing the drifter to cross a transport boundary and enter a new flow regime.
Secondly, below the critical region (where the drifter does not leave the eddy)
we see an initial increase in the size of the variance. There are many factors
at play here. We will try to shed some light on them.
\begin{figure}
\caption{Posterior variance as a function of control magnitude, $\zeta$, for
\subref{fig:ind-xd-norms}
\label{fig:ind-xd-norms}
\label{fig:dep-xd-norms}
\label{fig:xd-norms}
\end{figure}
\begin{figure}
\caption{Horizontal component of the posterior variance for the case $\zeta =
0$. The black area in the lower left corresponds neatly with the region in
which observations are taken.}
\label{fig:varu-eg}
\end{figure}
For small $\zeta$, the controlled and uncontrolled paths along which we take
observations are close. Their closeness and the size of $\sigma^2$ creates a
delicate interplay between whether they are statistically indistinguishable or
not. If they are indistinguishable up to two or three standard deviations, this
could explain the increase and then decrease of the variance below the critical
value. Secondly, as $\zeta$ increases initially, the controlled path gets
pushed down near the elliptic stagnation point of the flow (see
figure~\ref{fig:indep-xd-true}). If this region is an area where the flow is
smaller in magnitude than the flow along the uncontrolled path, this is
equivalent to an increase in the magnitude of the control relative to the
underlying flow. This leads to the observations becoming polluted by $f$.
\begin{figure}
\caption{True glider path (black) for some positive $\zeta$ less than the
critical value. Blue lines are streamlines of the true flow. Red crosses
are zeros of the flow: fixed points of the passive glider equation.}
\label{fig:indep-xd-true}
\end{figure}
Exploring this further, we compute the mean magnitude of the flow along the
controlled path of the drifter. More formally, we solve \eqref{eqn:fullmodel}
to obtain a set of points $\{ z_k = z(t_k) \}_{k=1}^{K}$. Then we compute the
mean flow magnitude as follows
\begin{equation}
\langle v \rangle = \frac{2}{K} \sum_{k=K/2+1}^{K} v(z_k).
\label{eqn:mean-flow}
\end{equation}
This quantity is computed for each fixed $\zeta$, and the result is plotted in
figure~\ref{fig:xd-rel}. The mean flow magnitude is given by the magenta line
in this figure and the black dotted line depicts the flow magnitude. Notice the
first three values of $\zeta$, over which the mean flow magnitude decreases. This
is equivalent to an increase in the magnitude of the control relative to the
magnitude of the underlying flow and so the information gain from taking
observations here decreases. This corresponds nicely with the first three
values of $\zeta$ in figure~\ref{fig:ind-xd-norms} that show an increase in
variance. Notice also that for the other values of $\zeta$ the mean flow
magnitude shows a mostly increasing trend, consistent with a decrease in the
posterior variance.
\begin{figure}
\caption{Mean magnitude of the flow along the control path (purple) against
the size of the control (black dashed line). When the gradient of the flow
magnitude is large compared with that of the control magnitude, the posterior
variance is small.}
\label{fig:xd-rel}
\end{figure}
Note that the region below the critical value corresponds to control magnitudes
that are too small to push the glider out of the eddy \textit{in the
unperturbed case} $\varepsilon = 0$. The region above the critical value
corresponds to values of $\zeta$ for which the glider leaves the eddy, this is
also in the unperturbed case. Experiments were done for $\zeta = 0, 0.25, 0.5,
\ldots, 2.75, 3$. In the case $\varepsilon = 0$, the value $\zeta = 1.75$ was
the first experiment in which we observed the glider leaving the recirculation
regime. The black line shows the maximum value of the variance over the domain
$[0, 1] \times [0, 0.5]$. The magenta line and cyan line show the $L^2$ norm
and $L^1$ norm, respectively. There are some notable points to make. Firstly,
below the critical magnitude (where the glider does not leave the eddy in the
unperturbed case) we see a sizeable reduction of posterior variance in the
$\max$ norm as the critical magnitude is approached. To establish a connection
in uncertainty quantification between the time independent and time-periodic
case is of great scientific interest and that connection has been made evident
here. Note that as $\zeta$ increases and progresses further into the region
above the critical magnitude, the posterior variance repeats the
increasing/decreasing structure induced by the eddy that we observed in the
region below the critical control magnitude. The new effects introduced into
this region are purely from the time-dependent nature of the moving eddy. The
reason for their presence is much the same as in the time-independent case;
observations trapped within an eddy regime.
We have learned more about the flow around the truth by forcing the glider into
the meandering jet flow regime. The benefits of such a control occur at exactly
the same place as in the time-independent case; as the drifter leaves the eddy
in the unperturbed flow. However, extra care is required when the flow is
time-dependent and the eddy moves. One cannot simply apply the same control
techniques as is evidenced by the extra bump in variance in the region above
the critical magnitude. Of particular use would be extra eddy-tracking
information to construct an a posteriori control to keep the variance small.
\subsection{Bi-directional control}
\label{sec:res-dia}
Now we provide the analogue of figure~\ref{fig:ind-xd-norms} for the bi-directional
forcing function. This is shown in figure~\ref{fig:ind-xyd-norms}. We see
similar behaviour for the variance of the posterior distribution again. Below
the critical magnitude, the values of $\zeta$ for which the drifter is not
forced hard enough to leave the recirculation regime, we see an initial
increase in the size of the posterior variance. Then we observe a decrease in
posterior variance as $\zeta$ approaches a value large enough to push the
drifter out of the eddy regime, the region above the critical value.
\begin{figure}
\caption{Posterior variance as a function of control magnitude, $\zeta$, for
\subref{fig:ind-xd-norms}
\label{fig:ind-xyd-norms}
\label{fig:dep-xyd-norms}
\label{fig:xyd-norms}
\end{figure}
To explain the initial increase in the posterior variance below the critical
magnitude, we calculate the mean flow magnitude just as in
\eqref{eqn:mean-flow}. This is shown in figure~\ref{fig:xyd-rel}. We see an
initial period where the mean flow along the controlled path remains almost
constant. As a consequence, the magnitude of the forcing increases relative to
the magnitude of the flow. This pollutes the observations and leads to an
increased posterior variance just as we have observed in the previous section.
We also see the opposite effect; the big jump in flow magnitude at $\zeta =
0.5$ (and consequently when the drifter escapes the gyre) is attested as the
cause of the decrease in posterior variance as we enter the region above the
critical flow magnitude of figure~\ref{fig:ind-xyd-norms}.
\begin{figure}
\caption{Mean magnitude of the flow along the control path (purple) against
the size of the control (black dashed line). When the gradient of the flow
magnitude is large compared with that of the control magnitude, the posterior
variance is small.}
\label{fig:xyd-rel}
\end{figure}
The cases of forcing explored thus far are $f(z) = (\zeta, 0)^{\top}$ and $f(z)
= (\zeta, \zeta)^{\top}$. The main results are summarised by referring to
figure~\ref{fig:ind-xd-norms} and figure~\ref{fig:ind-xyd-norms}. In these two
cases, we see strikingly similar structure of the posterior variance as a
function of control magnitude. The initial increase in posterior variance
within the eddy; decreasing posterior variance as the flow path of the drifter
approaches the transport boundary and small posterior variance (compared to the
case $\zeta = 0$) once a new flow regime is being observed. Compare the values
of $\zeta$ for which this behaviour occurs. Notice that the values of $\zeta$ in
figure~\ref{fig:ind-xd-norms} are about three times larger than those in
figure~\ref{fig:ind-xyd-norms}. One factor at play here is the relative
magnitude of the controls in each case. For $\zeta = 1$, the control has
magnitude $1$ in the zonal case, and magnitude $\sqrt{2}$ in the
bi-directional case. Even scaling the results in the bi-directional case by
$\sqrt{2}$, notice that the value of $\zeta$ for which the drifter first leaves
the eddy, is $\zeta = \frac{\sqrt{2}}{2}$ and this is still smaller than $\zeta
= 1.5$ for the $x$-directional case. The final factor affecting the scaling is
the dynamics of the system after the forcing has been applied. Controlling in
only the horizontal direction will require a larger magnitude force to push the
drifter out of the eddy than when forcing in both the $x$ and $y$ directions
simultaneously.
An analogue for figure~\ref{fig:dep-xd-norms} for the new forcing function is
shown in figure~\ref{fig:dep-xyd-norms}. We see similar behaviour for the
variance of the posterior distribution. Again, the region below the critical
magnitude corresponds to values of $\zeta$ that are not big enough to push the
drifter out of the recirculation regime in the \textit{unperturbed} case. Just
as in figure~\ref{fig:dep-xd-norms}, we see the unperturbed eddy affecting the
variance of the posterior distribution on the flow in the classic `bump'
fashion. We observe a reduction in posterior variance as $\zeta$ approaches a
value large enough to push the glider out of the eddy regime (in the case
$\varepsilon = 0$). In the region below the critical magnitude, the
time-dependent flow effects take over and push the variance up. Again, a
connection of uncertainty quantification is made between the time-independent
case and the case where the flow is perturbed by a time-periodic disturbance,
this connection lies entirely within the region below the critical control
magnitude.
\section{Results: a posteriori control}
\label{sec:results-post}
In section~\ref{sec:results-crude} we concluded that crossing a transport
boundary and entering a new flow regime has the desirable effect of reducing
the posterior variance. Crossing into new flow regimes with a stationary flow
can be translated to travelling transversely against the streamlines of the
underlying flow. For the recirculation regime located in the bottom-left area,
particles in the fluid will move in a clockwise fashion. The gradient of the
stream function will therefore point in towards the fixed point at $z =
\left( 1/4, 1/6 \right)$. The negative gradient of the stream
function points towards the fixed point at $z = \left( 3/4, 1/3
\right)$. Therefore, to escape the recirculation regime we choose,
\begin{equation}
f(z) = - \zeta \ensuremath\nabla_z (\mathbb{E}(\psi | y^1)),
\label{eqn:gradmeanctrl}
\end{equation}
for the controlled drifter model, where $\psi$ is the stream function of the
flow $v$. The rationale behind this choice is that, if the posterior mean
stream function is a good estimator of the flow, the drifter will be forced
transversely with the stream lines and escape the recirculation regime and
allow us to make observations in a new flow regime.
Figure~\ref{fig:ind-grd-norms} depicts the variance of the horizontal component
as the strength of the control, $\zeta$, is varied. Note that we do not see the
same behaviour as we do for the two na\"ive controls chosen in
section~\ref{sec:results-crude}. We see a large band of values of $\zeta$ for
which the posterior variance oscillates, leading to a lack of information gain
in the knowledge of the flow. From about $\zeta = 0.5$ to $\zeta = 0.55$, we
see a structurally significant reduction in posterior variance where we have a
sustained gain in information about the underlying flow field. This is
attributed to a drifter path that explores an `interesting' part of the flow
where a lot of information can be obtained from observations. To explore the
geometric correspondence between the variance reduction for $\zeta = 0.5$ to
$\zeta = 0.55$, we show figure~\ref{fig:ind-grd-true}. This figure
presents the true path of the drifter for $\zeta = 0.3, \ldots, 0.55$. The
light pink path corresponds to a value of $\zeta = 0.3$ and the purple path
corresponds to $\zeta = 0.55$. Notice that as $\zeta$ increases, the true path
forms a kink and forms a trajectory close to the zero of the flow at $(x, y) =
(7/12, 1/2)$. Just as we have seen in section~\ref{sec:results-crude}, we
observe a transient period in the posterior variance until we utilise a control
for which the true path explores new aspects of the flow compared with other
`nearby' controls. Interestingly, also note that we observe this reduction in
variance despite the true path navigating near a zero of the flow, where we
also satisfy the fact that the size of the control is large in comparison to the
flow. In this case, a logical conclusion here would be that the information
gain from observing near an interesting flow structure heavily outweighs the
information loss in polluting the observations with such a control. The cost of
polluting the observed data can be seen by computing the most structurally
significant reduction in the posterior variance and comparing this with
figure~\ref{fig:ind-xyd-norms}, for example. By `most structurally
significant' we loosely mean the most dramatic reduction that leads to the most
benefit in knowledge of the underlying flow. In this example, this occurs
between $\zeta = 0.52$ and $\zeta = 0.55$, where it is approximately $3 \times
10^{-5}$. In the case of the bi-directional control, where the relative size of
the flow \textit{increases} for the values of $\zeta$ that give a reduction in
variance, it occurs between $\zeta = 0.25$
and $\zeta = 0.625$ where it is approximately $1.5 \times 10^{-4}$. This is
about an order of magnitude bigger, crystallising the tradeoff between
polluting the observed data versus exploring `interesting' parts of the flow.
If the posterior mean is a good estimator of the underlying flow, utilising a
control of this nature is beneficial if the drifter navigates close to a
hyperbolic fixed point of the passive drifter model equation.
\begin{figure}
\caption{Posterior variance as a function of control magnitude, $\zeta$, for
the a posteriori control in the case of: \subref{fig:ind-xd-norms}
\label{fig:ind-grd-norms}
\label{fig:dep-grd-norms}
\label{fig:grd-norms}
\end{figure}
The first thing to note is that we do not see the same behaviour as we do for
the two na\"ive controls chosen in section~\ref{sec:results-crude}. Nor do we
see similar structures when compared with figure~\ref{fig:ind-grd-norms}. For
each value of $\zeta$, it is the case that the true path navigates to the
time-dependent eddy surrounding the zero of the flow at the point $(x, y) =
\left( 3/4, 1/3 \right)$. The second thing to note is that for all of these
values of control magnitude, the smaller values tend to do better than the
larger ones.
The variance is lower in the cases $\zeta = 0.21$ and $\zeta = 0.27$ because
the true path is navigating towards one of the hyperbolic fixed points of the
eddy. A novel connection is established between the behaviour of these two
controls in both the time-independent case and the time-periodic case.
\begin{figure}
\caption{Mean magnitude of the flow along the control path (purple) against
the size of the control (black dashed line). Though the gradient of the flow
magnitude is small compared with that of the control magnitude, the posterior
variance decreases because the net gain in flow knowledge by observing
near a saddle point outweighs the net loss by the control polluting
the observations.}
\label{fig:ind-grd-rel}
\end{figure}
\begin{figure}
\caption{The true drifter paths for each value of $\zeta$ for the experiments
shown in figure \ref{fig:ind-grd-norms}
\label{fig:ind-grd-true}
\end{figure}
\section{Conclusion}
\label{sec:conc}
To summarise, we have measured the performance of two na\"ive control methods,
and one a posteriori control method, both in a time-independent and
time-dependent flow. We have done so by observing their influence on the
posterior variance in the mean flow direction. Section~\ref{sec:results-crude}
addresses the na\"ive controls and section~\ref{sec:results-post} the a
posteriori control. Each control is designed to push ocean drifters into
uncharted flow regimes. The three cases of control we employ here are a purely
zonal control; a control of equal magnitude in both the $x$ and $y$ directions;
and the gradient of the posterior mean constructed using a posteriori
information from a previous Bayesian update. In the time-independent flow, we
show a sizeable reduction of the posterior variance in the mean flow direction
for these three cases of control. We also see that on comparing the posterior
variance for the zonal and bi-directional controls, similar structures arise
when viewed as a function of control magnitude, which dictates when the drifter
leaves the eddy and is the main influence on the posterior information. In the
case of the a posteriori control in the time-independent flow, the drifter
leaves the eddy for all the values of control magnitude we have chosen. Here
we observe the variance reduction occurring when the true drifter path
approaches a hyperbolic fixed point on the transport barrier of the eddy in the
upper-right of the domain. This is evidence that oceanic transport barriers
heavily influence posterior information and sets up a novel geometric
correspondence between the flow structure and the posterior variance. Using
the na\"ive controls in the time-dependent flow, we show \textit{robustness} of
posterior variance as a function of the perturbation parameter. When the
control magnitude is such that the drifter leaves the eddy in the
\textit{unperturbed} flow, we see reduction in the posterior variance on the
initial condition for the time-periodic flow. When employing a time-dependent
a posteriori control, we see no overall net gain in posterior variance over the
uncontrolled case. For our particular flow and drifter initial condition, it
is the case that the uncontrolled drifter path explores a hyperbolic fixed
point of an eddy in the time-dependent flow more effectively than the
controlled path. This reiterates the efficacy of control strategies and their
influence on the path along which observations are made.
There are a number of ways in which this work could be generalised in order to
obtain a deeper understanding of the effects controlled ocean drifters have on
flow uncertainty. For example, (i) the study of non-periodic model dynamics;
(ii) the use of information from the posterior \textit{variance}; (iii) more
elaborate control strategies. Many other generalisations are also possible.
Non-periodic models are more dynamically consistent with regards to their
approximation of larger ocean models. We have seen the application of
posterior knowledge in the construction of a control, though only through use
of the mean. The variance of the underlying flow could be used in a similar
fashion, perhaps to control ocean drifters towards an area of large variance.
This could have a similar effect on the posterior distribution as the method of
controlling a drifter into a new, unexplored flow regime. Moreover, controls
could be constructed to better reflect reality. Ocean gliders have a limited
amount of battery power. Utilising this knowledge in designing a mission plan
to optimise a glider's lifespan certainly has its practical applications.
Controls that minimise the pollution of the observed data are also desirable.
Throughout this paper, we have only used information from one previous Bayesian
update. Constructing and executing a posteriori control strategies is a
paradigm well suited to that of a Kalman or particle filter; updating the
control every time an analysis step is performed. This is left for future
discussion.
\end{document}
\begin{document}
\title{Systoles of Hyperbolic 4-manifolds}
\author{Ian Agol}
\thanks{Agol partially supported by NSF grant DMS-0504975 and the Guggenheim Foundation}
\email{[email protected]}
\address{MSCS UIC 322 SEO, m/c 249\\
851 S. Morgan St.\\
Chicago, IL 60607-7045}
\date{May 18, 2006}
\begin{abstract}
We prove that for any $\epsilon>0$, there exists a closed hyperbolic 4-manifold with a closed geodesic of length $< \epsilon$.
\end{abstract}
\subjclass{Primary 30F40; Secondary 57M}
\maketitle
\section{Introduction}
It has been known for a long time that closed hyperbolic surfaces
and 3-manifolds may have arbitrarily short geodesics.
This follows in 2-dimensions by an explicit construction,
and holds in 3 dimensions by Thurston's hyperbolic
Dehn surgery theorem \cite[Thm. 5.8.2]{Th}. In this note, we prove the
existence of closed hyperbolic 4-manifolds which have
arbitrarily short geodesics. It is conjectured that there
is a uniform lower bound on the length of a systole in
arithmetic hyperbolic manifolds. This would follow from
Lehmer's conjecture \cite[\textsection10]{Gelander04}. Examples
of non-arithmetic hyperbolic manifolds in higher dimensions
come from the method of ``inter-breeding'' introduced
by Gromov and Piatetski-Shapiro \cite{GromovPS88}, by
taking hyperbolic manifolds with geodesic boundary produced using
arithmetic methods, and gluing them to obtain a non-arithmetic
manifold. Our result makes use of a variation on their method, which might
best be described as ``inbreeding''. The method
would extend to all dimensions if a conjecture about
the fundamental groups of arithmetic hyperbolic manifolds were known.
We speculate on this conjecture in the last section.
\section{Subgroup separability}
Let $G$ be an infinite group, and $A<G$ a subgroup. We say that $A$ is
\emph{separable} in $G$ if $A$ is the intersection of all finite index subgroups
of $G$ which contain $A$, that is
\begin{equation}
A= \underset{A\leq B\leq G, [G:B]<\infty}{\cap} B.
\end{equation}
We say that a discrete group $\Gamma<\mbox{Isom}(\mathbb{H}^{n})$ is \emph{GFERF} (short for \emph{Geometrically
Finite Extended Residually Finite}) if every geometrically finite subgroup $A<\Gamma$
is separable in $\Gamma$. More generally, a group $G$ is \emph{LERF} if every
finitely generated subgroup $A<G$ is separable in $G$. If $\Gamma$ is LERF,
then since geometrically finite subgroups are finitely generated, this would
imply that $\Gamma$ is GFERF. Unfortunately, the converse is not necessarily
true, since there may be finitely generated subgroups of $\Gamma$ which are
not geometrically finite.
Scott showed that $A< \Gamma$ is separable if and only if for any
compact subset $C\subset \mathbb{H}^{4}/A$, there exists $\Gamma_{1}< \Gamma$, $[\Gamma:\Gamma_{1}]< \infty$, and
$C \hookrightarrow \mathbb{H}^{4}/\Gamma_{1}$ embeds under the covering map.
We will be using a fact due to Scott \cite{S}
that the group generated by reflections in the right-angled 120-cell in $\mathbb{H}^{4}$
is GFERF; for a proof see \cite{ALR}.
\section{Systoles}
\begin{theorem} \label{systole}
There exist closed hyperbolic 4-manifolds with arbitrarily short geodesics.
\end{theorem}
\begin{proof}
The examples will come from cutting and pasting certain covers of a 4-dimensional Coxeter orbifold.
The right-angled 120-cell $D\subset \mathbb{H}^{4}$ is the fundamental domain for a reflection group.
Let $\Gamma_{D}$ be the group generated by reflections
in the faces of $D$. Let $\mathcal{O}$ be the ring of integers in $\mathbb{Q}(\sqrt{5})$.
Then $\Gamma_{D}$ is commensurable with $\mathrm{PO}(f;\mathcal{O})$, where $f$ is the 5-dimensional quadratic
form $\langle 1,1,1,1,-\phi\rangle$, where $\phi=\frac{1+\sqrt{5}}{2}$ \cite[Lemma 3.3]{ALR}. Let $P \subset \mathbb{H}^{4}$ be
a 3-dimensional geodesic subspace, such that $H=\mbox{Isom}(P)\cap \Gamma_{D}$ is a cocompact subgroup
of $\mbox{Isom}(P)$ (where we embed $\mbox{Isom}(P)<\mbox{Isom}(\mathbb{H}^{4})$ in the
natural fashion). If we identify $\mathbb{H}^{4}$ with a component of the hyperboloid $f(x)=-1$, then we
find such a $P$ by letting $P=\mathbb{H}^{4}\cap v^{\perp}$ (with respect to the inner product defined
by $f$) where $v \in \mathbb{Q}(\sqrt{5})^{5}$, $f(v)>0$. Let $\Gamma$ be a finite index torsion-free subgroup of $\Gamma_{D}$, which exists by
Selberg's lemma.
Now, $Comm(\Gamma) > \mathrm{PO}(f, \mathbb{Q}(\sqrt{5}))$, so $Comm(\Gamma)$ is dense in $\mathrm{PO}(f,\mathbb{R})=\mbox{Isom}(\mathbb{H}^{4})$.
Thus, for any $\epsilon>0$, we may find $\gamma \in Comm(\Gamma)$ such that $\gamma(P)\cap P = \emptyset$, and $d(P,\gamma(P)) < \frac{\epsilon}{2}$.
The plane $\gamma(P)$ is stabilized by $(\gamma H \gamma^{-1})\cap \Gamma$, which is cocompact in
$\mbox{Isom}(\gamma(P))$, since $ [\Gamma: (\gamma \Gamma \gamma^{-1} )\cap \Gamma]<\infty$. Thus, $H_{\gamma}=\mbox{Isom}(\gamma(P)) \cap \Gamma$ is cocompact
in $\mbox{Isom}(\gamma(P))$, since $(\gamma H \gamma^{-1})\cap\Gamma < H_{\gamma}$.
Let $g\subset \mathbb{H}^{4}$ be a geodesic segment perpendicular to $P$ and $\gamma(P)$, and with endpoints
$p_{1}=g\cap P$, and $p_{2}= g\cap \gamma(P)$. Let $\rho: \mathbb{R}\to \mathbb{R}$ be the function $\rho(x)= \operatorname{arctanh} \operatorname{sech} x$.
Using residual finiteness, choose $H_{1}< H$ such that $d(p_{1},h(p_{1})) > 2\rho(l(g)/2)$, for all $h \in H_{1}-\{1\}$.
Similarly, choose $H_{2}< H_{\gamma}$ such that $d(p_{2}, h(p_{2}))> 2\rho(l(g)/2)$, for all $h\in H_{2}-\{1\}$.
Let $\Sigma_{1}= P/H_{1}$, and $\Sigma_{2}=\gamma(P)/H_{2}$.
Claim: $G=\langle H_{1}, H_{2}\rangle \cong H_{1} \ast H_{2}$. Moreover, $\mathbb{H}^{4}/G$ is geometrically
finite.
To prove the claim, let $E_{i}\subset \mathbb{H}^{4}$ be a Dirichlet domain about $p_{i}$ with respect to the group $H_{i}$.
Let $L$ be the 3-plane which is
the perpendicular bisector of $g$. Let $pr_{1}: \mathbb{H}^{4} \to P$, and $pr_{2}: \mathbb{H}^{4} \to \gamma(P)$.
Then $pr_{i}(L)$ is a disk about $p_{i}$ of radius $\rho(l(g)/2)$ in
$P$ or $\gamma(P)$ (see Figure \ref{projection}).
\begin{figure}[htb]
\begin{center}
\psfrag{a}{$\rho(l/2)$}
\psfrag{b}{$l/2$}
\psfrag{c}{$g$}
\psfrag{d}{$p_{1}$}
\psfrag{e}{$p_{2}$}
\psfrag{f}{$P$}
\psfrag{g}{$\gamma(P)$}
\psfrag{h}{$L$}
\psfrag{i}{$E_{1}$}
\psfrag{j}{$E_{2}$}
\epsfig{file=projection,width=\textwidth,height=.5\textwidth}
\caption{\label{projection} Combining fundamental domains}
\end{center}
\end{figure}
Since $E_{i}$ is a Dirichlet domain, it must contain $pr_{i}(L)$, and
therefore $E_{i}$ contains $L$. Thus, $\partial E_{1} \cap \partial E_{2}=\emptyset$ since they are separated by the hyperplane $L$. Thus, $E_{1}\cap E_{2}$
will be a finite-sided fundamental domain for $G$, and thus $G$ is geometrically finite (see \cite{Bowditch93} for
various equivalent notions of geometric finiteness).
Topologically, $\mathbb{H}^{4}/G = (\mathbb{H}^{4}/H_{1}) \#_{L} (\mathbb{H}^{4}/H_{2})$, so $ G \cong H_{1}\ast H_{2}$.
Let $U= \Sigma_{1} \cup_{p_{1}} g \cup_{p_{2}} \Sigma_{2}$. Then $U$ is an embedded compact spine of $\mathbb{H}^{4}/G$.
Now, we use the fact from theorem 3.1 \cite{ALR}, that $G$ is a separable subgroup of $\Gamma$.
By Scott's separability criterion, we see that we
may embed $U$ in $\mathbb{H}^{4}/\Gamma_{1}$, for some finite index subgroup $\Gamma_{1}<\Gamma$. Thus, we have $\Sigma_{1}\cup \Sigma_{2} \subset \mathbb{H}^{4}/\Gamma_{1}$.
Let $N=(\mathbb{H}^{4}/\Gamma_{1} )\backslash (\Sigma_{1} \cup \Sigma_{2})$, and let $M=DN$, the double of $N$
along its boundary. Since $g\subset N$ is a geodesic arc orthogonal to $\partial N$, we have the double
of $g$ $D(g) \subset M$ is a closed geodesic in $M$ of length $<\epsilon$.
\end{proof}
\section{Conclusion}
\begin{conjecture}
There exist closed hyperbolic $n$-manifolds with arbitrarily short geodesics.
\end{conjecture}
Hyperbolic lattices that are subgroup separable on geometrically finite subgroups are
called \emph{GFERF}, short for \emph{Geometrically Finite Extended Residually Finite}.
This conjecture would follow from the following conjecture, by the same proof as the main theorem.
\begin{conjecture}
There exist compact arithmetic hyperbolic $n$-manifolds which are defined by a quadratic form, and
which are GFERF.
\end{conjecture}
Unfortunately, there does not exist a compact right-angled polyhedron in $\mathbb{H}^{n}$, $n\geq 5$, so
the strategy of proof in \cite{ALR, S} will not work in general.
By the remark after \cite[Lemma 3.4]{ALR}, we know that $\mathrm{O}(8,1;\mathbb{Z})$ is GFERF.
The above conjecture would hold if we knew $\mathrm{O}(n,1;\mathbb{Z})$ were GFERF for all $n$, since one
may embed (up to finite index) any cocompact arithmetic lattice defined by a quadratic form into $\mathrm{O}(n,1;\mathbb{Z})$
for some $n$ by the restriction of scalars and stabilization. The following
theorem is proven the same as Theorem \ref{systole}:
\begin{theorem}
There exist finite volume hyperbolic $n$-manifolds with arbitrarily short geodesics for $n\leq 8$.
\end{theorem}
At most finitely many of the manifolds produced using Theorem \ref{systole} will be
arithmetic. This follows because the groups will lie in $\mathrm{O}(f, \mathbb{Q}(\sqrt{5}))$.
The integral real eigenvalues of the matrices in $\mathrm{O}(f,\mathbb{Q}(\sqrt{5}))$ will
be bounded away from 1, since they have an integral minimal polynomial of degree
at most 10. Thus, the length of a geodesic of an arithmetic subgroup of $\mathrm{O}(f,\mathbb{Q}(\sqrt{5}))$
will be bounded away from 0, which implies that at most finitely many examples
from Theorem \ref{systole} may be arithmetic. This method for proving the existence
of non-arithmetic uniform lattices is slightly different than the method of \cite{GromovPS88}, since
instead of breeding subgroups of incommensurable arithmetic lattices, it breeds a subgroup
of an arithmetic lattice with itself. It's possible that this ``inbreeding'' method could produce
non-arithmetic lattices in any dimension.
\bibliographystyle{hamsplain}
\bibliography{4Dhyperbolicsystole}
\end{document}
\begin{document}
\date{}
\title{Robust measurements of n-point correlation functions of driven-dissipative quantum systems on a digital quantum computer}
\author{Lorenzo Del Re}
\affiliation{Department of Physics, Georgetown University, 37th and O Sts., NW, Washington,
DC 20057, USA}
\affiliation{Max Planck Institute for Solid State Research, D-70569 Stuttgart, Germany}
\author{Brian Rost}
\affiliation{Department of Physics, Georgetown University, 37th and O Sts., NW, Washington,
DC 20057, USA}
\author{Michael Foss-Feig}
\affiliation{Quantinuum, 303 S. Technology Ct, Broomfield, Colorado 80021, USA}
\author{A. F. Kemper}
\affiliation{Department of Physics, North Carolina State University, Raleigh, North Carolina 27695, USA}
\author{J. K. Freericks}
\affiliation{Department of Physics, Georgetown University, 37th and O Sts., NW, Washington,
DC 20057, USA}
\date{\today}
\pacs{}
\begin{abstract}
We propose and demonstrate a unified hierarchical method to measure $n$-point correlation functions that can be applied to driven, dissipative, or otherwise non-equilibrium systems. In this method, the time evolution of the system is repeatedly interrupted by interacting an ancillary qubit with the system through a controlled operation, and measuring the ancilla immediately afterwards. We discuss robustness of this method versus ancilla-enabled interferometric techniques (such as the Hadamard test), and implement the method on a quantum computer in order to measure single-particle Green's functions of a driven-dissipative fermionic system. This work shows that dynamical correlation functions for driven-dissipative systems can be measured with near-term quantum computers.
\end{abstract}
\maketitle
\emph{Introduction}.---
The characterization of quantum many-body systems still poses great theoretical challenges in a variety of disciplines.
The calculation of dynamical correlation functions in particular is of the utmost importance, and reveals the spectral properties of complex models: In the case of the condensed matter models, the single-particle Green's function takes into account how electrons propagate in the lattice,
while two-particle Green's functions (susceptibilities) contain information about the fluctuations of collective modes in the different physical channels (i.e. charge, spin, pairing), and three-point correlation functions describe how fermions interact with such collective modes.
The evaluation of such quantities can get quite cumbersome, especially in the presence of strong correlation and when the entanglement increases rapidly during the system dynamics,
as for example in the case of the two-dimensional Hubbard model that, despite the recent progress of numerical methods \cite{LeBlanc2015,Schaefer2021}, still lacks a complete understanding.
For this reason, quantum simulations \cite{Somma2002} represent a powerful resource for the computation of dynamical correlation functions of many-body systems.
Most protocols that have been put forward for the measurement of Green's functions \cite{Wecker2015,Keen_2020,steckmann2021simulating} are based on the Hadamard test \cite{Somma2002}, where a controlled qubit is initialized in the $\left|+\right\rangle = \frac{1}{\sqrt{2}}(\left|0\right\rangle+\left|1\right\rangle)$ state via a Hadamard gate (H)\cite{nielsen_chuang_2010}, and then entangled with the system qubits via a controlled unitary. Subsequently, after the system is evolved in time, another controlled unitary is applied. Finally, the information of the correlation function is extracted by performing measurements on the control qubit. This approach has been successfully used to obtain correlation functions and Green's functions on quantum hardware\cite{kreula2016few,kreula2016non,chiesa2019quantum,francis2020quantum,Jaderberg2020,sun2021quantum,steckmann2021simulating}.
It has been shown that the same procedure can be used to extract quantum work statistics out of systems driven out of equilibrium \cite{Dorner2013} and for measuring OTOCs \cite{Swingle2016,mi2021}.
While many studies were focused on extracting response functions of closed systems, less attention has been drawn on the evaluation of unequal-time correlation functions of open-quantum systems where the interaction between the system and a large environment can lead to quantitative and qualitative differences in the system dynamics. Lately, progress has been achieved in the construction of efficient quantum algorithms capable of addressing time-evolution of such dissipative-systems either exploiting the hardware intrinsic decoherence \cite{tseng2000,rost2020,Sommer2021}, or by implementing Kraus maps and Lindblad operators \cite{Barreiro2011,childs2016,cleve2016,delre2020,tornow2020,hu2020,rost2021,hu2021,PSchlimgen2021,Kamakari2022}, or non-Hermitian dynamics \cite{Hubisz2021,zheng2021}, and this paves the way to the next step that would be the evaluation of dynamical correlation functions.
\par In this Letter, we first show that the Hadamard test methodology is still suitable to the case of a many-body driven-dissipative system.
Such a protocol is advantageous because the measurement of a single qubit
gives information about the correlation function of a many-body system with an arbitrary number of degrees of freedom. However, the coherent entanglement between the system and the ancilla must be preserved for the entire system dynamics, which is not normally possible without fault-tolerance.
Hence, to overcome this problem, we propose a unified ancilla-based strategy to measure generic $n$-point correlation functions that does not require keeping the system and ancilla qubit in an entangled state.
Our method is a single strategy capable of measuring arbitrary unequal-time correlation functions between multi-qubit Pauli operators, and which works for both dissipative and unitary time evolution. As such, it subsumes and unifies the approaches of Ref. \cite{Knap2013} (unequal-time commutators) and Ref. \cite{Uhrich2017} (unequal-time anticommutators).
It is hierarchical in the sense that extracting the information of an $n$-th order correlation function requires previous knowledge of lower order correlation functions; but, it is robust, because it does not require system-ancilla entanglement to be maintained during the time evolution of the system. We verify the validity of our method by performing measurements of the single-particle Green's function of a driven-dissipative fermionic model using a Quantinuum quantum computer. Our results show excellent quantitative agreement between data and the theoretical predictions.
\par
\emph{Target quantities.}--- Our goal is the calculation of correlation functions of a generic system (S) that can also dissipate energy through an interaction with a bath (E); so we employ the density matrix formalism, which is required to study open quantum systems \cite{Breuer2002}.
\par The correlation functions
are constructed as follows.
Let $\left\{O_i\right\}$ be a set of operators in the Schr\"{o}dinger representation acting on the system Hilbert space with $i = 1,2,\dots,n$, and let $\{t_i\}$ be a set of ordered time values such that $t_0<t_1<t_2< \dots <t_{n-1}<t_n$, where $t_0$ is the initial time, then we define the $n$-th rank correlation function via
\begin{align}\label{eq:corr}
&\left<O_{n}(t_{n})O_{{n-1}}(t_{{n-1}})... O_{1}(t_{1})\right>\nonumber\\
&~~~~~=\mbox{Tr}_\text{S}\left\{O_n\mathcal{V}_{t_n,t_{n-1}}... O_2\mathcal{V}_{t_2,t_1}O_1\mathcal{V}_{t_1,t_0}\rho(t_0)\right\}.
\end{align}
Here, $O_i(t_i)$ is the operator $O_i$ in the Heisenberg representation,
$\rho(t_0)$ is the system density matrix evaluated at the initial time, $\mathcal{V}_{t_{i+1},t_i}$ is the time evolution super-operator that evolves the system from time $t_i$ to $t_{i+1}$ (i.e. $\rho(t_{i+1}) = \mathcal{V}_{t_{i+1,t_i}}\rho(t_i)$ acting from left to right), and $\mbox{Tr}_\text{S}$ indicates a trace over the system subspace (meaning that the degrees of freedom of the bath have already been integrated out).
For simplicity, and without loss of generality, we assume that the operators $O_i$ are unitary and Hermitian operators; addressing this case is sufficient to demonstrate the validity of our method, because a non-unitary operator can always be expanded as a linear combination of unitaries, chosen to also be Hermitian (e.g. Pauli strings).
As will be shown in the next section, the correlation function in Eq.~(\ref{eq:corr}) can be extracted from the Hadamard test.
\par Instead, the alternative robust strategy that we propose will naturally yield correlation functions of nested commutators and anti-commutators of the form
\begin{align}\label{eq:corr_comm}
\!\! \Big\langle[O_1(t_1),\!\big[O_2(t_2),\dots\,\!\big[O_{n-1}(t_{n-1}),O_n(t_n)\big]_{\pm}\!\,\cdots\!\,\big]_{\pm}\big]_{\pm}\Big\rangle,
\end{align}
where $[.\,,.]_{\pm}$ can be either commutators (-) or anti-commutators (+), all chosen independently. The quantity displayed in Eq.~(\ref{eq:corr}) could be obtained from the one in Eq.~(\ref{eq:corr_comm}) and vice versa by performing multiple measurements and then combining the different outcomes together. The correlation function in Eq.~(\ref{eq:corr_comm}) is not the most general $n$-point correlation function, because it always maintains Keldysh time ordering on a two branch Keldysh contour, as opposed to the most general form, which requires higher-order Keldysh contours~\cite{Tsuji2017}; for concrete examples, see the supplemental information \cite{supp}. We note that in the case of two-point functions, Eq.~(\ref{eq:corr}) corresponds to lesser or greater Green's functions while Eq.~(\ref{eq:corr_comm}) to advanced, retarded, and Keldysh Green's functions \cite{stefanucci2013}, so both methods produce all the physical Green's functions needed to describe a time evolving quantum system. In general, one cannot directly calculate out-of-time-ordered correlation functions with the circuit in Fig.(\ref{fig:alt_sch}) and we leave possible generalizations of this method to future work.
\\
\par
\emph{Hadamard test for driven-dissipative systems.}---
\begin{figure}
\caption{The standard interferometric scheme for measuring the $n$-time correlation function~\protect{\cite{Somma2002}
\label{fig:Rams}
\end{figure}
\begin{figure*}
\caption{Circuit to measure a generic $n$-time correlation function of the kind defined in Eq.(\ref{eq:corr_comm}
\label{fig:alt_sch}
\end{figure*}
In Fig.~\ref{fig:Rams}, we show how the interferometry scheme proposed in Ref.~\onlinecite{Somma2002} generalizes to compute the $n$-time correlator defined in Eq.~(\ref{eq:corr}) for an open-quantum system. In order to simulate dissipative dynamics, we need a generic $k$-qubit ancilla register (called A$_2$) that we take to be initialized into the state $\left|\boldsymbol{0}\right\rangle = \ket{0}^{\otimes k}$.
A suitable unitary operation $\mathcal{U}_K^{t,t^\prime}$ that entangles A$_2$ with the system register S followed by tracing out (ignoring) the state of the ancilla register can encode
the time evolution map $\mathcal{V}_{t,t^\prime}$, which can be rewritten using the Kraus sum representation:
\begin{equation}\label{eq:Kraus}
\mathcal{V}_{t,t^\prime} \,\rho(t^\prime) = \sum_{i=0}^{2^k-1}K^{\,}_i(t,t^\prime)\rho(t^\prime)K^\dag_{i}(t,t^\prime),
\end{equation}
where $K_i$ are the so called Kraus operators satisfying the sum rule $\sum_i K^\dag_i(t,t^\prime)K^{\,}_i(t,t^\prime) = \mathbbm{I}$. They are related to the unitary evolution of the system and ancilla bank as follows: $K_i(t,t^\prime) = \left\langle i\right|\mathcal{U}_K^{t,t^\prime}\left|\boldsymbol{0}\right\rangle$, with $\{\left|i\right\rangle\} $ being a complete basis for A$_2$. In the interferometry scheme, we need an extra single-qubit ancilla register A$_1$ in which all the information about the correlation function (which is a complex number) will be stored. For example, in the case of $n = 2$, the final quantum state of the A$_1$ qubit reads:
\begin{align}\label{eq:final_rams}
\rho_{A_1} &=\left\{
\begin{array}{c}
\frac{1}{2} \hat{\mathbbm{I}}_{A_1} + \frac{1}{2}\text{Re}[C]\,\hat{\sigma}^z_{A_1} -\frac{1}{2}\text{Im}[C] \hat{\sigma}^y_{A_1}\,, \text{if } \alpha = 0,
\\ \\
\frac{1}{2} \hat{\mathbbm{I}}_{A_1} - \frac{1}{2}\text{Im}[C]\,\hat{\sigma}^z_{A_1} -\frac{1}{2}\text{Re}[C] \hat{\sigma}^y_{A_1}\,, \text{if } \alpha = 1,
\end{array}
\right.
\end{align}
where $C = \left<O_2(t_2)O_1(t_1)\right>$.
Measuring the ancilla in the $Z$ and $Y$ bases determines the real and imaginary parts of the correlation function.
\par This method is convenient because the complex information encoded in the correlation functions of a many-body system is found from single-qubit measurements. However, this scheme requires maintaining the coherence of the A$_1$ ancilla (and thereby its entanglement with the system) for the full duration $t_n-t_1$. In the next section, we introduce an alternative robust scheme that does not require maintaining coherence of the $A_1$ ancilla, but at the cost of requiring a more complex measurement scheme.
\\
\par
\emph{Robust strategy.}---
In Fig.~\ref{fig:alt_sch}, we show the alternative circuit to measure the correlation function defined in Eq.~(\ref{eq:corr}). This circuit is schematic, because it encodes all possible circuits that are employed to measure the set of correlation functions in Eq.~(\ref{eq:corr_comm}). Here, each realization has chosen unitary operations acting on A$_1$ (selected from $(S)^{\alpha_i}H $, where $S$ and $H$ are the phase gate and the Hadamard gate, respectively, and $\alpha_i \in \{0,1\}$ is a binary variable)
for each time $t_i$ measured in the correlation function.
It will turn out that the circuit shown in Fig.~\ref{fig:alt_sch} naturally measures the set of correlation functions defined in Eq.~(\ref{eq:corr_comm}) with the commutator or anti-commutator chosen from the $n-1$ dimensional binary vector $\boldsymbol{\alpha}= \{\alpha_1,\alpha_2,...,\alpha_{n-1}\}$.
It is important to note that after the $S^{\alpha_i}H$ operation is performed, the ancilla qubit A$_1$ is measured immediately afterwards and the measurement outcome that we denote by $m_i$ is stored; such a measurement destroys the entanglement between A$_1$ and the state encoded in the system and the A$_2$ ancilla bank.
The state is then evolved to the next $t_i$ using the Kraus map defined in Eq.~(\ref{eq:Kraus}). The $A_1$ ancilla is then reset to its $\ket{+}$ state and the process is repeated for each operator in the correlation function. In the last step, after the final time evolution from $t_{n-1}$ to $t_n$, the $S$ register qubits will be in a final state $\rho_n$ and the operator O$_n$ is measured directly on the $S$ register qubits, yielding results that depend on $\boldsymbol{\alpha}$. The correlation function is determined by classical post-processing of the accumulated results and the choice of $\boldsymbol{\alpha}$.
\par In general, the state of the system qubits at time $t_{j+1}$ is obtained from the state at $t_{j}$ through the following map:
\begin{align}\label{eq:two_point}
&\rho_{j+1} \propto \mathcal{V}_{t_{j+1},t_{j}}\left(\rho_j+O_j\rho_jO_j+[(-1)^{m_j}i^{\alpha_j}O_j\rho_j+\text{h.c.}]\right),
\end{align}
where the proportionality constant is given by tracing the RHS of the equation. Here, $m_j = \{0,1\}$ is the result of the A$_1$ qubit measurement, and $\rho_{j = 1} = \rho(t_1)$ is given by the initial state of the system at time $t_1$ (see Fig.~\ref{fig:alt_sch}).
\par In order to show how this method works in practice, we discuss the two simplest cases: i.e. the two-point and the three-point correlation functions.
For $n = 2$,
the result of measuring $O_2$ directly on the system register will yield
\begin{align}\label{eq:two_point_av}
&\text{Tr}\, O_2\,\rho_2 =\mathcal{N}\left\{ \left<O_2(t_2)\right> + \left<O_2(t_2)\right>_{O_1} \right. \nonumber \\
&+\left.{(-1)^{m_1}}\left[i^{\alpha_1}\left<O_{2}(t_2)O_1(t_1)\right> -i^{-\alpha_1}\left<O_{1}(t_1)O_2(t_2)\right>\right]\right\}, \nonumber \\
\end{align}
where $\mathcal{N} = \{2 +\big[ (-1)^{m_1}i^{\alpha_1}\left<O_1(t_1)\right> + \text{h.c.}\big]\}^{-1}$, $\left<O_1(t_1)O_2(t_2)\right> = \text{Tr}\left(O_2\,\mathcal{V}_{t_2,t_1}[ \rho(t_1)O_1]\right)$, and $\left<O_2(t_2)\right>_{O_1} = \text{Tr}\left(O_2\,\mathcal{V}_{t_2,t_1}[O_1\rho(t_1)O_1]\right)$.
Hence, when $\alpha_1 =0, 1$ the term in square brackets in Eq.~(\ref{eq:two_point_av}) is proportional to $\left<[O_1(t_1),O_2(t_2)]_\mp\right>$. This is precisely Eq.~(\ref{eq:corr_comm}) when $n = 2$.
\par For $n = 3$, measuring $O_3^{\,}$ results in the following quantity:
\begin{eqnarray}\label{eq:three_point}
\text{Tr}\,O_3\,\rho_3 = (-1)^{m_1+m_2}\,C^{\boldsymbol{\alpha}}_{t_1,t_2,t_3} + R_{\boldsymbol{\alpha}},
\end{eqnarray}
where $C^{\boldsymbol{\alpha}}_{t_1,t_2,t_3}$ is a three-time correlation function that depends on the values of $\boldsymbol{\alpha}$. There are four possible values $\left<[O_1^{\,}(t_1),[O_2^{\,}(t_2),O_3^{\,}(t_3)]_\pm]_{\pm}\right>$. In addition, there are contributions denoted by $R_{\boldsymbol{\alpha}}$, which is a remainder function. It is determined by performing additional measurements comparable to what is needed for lower-rank correlation functions (see the supplemental information \cite{supp} for details).
\par
We note that in the case of single-qubit \cite{Knap2013,Uhrich2017,Schuckert2020} and two-qubit \cite{mitrarai2019} correlators, there are alternative ways of measuring correlation functions that do not require the extra ancillary register A$_1$.
\par
\emph{Hardware implementation.}--- In order to verify the validity of the protocol, we applied it to measure the Green's function of spinless
free fermions in a lattice driven by a constant electric field that also dissipate energy through a coupling with a thermal bath.
The Hamiltonian of this chosen system plus bath can be brought into a block-diagonal form after performing a Fourier transform to momentum
space as described in Ref.~\cite{delre2020}. Hence, the system's
reduced density matrix
factorizes
as a tensor product in momentum space, i.e., $\bigotimes_k\rho_k$
, and we can define a (diagonal in $k$) master
equation for each 2$\times$ 2 $k$-dependent density matrix $\rho_k$,
\begin{equation}\label{eq:me_lin}
\partial_t \rho_k = -i[\mathcal{H}_k(t),\rho_k]+\sum_{\ell=\{1,2\}}L_{\ell}^{\,} \,\rho_k \,L_\ell^{\dag} - \{\rho_k,L^\dag_\ell L^{\,}_\ell\},
\end{equation}
where the Lindblad operators are $L_1 = \sqrt{\Gamma\,n_F(\epsilon_k(t))}d_k$ and $L_2 = \sqrt{\Gamma\,n_F(-\epsilon_k(t))}d^\dag_k$, with $d_k^{\,}$ being the destruction operator of a lattice fermion with quasi-momentum $k$, $\epsilon_k(t) = - 2J \cos\left(k+\Omega\,t\right)$, with $J$ being the hopping amplitude, $\Omega$ the amplitude of the applied DC field, and $k$ the crystalline momentum. $\Gamma$ sets the strength of the system-environment coupling and $n_F(x) = \left[1 + \exp(\beta\, x)\right]^{-1}$ is the Fermi-Dirac distribution with $\beta$ being the inverse of the bath temperature.
\begin{figure}
\caption{Circuit implementing the trotterised time evolution $\mathcal{U}
\label{fig:trott_dyn}
\end{figure}
In Fig.~\ref{fig:trott_dyn}, we show the circuit implementing $\mathcal{U}_K$ for the Kraus map related to Eq.~(\ref{eq:me_lin}).
The Lindblad operator $L_{1(2)}$ encodes the physical process of a Bloch electron (hole) with momentum $k$ to hop from the lattice to the bath with a probability given by $\Gamma n_F[-\epsilon_k(t)]$ $(\Gamma n_F[\epsilon_k(t)])$. Such a decay process introduces a time dependence of the momentum distribution function of fermions and a damping of Bloch oscillations that eventually leads to a non-zero average of the DC-current \cite{supp,han2013,delre2020,rost2021}.
\begin{figure}
\caption{ Imaginary (upper panel) and real (lower panel) parts of the retarded fermion Green's function as a function of time (parameters are wavevector $ka=-0.5$, dimensionless electric field is $\Omega=1$, the dissipation rate to the fermionic bath is $\Gamma=1/16$ and the bath temperature is 0.01 in units of the hopping). Circles represent data from a Quantinuum model-H1 quantum computer, with error bars representing 2$\sigma$ confidence intervals. The primary source of error in the implemented circuits is due to noise on the two-qubit gates [$(2-3)\times 10^{-3}
\label{fig:QC_res}
\end{figure}
\par
In Fig.~\ref{fig:QC_res}, we show the retarded fermion Green's function $G_k^{(R)}(t,t^\prime) = -i\theta(t-t^\prime)\left<[d^{\,}_k(t),d^{\dag}_k(t^\prime)]_+\right>$ measured on Quantinuum's model H1 quantum computer \footnote{Short-time data (prior to $t=20$) was taken on the Honeywell model H0 quantum computer, while later time data were obtained on an updated Quantinuum model H1 computer.}.
The retarded Green's function of the model can be computed exactly and its derivation and analytical form are given in the supplemental materials \cite{supp}.
\par
We notice an excellent quantitative agreement between the data produced by the quantum computer and the expected curves in the presence of noise. It is worthwhile to note that in the presence of a driving field, the Green's function does not oscillate as a simple sinusoidal function and it presents extra features, such as the additional maxima and minima occurring between time 10 and time 19 [see Fig.~\ref{fig:QC_res}], that are faithfully reproduced by the quantum computer data.
\par
\emph{Outlook.}--- We have put forward a robust technique for the measurement of multi-point correlation functions of driven-dissipative quantum systems that could be applied in the realm of quantum simulations of complex models such as the Hubbard model.
We compared our strategy to the Ramsey interferometry scheme (generalized
to the case of dissipative systems): while the latter requires us to keep the ancilla and system qubits in an entangled state for the entire time evolution of the system, the former does not. Such an advantage comes at the cost of performing extra measurements and also requires additional circuits of lower depth than the one needed to extract the target quantity. Furthermore, our method naturally computes correlators of the form given in Eq.~(\ref{eq:corr_comm}).
We applied our method to measuring the Green's function of free fermions driven out of equilibrium and interacting with a bath. The data obtained from the quantum computer are in an excellent agreement with the curves predicted by the theory. While this data constitutes an important proof of principle enabling the measurement of correlation functions on near-term quantum computers, further work needs to be done to use this approach to solve new problems in science.
\par Interestingly, given its generality the Hadamard test has applications other than the measurement of correlation functions; for example, it has been proposed for determining important overlaps in the realm of variational quantum dynamics simulations \cite{Yuan2019,Yao2021} and also for the simulation of open quantum systems using quantum imaginary-time evolution \cite{Kamakari2022}. We therefore expect our robust alternative strategy to the Hadamard test to be suitable for these other applications as well.
\emph{Acknowledgments.}--- We acknowledge financial support
from the U.S. Department
of Energy, Office of Science, Basic Energy Sciences, Division of Materials Sciences and Engineering under Grant No.
DE-SC0019469. BR was also funded by the National Science Foundation under Award No. DMR-1747426
(QISE-NET).
JKF was also funded by the McDevitt bequest at Georgetown University.
\par
\appendix
\section{Correlation functions on the Keldysh contour}
In this appendix, we discuss in more detail which correlation functions can be evaluated by the circuit in Fig.(\ref{fig:alt_sch}) in the main text and which ones require a generalization of our scheme.
In general, since the operators $O_i(t_i)$ do not commute with each other, one can construct $n!$ correlation functions out of all possible permutations of the operators in Eq.(\ref{eq:corr}) in the main text.
However, calculating all possible nested commutators/anti-commutators of Eq.(\ref{eq:corr_comm}) gives the possibility of isolating only $2^{n-1}$ of these permutations.
\par Here we argue that the permutations we have access to correspond to components of time-ordered correlation functions on a simple two-branch Keldysh contour [see Fig.(\ref{fig:keldyshc}a)], while the remaining ones are components of time-ordered correlation functions on a more complicated contour with multiple branches [see Fig.(\ref{fig:keldyshc}b)].
\begin{figure}
\caption{(a) Simple Keldysh contour $\mathcal{C}
\label{fig:keldyshc}
\end{figure}
\par
Instead of giving a rigorous proof of our statement, we will consider the specific example for $n = 3$. In this case, the permutations we have access to through Eq.(\ref{eq:corr_comm}) are:
$\left<O_1(t_1)O_2(t_2)O_3(t_3)\right>$, $\left<O_1(t_1)O_3(t_3)O_2(t_2)\right>$, $\left<O_2(t_2)O_3(t_3)O_1(t_1)\right>$ and $\left<O_3(t_3)O_2(t_2)O_1(t_1)\right>$, where $t_3> t_2>t_1$. Hence, the following two permutations $\left<O_3(t_3)O_1(t_1)O_2(t_2)\right>$ , $\left<O_2(t_2)O_1(t_1)O_3(t_3)\right>$ are missing.
\par In Fig.(\ref{fig:keldyshc}a) we show a two-branch Keldysh contour \cite{stefanucci2013} $\mathcal{C} = \mathcal{C}_+ + \mathcal{C}_-$, where $\mathcal{C}_\pm$ are the forward and backward branches. In the same figure we show that the particular permutation $\left<O_2O_3O_1\right>$ can be seen as a time-ordered function on the contour $\mathcal{C}$ where $t_1 = t_{1+}$ and $t_3 = t_{3+}$ lie on the forward branch while $t_2 = t_{2-}$ lies on the backward branch.
\par Conversely, the permutation $\left<O_3O_1O_2\right>$ cannot be interpreted as a time-ordered correlation function on $\mathcal{C}$. Instead, it could be seen as a time-ordered correlation function on the more complex contour $\mathcal{C}_2$ shown in Fig.(\ref{fig:keldyshc}b) that contains two additional branches. The same contour has to be considered for the calculation of out-of-time correlators \cite{Tsuji2017}, and therefore a generalization of our scheme is needed in order to have access to those quantities.
\section{Explicit calculations for the three-point correlation function}
Here we shall derive Eq.~(\ref{eq:three_point}) in the main text and give some insights about the remainder function and the additional measurements that are needed for its computation. In the derivation we shall use the following abstract notation for the time evolution: $\mathcal{V}_{t_{j+1},t_j}[\rho(t_j)] = \rho(t_{j+1})$, instead of its sum representation in Eq.~(\ref{eq:Kraus}). We note that everything that is contained within the brackets of $\mathcal V$ is time-evolved as the density matrix in Eq.~(\ref{eq:Kraus}).
\par
According to Eq.~(\ref{eq:two_point}), the final state before the measurement in the system register in the case of $n = 3$ reads:
\begin{eqnarray}\label{eq:rho_split}
\rho_3 &\propto& \rho_3^{(\text{I})} + \rho_3^{(\text{II})} + \rho_3^{(\text{III})} + \left[\rho_3^{(\text{III})}\right]^\dag,
\end{eqnarray}
where:
\begin{eqnarray}
\label{eq:rho_I}
\rho^{(\text{I})}_3 &=& \mathcal{V}_{t_3,t_2}[ \rho_2] \\
\label{eq:rho_II}
\rho^{(\text{II})}_3 &=& \mathcal{V}_{t_3,t_2}[O_2\rho_2O_2]\\
\label{eq:rho_III}
\rho^{(\text{III})}_3 &=& (-1)^{m_2}\,i^{\alpha_2}\mathcal{V}_{t_3,t_2}[O_2\,\rho_2]
\end{eqnarray}
Because the time evolution preserves the trace, the normalization factor will be given by:
\begin{equation}
\mathcal{N}_3^{-1} =2+\left[(-1)^{m_2}\,i^{\alpha_2}\text{Tr}\left[\rho_2O_2\right] + \text{h.c.}\right].
\end{equation}
There are a total of 16 terms on the RHS of Eq.~(\ref{eq:rho_split}): only four of them, those that are contained in $\rho^{(\text{III})}_3$ and its conjugate transpose, contain the information about the three-point correlation function. The other 12 terms give rise to the remainder function $R_{\boldsymbol{\alpha}}$ when $O_3$ is finally measured. As we shall see, these 12 spurious terms contain information about two-point and one-point (averages) correlation functions. Therefore, in order to extract the three-point correlation function we will need to perform preliminary measurements of a few averages and two-point correlation functions that can be obtained using similar circuits as that one shown in Fig.~\ref{fig:alt_sch} for $n = 2$.
\par If we substitute $\rho_2$ from Eq.~(\ref{eq:two_point}) into Eq.~(\ref{eq:rho_I}) we obtain:
\begin{eqnarray}\label{eq:expl_I}
\rho_3^{(\text{I})} &=& \rho(t_3) + \mathcal V_{t_3,t_1}[O_1\rho(t_1)O_1]+ \nonumber \\
&(-1)^{m_1}&\,\mathcal{V}_{t_3,t_1}\left[i^{\alpha_1}O_1\rho_1 - i^{-\alpha_1}\rho_1O_1\right].
\end{eqnarray}
Analogously we obtain the explicit expression for Eq.~(\ref{eq:rho_II}), that reads:
\begin{eqnarray}\label{eq:expl_II}
\rho_3^{(\text{II})} &=& \mathcal{V}_{t_3,t_2}[O_2\rho(t_2)O_2] \nonumber \\
&+& \mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[O_1\rho(t_1)O_1]O_2\right) \nonumber \\
&+&(-1)^{m_1}\,i^{\alpha_1}\mathcal V_{t_3,t_2}\left(O_2 \mathcal V_{t_2,t_1}[O_1\rho_1]O_2\right) \nonumber \\
&+&(-1)^{m_1}i^{-\alpha_1} \mathcal V_{t_3,t_2}\left(O_2 \mathcal V_{t_2,t_1}[\rho_1O_1]O_2\right).
\end{eqnarray}
It is worthwhile to note that all the terms in Eqs.~(\ref{eq:expl_I}) and (\ref{eq:expl_II}) contribute solely to the remainder function. For example, let us consider the following average that emerges from the last two terms in the RHS of Eq.~(\ref{eq:expl_II}), i.e. $i^{\alpha_1}\text{Tr}\,O_3\,\mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[O_1\rho_1]O_2\right) + \text{h.c.}$: Such a quantity can be measured using the circuit shown in Fig.~\ref{fig:res_func}, that is very similar to the one displayed in Fig.~\ref{fig:alt_sch} in the case of $n = 2$, the only difference being the unitary operation $O_2$ that acts solely on the system qubits after the time evolution from $t_1$ to $t_2$ and the additional time evolution from $t_2$ to $t_3$.
\begin{figure}
\caption{One of the preliminary measurements needed in order to compute the remainder function $R_{\boldsymbol{a}
\label{fig:res_func}
\end{figure}
\par The explicit expression of $\rho_3^{(\text{III})}$ reads:
\begin{eqnarray}\label{eq:expl_III}
\rho_3^{(\text{III})} &=&(-1)^{m_2}\,i^{\alpha_2}\mathcal V_{t_3,t_2}[O_2\rho(t_2)]\nonumber \\
&+&(-1)^{m_2}\,i^{\alpha_2}\mathcal V_{t_3,t_2}\left(O_2 \mathcal V_{t_2,t_1}[O_1\rho(t_1)O_1]\right) \nonumber \\
&+&(-1)^{m_1+m_2}i^{\alpha_2+\alpha_1}\mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[O_1\rho(t_1)]\right) \nonumber \\
&+&(-1)^{m_1+m_2}i^{\alpha_2-\alpha_1}\mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[\rho(t_1)O_1]\right). \nonumber \\
\end{eqnarray}
We notice that the first two terms in the RHS of the last equation contribute to the remainder function. For example, the second term in the RHS of Eq.~(\ref{eq:expl_III}) would give rise to the following average $i^{\alpha_2}\,\text{Tr}\,O_3\mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[O_1\rho(t_1)O_1]\right) + \text{h.c.}$ that could be measured using the circuit displayed in Fig.~\ref{fig:res_func2}.
\begin{figure}
\caption{Quantum circuit needed in order to compute a contribution to the remainder function $R_{\boldsymbol{\alpha}
\label{fig:res_func2}
\end{figure}
\par Therefore, by singling out the last two terms of Eq.~(\ref{eq:expl_III}), adding them up to their Hermitian conjugates, multiplying the results by $O_3$ and computing the trace, we obtain the following three-point correlation function:
\begin{eqnarray}
C^{\boldsymbol{\alpha}}_{t_1,t_2,t_3} &=& i^{\alpha_2+\alpha_1}\overbrace{\text{Tr}\,O_3\,\mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[O_1\rho(t_1)]\right)}^{\left<\text{O}_3(t_3)\text{O}_2(t_2)\text{O}_1(t_1)\right>} \nonumber \\
& +&i^{\alpha_2-\alpha_1}\overbrace{\text{Tr}\,O_3\,\mathcal V_{t_3,t_2}\left(O_2\mathcal V_{t_2,t_1}[\rho(t_1)O_1]\right)}^{\left<\text{O}_1(t_1)\text{O}_3(t_3)\text{O}_2(t_2)\right>}
\nonumber \\
&+& \text{h.c.}
\nonumber \\
\end{eqnarray}
that is the one appearing in Eq.~(\ref{eq:three_point}) in the main text.
\par The remainder function $R_{\boldsymbol{\alpha}}$ can be obtained in a similar way but adding up all the remaining 12 terms appearing in Eq.~(\ref{eq:rho_split}), multiplying them by $O_3$ and computing the trace. In order to extract the information about $C^{\boldsymbol{\alpha}}_{t_1,t_2,t_3}$, we will need to perform additional measurements similar to those shown in Figs. \ref{fig:res_func} and \ref{fig:res_func2}.
\section{Specifics of the model system implemented on the quantum computer}
We model spinless electrons hopping on a one-dimensional lattice with nearest-neighbor hopping. The electrons are placed in an electric field and are connected to a fermionic bath that provides dissipation to the system. See Refs.~\onlinecite{delre2020,rost2021} for more details about the model and the derivation of Eq.~(\ref{eq:me_lin}).
\subsection{Time evolution}
The time evolution of such a model from time $t$ to $t+\Delta t$ is governed by the infinitesimal Kraus map that can be derived from the master equation in Eq.~(\ref{eq:me_lin}) and it is given by the following set of operators:
\begin{align}\label{eq:kraus_toy}
K_0 &=
d^\dag_k d^{\,}_k \sqrt{1-2\,\Gamma n_F[-\epsilon_k(t)]\Delta t}\,e^{-i\epsilon_k(t) \Delta t}
\nonumber \\
&+ d^{\,}_kd^\dag_k \sqrt{1-2\Gamma n_F[\epsilon_k(t)]\Delta t},\nonumber \\
K_1 &= \sqrt{2\Gamma\,\Delta t\,n_F[\epsilon_k(t)]}\,d^\dag_k, \\
K_2 &= \sqrt{2\Gamma \Delta t\,n_F[-\epsilon_k(t)]}d_k^{\,},\nonumber
\end{align}
where $\epsilon_k(t) = - 2J \cos\left(k+\Omega\,t\right)$, with $J$ being the hopping amplitude, $\Omega$ the amplitude of the applied DC field, and $k$ the crystalline momentum. $\Gamma$ measures the strength of the system-environment coupling and $n_F(x) = \left[1 + \exp(\beta\, x)\right]^{-1}$ is the Fermi-Dirac distribution with $\beta$ being the inverse of the bath temperature. The Kraus map in Eq.~(\ref{eq:kraus_toy}) can be implemented on the system qubit using an extra-ancilla qubit as shown in the circuit in Fig.(\ref{fig:trott_dyn}).
\subsection{Calculation of the Green's function}
The Kraus map shown in Eq.~(\ref{eq:kraus_toy}) gives the time evolution from time $t$ to time $t + \Delta t$ and it is exact in the limit of $\Delta t \to 0^+$, for this reason we refer to it as an infinitesimal map. In order to write down the Green's function analytically it is useful to derive the integrated map $V_{t,t^\prime}$ that gives the time evolution from time $t$ to $t^\prime > t$ with $t^\prime-t$ being a positive finite arbitrary number. This is equivalent to determining the Choi matrix, that \cite{andersson2007,delre2020} reads: $S_{ab}(t) =
\sum_{r=0}^{3}\sum_{s = 0}^{3}F_{sr}(t)\text{Tr}[\sigma_r\sigma_a\sigma_s\sigma_b],
$ where the indices $a,b$ specify one of the four operators $\sigma_0 = \mathbbm{1}_{2\times 2}/\sqrt{2}$, $\sigma_1 = \sigma^x/\sqrt{2}$, $\sigma_2 =
\sigma^y/\sqrt{2}$, $\sigma_3 =
\sigma^z/\sqrt{2}$.
We can obtain a differential equation for the $F_{rs}(t)$ matrix by realizing that the master equation in Eq.~(\ref{eq:me_lin}) can be written as $\partial_t \rho = \Lambda_t(\rho)$, with $\Lambda_t$ being a linear map such that $\Lambda_t(\rho)$ is Hermitian and traceless. The equation of motion for $F$ reads: $\dot{F} = L(t)\cdot F(t)$, where the center dot indicates the matrix product, with initial condition $F(0) = \mathbbm{1}_{4\times 4}$ and with $L_{rs} = \text{Tr}[\sigma_r \Lambda_t(\sigma_s)]$. Once the Choi matrix is obtained we can write the time-evolution map in the Kraus representation with Kraus operators: $K_i = \sqrt{\lambda_i}\sum_{a = 0}^3 X(i)_a\sigma_a $,
where $\lambda_i$ and $X(i)$ are, respectively, the eigenvalues and eigenvectors of $S$.
In this way we obtain the following Kraus map:
\begin{eqnarray}\label{eq:K_int}
K_1 &=& A_1(k,t,t^\prime)P_0+B_1(k,t,t^\prime)P_1, \nonumber \\
K_2 &=& A_2(k,t,t^\prime)P_0+B_2(k,t,t^\prime)P_1, \nonumber \\
K_3 &=& \alpha(k,t,t^\prime) X\,P_0,\nonumber \\
K_4 &=& \beta(k,t,t^\prime) X\,P_1,
\end{eqnarray}
where $P_0 = d^{\,}_kd^\dag_k$, $P_1 = d^\dag_kd^{\,}_{k}$, $X = \frac{1}{\sqrt{2}}(d^{\,}_k+d^{\dag}_k)$ and we have:
\begin{eqnarray}\label{eq:coeff}
A_1&=& \frac{\left(b-\sqrt{b^2+c^2+d^2}+c+i d\right) \sqrt{a-\sqrt{b^2+c^2+d^2}}}{2 (c+i
d) \sqrt{\frac{1}{\frac{b}{\sqrt{b^2+c^2+d^2}}+1}}} \nonumber \\
B_1 &=& -\frac{\left(\sqrt{b^2+c^2+d^2}-b+c+i d\right) \sqrt{a-\sqrt{b^2+c^2+d^2}}}{2 (c+i
d) \sqrt{\frac{1}{\frac{b}{\sqrt{b^2+c^2+d^2}}+1}}} \nonumber \\
A_2 &=& \frac{\left(\sqrt{b^2+c^2+d^2}+b+c+i d\right) \sqrt{a+\sqrt{b^2+c^2+d^2}}}{2 (c+i
d) \sqrt{\frac{1}{1-\frac{b}{\sqrt{b^2+c^2+d^2}}}}} \nonumber \\
B_2 &=&\frac{\left(\sqrt{b^2+c^2+d^2}+b-c-i d\right) \sqrt{a+\sqrt{b^2+c^2+d^2}}}{2 (c+i
d) \sqrt{\frac{1}{1-\frac{b}{\sqrt{b^2+c^2+d^2}}}}} \nonumber \\
\end{eqnarray}
and $a = \frac{1}{2}[1 + e^{-2\Gamma(t-t^\prime)}]$, $b = \cos[f_k(t,t^\prime)]e^{-\Gamma(t-t^\prime)}$, $c = \frac{1}{2}-n_k(t^\prime)+e^{-\Gamma(t-t^\prime)}(n_k(t^\prime)-\frac{1}{2})$ with $n_k(t) = \text{Tr}[\rho_k(t) P_1]$, $d = e^{-\Gamma(t-t^\prime)}\sin[f_k(t,t^\prime)]$, $\alpha = \sqrt{c-\frac{1}{2}(1-e^{-2\Gamma (t-t^\prime)})}$, $\beta = \sqrt{c+\frac{1}{2}(1-e^{-2\Gamma (t-t^\prime)})}$.
\par The retarded Green's function can be expressed in terms of the lesser and greater Green's functions in the following way: $G^{(R)}_k(t,t^\prime) = \theta(t-t^\prime)\left[G^>_k(t,t^\prime)-G^<_k(t,t^\prime)\right]$, where $G_k^>(t,t^\prime) = -i\left<d_k^{\,}(t)d^{\dag}_k(t^\prime)\right>=-i\,\text{Tr}\left\{X\,P_1\,V_{t,t^\prime}[X\,P_0\rho_k(t^\prime)] \right\}$ and $G_k^<(t,t^\prime) = i\left<d_k^{\dag}(t^\prime)d^{\,}_k(t)\right> = i\,\text{Tr}\left\{X\,P_1\,V_{t,t^\prime}\left[\rho_k(t^\prime)X\,P_0\right]\right\}$.
\par A generic one-qubit density matrix can be written in the following way: $\rho(t) = \rho_\text{diag} + x(t)X+y(t)Y$, where $\rho_{\text{diag}}(t) = \text{diag}(1-n_k(t),n_k(t))$. It is easy to show that the lesser or greater Green's function does not depend on the off-diagonal elements of the density matrix. In fact:
\begin{eqnarray}
&&\text{Tr}\{XP_1 V_{t,t^\prime}[(x\, X+y\,Y)XP_0 ] \} = \nonumber \\
&&(x-i\,y)\,\text{Tr}\{XP_1 V_{t,t^\prime}[P_0 ] \} = \nonumber \\
&&(x-i\,y)\text{Tr}\{X P_1 [(|A_1|^2 + |A_2|^2)P_0 + |\alpha|^2 P_1]\} = \nonumber \\
&&(x-i\,y)|\alpha|^2\,\text{Tr}\{XP_1\} = 0 \nonumber \,.
\end{eqnarray}
Therefore, we can write the lesser Green's function as:
\begin{eqnarray}
G^<_k(t,t^\prime) &=& i\text{Tr}\left\{V_{t,t^\prime}[\rho_\text{diag}XP_0]XP_1\right\} \nonumber \\
&=&i\,n_k(t^\prime)\,\text{Tr}\left\{V_{t,t^\prime}[P_1\,X\,P_0]XP_1\right\},
\end{eqnarray}
and by noticing that:
\begin{eqnarray}
V_{t,t^\prime}[P_1\,X\,P_0] &=& K_1^{\,}\,P_1X P_0K_1^\dag + K_2^{\,}P_1\,X\,P_0K_2^\dag \nonumber \\
&=&P_1\,X\,P_0\left(A_1^*B_1 + A_2^*B_2\right),
\end{eqnarray}
we obtain:
\begin{eqnarray}\label{eq:lesser}
G^{<}_k(t,t^\prime) &=& i\,n_k(t^\prime)\left(A_1^*B_1 + A_2^*B_2\right)\mbox{Tr}\left\{XP_1\,P_1X P_0 \right\} \nonumber \\
&=&i\,n_k(t^\prime)\,e^{-\Gamma (t-t^\prime)}e^{-if_k(t,t^\prime)}
\end{eqnarray}
Analogously we obtain the following expression for the greater Green's function:
\begin{eqnarray}\label{eq:greater}
G^>_k(t,t^\prime) &=&-i\text{Tr}\left\{X\,P_1\,V_{t,t^\prime}\left[XP_0\,\rho_{\text{diag}}\right]\right\}
\nonumber \\
&=&-i(1-n_k(t^\prime))e^{-\Gamma (t-t^\prime)}e^{-if_{k}(t,t^\prime)}.
\end{eqnarray}
Let us notice that only two Kraus operators, namely $K_1$ and $K_2$ were involved in the time evolution of the operator $\rho\,d_k^{\,}$ ($d^\dag_k\,\rho$) in the expression of the lesser (greater) Green's function.
From Eqs.~(\ref{eq:lesser}) and (\ref{eq:greater}), we obtain the retarded Green's function as follows:
\begin{eqnarray}
G^{(R)}_{k}(t,t^\prime) &=& \theta(t-t^\prime)[G_k^>(t,t^\prime)-G_k^<(t,t^\prime)] \nonumber \\
&=& -i\theta(t-t^\prime)\,e^{-\Gamma (t-t^\prime)}e^{-if_k(t,t^\prime)}.
\end{eqnarray}
\end{document} |
\begin{document}
\title{Any-Order Online Interval Selection}
\begin{abstract}
We consider the problem of online interval scheduling on a single machine, where intervals arrive online in an order chosen by an adversary, and the algorithm must output a set of non-conflicting intervals. Traditionally in scheduling theory, it is assumed that intervals arrive in order of increasing start times. We drop that assumption and allow for intervals to arrive in any possible order. We call this variant \textit{any-order interval selection} (AOIS). We assume that some online acceptances can be revoked, but a feasible solution must always be maintained. For unweighted intervals and deterministic algorithms, this problem is unbounded. Under the assumption that there are at most $k$ different interval lengths, we give a simple algorithm that achieves a competitive ratio of $2k$ and show that it is optimal amongst deterministic algorithms, and a restricted class of randomized algorithms we call \textit{memoryless}, contributing to an open question by Adler and Azar \cite{adler2003beating}; namely whether a randomized algorithm without memory or with only ``bounded'' access to history can achieve a constant competitive ratio. We connect our model to the problem of \textit{call control} on the line, and show how the algorithms of Garay et al. \cite{garay1997efficient} can be applied to our setting, resulting in an optimal algorithm for the case of proportional weights. We also discuss the case of intervals with arbitrary weights, and show how to convert the single-length algorithm of Fung et al. \cite{fung2014improved} into a \textit{classify and randomly select} algorithm that achieves a competitive ratio of $2k$. Finally, we consider the case of intervals arriving in a \textit{random order}, and show that for single-lengthed instances, a \textit{one-directional} algorithm (i.e. replacing intervals in one direction), is the only deterministic memoryless algorithm that can possibly benefit from random arrivals.
\end{abstract}
\section{Introduction}
We consider the problem of scheduling intervals online with revoking\footnote{Displacing one or more previously scheduled intervals with a conflicting new interval.}.
Intervals arrive with a fixed start time and fixed end time, and have to be taken right away, or be discarded upon arrival, while no intervals in the solution conflict. The algorithm has to decide which intervals to include in the final schedule, so as to optimize some objective.
In the unweighted case, the goal is to maximize the number of intervals in the final solution. In the weighted case, we want an interval-set of maximum weight.
Following previous work, we allow some revoking of online decisions, which is often considered even in the conventional start-time-ordered scheduling model. More precisely, if a newly arrived interval conflicts with other intervals already taken by the algorithm, we are able to take the new interval and discard the conflicting intervals. We are able to displace multiple existing intervals at once, although this won't occur in the unweighted case. To avoid confusion, we should note that \textit{preemption}\footnote{In contrast to revoking, preemption in much of the scheduling literature means the pausing of a scheduled job, and resuming it later.} is used in the interval selection literature to mean precisely this revoking of previous decisions we just described. Under this definition, preemption is allowed in our model. When we discard an interval it is final and it cannot be taken again.\\\\
We focus mainly on the unweighted case, where all intervals have the same weight. We discuss the competitive ratio of the problem in terms of \textit{k}, the number of distinct interval lengths. However our algorithm does not need a priori knowledge of \textit{k}. We show that a simple, deterministic, ``memoryless'' algorithm that only replaces when the new interval is entirely subsumed by an existing one, achieves the optimal competitive ratio in terms of the parameter \textit{k}. We also show that ``memoryless'' randomized algorithms can not do any better.
The main difference between our model and most of the interval selection literature, is allowing intervals to arrive in any order, a strict generalization of the ordered case. Bachmann et al. \cite{bachmann2013online} have studied the any-order input model in the context of ``\textit{t}-intervals'' (we are concerned with $t=1$). They consider randomized algorithms, and don't allow revoking. In that model, they get a lower bound of $\Omega(N)$, with $N$
being the number of intervals in a given input instance.
The next most closely related problem is that of call admission \cite{garay1992call} on the line graph, with online intervals corresponding to paths of a given line graph. The connection between call control on the line graph and interval selection has been noted before, but has not been carefully defined. We wish to clarify this connection by explaining the similarities as well as the differences, and how results correspond. We note that the parameter $k \leq N$ (respectively, $k \leq n-1$) is an obvious refinement of the number of intervals (respectively, the number of vertices for call admission on a line graph with $n$ vertices).\\\\
The applications of interval selection problems are plentiful. Some examples are resource allocation, network routing, transportation, and computer wiring. We refer the reader to the surveys by Kolen et al. \cite{kolen2007interval}, and Kovalyov et al. \cite{kovalyov2007fixed} for an overview of results and applications in the area of interval scheduling.\\\\
\textbf{Related Work.} Lipton and Tomkins \cite{lipton1994online} introduced the online interval scheduling problem. In our terminology, they consider the arrival of intervals with increasing start times (ordered), and interval weights that are proportional to the lengths. They don't allow displacement of existing intervals, and give a randomized algorithm with competitive ratio $O((\log\Delta)^{1+\epsilon})$, where $\Delta$ is the ratio of the longest to shortest interval.\\\\
In the unweighted case with increasing starting times, Faigle and Nawijn \cite{faigle1995note} give an optimal 1-competitive algorithm that is allowed to revoke previous decisions (replace intervals). In the weighted case with increasing starting times, Woeginger \cite{woeginger1994line} shows that for general weights, no deterministic algorithm can achieve a constant competitive ratio. Canetti and Irani \cite{canetti1995bounding} extend this and show that even randomized algorithms with revocable decisions cannot achieve a constant ratio for the general weighted case.
For special classes of weight functions based on the length (including proportional weights), Woeginger \cite{woeginger1994line} gives an optimal deterministic algorithm with competitive ratio 4. Seiden \cite{seiden1998randomized} gives a randomized $(2+\sqrt{3})$-competitive algorithm when the weight of an interval is given by a continuous convex function of the length. Epstein and Levin \cite{epstein2008improved} give a $\approx 2.45$-competitive randomized algorithm for weights given by functions of the length that are monotonically decreasing, and they also give an improved $1+\ln(2) \approx 1.693$ upper bound for the weight functions studied by Woeginger \cite{woeginger1994line}. Fung et al. \cite{fung2014improved} currently have the final word on the best upper bounds, giving \textit{barely random} algorithms that achieve a competitive ratio of 2 for all the Woeginger weight functions. These algorithms randomly choose one of two deterministic algorithms at the beginning. More generally, barely random algorithms have access to a small number of deterministic algorithms, and randomly choose one.\\\\
Restricting interval lengths has previously been considered in the literature, e.g. Lipton and Tomkins \cite{lipton1994online} study the case of two possible lengths, and Bachmann et al. \cite{bachmann2013online} consider single and two-length instances. For the related offline problem of throughput maximization, Hyatt-Denesik et al. \cite{hyatt2020approximations} consider $c$ distinct processing times. The special case of single-length jobs has been studied in the job scheduling \cite{sgall1998line,baptiste2000scheduling,chrobak2006note}, sum coloring \cite{borodin2012sum}, and the interval selection literature \cite{fung2012line,miyazawa2004improved}. Woeginger \cite{woeginger1994line} also points out how his results can be extended to the case of equal lengths and arbitrary weights. Miyazawa and Erlebach \cite{miyazawa2004improved} point out the equivalency between fixed length (w.l.o.g. unit) instances, and
proper interval instances, i.e. instances where no interval is contained within another. This is because of a result by Bogart and West \cite{bogart1999short}, showing the equivalency of the corresponding interval graphs in the offline setting.\\\\
There has also been some work on multiple identical machines. For the case of equal-length, arbitrary-weight intervals, Fung et al. \cite{fung2012line} give an algorithm that is 2-competitive when $m$, the number of machines, is even, and $(2+\frac{2}{2m-1})$ when $m$ is odd. Yu \& Jacobson \cite{yu2018online} consider C-benevolent (weight function is convex increasing) jobs and get an algorithm that is 2-competitive when $m$ is even, and $(2+\frac{2}{m})$-competitive when $m$ is odd.\\\\
In the problem of call control, a graph is given, and requests that correspond to pairs of nodes of the graph arrive online. The goal is to accept as many requests as possible, with the final set consisting of disjoint paths. When the underlying graph is a line, this problem is closely related to ours. For call control on the line, Garay et al. \cite{garay1997efficient} give optimal deterministic algorithms. In the unweighted case, they achieve a $O(\log(n))$ competitive ratio, where $n$ is the number of the vertices of the graph. In the case of proportional weights (weight is equal to the length of the path), they give an optimal algorithm that is $(\sqrt{5}+2)\approx 4.23$-competitive (its optimality was shown by Furst and Tomkins \cite{tomkins1995lower}). Adler and Azar \cite{adler2003beating} use randomization to overcome the $\log(n)$ lower bound, and give a 16-competitive algorithm. Emek et al. \cite{emek2016space} study interval selection in the streaming model, and show how to modify their streaming algorithm to work online, achieving a competitive ratio of 6, improving upon the 16-competitive algorithm of Adler and Azar. It is noteworthy that the Adler and Azar algorithm uses memory proportional to the entire input sequence.
In contrast, the Emek et al. algorithm only uses memory that is within a constant factor of a current OPT solution. It is still an open question if a randomized algorithm using only constant bounded memory can get a constant ratio in the unweighted case. We show that for a strict, but natural definition of memoryless randomized algorithms, a constant ratio cannot be obtained. The algorithms presented in this paper, along with the optimal algorithms by Garay et al. \cite{garay1997efficient} and Woeginger \cite{woeginger1994line}, fall under our definition of memoryless. It is worth noting that similar notions of memoryless algorithms, and comparison between randomized memoryless and deterministic, have appeared in the k-server and caching literature \cite{coester2019online,koutsoupias2009k,Kleinberg94,raghavan1989memory}. We would note that barely random algorithms as described earlier (i.e. algorithms that initially generate some random bits, which are used in every online step), are not memoryless but usually satisfy bounded memory. The algorithms by Fung et al. \cite{fung2014improved} are an example of this. More generally, this use of initial random bits is characteristic of \textit{classify and randomly select} algorithms\footnote{Barely random algorithms can be thought of as a special case of the classify and randomly select paradigm.} (e.g. Lipton and Tomkins \cite{lipton1994online} and Awerbuch et al. \cite{awerbuch1994competitive}). It's important to note that such algorithms may require prior knowledge of bounds on lengths of intervals. In Appendix~\ref{app:A} we discuss our meaning of memoryless and bounded memory online algorithms, and the relation to randomness, advice, and the Adler and Azar question. \\\\
The problem of admission control has also been studied under the model of minimizing rejections \cite{blum2001admission,alon2005admission} instead of maximizing acceptances. An alternative input model for interval selection is that of arriving conflicts \cite{halldorsson2013online} instead of single intervals, with the algorithm being able to choose at most one item from each conflict. We also note that, an instance of interval selection can be represented as an interval graph, with intervals corresponding to vertices, and edges denoting a conflict between two intervals. Generally, interval graphs reveal much less about the instance compared to receiving the actual intervals. In the interval graph representation, arriving vertices may have an adjacency list only in relation to already arrived vertices, or they may show adjacency to future vertices as well.\\\\
\textbf{Our results.} For the unweighted adversarial case, we know that no deterministic algorithm is bounded (follows from \cite{garay1997efficient}). Assuming there are at most $k$ different lengths, we show how a simple greedy algorithm achieves a competitive ratio of $2k$. We also give a matching lower bound that holds for all deterministic algorithms, as well as ``memoryless'' randomized algorithms. We note that an instance with $k$ different lengths can have a nesting depth of at most $k-1$. Alternatively, we can state our results in terms of $d$, the nesting depth (see Figure~\ref{fig:conflicts}), noting that $d \leq (k-1)$. This implies that our $2k$ bounds can be restated as $2(d+1)$. We also show how to extend the classify and randomly select paradigm used by Fung et al.~\cite{fung2014improved} to obtain a randomized algorithm that is $2k$-competitive for the case of arbitrary weights and $k$ different interval lengths. It's worth noting that Canetti and Irani \cite{canetti1995bounding} give an $\Omega(\sqrt{k})$ lower bound for randomized algorithms and arbitrary weights.\\\\
We show how the problem of call control on the line \cite{garay1997efficient} relates to interval selection, and in particular how their $\log n$-competitive algorithm for the unweighted case and their $(2+\sqrt{5})$-competitive algorithm for proportional weights carry over to interval selection. Lastly, we consider deterministic memoryless algorithms for the problem of any-order, unweighted, single-length (i.e., unit) intervals with random order arrivals. We show that the only deterministic memoryless algorithm that can possibly benefit from random arrivals is one-directional, only replacing intervals if they overlap in that particular direction. \\\\
\textit{Organization of the paper.}
Section 2 has some definitions to clarify the model. Section 3 has our upper and lower bounds in the adversarial case, the connection to call control, and the application of the proportional weights algorithm to our model. Section 4 discusses arbitrary weights. Section 5 is about interval selection in the random order model. We end with some conclusions and open problems.
\section{Preliminaries}
Our model consists of intervals arriving on the real line. An interval $I_{i}$ is specified by a starting point $s_{i}$, and an end point $f_{i}$, with $s_{i} < f_{i}$. It occupies space $[s_{i},f_{i})$ on the line, and the conventional notions of intersection, disjointness, and containment apply. This allows two adjacent intervals $[s_{1},f)$ and $[f,f_{2})$ to not conflict, although our results would apply even if we considered closed intervals $[s_{i},f_{i}]$ with $[s_{1},f]$ and $[f,f_{2}]$ conflicting. There are two main ways two intervals can conflict, and they are shown in figure \ref{fig:conflicts}.\\\\
We use the notion of competitive ratio to measure the performance of our online algorithms. Given an algorithm $A$, let ALG denote the objective value of the solution achieved by the algorithm, and let OPT denote the optimal value achieved by an offline algorithm. The competitive ratio of $A$ is defined as follows: $CR(A)=\frac{OPT}{ALG} \geq 1$. We should note that we can repeat disjoint copies of our nemesis sequences, and get the corresponding tight lower bounds. As a result, we can omit the standard additive term in our definition of competitive ratio. We will sometimes abuse notation and use ALG and OPT to denote the sets of intervals maintained by the algorithm at some given point, and the set of intervals of an optimal solution respectively. In the case of deterministic algorithms and random arrival of intervals, the performance of an algorithm is a random variable, and the competitive ratios hold w.h.p. (definition of competitive ratio remains unchanged). The algorithm we present in the case of arbitrary weights is randomized, and its expected competitive ratio is defined as $CR(A)=\frac{OPT}{\E[ALG]}$.
\begin{figure}
\caption{Types of conflicts.}
\label{fig:conflicts}
\end{figure}
We sometimes refer to a \textit{chain} of intervals (figure \ref{fig:chain-ex}). This is a set of intervals where each interval partially conflicts with exactly two other intervals, except for the two end intervals that partially conflict with only one.
\begin{figure}
\caption{Interval chain.}
\label{fig:chain-ex}
\end{figure}
\section{Adversarial Order}
\subsection{Unweighted}
In this section, we assume an adversary chooses the instance configuration, along with the arrival order of all intervals. Lemma \ref{lem:need-rev} shows that revocable decisions are necessary even in the case of two different lengths. Algorithm \ref{alg:SUB} is the greedy algorithm that achieves the optimal competitive ratio of $2k$ in the unweighted case, and it works as follows: On the arrival of a new interval, take it if there's no conflict. If there's a conflict, take the new interval only if it is properly contained inside an existing interval.
\begin{algorithm}
\caption{}\label{alg:SUB}
\begin{algorithmic}
\State On the arrival of $I$:
\State $I_{s} \gets $ Set of intervals currently in the solution conflicting with $I$
\For{$I' \in I_{s}$}
\If{$I \subset I'$}
\State Take $I$ and discard $I'$
\State return
\EndIf
\EndFor
\State Discard $I$
\end{algorithmic}
\end{algorithm}
\begin{lemma}
\label{lem:need-rev}
The problem of any-order unweighted interval scheduling with two different lengths and irrevocable decisions is unbounded.
\end{lemma}
\begin{proof}
Consider two possible interval lengths of 1 and $K$. Let an interval of length $K$ arrive first. W.l.o.g. the algorithm takes it. Then $K$ 1-length intervals arrive next, all of them overlapping with the first $K$-length interval. The algorithm cannot take any of the 1-length intervals, achieving a competitive ratio of $K$, which grows unboundedly with $K$.
\end{proof}
\begin{figure}
\caption{Unweighted instance with two different lengths.
}
\label{fig:need-revoke}
\end{figure}
\begin{theorem}\label{theo:pos-2k}
Algorithm~\ref{alg:SUB} achieves a competitive ratio of $2k$ for the problem of any-order unweighted interval scheduling with $k$ different lengths.
\end{theorem}
\begin{proof}
We define a mapping of intervals $f: OPT \longrightarrow ALG$, where every interval in ALG has at most $2k$ intervals in OPT mapped to it. Because intervals taken by the algorithm might be replaced during the execution, the mapping $f$ might be redefined multiple times. What follows is the way optimal intervals $I \in OPT$ are charged, as soon as they arrive, to intervals $I' \in ALG$. There are four cases of interest:\\\\
\textit{Case 1}: The newly arrived optimal interval is taken by the algorithm.\\
This can happen either because this interval did not conflict with any other intervals taken by the algorithm, or because it was entirely subsumed by a larger interval in ALG, in which case the algorithm would have replaced the large interval with the new small one. In this case, this optimal interval is mapped onto itself.\\\\
\textit{Case 2}: The newly arrived optimal interval partially conflicts with one interval currently in ALG.
In this case, this optimal interval is charged to the interval it conflicts with.\\\\
\textit{Case 3}: The newly arrived optimal interval partially conflicts with two intervals currently in ALG.
In this case, this optimal interval can be charged to any of these two intervals arbitrarily. We may assume it's always charged to the interval it conflicts with on the right. Notice also, that a newly arrived interval, cannot partially conflict with more than two intervals in ALG.\\\\
\textit{Case 4}: The newly arrived optimal interval subsumes an interval currently in ALG. W.l.o.g. we can assume this never happens. Any such optimal solution $OPT$ can be turned into an optimal solution $OPT'$, with the smaller interval in place of the larger one. We can restrict ourselves to only look at optimal solutions where no such transformation can take place. This case also encapsulates the case of an optimal interval perfectly coinciding with an interval taken by the algorithm.\\\\
An interval ($I_{l}$) taken by the algorithm can later be replaced, if a smaller one ($I_{s}$) comes along and is subsumed by it. When this happens, all intervals in $OPT$ charged to $I_{l}$ up to that point, will be transferred and charged to $I_{s}$. As a result, there are two ways an interval taken by the algorithm can be charged intervals in $OPT$. The first way is when an interval $I \in OPT$ is directly charged to an interval $I' \in ALG$ when $I$ arrives (Cases 1-4). This will be referred to as \textit{direct charging}. The second way is when a new interval, $I_{n}$, arrives, and replaces an existing interval $I_{e}$, in which case all optimal intervals previously charged to $I_{e}$, will now be charged to $I_{n}$. This will be referred to as \textit{transfer charging}.\\\\
\begin{proposition}
\label{prop:direct}
An interval taken by the algorithm (even temporarily), can be charged by at most two optimal intervals through \textit{direct charging}.
\end{proposition}
To see why this is true, we consider the three main cases of direct charging explained earlier. In \textit{Case} 1, the optimal interval is taken by the algorithm and is charged to itself. Because no other optimal interval can conflict with it, we know this interval will never be directly charged again.\\
In \textit{Cases} 2 and 3, direct charging happens because of the optimal interval partially conflicting with one or two intervals currently taken by the algorithm. Because an interval taken by the algorithm can partially conflict with at most two optimal intervals (one on each side), it can be charged twice at most.\\
\begin{proposition}
\label{prop:transfer}
An interval taken by the algorithm can be charged at most $2k-2$ optimal intervals through \textit{transfer charging.}
\end{proposition}
Consider a sequence of interval replacements by the algorithm, where all optimal intervals charged to an interval in the sequence, are passed down to the next interval in the sequence. The last interval in that sequence will have accumulated all the optimal intervals charged to the previous intervals in that sequence. Because we consider $k$ different lengths, such a sequence can have up to $k$ intervals, participating in $k-1$ transfer charging events. We also know that every interval in that sequence can be charged at most two optimal intervals through direct charging (\textit{Proposition \ref{prop:direct}}) before being replaced.
Consequently, assuming two additional charges are added to each interval in that sequence, the last (smallest) interval will be charged $2(k-1)$ optimal intervals through transfer charging.\\\\
We have described a process, during which every optimal interval is charged to an interval in ALG.
By Propositions \ref{prop:direct} \& \ref{prop:transfer}, we know that an interval in ALG, can be charged by $2k$ intervals in OPT at most. Therefore, our algorithm has a competitive ratio of $2k$ for the problem of unweighted interval selection with revocable decisions and $k$ different possible interval lengths. This ratio is tight for this algorithm and an example instance for $k=2$ is shown in Figure~\ref{fig:4-tight}. $I_{1}$ is directly charged by $I_{2}$ and $I_{3}$, transfers charges to $I_{4}$, which in turn is directly charged another two times by $I_{5}$ and $I_{6}$.
\end{proof}
\begin{figure}
\caption{4-competitive tight example for Algorithm~\ref{alg:SUB}. Interval subscripts correspond to the arrival order.}
\label{fig:4-tight}
\end{figure}
We now provide a matching lower bound, showing that no deterministic algorithm can do better.
\begin{theorem}
\label{theo:neg-2k}
No deterministic algorithm can achieve a competitive ratio better than $2k$ for the problem of unweighted interval selection with revocable decisions and $k$ different lengths.
\end{theorem}
\begin{proof}
At any point during the execution, the algorithm will have exactly one interval in its solution, while the size of the optimal solution will keep growing. We begin by describing how the main component of the instance is constructed, using intervals of the same length. First, the adversary must decide on an overlap amount $v$, which can be arbitrary. All partially conflicting intervals will overlap by exactly this amount. Consider now the instance of Figure~\ref{fig:base-ADV}. Intervals $I_{1}$ and $I_{2}$ arrive first in that order. If $I_{1}$ is taken by the algorithm and is then replaced by $I_{2}$, then $I_{4}$ arrives. If $I_{1}$ was taken by the algorithm but was not replaced by $I_{2}$, then $I_{3}$ would arrive. Because this case is symmetrical, we only consider the former case of $I_{2}$ replacing $I_{1}$. What happens is that this chain keeps growing in the same direction, until the algorithm decides to stop replacing. When that happens, we look at the last three intervals of the chain. For example, when $I_{4}$ arrived, if the algorithm chose to not select $I_{4}$ and instead maintain $I_{2}$, we stop growing the chain and consider the intervals $(I_{1}, I_{2}, I_{4})$. If the algorithm never stops replacing, it will end up with $I_{5}$ in its solution. Although it's not necessary, if the algorithm seems to always be replacing as the chain is growing, the adversary is able to abuse this as much as they want. In all cases, there exists an optimal solution of at least two intervals, with neither of them being the one taken by the algorithm. Note also that this construction requires at most four intervals of length $L$, occupying space at most $(4L - 3v)$ in total.\\
\begin{figure}
\caption{Base adversarial construction}
\label{fig:base-ADV}
\end{figure}
A small detail is that w.l.o.g. we can assume $I_{1}$ is always taken by the algorithm when it first arrives. Because this construction will take place a number of times during the execution, when the algorithm will already have an interval in its solution, it's useful to consider the case when $I_{1}$ is not taken by the algorithm. In this case, we start growing the chain regardless. If $I_{2}$ or $I_{4}$ are taken by the algorithm, we treat it similarly to when $I_{1}$ was taken and the algorithm kept replacing. If the algorithm hasn't taken any interval even after $I_{4}$ has arrived, the chain stops growing and we consider the intervals $(I_{1}, I_{2}, I_{4})$.\\\\
Let $I_{alg}$ be the interval taken by the algorithm (or $I_{2}$ if no intervals were taken). All remaining intervals to arrive will be subsumed by $I_{alg}$, and thus will not conflict with the two neighboring intervals taken by $OPT$. Assuming $I_{alg}$ conflicts with one interval on the left and one on the right, that leaves space of length ($L - 2v$) for all remaining intervals. Inside that space, the exact same construction described will take place, only when the algorithm takes a new interval, it implies $I_{alg}$ is replaced. This can be thought of as going a level deeper, and using a sufficiently smaller interval length. More precisely, if $L'$ is the new (smaller) length that will be used, it must hold that $L' \leq \frac{L+v}{4}$.\\\\
After each such construction is completed, the size of the optimal solution grows by at least 2. Because there are at most $k$ different lengths, this can be repeated at most $k$ times. Finally, because the algorithm only ever keeps a single interval in its solution, it will achieve a competitive ratio of $2k$.
\end{proof}
We now extend Theorem \ref{theo:neg-2k} and show that the $2k$ lower bound also holds for a class of randomized algorithms we call \textit{memoryless}. Intuitively, memoryless algorithms decide on taking or discarding the newly arrived interval, only by looking at the new interval, and all the intervals currently in the solution, using no information from previous online rounds. Although not randomized, it's worth noting that Algorithm~\ref{alg:SUB}, along with the optimal deterministic algorithms for call control \cite{garay1997efficient}, are memoryless.
\begin{definition}[\textit{Memoryless randomized algorithm}]
We call a randomized algorithm memoryless, if a newly arrived interval $I_{new}$ is taken with probability $F(I_{new},S)$, where $S=\{I_{1},I_{2},...\}$ is the set of intervals currently in the solution, and each interval is a tuple of the form $(s_{i},f_{i})$.
\label{def:mem-rand}
\end{definition}
Notice that definition \ref{def:mem-rand} only allows us to make use of random bits of this current step, and it does not allow access to random bits from previous rounds. In particular, this definition does not capture barely random algorithms (as mentioned in the introduction), or algorithms that fall under the \textit{classify and randomly select} paradigm.\\
\begin{theorem}
No memoryless randomized algorithm can achieve a competitive ratio better than $2k$ for the
problem of unweighted interval selection with revocable decisions and $k$ different lengths. More specifically, for all $p \in (0,1]$, there exists an $\epsilon_{p} >0$, such that the competitive ratio is greater than $2k-\epsilon_{p}$ with probability $p$.
\end{theorem}
\begin{proof}
The proof is very similar to the proof of Theorem \ref{theo:neg-2k}. The instance has the same structure as the one described in the proof of Theorem \ref{theo:neg-2k}, with the difference that whenever a new interval is taken with probability $p>0$, the adversary will have to add as many copies of that interval as necessary, so that it's taken w.h.p. Figure \ref{fig:no-mem-rand} shows an example of multiple copies of a new interval, ensuring that a replacement happens w.h.p.
\end{proof}
\begin{figure}
\caption{Replacing $I_{1}$ w.h.p.\ by presenting multiple copies of a new interval.}
\label{fig:no-mem-rand}
\end{figure}
It is worth mentioning that similar to how we extend our lower bound to hold for memoryless randomized algorithms, one can extend the $\log(n)$ lower bound for call control \cite{garay1997efficient} to also hold for memoryless randomized algorithms. We also prove the following lower bound on all randomized algorithms and instances with no proper inclusions, capturing the case of $k=1$.
\begin{lemma}
No randomized algorithm can achieve a competitive ratio better than $\frac{4}{3}$ for the case of unweighted instances with no proper inclusions.
\end{lemma}
\begin{proof}
Consider two different input sequences, $S_{1}$ and $S_{2}$, each consisting of a chain of intervals. The first two intervals $(I_{1},I_{2})$ are the same in both sequences and partially conflict, with $f_{2} > f_{1}$. $S_{1}$ has the third interval in the sequence partially conflict with $I_{2}$, whereas $S_{2}$ has the third interval conflict with $I_{1}$.
The adversary chooses one of the two sequences with probability $\frac{1}{2}$. Regardless of what the algorithm does after the arrival of $I_{2}$, it ends up with a single interval in its solution with probability $\frac{1}{2}$, whereas the optimal solution is always of size $2$. This leads to an expected solution size of $\frac{3}{2}$, and a competitive ratio of $\frac{4}{3}$.
\end{proof}
\subsection{Connection to Call Control}
In this section we relate our results to those of call control on the line.
In the problem of call control, we are given a graph $G(V,E)$, and requests (intervals) correspond to paths on the graph. We note that in the call control literature, it is assumed that requests can come in any order. The length of a request is defined as the length of the corresponding path, and a valid solution is a set of edge-disjoint paths. The objective is to maximize the number of accepted paths. The special case of line graphs is of most relevance to us, with Garay et al. \cite{garay1997efficient} giving an optimal $\log(n)$-competitive algorithm, where $n=|V|$. Their algorithm is similar to ours, with one important additional replacement rule. If the new interval's length is less than half the length of the shortest interval it's conflicting with, the new interval is taken by displacing whatever is necessary. Notice how by fixing each edge to be of the same Euclidean length, that notion of length matches ours, without modifying the instance in any meaningful way.\\\\
An apparent difference between our model and that of call control, is that in the latter, we're initially given the graph. It is not clear how an algorithm that uses that information would operate in our setting.
The optimal algorithm of \cite{garay1997efficient} does not use that information. We can use such an algorithm on an instance of AOIS, using our definition of length. To see how the $\log(n)$ upper bound would still apply, we describe a way to add vertices on the line after the entire AOIS instance has been revealed, such that we can view it as an instance of call control. There isn't just one way to add those vertices \footnote{Notice how we can take an instance of call control on $n$ points, and repeatedly add $n-1$ new points, one new point between every pair of consecutive points, while keeping it a valid instance.}, and ideally we're interested in the minimum number of vertices for a ``valid'' call control instance to be formed\footnote{The total number of vertices we add to view it as an instance of call control has no impact on the quality (i.e., the number of intervals accepted) of the solution. The $\log(n)$ is in terms of the minimum n.}. There are two requirements for the point-adding construction to be considered valid: (a) the points must be equally spaced, so that the two definitions of length in our model and call control match, and (b) every start and end point of an interval must coincide with a point. If all interval start and end points are rational numbers, we can multiply them by a common denominator, and use integer points. If there exist interval start/end points that are irrational, we can approximate the call control instance by adding sufficiently many points that are sufficiently close to each other, so that our comparison of two lengths in the AOIS instance, gives the same result as in the final call control instance. This is a technical issue we leave as an open problem.
Such a construction allows us to view the AOIS instance as an instance of call control. If the resulting graph was given to us a priori and we applied the call control algorithm to it, the final solution would be the same. This also allows us to use the $(\sqrt{5}+2)$-competitive algorithm by Garay et al. \cite{garay1997efficient} in the case of interval weights proportional to their length.\\\\
Applying an algorithm for AOIS on a call control instance is more straightforward. W.l.o.g. we can fix edge lengths to be unit lengths and directly apply the algorithm, while achieving the same competitive ratio.\\\\
To see how our $2k$ lower bound applies to call control on the line, notice that being allowed $k$ different lengths, there need to be enough points to allow the adversary to fit in the instance of Theorem \ref{theo:neg-2k}. Given that construction, we can compute a lower bound on the number of points (vertices) required. The base (shortest) intervals need two points each. Each level (base one included) has at most four intervals, all of the same length. Because the points are equally spaced, same-length intervals cover the same number of points. If an interval on level $j$ covers $x$ points, an interval of the upper (longer) level $j+1$ covers at least $4x$ points. This results in a lower bound of $2^{2k+1}$ points. Another way to view this, is that if the number of different lengths is sufficiently small (compared to $n$), the $2k$ lower bound applies, and our algorithm becomes optimal for call control on the line. In particular, this argument does not contradict the known $\log(n)$ lower bound.\\\\
Whether the algorithm by Garay et al. can be forced to a competitive ratio worse than $2k$ for AOIS is open. When deciding on which algorithm to use, potential knowledge about the instance structure may be of help. This becomes apparent from the two following observations.
\begin{observation}
There exists an instance where the algorithm by Garay et al. \cite{garay1997efficient} achieves a competitive ratio of $(2k-2)$, whereas algorithm \ref{alg:SUB} gets 1.
\end{observation}
This instance is essentially two long chain-like structures that meet in the middle, and it is depicted in figure \ref{fig:garay-2k}. A single side of the construction is better shown in figure \ref{fig:garay-2k-helper}. The arrival sequence is $L_{1},R_{1},L_{2},R_{2},L'_{2},R'_{2},...,L_{k-1},R_{k-1},L'_{k-1},R'_{k-1},M$. We have that $|L_{i}|=|L'_{i}|=|R_{i}|=|R'_{i}|$, and $|L_{i}|>2|L_{i+1}|$. The algorithm in \cite{garay1997efficient} would always take the next interval on the chains $L_{1},L_{2},...$ and $R_{1},R_{2},...$, ending up with only $M$, which would displace both intervals in the solution $\{L_{k-1},R_{k-1}\}$. The optimal solution $\{L_{1},R_{1},L'_{2},R'_{2},...,L'_{k-1},R'_{k-1}\}$ is obtained by our algorithm, and it is of size $(2k-2)$.
\begin{figure}
\caption{Instance where the algorithm by Garay et al.~\cite{garay1997efficient} achieves a competitive ratio of $(2k-2)$, whereas algorithm \ref{alg:SUB} gets 1.}
\label{fig:garay-2k}
\end{figure}
\begin{figure}
\caption{Single chain from the instance of figure \ref{fig:garay-2k}.}
\label{fig:garay-2k-helper}
\end{figure}
\begin{observation}
There exists an instance where algorithm \ref{alg:SUB} achieves a competitive ratio of $2k$, whereas the algorithm by Garay et al. gets 1.
\end{observation}
This instance \footnote{This instance is taken from lecture notes by Yossi Azar \url{http://www.cs.tau.ac.il/~azar/Online-Class10.pdf} .} is shown in figure \ref{fig:azar-fig}. Our algorithm ends up with a single interval in its solution, whereas |OPT| = $2k$.
\begin{figure}
\caption{Instance where our algorithm gets competitive ratio $2k$, whereas Garay et al. get 1.}
\label{fig:azar-fig}
\end{figure}
\section{Arbitrary Weights}
The case of intervals having an arbitrary weights has previously been considered for the case of single-length instances and ordered arrivals. Woeginger \cite{woeginger1994line} gives an optimal deterministic algorithm that is 4-competitive. Fung et al. \cite{fung2014improved} give a barely random algorithm that is 2-competitive, and show that it is optimal amongst barely random algorithms that choose between two deterministic algorithms. Woeginger \cite{woeginger1994line} shows that in the case of two different lengths, there does not exist a deterministic algorithm with finite competitive ratio. We show how to combine the barely random algorithm of Fung et al., with a classify and randomly select algorithm, to obtain a randomized algorithm for the any-order case, that achieves a competitive ratio of $2k$, when there are $k$ different lengths.\\\\
First, one can observe that the 2-competitive single-length algorithm by Fung et al. \cite{fung2014improved} (Theorem 3.1), works even in the case of any-order arrivals. Our algorithm (denoted as $ARB$), which requires knowledge of all the different lengths of the instance, works as follows: Choose one of $k$ lengths, uniformly at random. Then execute the algorithm of Fung et al., looking only at intervals of the chosen length.
\begin{theorem}
Algorithm $ARB$ achieves a competitive ratio of $2k$, for the problem of any-order interval selection, with $k$ different lengths and arbitrary weights.
\end{theorem}
\begin{proof}
Let $L_{1},L_{2},...,L_{k}$ be all the different lengths of an instance. Associated with length $L_{i}$, is a sub-instance $C_{i}$, comprised only of the intervals of length $L_{i}$. Let $OPT_{i}$ denote the weight of an optimal solution on sub-instance $C_{i}$. The expected performance of the algorithm can be bounded as follows:
$$ \E[ALG] \geq \frac{1}{k}\frac{OPT_{1}}{2} + \frac{1}{k}\frac{OPT_{2}}{2} +...+\frac{1}{k}\frac{OPT_{k}}{2} \geq \frac{OPT}{2k}$$
The first inequality holds because applying Fung et al. \cite{fung2014improved} on $C_{i}$ gives a solution of weight at least $\frac{OPT_{i}}{2}$. The second inequality holds because for every length $L_{j}$, the total weight of the intervals of length $L_{j}$ in the final solution, is at most $OPT_{j}$.
\end{proof}
We note that the algorithm does not need to know the actual lengths beforehand, or even $k$. The algorithm can start working with the first length that appears. When a second length arrives, the algorithm discards its current solution and chooses the new length with probability $\frac{1}{2}$. More generally, when the $i$th length arrives, the algorithm starts over using the new length with probability $\frac{1}{i}$. One can see that the probability that any length is chosen is $\frac{1}{k}$. Moreover, by replacing the 2-competitive arbitrary weights algorithm with a simple greedy algorithm, we get a randomized algorithm for the unweighted case that is $2k$-competitive and does not use revoking (as long as we know $k$).
\section{Random Order}
In this section, we assume the adversary chooses the instance configuration, but the intervals arrive in a random order. We consider unweighted, single-lengthed instances, and deterministic memoryless algorithms with revocable acceptances.
We consider various cases and show that the only type of algorithm that can possibly benefit from the random order model is a \textit{one-directional} algorithm, namely an algorithm that only replaces intervals on the left side, or only on the right side, regardless of the amount of overlap. For any other algorithm, we show how the adversary can enforce a competitive ratio of 2, resulting in no benefit over adversarial arrivals for single-lengthed instances. \\\\
On the instances we present, an algorithm only keeps one interval in its solution at any given time w.h.p., so the decision on taking or discarding a newly arrived interval, depends only on the local conflicts. The behavior of an algorithm is described by two functions, $F_{l}$ and $F_{r}$: $F_{l}(v) \in \{0,1\}$ denotes whether the algorithm replaces $I_{old}$ with $I_{new}$ when the conflict is on the left of $I_{old}$, and the overlap is equal to $v$. $F_{r}$ is defined similarly for conflicts on the right. We are concerned with single-lengthed instances, where there can only be partial conflicts. We also assume that a new interval is never taken if it conflicts with more than one existing intervals in the solution, and show why such an action cannot benefit the algorithm.\\\\
One might notice that in the above description, information about the endpoints of the conflicting intervals is omitted. This was to improve readability, and we do in fact allow a deterministic memoryless algorithm to know the endpoints of intervals. The lower bounds presented in this section still hold, regardless of where the intervals are placed on the line.\\\\
Let $L$ denote the interval length of an instance.\\
\begin{figure}
\caption{Random order bad instance. $\alpha < \frac{L}{2}$.}
\label{fig:rom_inst}
\end{figure}
Figure \ref{fig:rom_inst} depicts the general structure of the main bad instance in the random order model. There is a single copy of intervals $I_{2}$ and $I_{3}$, but a very large number of identical intervals $I_{1}$. We first prove the following lemmas about two different algorithms. We refer to an algorithm as \textit{always-replace}, if the new interval is always taken whenever a conflict occurs. Respectively, a \textit{never-replace} algorithm never takes the new interval when there's a conflict.
\begin{lemma}
An \textit{always-replace} algorithm has a competitive ratio of 2 for the instance of figure \ref{fig:rom_inst}.
\end{lemma}
\begin{proof}
Because there is a large number of $I_{1}$ intervals, when we look at the arrival sequence of intervals, $I_{1}$ will both precede and follow the arrival of $I_{2}$ and $I_{3}$ with very high probability. This will result in the algorithm ending up with a single interval $I_{1}$ in its solution, whereas the size of an optimal solution is 2.
\end{proof}
\begin{lemma}
A \textit{never-replace} algorithm has a competitive ratio of 2 for the instance of figure \ref{fig:rom_inst}.
\end{lemma}
\begin{proof}
The first online interval will be $I_{1}$ w.h.p. It will never be replaced and the algorithm will end up with one interval in its solution, admitting a competitive ratio of 2.
\end{proof}
\subsection{Overlap at most $\frac{L}{2}$}
We first consider the behavior of an algorithm for overlaps at most half the length of the interval. If there is an overlap amount $v\leq \frac{L}{2}$, such that $F_{l}(v) = F_{r}(v)$, then the adversary can use the aforementioned instance with $\alpha = \beta = v$. The algorithm's behavior would then be either that of an \textit{always-replace} algorithm, or that of a \textit{never-replace} algorithm, incurring a competitive ratio of 2. For an algorithm to do better, it must be that $F_{l}(v) \neq F_{r}(v), \forall v\leq \frac{L}{2}$. In other words, for each overlap, the algorithm would replace in one way. Assume now that there exist two different overlap amounts, $\alpha, \beta \leq \frac{L}{2}$, that replace in different directions, namely $F_{l}(\alpha)\neq F_{l}(\beta)$ and $F_{r}(\alpha)\neq F_{r}(\beta)$. W.l.o.g. assume $F_{r}(\alpha) = 1$. In this case, the adversary can use the instance of figure \ref{fig:rom_inst}. Interval $I_{1}$ arrives first w.h.p., and intervals $I_{2}$ and $I_{3}$ are rejected whenever they arrive, resulting in 2-competitiveness. To avoid this, it must be that $F_{l}(\alpha)= F_{l}(\beta)$ and $F_{r}(\alpha)= F_{r}(\beta)$ $\forall \alpha, \beta \leq \frac{L}{2}$, meaning that for overlaps at most half the length of the interval, the algorithm replaces in one direction.
\subsection{Overlap greater than $\frac{L}{2}$}
We now consider the algorithm's behavior for overlap amounts greater than $\frac{L}{2}$, knowing that for overlap $v\leq \frac{L}{2}$, the algorithm is one-directional. W.l.o.g. we assume that $F_{l}(v) = 1, \forall v\leq \frac{L}{2}$.
\begin{lemma}
If $\exists\, \gamma > \frac{L}{2}: F_{l}(\gamma) = 0$, the adversary can force a competitive ratio of 2.
\end{lemma}
\begin{proof}
Consider the instance of figure \ref{fig:rom-bad-2}. Because of the multiple copies of $I_{1}$, the arrival of $I_{2}$ and $I_{3}$ is preceded by $I_{1}$ intervals w.h.p. We know that $F_{r}(\alpha)=0$, and given that $F_{l}(\gamma) = 0$, we have that $I_{2}$ and $I_{3}$ will be rejected on arrival, leaving the algorithm with a single interval in its solution.
\end{proof}
\begin{lemma}
If $\exists\,\gamma > \frac{L}{2}: F_{r}(\gamma) = 1$, the adversary can force a competitive ratio of 2.
\end{lemma}
\begin{proof}
Using the previous lemma, we assume that $F_{l}(\gamma) = 1$. Using the same instance of figure \ref{fig:rom-bad-2}, w.h.p. $I_{2}$ will conflict with, and replace $I_{1}$ on arrival, and will then be replaced by another arrival of $I_{1}$. Interval $I_{3}$ will again be rejected on arrival, leaving the algorithm with $I_{1}$ in its solution.
\end{proof}
\begin{figure}
\caption{Random order bad instance. $\alpha < \frac{L}{2}$, $\gamma > \frac{L}{2}$.}
\label{fig:rom-bad-2}
\end{figure}
We now explain why an algorithm doesn't gain anything by replacing more than one interval at a time.
First, notice that in all the negative results presented in this section, there's never a conflict between more than two intervals w.h.p. Because we consider single-lengthed instances, a newly arrived interval can conflict with at most two other intervals currently in the algorithm's solution. Assume that for some overlap amounts $\alpha$ and $\beta$, a memoryless algorithm accepts a new interval conflicting with two already accepted intervals. At least one of $\{\alpha , \beta\}$ must be less than $\frac{L}{2}$ (otherwise the two current intervals would have a conflict, a contradiction). The adversary can then use the instance from figure \ref{fig:rom_inst} or figure \ref{fig:rom-bad-2}, with the appropriate $\alpha$ and $\beta$ overlaps. Regardless of the arrival order of $\{I_{2},I_{3}\}$, the algorithm ends up with a single interval in its solution.\\\\
Combining sections 5.1 and 5.2, we get the following theorem:
\begin{theorem}
Every deterministic memoryless algorithm that isn't one-directional, can be forced to a competitive ratio of at least 2 for the problem of online unweighted single-lengthed interval selection under random order arrivals.
\end{theorem}
\section{Conclusions \& Open Problems}
There are a number of possible directions for future work. A very natural direction is looking at specific weighted cases. Deterministically, Garay et al. \cite{garay1997efficient} have settled the case of proportional weights with an optimal, constant-competitive algorithm. It's interesting to see if a similar constant can be achieved for the more general weight functions studied by Woeginger \cite{woeginger1994line}, with or without randomness. We considered the case of arbitrary weights in Section 4.\\\\
It is fair to say that we have a very limited understanding of randomized algorithms for interval selection. In the unweighted adversarial setting, we have shown that no memoryless randomized algorithm can be constant-competitive and Fung et al. show that with one random bit, their 2-competitive algorithm is optimal. But we have no other negative results for unweighted or weighted interval selection when revoking is permitted. We would like to extend the memoryless model to algorithms with constant memory (beyond the current solution) as discussed further in the appendix. In particular, we would want to allow access to a few initial random bits which would also capture algorithms that fall under the \textit{classify and randomly select} paradigm. It would also be interesting to restrict the number of copies the adversary can generate, maybe only allowing a single copy of every interval, and see if memoryless randomized algorithms become more powerful.\\\\
As mentioned earlier, we can think of the parameter $k$ as a refinement of the total number of intervals, and the number of vertices of a call control instance. We find it interesting to see if restricting the number of different lengths can yield better results for the problem of call control on other classes of graphs, such as trees (see \cite{awerbuch1994competitive}).\\\\
Finally, to the best of our knowledge, we have initiated the study of this model under random order arrivals, where there are many open questions for future work. We have only looked at single-lengthed instances, a special case that, in the adversarial setting, doesn't even require revoking. Looking at multiple lengths under random arrivals, is a natural next step. Lastly, we have shown that one-directional algorithms for single-lengthed instances, are the only type of deterministic memoryless algorithms that can possibly benefit from random order arrivals. We don't have any provable upper bounds on the performance of a one-directional algorithm, but
we have conducted experiments that suggest it may achieve much better than 2-competitiveness. This is an interesting contrast with the adversarial model, where a one-directional algorithm would perform arbitrarily badly.\\\\
\textbf{Acknowledgements:} We would like to thank Denis Pankratov, Adi Ros{\'e}n and Omer Lev for many helpful comments.
\printbibliography
\appendix
\section{Memory in online computation}\label{app:A}
The impact of limited memory (or time) is usually not considered in online competitive analysis, since the analysis is information theoretic and independent of complexity issues. Of course, the assumption is that algorithms are usually efficient (in terms of time and space) while negative results are that much stronger as they do not require any complexity assumptions.
However, the arguments for limited memory in streaming algorithms apply equally well to online algorithms which are forced to immediately make decisions for each input as it occurs. There have been some limited results concerning memory with respect to competitiveness. Perhaps the first study of memoryless algorithms occurs in Kleinberg's \cite{Kleinberg94} study of balancing algorithms for 2-server algorithms where it is shown that the optimal competitive ratio cannot be achieved. The first issue is how to define memoryless and bounded memory algorithms in online computation. In the earlier conference version of Adler and Azar \cite{adler2003beating}, they ask ``is there a memoryless online algorithm for interval selection that achieves a constant competitive ratio''. In the journal version they reframe this question and ask ``is there a bounded memory algorithm achieving a constant competitive ratio''. They do not provide a definite meaning for the term {\it bounded memory}.
Emek et al. \cite{emek2016space} provide an interesting streaming based online algorithm (with revoking) that is ``barely random'' and achieves an improved constant competitive ratio. They seem to implicitly argue that their algorithm is ``bounded memory'' in the sense that the additional memory (beyond the current solution) is linear in the size of the optimal solution.
Here we are counting memory in terms of the number of intervals and not necessarily in terms of bits of memory. This is a ``permissive'' definition of memoryless that could nicely serve in defining ``semi-streaming'' that goes beyond graph optimization problems.
In this ``semi-streaming'' model, Cabello and Pérez-Lantero \cite{cabello2017interval} give alternative algorithms that match the performance of Emek et al. \cite{emek2016space} for interval selection and same-length interval selection. In addition, for interval selection (on $n$ equally spaced points) they show how to ($2+\epsilon$)-approximate the optimal solution \textit{size} using $O(\epsilon^{-5}\log^{6}n)$ space, and show that no better approximation can be achieved using $o(n)$ space. This lower bound on memory also applies to algorithms for computing a solution in the model proposed by Halld{\'o}rsson et al. \cite{halldorsson2010streaming}.\\\\
In section 3, we define memoryless in a strict sense, namely that the algorithm does not maintain any information except the current solution. This is the definition of memoryless as used in Raghavan and Snir \cite{raghavan1989memory}, Koutsoupias \cite{koutsoupias2009k}, and Coester and Koutsoupias \cite{coester2019online}. The strict definition is also sufficient for the simple $\frac{1}{4}$ competitive, 1 random bit randomized algorithm (without revoking) for the proportional knapsack\footnote{In the proportional knapsack the profit of an item is equal to its size. We assume every item has $size > 0$ or that the representation of the $i^{th}$ item includes the index $i$.}.
But as stated the strict definition does not include barely random algorithms even for those which do not use any memory beyond remembering a few initial bits. However, we would argue that remembering any initial random bits is a form of memory. \\\\
This leads us to what is arguably the most interesting interpretation of the Adler and Azar question; namely, is there a constant competitive (perhaps barely random) randomized algorithm that does not store any information besides the current solution and some number of initial bits. Of course we would allow such algorithms to use fresh bits (as well as the current solution) in randomly deciding the decision for the current input item. The Emek et al. algorithm uses memory well beyond the 2 random bits. In contrast, the Fung et al. \cite{fung2014improved} algorithm for single length, arbitrary weights, does not use any additional memory beyond the one initial random bit. Indeed,
many classify and randomly select algorithms only remember the initial random bits needed to classify an input item. We can also ask more generally when an algorithm only maintains a constant number of bits (and not necessarily initial random bits) in both deterministic and randomized algorithms. B{\"{o}}ckenhauer et al. \cite{bockenhauer2014online} show that the proportional and general knapsack problems exhibit ``phase transitions'' as to how much advice and random bits are needed to achieve certain competitive ratios. The results of \cite{pena2019extensions,BuchbinderNW23,BockenhauerKKK2017,DurrKR2016} provide interesting phase transitions for randomized algorithms and deterministic algorithms with advice for the online unweighted bipartite matching problem. \\\\
Finally we mention that the results of Mikkelsen \cite{mikkelsen2015randomization} (for repeatable problems) and B{\"{o}}ckenhauer \cite{BockenhauerKKK2017} provide interesting results about the relation between randomized algorithms and advice. The bipartite matching problem and the interval selection problem are repeatable problems. It is interesting to explore this relation further with regard to the interval selection problem. Namely, does interval selection have a phase transition in that 1 bit of randomness is sufficient for a barely random $2$ competitive ratio whereas no additional random or advice bits can help, or is there perhaps some threshold at $\Theta(\log n)$ where that amount of advice (randomness) can asymptotically beat the $2$ ratio for interval selection.
It is interesting to observe the difference between advice bits and random bits for the proportional and general knapsack problems as proven in B{\"{o}}ckenhauer et al. \cite{bockenhauer2014online}. Is there a provable difference for interval selection between advice bits and random bits?
\end{document} |
\begin{document}
% NOTE(review): these shorthand macros were corrupted by a global
% find-and-replace that expanded each macro's name inside its own
% definition; reconstructed originals follow. Body text now uses the
% full commands directly, so these are kept only for fidelity.
\def\ra{\rangle}
\def\la{\langle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\bea{\begin{eqnarray}}
\def\eea{\end{eqnarray}}
\def\ha{{\hat a}}
\def\hb{{\hat b}}
\def\hu{{\hat u}}
\def\hv{{\hat v}}
\def\hc{{\hat c}}
\def\hd{{\hat d}}
\def\no{\noindent}
\def\non{\nonumber}
\def\hin{\hangindent=45pt}% reconstructed; original shorthand name uncertain
\def\v{\vskip 12pt}
\def\idop{\mbox{\small 1} \!\! \mbox{1}}% reconstructed; original shorthand name lost
\title{Quantum Imaging and Metrology}
\author{Hwang Lee, Pieter Kok, and Jonathan P. Dowling}
\address{
Exploration Systems Autonomy, Section 367, MS 126-347 \\
Quantum Computing Technologies Group \\
Jet Propulsion Laboratory,
California Institute of Technology \\
4800 Oak Grove Drive, Pasadena, CA~~91109-8099
}
\maketitle
\abstracts{
The manipulation of quantum entanglement has found enormous potential
for improving performances of devices such as gyroscopes, clocks, and
even computers. Similar improvements have been demonstrated for
lithography and microscopy. We present an overview of some aspects of
enhancement by quantum entanglement in imaging and metrology.
}
In state-of-the-art optical-lithographic, semiconductor etching
techniques, the Rayleigh diffraction limit puts a lower bound on the
feature size that can be printed on a chip. This limit states that the
minimal resolvable feature is on the order of $\lambda/4$, where
$\lambda$ is the wavelength of the light used. Classically, if you
want to etch features of size 50~nm and smaller, you will be forced
to use optical radiation with wavelengths less than 200~nm. Hence, in
the optical lithographic community great efforts are put into producing
commercial lithographic schemes that utilize wavelengths in the hard
UV and X-ray regimes\cite{yablo99}. However, such an approach
introduces severe technological and commercial difficulties. For
example, mirrors and lenses that are cheap and well understood in the
optical regime are much less common and much harder (and more
expensive) to make in the UV and X-ray regions of the
spectrum, and the problems become worse the shorter you go. Recently,
it has been shown, however, that the Rayleigh diffraction limit in
optical lithography can be circumvented by the use of path-entangled
photon number states\cite{boto00}.
Fundamentally, optical light beams used for lithography are
quantum-mechanical in nature: they are superpositions of photon-number
states. This quantum language allows us to tailor non-classical states
of light, in which it is possible to program nonlocal correlations
between photons. Typically, one photon contains information about the
location and the momentum of other photons in the stream\cite{shih01}.
If the proper nonlocal correlations are employed, you can actually
manipulate the location at which the light strikes the lithographic
substrate, such that features of size $\lambda/4N$ can be etched using
$N$ photons of wavelength $\lambda$. The use of such an effect was
also proposed in sub-natural spectroscopy\cite{rathe95}.
This remarkable property of quantum-correlated photons has been
recognized for some years in the context of quantum optical
interferometers. In a typical optical interferometer in which (ordinary
coherent) laser light enters via only one port, the phase sensitivity
in the shot noise limit scales as $\Delta \varphi = 1/\sqrt{\bar n}$
where ${\bar n}$ is the mean number of photons\cite{scully97}. It
would seem that any desired sensitivity $\Delta \varphi$ could be
attained by simply increasing the laser power. However, when the
intensity of the laser (${\bar n}$) becomes too large, the power
fluctuations at the interferometer's mirrors introduce additional
noise terms that limit the device's overall sensitivity.
Much of the early interest in squeezed
light emphasized overcoming this signal-to-noise barrier. In the early
1980s it was demonstrated that squeezing the vacuum in the unused
input port of the interferometer causes the phase sensitivity to beat
the standard shot-noise limit\cite{caves81}. The total laser power
required for a given amount of phase sensitivity $\varphi$ is thus
greatly reduced.
In 1986, Yurke and collaborators, as well as Yuen, considered the
question of phase noise reduction using correlated particles in number
states, incident upon both input ports of a Mach-Zehnder
interferometer\cite{yurke86,yurke86b,yuen86}. They showed that if $N$
quantum particles entered into each input port of the interferometer
in nearly equal numbers (and in a highly entangled fashion), then, for
large $N$, it was indeed possible to obtain an asymptotic phase
sensitivity of $1/N$, instead of $1/\sqrt{N}$. This is the best you
can do using number states in only one input port, and it indicates
that the photon counting noise does not originate from the intensity
fluctuations of the input beam\cite{scully93}. Similar observations
were made by many authors for optical
interferometers\cite{holland93,hillery93,brif96,kim98} as well as
Ramsey-type atom interferometers
\cite{yamamoto95,bollinger96,bouyer97,dowling98}. Wineland and
co-workers, in particular, have shown that the optimal frequency
measurement can be achieved by using {\it maximally entangled
states}\cite{bollinger96}. These maximally entangled states are of a
particular interest, since they have a similar form as the ones
required for quantum lithography.
Let us take a look at the quantum enhancement due to maximally
entangled states, using standard parameter estimation. Consider an
ensemble of $N$ two-state systems in the state:
\begin{equation}
|\varphi\rangle = \frac{1}{\sqrt{2}}
(|0\rangle+e^{i\varphi}|1\rangle)
\end{equation}
\noindent
where $|0\rangle$ and $|1\rangle$ denote the two basis states. The
phase information can be obtained by measurement of an observable
$\hat{A} =|0\rangle\langle 1| + |1\rangle\langle 0|$. The expectation
value of $\hat{A}$ is then given by
\begin{equation}
\langle\varphi|\hat{A}|\varphi\rangle=\cos\varphi\; .
\end{equation}
\noindent
When we repeat this experiment $N$ times, we obtain
$ \langle\varphi_R|\hat{A}_R|\varphi_R\rangle=N\cos\varphi,
$
where
$
|\varphi_R\rangle = |\varphi\rangle_1 \ldots |\varphi\rangle_N
$,
and
$\hat{A}_R = {\mbox{\Large $\oplus$}}_{k=1}^N
\hat{A}^{(k)}
$.
Since $\hat{A}_R^2=\mbox{\small 1} \!\! \mbox{1}$, the variance of $\hat{A}_R$, given $N$
samples, is readily computed to be $(\Delta A_R)^2 =
N(1-\cos^2\varphi) = N \sin^2 \varphi$. According to estimation
theory\cite{helstrom76}, we have
\begin{equation}\label{est}
\Delta\varphi_{\rm SL} = \frac{\Delta A_R}{|d\langle
\hat{A}_R\rangle/d\varphi|} = \frac{1}{\sqrt{N}}\; .
\end{equation}
This is the standard variance in the parameter $\varphi$ after $N$
trials. In other words, the uncertainty in the phase is inversely
proportional to the square root of the number of trials.
This is called the {\em shot-noise limit}.
Now consider an entangled state
\begin{equation}\label{entang}
|\varphi_N\rangle\equiv \frac{1}{\sqrt{2}}
\left(|N,0\rangle + e^{iN\varphi}|0,N\rangle\right)\; ,
\end{equation}
where $|N,0\rangle$ and $|0,N\rangle$ are
collective states of $N$ particles, defined as
\begin{eqnarray}
|N,0\rangle &=& |0\rangle_1 |0\rangle_2 \cdots |0\rangle_N \nonumber \\
|0,N\rangle &=& |1\rangle_1 |1\rangle_2 \cdots |1\rangle_N .
\label{noon}
\end{eqnarray}
The relative phase $e^{iN\varphi}$ is accumulated when each particle
in state $|1\rangle$ acquires a phase shift of $e^{i \varphi}$. An
important question now is what we need to measure in order to extract
the phase information. Recalling the single-particle case of ${\hat
A}=|0\rangle\langle 1| + |1\rangle \langle 0|$, we need an observable that does what
the operator $|0,N\rangle\langle N,0| + |N,0\rangle\langle 0,N|$
does. For the given state of Eq.~(\ref{entang}), we can see that this
can be achieved by an observable, $\hat{A}_N = {\mbox{\Large
$\otimes$}}_{k=1}^N \hat{A}^{(k)} $. The expectation value of
$\hat{A}_N$ is then
\begin{equation}\label{cosn}
\langle\varphi_N |\hat{A}_N| \varphi_N\rangle = \cos N\varphi\; .
\end{equation}
Again, $\hat{A}_N^2=\mbox{\small 1} \!\! \mbox{1}$, and $(\Delta A_N)^2 = 1-\cos^2 N\varphi =
\sin^2 N\varphi$. Using Eq.\ (\ref{est}) again, we obtain the
so-called Heisenberg limit (HL) of the minimal detectable phase:
\begin{equation}\label{bol}
\Delta\varphi_{\rm HL} = \frac{\Delta A_N}{|d\langle \hat{A}_N
\rangle/d\varphi|}=\frac{1}{N}\; .
\end{equation}
The precision in $\varphi$ is increased by a factor $\sqrt{N}$ over
the standard noise limit. Of course, the preparation of a quantum
state such as Eq.~(\ref{entang}) is essential to the given
protocol\cite{lee02b}.
In quantum lithography, we exploit the $\cos N\varphi$ behavior,
exhibited by Eq.~(\ref{cosn}), to draw closely spaced lines on a
suitable substrate\cite{boto00}. Entanglement-enhanced frequency
measurements\cite{bollinger96} and gyroscopy\cite{dowling98} exploit
the $\sqrt{N}$ increased precision given by Eq.~(\ref{bol}).
The physical interpretations of $A_N$ and the phase $\varphi$ will
differ in the different protocols. Three distinct physical
representations of this construction are of particular
interest.
First, in a Mach-Zehnder interferometer [as depicted in Fig.~1(a)] the
input light field is divided into two different paths by a beam
splitter, and recombined by another beam splitter. The phase
difference between the two paths is then measured by balanced
detection of the two output modes. Secondly, in Ramsey-type
spectroscopy, atoms are put in a superposition of the ground state
and an excited state with a $\pi/2$-pulse [see Fig.~1(b)]. After a
relative phase shift is accumulated by the atomic states during the free
evolution, the second $\pi/2$-pulse is applied and the internal state
of the outgoing atom is measured. The third system is given by a qubit
that undergoes a Hadamard transform $H$, then picks up a relative
phase and is transformed back with a second Hadamard transformation
[Fig.~1(c)]. This representation is more mathematical than the
previous two, and it allows us to extract the unifying mathematical
principle that underlies the three systems.
\begin{figure}
\caption{
Three distinct
representations of phase measurement:
(a) a Mach-Zehnder interferometer,
(b) Ramsey spectroscopy,
and (c) a generic quantum logic gate.
The two basis states of a qubit,
$|0\rangle$ and $|1\rangle$, may be regarded as the atomic
two levels, or the two paths in a Mach-Zehnder interferometer.
The state $|\varphi\rangle$ can be regarded as
a single photon state just before the second beam splitter
in the Mach-Zehnder interferometer, or the single atom state
just before the second $\pi/2$-pulse in the Ramsey interferometer.
}
\end{figure}
In all three protocols, the initial state is transformed
by a discrete Fourier transform (beam splitter, $\pi/2$-pulse or
Hadamard), then picks up a relative phase, and is transformed back
again. The Hadamard transform is the standard (two-dimensional)
``quantum'' finite Fourier transform, such as used in the
implementation of Shor's algorithm\cite{ekert96}. The phase shift,
which is hard to measure directly, is applied to the transformed
basis. The result is a bit flip in the initial basis $\{
|0\rangle,|1\rangle\}$, and this is readily measured. We call the
formal equivalence between these three systems the
{\em quantum Rosetta stone}\cite{lee02}.
In discussing quantum computer circuits
with researchers from the fields of quantum optics or atomic
clocks, we find the ``quantum Rosetta stone'' a useful tool. For
example, in a Ramsey-type atom interferometer, noting that
$\hat{A}\equiv \sigma_x = H \sigma_z H$, and
\be
\hat{A}_N = {\mbox{\Large $\otimes$}}_{k=1}^N \hat{A}^{(k)}
=
\left( {\mbox{\Large $\otimes$}}_{k=1}^N H^{(k)}\right)
\hat{A}_N^\prime
\left({\mbox{\Large $\otimes$}}_{k=1}^N H^{(k)}\right),
\ee
we need to measure $\hat{A}_N^\prime = {\mbox{\Large
$\otimes$}}_{k=1}^N \sigma_z^{(k)}$ after the second beam
splitter (see Fig.~2). On the other hand, a direct optical measurement
of $\hat{A}_N$ corresponds to an $N$-photon absorption scheme in
quantum lithography.
\begin{figure}
\caption{
Quantum interferometry.
{\tt NOON}}
\end{figure}
\begin{figure}
\caption{
Quantum lithography.
{\tt NOON}}
\end{figure}
Such an enhancement of a factor of $\sqrt{N}$ in quantum metrology is
known to be the best possible precision permitted by the uncertainty
principle\cite{ou96}. It is also interesting to see that exploiting
quantum correlations yields a $\sqrt{N}$ enhancement in Grover's
search algorithm, which has also been shown to be optimal\cite{zalka99}.
Is this $\sqrt{N}$ enhancement over the classical protocols universal?
It certainly does seem so. But then Shor's algorithm shows its
exponential improvement over the best {\em known} classical
algorithm.
We would like to end this article with the ``Williams-Dowling Inverse-Shor
Conjecture''\cite{williams00}. Assume that the best {\em known}
classical factoring algorithm is the best {\em possible} one. Then,
according to Conjecture 1, the quantum Rosetta stone tells us that
there must exist an interferometric measurement strategy for phase
sensitivity that is exponentially better than the shot-noise limit
(the best classical strategy). However, we know that this is false.
Conjecture 2, therefore, tells us that according to the quantum
Rosetta stone, Shor's algorithm is a $\sqrt{N}$ improvement over the
{\em best, but unknown} classical protocol. Thus,
there exists a
classical algorithm that is exponentially faster than the {\em best known}
one,
though quadratically slower than the quantum algorithm!
\section*{Acknowledgments}
This work was carried out by the Jet Propulsion Laboratory,
California Institute of Technology,
under a contract with the National Aeronautics
and Space Administration.
We wish to thank
C.\ P.\ Williams,
and D.\ J.\ Wineland for stimulating discussions.
We would like to acknowledge support from the ONR,
ARDA, NSA, and DARPA.
P.K.\ and H.L.\ would also like to acknowledge
the National Research Council.
\end{document} |
\begin{document}
\title{Popular Matchings and Limits to Tractability\thanks{This paper is a merger of results shown in the arXiv papers \cite{FPZ18,K18-roommates} along with one in \cite{Kav18} and new results.}}
\author{Yuri Faenza\inst{1} \and Telikepalli Kavitha\inst{2} \and Vladlena Powers\inst{1} \and Xingyu Zhang\inst{1}}
\institute{IEOR, Columbia University, New York, USA,\\
\email{yf2414, vp2342, [email protected]} \and Tata Institute of Fundamental Research,\\ Mumbai, India, \email{[email protected]}}
\maketitle
\pagestyle{plain}
\begin{abstract}
We consider {\em popular matching} problems in both bipartite and non-bipartite graphs with strict preference lists. It is known that every stable matching is a min-size popular matching. A subclass of max-size popular matchings called
{\em dominant matchings} has been well-studied in bipartite graphs: they always exist and there is a simple linear time algorithm to find one.
We show that stable and dominant matchings are the only two tractable subclasses of popular matchings in bipartite graphs; more precisely, we show that it is NP-complete to decide if $G$ admits a popular matching that is neither stable nor dominant. We also show a number of related hardness results, such as (tight) inapproximability of the maximum weight popular matching problem. In non-bipartite graphs, we show a strong negative result: it is NP-hard to decide whether a popular matching exists or not, and the same result holds if we replace \emph{popular} with \emph{dominant}.
On the positive side, we show the following results in any graph:
\begin{itemize}
\item we identify a subclass of dominant matchings called {\em strongly dominant} matchings and show a linear time algorithm to decide if a strongly dominant matching exists or not;
\item we show an efficient algorithm to compute a popular matching of minimum cost in a graph with edge costs and bounded treewidth.
\end{itemize}
\end{abstract}
\section{Introduction}
\label{intro}
The marriage problem considered by Gale and Shapley~\cite{GS62} is arguably the most relevant two-sided market model, and has been studied and applied in many areas of mathematics, computer science, and economics. The classical model assumes that the input is a complete bipartite graph, and that each node is endowed with a strict preference list over the set of nodes of the opposite color class. The goal is to find a matching that respects a certain concept of fairness called \emph{stability}. An immediate extension deals with incomplete lists, i.e., it assumes that the input graph is bipartite but not complete. In this setting, the problem enjoys strong and elegant structural properties, that lead to fast algorithms for many related optimization problems (a classical reference in this area is~\cite{GI}).
In order to investigate more realistic scenarios, several extensions of the above Gale-Shapley model have been investigated. On one hand, one can change the structure of the \emph{input}, admitting e.g.~ties in preference lists, or of preference patterns that are given by more complex choice functions, or allow the input graph to be non-bipartite (see e.g.~\cite{Manlove} for a collection of extensions). On the other hand, one can change the requirements of the \emph{output}, i.e., we could ask for a matching that satisfies properties other than stability. For instance, relaxing the stability condition to \emph{popularity} allows us to overcome one of the main drawbacks of stable matchings and this is its size --- the restriction that blocking pairs are forbidden constrains the size of a stable matching; there are simple instances where the size of a stable matching is only half the size of a maximum matching (note that a stable matching is maximal, so its size is at least half the size of a maximum matching).
Popularity is a natural relaxation of stability: roughly speaking, a matching $M$ is \emph{popular} if the number of nodes that prefer $M$ to any matching $M'$ is at least the number of nodes that prefer $M'$ to $M$. One can show that stable matchings are popular matchings of minimum size, and a maximum size popular matching can be twice as large as a stable
matching. Hence, popularity allows for matchings of larger size while still guaranteeing a certain fairness condition.
Popular matchings (and variations thereof) have been extensively studied in the discrete optimization community, see e.g. \cite{Biro,CK16,HK11,HK17,KMN09,Kav12,Kav16}, but there are still large gaps on what we know on the tractability of optimization problems over the set of popular matchings. Interestingly, all tractability results in popular matchings rely on connections with stable matchings. For instance, Kavitha~\cite{Kav12} showed that a max-size popular matching can be found efficiently by a combination of the \emph{Gale-Shapley} algorithm and {\em promotion} of nodes rejected once by all neighbors. Cseh and Kavitha~\cite{CK16} showed that a pair of nodes is matched together in some popular matching if and only if this pair is matched together either in some stable matching or in some \emph{dominant} matching. Dominant matchings are a subclass of max-size popular matchings, and these are equivalent (under a simple linear map) to stable matchings in a larger graph. Recently, Kavitha \cite{Kav18} showed that when there are weights on edges, the problem of finding a max-weight popular matching is NP-hard.
The notion of popular matchings can be immediately extended to non-bipartite graphs. Popular matchings need not always exist in a non-bipartite graph and it was not known if one can efficiently decide if a popular matching exists or not in a given non-bipartite graph.
\noindent {\bf Our Contribution.} In this paper, we show NP-hardness, inapproximability results, and polynomial-time algorithms for many problems in popular matchings, some of which have been posed as open questions in many papers in the area (see e.g.~\cite{Cseh on popular matchings,CK16,HK17,Kav16,Manlove}). Our most surprising result is probably the following: it is NP-complete to decide if a bipartite graph has a popular matching that is neither stable nor dominant (see Theorem~\ref{final-thm}).
Stable matchings and dominant matchings always exist in bipartite graphs and there are linear time algorithms to compute these matchings. Recall that a stable matching is a min-size popular matching and a dominant matching is a max-size popular matching:
thus finding a min-size (similarly, max-size) popular matching is easy. We are not aware of any other natural combinatorial optimization problem where finding elements of min-size (similarly, max-size) is easy but to decide whether there exists {\em any} element that is neither min-size nor max-size is NP-hard.
We also deduce other hardness results: it is NP-complete to decide if there exists a popular matching that contains or does not contain two given edges (see Theorem \ref{thm:forced-forb3} and Section~\ref{sec:forbidden-forced}, where this and some other positive and negative results are discussed); unless P$=$NP, the maximum weight popular matching problem with nonnegative costs cannot be approximated better than a factor $1/2$, and this is tight, since a $1/2$-approximation follows by known results (see Theorem~\ref{thr:mwp}). Moving to non-bipartite graphs, we show that the problem of deciding if a popular matching exists is NP-complete\footnote{Very recently, this result also appeared in~\cite{GMSZ18} on arXiv; our results (from \cite{K18-roommates}) were obtained independently and our proofs are different.} (see Theorem~\ref{main-thm}). We also show that the problem stays NP-complete if we replace \emph{popular} with \emph{dominant} (see Theorem~\ref{second-thm}).
Altogether, these negative results settle the main open questions in the area, and cast a dark shadow on the tractability of popular matchings. While stable matchings are a tractable subclass of popular matchings in non-bipartite graphs~\cite{Irv85}, the dominant matching problem is NP-hard in non-bipartite graphs, as shown here. The fact that stable matchings and dominant matchings are the only tractable subclasses of popular matchings in bipartite graphs prompts the following question: is there a non-trivial subclass of dominant matchings that is tractable in {\em all} graphs?
We show the answer to the above question is ``yes'' by identifying a subclass called {\em strongly dominant} matchings (see Definition~\ref{def:strong-dominant}): in bipartite graphs, these two classes coincide.
We show a simple linear time algorithm for the problem of deciding if a given graph admits a strongly dominant matching or not and to find one, if so.
We also show that a popular matching of minimum cost (with no restriction on the signs of the cost function) in bipartite and non-bipartite graphs can be found efficiently if the treewidth of the input graph is bounded.
\noindent{\bf Background and Related results.}
Algorithmic questions for popular matchings were
first studied in the domain of {\em one-sided} preference lists~\cite{AIKM07} in bipartite instances
where it is only nodes on one side, also called agents, that have preferences over their neighbors, also called objects.
Popular matchings need not always exist here; however, fractional matchings that are popular always exist~\cite{KMN09}.
Popular matchings always exist in a bipartite instance $G$ with two-sided strict preference lists~\cite{Gar75}. Polynomial time
algorithms to compute a max-size popular matching here were given in \cite{HK11,Kav12} and these algorithms always compute dominant matchings. The equivalence between dominant matchings in the given bipartite graph and stable matchings in a larger bipartite graph shown in \cite{CK16} implies a polynomial time algorithm to solve the max-weight popular matching problem in a complete bipartite graph. It was shown in \cite{Kav18} that it is $\mathsf{NP}$-hard to find a max-size popular matching in a non-bipartite graph (even when a stable matching exists) and it was shown in~\cite{HK17} that it is $\mathsf{UGC}$-hard to compute a $\Theta(1)$-approximation of a max-weight popular matching in non-bipartite graphs.
It was very recently shown \cite{Kav-WG18} that given a bipartite graph $G$ along with a parameter $k\in(\mathsf{min},\mathsf{max})$, where
$\mathsf{min}$ is the size of a stable matching and $\mathsf{max}$ is the size of a dominant matching,
it is NP-hard to decide whether $G$ admits a popular matching of size $k$ or not. Note that our NP-hardness result is a much stronger statement as we show that the problem of deciding whether $G$ admits a popular matching of
{\em any} intermediate size (rather than a particular size $k$) is NP-hard.
\noindent {\bf Organization of the paper.} Definitions and important properties of stable and popular matchings are given in Section \ref{prelims}. A linear time algorithm for strongly dominant matchings is given in Section~\ref{section4}. Our main gadget construction is given in Section~\ref{sec:hardness}, where we also show that the problem of deciding if a graph admits a popular matching that is neither stable nor dominant is NP-complete. Other hardness (and some related positive) results are given in Section~\ref{sec:consequences}. In Section~\ref{sec:treewidth}, we give an algorithm for finding a popular matching of minimum cost in a graph with bounded treewidth.
\section{Preliminaries}\label{prelims}
\subsection{Definitions}\label{sec:definitions}
Throughout the paper, we will consider problems where our input is a graph $G$, together with a collection of rankings, one per node of $G$, with each node ranking its neighbors in a strict order of preference. We will denote an edge of $G$ between nodes $u$ and $v$ as $(u,v)$ or $uv$. A matching $M$ in $G$ is {\em stable} if there is no \emph{blocking edge} with respect to $M$, i.e. an edge whose both endpoints strictly prefer each other to their respective assignments in $M$. It follows from the classical work of Gale and Shapley~\cite{GS62} that a stable matching always exists when $G$ is bipartite and such a matching can be computed in linear time.
The notion of popularity was introduced by G\"ardenfors~\cite{Gar75} in 1975.
We say a node $u$ {\em prefers} matching $M$ to matching $M'$ if either (i)~$u$ is matched in $M$
and unmatched in $M'$ or (ii)~$u$ is matched in both $M, M'$ and $u$ prefers $M(u)$ to $M'(u)$, where $M(u)$ is the partner of $u$ in $M$.
For any two matchings $M$ and $M'$, let $\phi(M,M')$ be the number of nodes that prefer $M$ to $M'$.
\begin{definition}
\label{pop-def}
A matching $M$ is {\em popular} if $\phi(M,M') \ge \phi(M',M)$ for every matching $M'$ in $G$,
i.e., $\Delta(M,M') \ge 0$ where $\Delta(M,M') = \phi(M,M') - \phi(M',M)$.
\end{definition}
Thus, there is no matching $M'$ that would defeat a popular matching $M$ in an election between $M$ and $M'$, where each node casts a vote
for the matching that it prefers. Since there is no matching where more nodes are {\em better-off} than in a popular matching, a popular matching
can be regarded as a ``globally stable matching''. Equivalently, popular matchings are weak {\em Condorcet winners}~\cite{condorcet} in the voting instance where nodes are voters and all feasible matchings are the candidates.
Though (weak) Condorcet winners need not exist in a general voting instance, popular matchings always exist in bipartite graphs,
since every stable matching is popular~\cite{Gar75}. Popular matchings have been well-studied in bipartite graphs, in particular,
a subclass of max-size popular matchings called {\em dominant matchings} is well-understood~\cite{CK16,HK11,Kav12}.
\begin{definition}
\label{def:dominant}
A popular matching $M$ is dominant in $G$ if $M$ is more popular than any larger matching in $G$, i.e., $\Delta(M,M') > 0$ for any matching
$M'$ such that $|M'| > |M|$.
\end{definition}
Dominant matchings always exist in a bipartite graph and such a matching can be computed in linear time~\cite{Kav12}. Every polynomial time algorithm currently known to find a popular matching in a bipartite graph finds either a stable matching~\cite{GS62} or a dominant matching~\cite{HK11,Kav12,CK16}.
In some problems, together with the graph $G$ and the preference lists, we will also be given a weight function $c: E \rightarrow {\mathbb{R}}$. The \emph{weight} (or \emph{cost}) of a matching $M$ of $G$ (with respect to $c$) is defined as $c(M):=\sum_{e \in M} c(e)$.
\subsection{Combinatorial characterization of popular and dominant matchings}
\label{sec:comb-prelims}
Fix a matching $M$ of $G$. A node $u$ of $G$ is \emph{$M$-exposed} if $\delta(u) \cap M=\emptyset$, and \emph{$M$-covered} otherwise. An \emph{$M$-alternating} path (resp. cycle) in $G$ is a path (resp. cycle) whose edges alternate between $M$ and in $E(G)\setminus M$. An $M$-alternating path is \emph{$M$-augmenting} if its first and last nodes are $M$-exposed. We can associate \emph{labels} to edges from $E\setminus M$ as follows:
\begin{itemize}
\item an edge $(u,v)$ is $(-,-)$ if both $u$ and $v$ prefer their respective partners in $M$ to each other;
\item $(u,v) = (+,+)$ if $u$ and $v$ prefer each other to their partners in $M$;
\item $(u,v) = (+,-)$ if $u$ prefers $v$ to its partner in $M$ and $v$ prefers its partner in $M$ to $u$.
\end{itemize}
We write $(u,v)=(+,+)$ or \emph{$uv$ is a $(+,+)$ edge}, and similarly for the other cases.
We also say that \emph{the label of $(u,v)$ at $u$ is $+$} (resp. $-$) if $u$ prefers $v$ to its current partner (resp. its current partner to $v$).
Note that \emph{blocking edges} introduced in Section \ref{sec:definitions} coincide exactly with $(+,+)$ edges.
The graph $G_M$ is defined as the subgraph of $G$ obtained by deleting edges that are labeled $(-,-)$, and by attaching to other edges not in $M$ the appropriate labels defined above. Observe that $M$ is also a matching of $G_M$, hence definitions of $M$-alternating path and cycles apply in $G_M$ as well. These definitions can be used to obtain a characterization of popular matchings in terms of forbidden substructures of $G_M$, as shown in~\cite{HK11}.
\begin{theorem}\label{thr:characterize-popular}
A matching $M$ of $G$ is popular if and only if $G_M$ does not contain any of the following:
\begin{enumerate}
\item[(i)]\label{it:circuit} an $M$-alternating cycle with a $(+,+)$ edge.
\item[(ii)]\label{it:two-plusplus}an $M$-alternating path that starts and ends with two distinct $(+,+)$ edges.
\item[(iii)]\label{it:plusplus-and-unmatched}
an $M$-alternating path that starts from an $M$-exposed node and ends with a $(+,+)$ edge.
\end{enumerate}
\end{theorem}
Graph $G_M$ can also be used to obtain a characterization of dominant matchings, as shown in~\cite{CK16}.
\begin{theorem}
\label{thm:dominant}
Let $M$ be a popular matching. $M$ is dominant if and only if there is no $M$-augmenting path in $G_M$.
\end{theorem}
The above characterizations can be used to efficiently decide if a given matching $M$ is popular (similarly, dominant), see \cite{HK11}. Hence, in most of our NP-completeness reductions, we focus on proving the hardness part.
The matchings that satisfy Definition~\ref{def:dominant} were called ``dominant'' in~\cite{CK16},
however dominant matchings in bipartite graphs were first constructed in \cite{HK11}.
It was observed in \cite{HK11} that Definition~\ref{def:strong-dominant} given below was a sufficient condition for a
matching $M$ to be a max-size popular matching and the goal in \cite{HK11} was to efficiently construct
such a matching in a bipartite graph. It was shown in \cite{Kav12} that if
$M$ satisfies conditions~(i)-(iv) in Definition~\ref{def:strong-dominant} then $M$ satisfies
the condition given in Theorem~\ref{thm:dominant} along with the conditions given in Theorem~\ref{thr:characterize-popular};
thus $M$ is a dominant matching.
\begin{definition}
\label{def:strong-dominant}
A matching $M$ is strongly dominant in $G = (V,E)$ if there is a partition $(L,R)$ of the node set $V$ such that
(i)~$M \subseteq L \times R$, (ii)~$M$ matches all nodes in $R$,
(iii)~every $(+,+)$ edge is in $R \times R$, and (iv)~every edge in $L \times L$ is $(-,-)$.
\end{definition}
Consider the complete graph on 4 nodes $d_0, d_1, d_2, d_3$ where $d_0$'s preference list is $d_1 \succ d_2 \succ d_3$
(i.e., top choice $d_1$, followed by $d_2$ and then $d_3$), $d_1$'s preference list is $d_2 \succ d_3 \succ d_0$,
$d_2$'s preference list is $d_3 \succ d_1 \succ d_0$, and $d_3$'s preference list is $d_1 \succ d_2 \succ d_0$.
This instance (see Fig.~\ref{D:example}) has no stable matching. The matching
$M = \{(d_0,d_1),(d_2,d_3)\}$ is a strongly dominant matching here with $L = \{d_0,d_2\}$ and $R = \{d_1,d_3\}$.
$M \subseteq L \times R$ and it is a perfect matching. Moreover, the edge $(d_0,d_2) \in L \times L$ is $(-,-)$
and there is only one $(+,+)$ edge here, which is $(d_1,d_3) \in R \times R$.
In bipartite graphs, every dominant matching is strongly dominant~\cite{CK16}.
However in non-bipartite graphs, not every dominant matching is strongly dominant. For instance, consider the following graph on 4 nodes
$a, b, c, d$ where $a$'s preference list is $b \succ c \succ d$, while $b$'s preference list is $a \succ c$ and
$c$'s preference list is $a \succ b$ and $d$'s only neighbor is $a$. It is simple to check that the matching $\{(a,d),(b,c)\}$
is popular; moreover it is a perfect matching and hence it is dominant. However it is {\em not} strongly dominant as both
$(a,b)$ and $(a,c)$ are $(+,+)$ edges and one of $b,c$ has to be in $L$.
\subsection{Dual certificates for stable, popular, and dominant matchings}
\label{sec:certificates}
Let $\tilde{G}$ be the graph $G$ augmented with {\em self-loops}, i.e., it is assumed that every node is its own last choice.
Corresponding to any matching $N$ in $G$, there is a perfect matching $\tilde{N}$ in $\tilde{G}$ defined as follows:
$\tilde{N} = N \cup \{(u,u): u$ is left unmatched in $N\}$.
Let $M$ be any matching in $G$. Corresponding to $M$, we can define an edge weight function $\mathsf{wt}_M$ in $\tilde{G}$ as follows.
\begin{equation*}
\mathsf{wt}_M(u,v) = \begin{cases} 2 & \text{if\ $(u,v)$\ is\ labeled\ $(+,+)$}\\
-2 & \text{if\ $(u,v)$\ is\ labeled\ $(-,-)$ }\\
0 & \text{otherwise}
\end{cases}
\end{equation*}
Thus $\mathsf{wt}_M(e) = 0$ for every $e \in M$.
We need to define $\mathsf{wt}_M$ on self-loops as well: for any node~$u$, let $\mathsf{wt}_M(u,u) = 0$ if $u$ is unmatched in $M$, else
let $\mathsf{wt}_M(u,u) = -1$.
It is easy to see that for any matching $N$ in $G$, $\Delta(N,M) = \mathsf{wt}_M(\tilde{N})$,
where $\Delta(N,M) = \phi(N,M) - \phi(M,N)$ (see Definition~\ref{pop-def}).
Thus $M$ is popular if and only if every perfect matching in the graph $\tilde{G}$ has weight at most 0.
\subsubsection{Certificates in bipartite graphs.} Here we give a quick overview of the LP framework of popular matchings in bipartite graphs from \cite{KMN09}
along with some results from \cite{Kav16,Kav18}.
\begin{theorem}[\cite{KMN09}]
\label{thm:witness}
Let $M$ be any matching in $G = (A \cup B, E)$. The matching $M$ is popular if and only if there exists a vector $\vec{\alpha} \in \mathbb{R}^n$ (where $n = |A \cup B|$) such that $\sum_{u \in A \cup B}\alpha_u = 0$ and
\begin{eqnarray*}
\alpha_{a} + \alpha_{b} & \ \ \ge \ \ & \mathsf{wt}_{M}(a,b) \ \ \ \forall\, (a,b)\in E\\
\alpha_u & \ \ \ge \ \ & \mathsf{wt}_M(u,u) \ \ \ \forall\, u\in A \cup B.
\end{eqnarray*}
\end{theorem}
The vector $\vec{\alpha}$ will be an optimal solution to the LP that is dual to the max-weight perfect matching LP in $\tilde{G}$
(with edge weight function $\mathsf{wt}_M$). For any popular matching $M$, a vector $\vec{\alpha}$ as given in Theorem~\ref{thm:witness}
will be called a {\em witness} to $M$.
A stable matching has the all-zeros vector $\vec{0}$ as a witness while it follows from \cite{CK16} that
a dominant matching $M$ has a witness $\vec{\alpha}$ where
$\alpha_u \in \{\pm 1\}$ for all nodes $u$ matched in $M$ and $\alpha_u = 0$ for all nodes $u$ left unmatched in $M$.
The following lemma will be useful to us. Let $|A\cup B| = n$.
\begin{lemma}[\cite{Kav16}]
Every popular matching in $G = (A \cup B, E)$ has a witness in $\{0,\pm 1\}^n$.
\end{lemma}
Call $s\in V$ a {\em stable node} if it is matched in some (equivalently, all) stable matching(s)~\cite{GS85}. Every popular matching has to match all stable nodes~\cite{HK11}. A node that is not stable is called an {\em unstable node}.
Call any $e \in E$ a {\em popular edge} if there is some popular matching in $G$ that contains $e$.
Let $M$ be a popular matching in $G = (A \cup B, E)$ and let $\vec{\alpha} \in \{0,\pm 1\}^n$ be a witness of $M$.
Lemma~\ref{prop0} given below follows from complementary slackness conditions.
\begin{lemma}[\cite{Kav18}]
\label{prop0}
For any popular edge $(a,b)$, we have $\alpha_a + \alpha_b = \mathsf{wt}_M(a,b)$.
For any unstable node $u$ in $G$, if $u$ is left unmatched in $M$, then $\alpha_u = 0$ else $\alpha_u = -1$.
\end{lemma}
The popular subgraph $F_G$ is a useful subgraph of $G$ defined in \cite{Kav18}.
\begin{definition}
\label{def:popular-subgraph}
The {\em popular subgraph} $F_G = (A \cup B, E_F)$ is the subgraph of $G = (A \cup B, E)$
whose edge set $E_F$ is the set of popular edges in $E$.
\end{definition}
The graph $F_G$ need not be connected. Let $\mathcal{C}_1,\ldots,\mathcal{C}_h$ be the various components in $F_G$.
Recall that $M$ is a popular matching in $G$ and $\vec{\alpha} \in \{0,\pm 1\}^n$ is a witness of $M$.
\begin{lemma}[\cite{Kav18}]
\label{prop1}
For any connected component $\mathcal{C}_i$ in $F_G$, either $\alpha_u = 0$ for all nodes $u \in \mathcal{C}_i$ or
$\alpha_u \in \{\pm 1\}$ for all nodes $u \in \mathcal{C}_i$. Moreover, if $\mathcal{C}_i$ contains one or
more unstable nodes, either all these unstable nodes are matched in $M$ or none of them is
matched in $M$.
\end{lemma}
The following definition marks the state of each connected component $\mathcal{C}_i$ in $F_G$ as ``zero'' or ``unit'' in $\vec{\alpha}$
--- this classification will be useful to us in our hardness reduction.
\begin{definition}
\label{def:stab-domn}
A connected component $\mathcal{C}_i$ in $F_G$ is in {\em zero state} in $\vec{\alpha}$ if $\alpha_u = 0$ for all nodes $u \in \mathcal{C}_i$.
Similarly, $\mathcal{C}_i$ in $F_G$ is in {\em unit state} in $\vec{\alpha}$ if $\alpha_u \in \{\pm 1\}$ for all nodes $u \in \mathcal{C}_i$.
\end{definition}
\subsubsection{Certificates in non-bipartite graphs.}
The following theorem shows that the sufficient condition in Theorem~\ref{thm:witness} certifies popularity in non-bipartite graphs as well.
\begin{theorem}
\label{thm:non-bipartite}
Let $M$ be any matching in $G = (V, E)$. The matching $M$ is popular if there exists $\vec{\alpha} \in \mathbb{R}^{|V|}$ such that
$\sum_{u \in V} \alpha_u = 0$ and
\begin{eqnarray*}
\alpha_u + \alpha_v & \ \ \ge \ \ & \mathsf{wt}_{M}(u,v) \ \ \ \forall\, (u,v)\in E\\
\alpha_u & \ \ \ge \ \ & \mathsf{wt}_M(u,u) \ \ \ \forall\, u\in V.
\end{eqnarray*}
\end{theorem}
The proof of Theorem~\ref{thm:non-bipartite} follows by considering the max-weight perfect matching LP in the graph $\tilde{G}$ with edge weight
function $\mathsf{wt}_M$ as the primal LP.
It is easy to see that if there exists a vector $\vec{\alpha} \in \mathbb{R}^{|V|}$ as given above then the optimal value of the dual LP is at most 0,
equivalently, $\mathsf{wt}_M(\tilde{N}) \le 0$ for all matchings $N$ in $G$, i.e., $M$ is a popular matching.
If $M$ is a popular matching that admits $\vec{\alpha} \in \mathbb{R}^{|V|}$ satisfying the conditions in Theorem~\ref{thm:non-bipartite},
we will call $\vec{\alpha}$ a {\em witness} of $M$.
Note that any stable matching in $G$ has $\vec{0}$ as a witness.
The witness of the matching $M$ described in Section~\ref{sec:comb-prelims}
on the nodes $d_0,d_1,d_2,d_3$ is $\vec{\alpha}$ where $\alpha_{d_1} = \alpha_{d_3} = 1$ and $\alpha_{d_0} = \alpha_{d_2} = -1$.
Consider the popular (but not strongly dominant) matching $M = \{(a,d),(b,c)\}$ in the other instance described in Section~\ref{sec:comb-prelims}:
this matching $M$ does not admit any witness as given in Theorem~\ref{thm:non-bipartite}.
We will show in Section~\ref{section4} that every strongly dominant matching $M$ admits a witness $\vec{\alpha}$ as given in Theorem~\ref{thm:non-bipartite};
moreover, there will be a witness $\vec{\alpha}$ such that for every node $u$ matched in $M$, $\alpha_u = \pm 1$.
\section{Strongly dominant matchings}
\label{section4}
In this section we generalize the max-size popular matching algorithm for bipartite graphs~\cite{Kav12} to solve the strongly dominant matching
problem in all graphs. We show a surprisingly simple reduction from the strongly dominant matching
problem in $G = (V,E)$ to the stable matching problem in a new graph $G' = (V,E')$. Thus Irving's algorithm~\cite{Irv85},
which efficiently solves the stable matching problem in all graphs, when run in $G'$, solves our problem.
The graph $G'$ can be visualized as the bidirected graph corresponding to $G$. The node set of $G'$ is the same as that of $G$.
For every $(u,v) \in E$, there will be 2 edges in $G'$ between $u$ and $v$: one directed from $u$ to $v$ which will be denoted by $(u^+,v^-)$
or $(v^-,u^+)$ and the other directed from $v$ to $u$ which will be denoted by $(u^-,v^+)$ or $(v^+,u^-)$.
For any $u \in V$, if $u$'s preference list in $G$ is $v_1 \succ v_2 \succ \cdots \succ v_k$ then $u$'s
preference list in $G'$ is $v^-_1 \succ v^-_2 \succ \cdots \succ v^-_k \succ v^+_1 \succ v^+_2 \succ \cdots \succ v^+_k$.
The neighbor $v_i^-$ corresponds to the edge $(u^+,v_i^-)$ and the neighbor $v_i^+$ corresponds to the edge $(u^-,v_i^+)$.
Thus $u$ prefers {\em outgoing} edges to {\em incoming} edges: among outgoing edges (similarly, incoming edges), its order is its original
preference order.
\begin{itemize}
\item A matching $M'$ in $G'$ is a subset of $E'$ such that for each $u \in V$, $M'$ contains at most one edge incident to $u$, i.e.,
at most one edge in $\{(u^+,v^-),(u^-,v^+): v \in \mathsf{Nbr}(u)\}$ is in $M'$, where $\mathsf{Nbr}(u)$ is the set of $u$'s neighbors in $G$.
\item For any matching $M'$ in $G'$, define the {\em projection} $M$ of $M'$ as follows:
\[M = \{(u,v): (u^+,v^-)\ \mathrm{or}\ (u^-,v^+)\ \mathrm{is\ in}\ M'\}.\]
It is easy to see that $M$ is a matching in $G$.
\end{itemize}
\begin{definition}
A matching $M'$ is stable in $G'$ if for every edge $(u^+,v^-) \in E'\setminus M'$: either (i)~$u$ is matched in $M'$
to a neighbor ranked better than $v^-$ or (ii)~$v$ is matched in $M'$ to a neighbor ranked better than $u^+$.
\end{definition}
We now present our algorithm to find a strongly dominant matching in $G = (V,E)$.
\begin{enumerate}
\item Build the bidirected graph $G' = (V,E')$.
\item Run Irving's stable matching algorithm in $G'$.
\item If a stable matching $M'$ is found in $G'$ then
return the projection $M$ of $M'$.
Else return ``$G$ has no strongly dominant matching''.
\end{enumerate}
Note that
running Irving's algorithm in the bidirected graph $G'$ is exactly the same as running Irving's algorithm in the
simple undirected graph $H$ that has {\em three} copies of each node $u \in V$:
these are $u^+, u^-$, and $d(u)$. Corresponding to each edge $(u,v)$ in $G$, there
will be the two edges $(u^+,v^-)$ and $(u^-,v^+)$ in $H$ and for each $u \in V$, the graph $H$ also has the edges
$(u^+,d(u))$ and $(u^-,d(u))$.
If $u$'s preference list in $G$ is $v_1 \succ v_2 \succ \cdots \succ v_k$ then $u^+$'s
preference list in $H$ will be $v^-_1 \succ v^-_2 \succ \cdots \succ v^-_k \succ d(u)$ and $u^-$'s
preference list will be $d(u) \succ v^+_1 \succ v^+_2 \succ \cdots \succ v^+_k$.
The preference list of $d(u)$ will be $u^+ \succ u^-$. Thus in any stable matching in $H$, one of
$u^+,u^-$ has to be matched to $d(u)$.
Before we prove the correctness of our algorithm, we will characterize strongly
dominant matchings in terms of their witnesses.
\begin{theorem}
\label{lem:strongly-dominant}
A matching $M$ is strongly dominant in $G$ if and only if there exists $\vec{\alpha}$ that satisfies Theorem~\ref{thm:non-bipartite}
such that $\alpha_u = \pm 1$ for all nodes $u$ matched in $M$ and $\alpha_u = 0$ for all $u$ unmatched in~$M$.
\end{theorem}
\begin{proof}
Let $M$ be a strongly dominant matching in $G = (V,E)$. So $V$ can be partitioned into $L \cup R$ such that properties~(i)-(iv) in
Definition~\ref{def:strong-dominant} are satisfied.
We will construct $\vec{\alpha}$ as follows. For $u \in V$:
\begin{itemize}
\item if $u \in R$ then set $\alpha_u = 1$
\item else set $\alpha_u = -1$ for $u$ matched in $M$ and $\alpha_u = 0$ for $u$ unmatched in $M$.
\end{itemize}
Since $M$ matches all nodes in $R$, all nodes unmatched in $M$ are in $L$. Thus $\alpha_u = 0$ for all $u$ unmatched in $M$
and $\alpha_u = \pm 1$ for all $u$ matched in $M$. For any edge $(u,v) \in M$, since $M \subseteq L \times R$,
$(\alpha_u,\alpha_v) \in \{(1,-1),(-1,1)\}$ and so $\alpha_u + \alpha_v = 0$. Thus $\sum_{u \in V}\alpha_u = 0$.
We will now show that $\vec{\alpha}$ satisfies Theorem~\ref{thm:non-bipartite}.
We claim that $\alpha_u \ge \mathsf{wt}_M(u,u)$.
This is because $\alpha_u = 0 = \mathsf{wt}_M(u,u)$ for $u$ left unmatched in $M$ and $\alpha_u \ge -1 = \mathsf{wt}_M(u,u)$ for $u$ matched in $M$.
We will now show that $\alpha_u + \alpha_v \ge \mathsf{wt}_M(u,v)$ for any edge $(u,v)$ in $G$, i.e., the edge $(u,v)$ is {\em covered}.
Recall that $\mathsf{wt}_M(u,v) \in \{0, \pm 2\}$.
\begin{itemize}
\item Since $\mathsf{wt}_M(e) \le 2$ for any edge $e$ and $\alpha_u = 1$ for all $u \in R$, all edges in $R \times R$ are covered.
\item We also know that any edge in $L \times L$ is labeled $(-,-)$, i.e., $\mathsf{wt}_M(u,v) = -2$ for any edge
$(u,v) \in L \times L$. Since $\alpha_u \ge -1$ for any $u \in L$, edges in $L \times L$ are covered.
\item We also know that all $(+,+)$ edges are in $R \times R$ and so $\mathsf{wt}_M(u,v) \le 0$ for all
$(u,v) \in L \times R$. Since $\alpha_u \ge -1$ for $u \in L$ and $\alpha_v = 1$ for $v \in R$, all edges in $L \times R$ are covered.
\end{itemize}
We will now show the converse. Let $M$ be a matching with a witness $\vec{\alpha}$ as given in the statement of the theorem.
The matching $M$ is popular (by Theorem~\ref{thm:non-bipartite}).
Note that we can interpret $\vec{\alpha}$ as the optimal solution to the dual LP of the maximum weight perfect matching LP in $\tilde{G}$ with weights given by $\mathsf{wt}_M(u,v)$, of which $\tilde M$ is an optimal solution (since $M$ is popular).
Hence, the pair $(\tilde M,\vec{\alpha})$ satisfy complementary slackness conditions.
In order to show $M$ is strongly dominant, we will obtain a partition $(L, R)$ of $V$ as follows: let
$R = \{u: \alpha_u = 1\}$ and $L = \{u: \alpha_u \in \{0,-1\}\}$.
Complementary slackness conditions on the dual LP imply that if $(u,v) \in M$
then $\alpha_u + \alpha_v = \mathsf{wt}_M(u,v) = 0$. Since $u,v$ are matched, $\alpha_u,\alpha_v \in \{\pm 1\}$; so one of $u,v$ is in $L$ and the
other is in $R$. Thus $M \subseteq L \times R$.
We have $\mathsf{wt}_M(u,v) \le \alpha_u + \alpha_v$ for every $(u,v) \in E$. There cannot be any edge between 2 nodes left unmatched in
$M$ as that would contradict $M$'s popularity. So $\mathsf{wt}_M(u,v) \le \alpha_u + \alpha_v \le -1$ for all
$(u,v) \in E \cap (L \times L)$. Since $\mathsf{wt}_M(u,v) \in \{0,\pm 2\}$, $\mathsf{wt}_M(u,v) = -2$ for all edges
$(u,v)$ in $L \times L$. In other words, every edge in $L \times L$ is labeled $(-,-)$.
Moreover, any $(+,+)$ edge can be present only
in $R \times R$ since $\mathsf{wt}_M(u,v) \le \alpha_u + \alpha_v \le 1$ for all edges $(u,v) \in L \times R$.
Finally, since $\alpha_u = \mathsf{wt}_M(u,u) = 0$ for all $u$
unmatched in $M$ (by complementary slackness conditions on the dual LP)
and every node $u \in R$ satisfies $\alpha_u = 1$, it means that all nodes in $R$ are matched in $M$. \qed
\end{proof}
\subsection{Correctness of our algorithm}
We will first show that if our algorithm returns a matching $M$, then $M$ is a strongly
dominant matching in $G$.
\begin{lemma}
\label{lemma2:main}
If $M'$ is a stable matching in $G'$ then the projection of $M'$ is a strongly dominant matching in $G$.
\end{lemma}
\begin{proof}
Let $M$ be the projection of $M'$. In order to show that $M$ is a strongly dominant matching in $G$,
we will construct a witness $\vec{\alpha}$ as given in Theorem~\ref{lem:strongly-dominant}.
Set $\alpha_u = 0$ for all nodes $u$ left unmatched in $M$. For each node $u$ matched in $M$:
\begin{itemize}
\item if $(u^+,\ast) \in M'$ then set $\alpha_u = 1$; else set $\alpha_u = -1$.
\end{itemize}
Note that $\sum_{u\in V} \alpha_u = 0$ since for each edge $(a,b) \in M$ (so either $(a^+,b^-)$ or $(a^-,b^+)$ is in $M'$),
we have $\alpha_a + \alpha_b = 0$ and for each node $u$ that is unmatched in $M$, we have $\alpha_u = 0$. We also have
$\alpha_u \ge \mathsf{wt}_M(u,u)$ for all $u \in V$ since
(i)~$\alpha_u = 0 = \mathsf{wt}_M(u,u)$ for all $u$ left unmatched in $M$ and (ii)~$\alpha_u \ge -1 = \mathsf{wt}_M(u,u)$ for all $u$ matched in $M$.
We will now show that for every $(a,b) \in E$, $\alpha_a + \alpha_b \ge \mathsf{wt}_M(a,b)$.
\begin{enumerate}
\item Suppose $(a^+,\ast) \in M'$. So $\alpha_a = 1$. We will consider 3 subcases here.
\begin{itemize}
\item The first subcase is that
$(b^+,\ast) \in M'$. So $\alpha_b = 1$. Since $\mathsf{wt}_M(a,b) \le~2$, it follows that
$\alpha_a + \alpha_b = 2 \ge \mathsf{wt}_M(a,b)$.
\item The second subcase is that $(b^-,\ast) \in M'$. So $\alpha_b = -1$. If $(a^+,b^-) \in M'$ then
$\mathsf{wt}_M(a,b) = 0 = \alpha_a + \alpha_b$. So assume $(a^+,c^-)$ and $(b^-,d^+)$ belong to $M'$.
Since $M'$ is stable, the edge $(a^+,b^-)$ does not block $M'$. Thus either
(i)~$a$ prefers $c^-$ to $b^-$ or (ii)~$b$ prefers $d^+$ to $a^+$. Hence $\mathsf{wt}_M(a,b) \in \{0,-2\}$ and so
$\alpha_a + \alpha_b = 0 \ge \mathsf{wt}_M(a,b)$.
\item The third subcase is that $b$ is unmatched in $M$. So $\alpha_b = 0$.
Since $M'$ is stable, the edge $(a^+,b^-)$ does not
block $M'$. Thus $a$ prefers its partner in $M'$ to $b^-$ and so $\mathsf{wt}_M(a,b) = 0 < \alpha_a + \alpha_b$.
\end{itemize}
\item Suppose $(a^-,\ast) \in M'$. There are 3 subcases here as before. The case where $(b^+,\ast) \in M'$ is
totally analogous to the case where $(a^+,\ast)$ and $(b^-,\ast)$ are in $M'$. So we will consider the remaining 2 subcases
here.
\begin{itemize}
\item The first subcase is that $(b^-,\ast) \in M'$. So $\alpha_b = -1$. Let $(a^-,c^+)$ and $(b^-,d^+)$ belong to $M'$.
Since $M'$ is stable, the edge $(a^+,b^-)$ does not block $M'$. So $b$ prefers $d^+$ to $a^+$.
Similarly, the edge $(a^-,b^+)$ does not block $M'$. Hence $a$ prefers $c^+$ to $b^+$.
Thus {\em both} $a$ and $b$ prefer their respective partners in $M$ to each other, i.e., $\mathsf{wt}_M(a,b) = -2$. So we have
$\alpha_a + \alpha_b = -2 = \mathsf{wt}_M(a,b)$.
\item The second subcase is that $b$ is unmatched in $M$. Then the edge $(a^+,b^-)$ {\em blocks} $M'$
since $a$ prefers $b^-$ to $c^+$ (for any neighbor $c$) and $b$ prefers to be matched to $a^+$ than be left unmatched.
Since $M'$ is stable and has no blocking edge, this means that this subcase does not arise.
\end{itemize}
\item Suppose $a$ is unmatched in $M$. Then $(b^+,\ast) \in M'$ (otherwise $(a^-,b^+)$ blocks $M'$); moreover,
$b$ prefers its partner in $M'$ to $a^-$. So we have $\mathsf{wt}_M(a,b) = 0 < \alpha_a + \alpha_b$.
\end{enumerate}
Thus we always have $\alpha_a + \alpha_b \ge \mathsf{wt}_M(a,b)$.
Since $\vec{\alpha}$ satisfies the conditions in Theorem~\ref{lem:strongly-dominant}, $M$ is a strongly dominant matching in $G$.
\end{proof}
We will now show that if $G'$ has no stable matching, then $G$ has no strongly dominant matching.
\begin{lemma}
\label{lemma1:main}
If $G$ admits a strongly dominant matching then $G'$ admits a stable matching.
\end{lemma}
\begin{proof}
Let $M$ be a strongly dominant matching in $G = (V,E)$. Let $\vec{\alpha}$ be a witness of $M$ as given in
Theorem~\ref{lem:strongly-dominant}. That is, $\alpha_u = 0$ for $u$ unmatched in $M$ and $\alpha_u = \pm 1$ for $u$ matched in $M$. As done in the proof of Theorem~\ref{lem:strongly-dominant}, we can interpret $(\tilde{M},\vec{\alpha})$ as a pair of optimal primal and dual solutions. Hence, for each $(u,v) \in M$, $\alpha_u + \alpha_v = \mathsf{wt}_M(u,v) = 0$ by complementary slackness on the dual LP,
so $(\alpha_u,\alpha_v)$ is either $(1,-1)$ or $(-1,1)$.
We will construct a stable matching $M'$ in $G'$ as follows. For each $(u,v) \in M$:
\begin{itemize}
\item if $(\alpha_u,\alpha_v) = (1,-1)$ then add $(u^+,v^-)$ to $M'$;
else add $(u^-,v^+)$ to $M'$.
\end{itemize}
We will show that no edge in $E'\setminus M'$ blocks $M'$.
Let $(a^+,b^-) \notin M'$. We consider the following cases here:
\noindent{\bf Case 1.} Suppose $\alpha_b = 1$. Then $(b^+,d^-) \in M'$ where $d = M(b)$. Since $b$ prefers $d^-$ to $a^+$,
$(a^+,b^-)$ is not a blocking edge to $M'$.
\noindent{\bf Case 2.} Suppose $\alpha_b = -1$. Then $(b^-,d^+) \in M'$ where $d = M(b)$.
We have 2 sub-cases here: (i)~$\alpha_a = 1$ and (ii)~$\alpha_a = -1$.
Note that $\alpha_a \ne 0$ as the edge $(a,b)$ would not be covered by $\alpha_a + \alpha_b$ then. This is because if $\alpha_a = 0$ then
$a$ is unmatched in $M$ and $\mathsf{wt}_M(a,b) = 0$ while $\alpha_a + \alpha_b = -1$.
\begin{itemize}
\item In sub-case~(i), some edge $(a^+,c^-)$ belongs to $M'$.
We know that $\mathsf{wt}_M(a,b) \le \alpha_a + \alpha_b = 0$, so either
(1)~$a$ prefers $M(a) = c$ to $b$ or (2)~$b$ prefers $M(b) = d$ to $a$. Hence either (1)~$a$ prefers $c^-$ to $b^-$ or
(2)~$b$ prefers $d^+$ to $a^+$. Thus $(a^+,b^-)$ is not a blocking edge to $M'$.
\item In sub-case~(ii), some edge $(a^-,c^+)$ belongs to $M'$. We know that $\mathsf{wt}_M(a,b) \le \alpha_a + \alpha_b = -2$, so
$a$ prefers $M(a) = c$ to $b$ {\em and} $b$ prefers $M(b) = d$ to $a$. Thus $b$ prefers $d^+$ to $a^+$, hence
$(a^+,b^-)$ is not a blocking edge to $M'$.
\end{itemize}
\noindent{\bf Case 3.} Suppose $\alpha_b = 0$. Thus $b$ was unmatched in $M$. Each of $b$'s neighbors has to be matched in $M$ to a
neighbor that it prefers to $b$, otherwise $M$ would be unpopular. We have $\alpha_a + \alpha_b \ge \mathsf{wt}_M(a,b) = 0$, hence it follows
that $\alpha_a = 1$. Thus $(a^+,c^-) \in M'$ where $c$ is a neighbor that $a$ prefers to $b$. So $(a^+,b^-)$ is not a blocking edge
to $M'$. \qed
\end{proof}
Lemmas~\ref{lemma2:main} and \ref{lemma1:main} show that a strongly dominant matching is present in $G$ if and only if a stable
matching is present in $G'$. This finishes the proof of correctness of our algorithm.
Since Irving's stable matching algorithm in $G'$ can be implemented to run in linear time~\cite{Irv85}, we can conclude the following theorem.
\begin{theorem}
\label{thm:strongly-dom}
There is a linear time algorithm to determine if a graph $G = (V,E)$ with strict preference lists admits
a strongly dominant matching or not and if so, to return one.
\end{theorem}
\section{Finding a popular matching that is neither stable nor dominant}
\label{sec:hardness}
This section is devoted to proving the following result.
\begin{theorem}
\label{final-thm}
Given a bipartite instance $G = (A \cup B,E)$ with strict preference lists, the problem of deciding if $G$ admits a popular matching that is neither
a stable matching nor a dominant matching is NP-hard.
\end{theorem}
Our reduction will be from 1-in-3 SAT. Recall that 1-in-3 SAT is the set of 3CNF formulas with no negated variables such that there is a
satisfying assignment that makes {\em exactly one} variable true in each clause. Given an input formula $\phi$, to determine if $\phi$ is
1-in-3 satisfiable or not is NP-hard~\cite{Sch78}.
We will now build a bipartite instance $G = (A \cup B, E)$.
The node set $A \cup B$ will consist of nodes in 4 levels: levels~0, 1, 2, and 3 along with
4 nodes $a_0,b_0,z$, and $z'$.
Nodes in $(A\cup B) \setminus \{a_0,b_0,z,z'\}$ are partitioned into gadgets that appear in some level~$i$, for $i \in \{0,1,2,3\}$. For each variable in our 1-in-3 SAT formula, we construct a variable gadget (in level 1), and for each clause, we construct three clause gadgets in level 0, three in level~2, and one in level~3.
We will show that every gadget forms a separate connected component in the popular subgraph of $G$.
If $M$ is any popular matching in $G$ that is neither a stable matching nor a dominant matching and $\vec{\alpha}$ is any witness of $M$ then it will be the case that every level~0 gadget in $G$ is in zero state in $\vec{\alpha}$ and every level~3 gadget in $G$ is in unit state in $\vec{\alpha}$. This will force the following property to hold for every clause in $\phi$:
\begin{itemize}
\item if $c = X_i \vee X_j \vee X_k$ then among the gadgets corresponding to $X_i,X_j,X_k$ in level~1, {\em exactly one} is in unit state in $\vec{\alpha}$.
\end{itemize}
Thus a popular matching in $G$ that is neither stable nor dominant will yield a 1-in-3 satisfiable assignment to $\phi$. Conversely, if $\phi$ is 1-in-3 satisfiable then we can build a popular matching in $G$ that is neither stable nor dominant. We describe our gadgets below.
\noindent{\em Level~1 nodes.} Every gadget in level~1 is a variable gadget.
Corresponding to each variable $X_i$, we will have the gadget in Fig.~\ref{level1:example}.
The preference lists of the 4 nodes in the gadget corresponding to $X_i$ are as follows:
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&x_i\colon \, y_i \succ y'_i \succ z \succ \cdots \qquad\qquad && y_i\colon \, x_i \succ x'_i \succ z' \succ \cdots \\
&x'_i\colon \, y_i \succ y'_i \succ \cdots \qquad\qquad && y'_i\colon \, x_i \succ x'_i \succ \cdots\\
\end{align*}
\end{minipage}
The nodes in the gadget corresponding to $X_i$ are also adjacent to nodes in the clause gadgets:
these neighbors belong to the ``$\cdots$'' part of the preference lists. Note that the order among the nodes in the ``$\cdots$'' part
in the above preference lists does not matter.
\begin{figure}
\caption{The gadget corresponding to variable $X_i$: node preferences are indicated on edges. The node $y_i$ is the top choice of both $x_i$ and $x'_i$ and the node $y'_i$ is the second choice of both $x_i$ and $x'_i$. The node $x_i$ is the top choice of both $y_i$ and $y'_i$ and the node $x'_i$ is the second choice of both $y_i$ and $y'_i$.}
\label{level1:example}
\end{figure}
Let $c = X_i \vee X_j \vee X_k$ be a clause in $\phi$. We will describe the gadgets that correspond to $c$.
For the sake of readability, when we describe preference lists below, we drop the superscript $c$ from
all the nodes appearing in gadgets corresponding to clause $c$.
\noindent{\em Level~0 nodes.}
There will be three level~0 gadgets, each on 4 nodes, corresponding to clause $c$. See Fig.~\ref{level0:example}.
We describe below the preference lists of the 4 nodes $a^c_1,b^c_1,a^c_2,b^c_2$ that belong to the leftmost gadget.
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&a_1\colon \, b_1 \succ \underline{y'_j} \succ b_2 \succ \underline{z} \qquad\qquad && b_1\colon \, a_2 \succ \underline{x'_k} \succ a_1 \succ \underline{z'} \\
&a_2\colon \, b_2 \succ b_1 \qquad\qquad && b_2\colon \, a_1 \succ a_2 \\
\end{align*}
\end{minipage}
Neighbors that are outside this gadget are underlined.
The preferences of nodes in the other two gadgets in level~0 corresponding to $c$ ($a^c_t,b^c_t$ for $t = 3,4$ and $a^c_t,b^c_t$ for $t = 5,6$)
are analogous.
\begin{figure}
\caption{Corresponding to clause $c = X_i \vee X_j \vee X_k$, we have the above 3 gadgets in level 0. The node $a^c_1$'s second choice
is $y'_j$ and $b^c_1$'s is $x'_k$, similarly, $a^c_3$'s is $y'_k$ and $b^c_3$'s is $x'_i$, also $a^c_5$'s is $y'_i$ and $b^c_5$'s is $x'_j$.}
\label{level0:example}
\end{figure}
We will now describe the three level~2 gadgets corresponding to clause $c$. See Fig.~\ref{level2:example}.
\noindent{\em Level~2 nodes.}
There will be three level~2 gadgets, each on 6 nodes, corresponding to clause $c$.
The preference lists of the nodes $p^c_t,q^c_t$ for $0 \le t \le 2$ are described below.
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&p_0\colon \, q_0 \succ q_2 \qquad\qquad && q_0\colon \, p_0 \succ p_2 \succ \underline{z'} \succ \underline{s_0}\\
&p_1\colon \, q_1 \succ q_2 \succ \underline{z} \qquad\qquad && q_1\colon \, p_1 \succ p_2 \\
&p_2\colon \, q_0 \succ \underline{y_j} \succ q_1 \succ q_2 \succ \cdots\qquad\qquad && q_2\colon \, p_1 \succ \underline{x_k} \succ p_0 \succ p_2 \succ \cdots\\
\end{align*}
\end{minipage}
The ``$\cdots$'' in the preference lists of $p_2$ and $q_2$ above are to nodes $t^{c_i}_0$ and $s^{c_i}_0$ respectively (in a level 3 gadget),
for {\em all} clauses $c_i$. The order among these neighbors is not important.
\begin{figure}
\caption{We have the above 3 gadgets in level 2 corresponding to $c = X_i \vee X_j \vee X_k$. The node $p^c_2$'s second choice is
$y_j$ and $q^c_2$'s is $x_k$, similarly, $p^c_5$'s is $y_k$ and $q^c_5$'s is $x_i$, similarly $p^c_8$'s is $y_i$ and $q^c_8$'s is $x_j$.}
\label{level2:example}
\end{figure}
Let us note the preference lists of $p_2$ and $q_2$: they are each other's fourth choices.
The node $p_2$ regards $q_0$ as its top choice, $y_j$ as its second choice, and $q_1$ as its third choice.
The node $q_2$ regards $p_1$ as its top choice, $x_k$ as its second choice, and $p_0$ as its third choice.
The preferences of nodes $p^c_t,q^c_t$ for $3 \le t \le 5$
are described below. The ``$\cdots$'' in the preference lists of $p_5$ and $q_5$ above are to nodes $t^{c_i}_0$ and
$s^{c_i}_0$ respectively, for all clauses $c_i$.
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&p_3\colon \, q_3 \succ q_5 \qquad\qquad && q_3\colon \, p_3 \succ p_5 \succ \underline{z'} \succ \underline{s_0}\\
&p_4\colon \, q_4 \succ q_5 \succ \underline{z} \succ \underline{t_0} \qquad\qquad && q_4\colon \, p_4 \succ p_5 \\
&p_5\colon \, q_3 \succ \underline{y_k} \succ q_4 \succ q_5 \succ \cdots\qquad\qquad && q_5\colon \, p_4 \succ \underline{x_i} \succ p_3 \succ p_5 \succ \cdots\\
\end{align*}
\end{minipage}
The preferences of nodes $p^c_t,q^c_t$ for $6 \le t \le 8$
are described below. The ``$\cdots$'' in the preference lists of $p_8$ and $q_8$ above are to nodes $t^{c_i}_0$ and $s^{c_i}_0$
respectively, for all clauses $c_i$.
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&p_6\colon \, q_6 \succ q_8 \qquad\qquad && q_6\colon \, p_6 \succ p_8 \succ \underline{z'} \\
&p_7\colon \, q_7 \succ q_8 \succ \underline{z} \succ \underline{t_0} \qquad\qquad && q_7\colon \, p_7 \succ p_8 \\
&p_8\colon \, q_6 \succ \underline{y_i} \succ q_7 \succ q_8 \succ \cdots\qquad\qquad && q_8\colon \, p_7 \succ \underline{x_j} \succ p_6 \succ p_8 \succ \cdots\\
\end{align*}
\end{minipage}
\noindent{\em Level~3 nodes.}
Gadgets in level~3 are again clause gadgets. There is exactly one level~3 gadget on 8 nodes $s^c_i,t^c_i$, for $0 \le i \le 3$,
corresponding to clause $c$.
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&s_0\colon \, t_1 \succ \underline{q_0} \succ t_2\succ \underline{q_3} \succ t_3 \succ \cdots \qquad\qquad && t_0\colon \, s_3 \succ \underline{p_7} \succ s_2\succ \underline{p_4} \succ s_1 \succ \cdots\\
&s_1\colon \, t_1 \succ t_0 \qquad\qquad && t_1\colon \, s_1 \succ s_0 \\
&s_2\colon \, t_2 \succ t_0 \qquad\qquad && t_2\colon \, s_2 \succ s_0 \\
&s_3\colon \, t_3 \succ t_0 \qquad\qquad && t_3\colon \, s_3 \succ s_0 \\
\end{align*}
\end{minipage}
The preference lists of the 8 nodes in the level~3 gadget corresponding to clause $c$ are described above.
It is important to note the preference lists of $s_0$ and $t_0$ here.
Among neighbors in this gadget, $s_0$'s order is $t_1 \succ t_2 \succ t_3$ while
$t_0$'s order is $s_3 \succ s_2 \succ s_1$. Also, $s_0$'s order is interleaved with $q_0 \succ q_3$ (these are nodes from level~2 gadgets) and
$t_0$'s order is interleaved with $p_7 \succ p_4$.
The ``$\cdots$'' in the preference lists of $s_0$ and $t_0$ above are to neighbors in levels~1 and 2. Let $n_0$ be the number of variables in $\phi$.
All the nodes $y'_1,\ldots,y'_{n_0}$ along with $q^{c_i}_2,q^{c_i}_5,q^{c_i}_8$ for all clauses $c_i$ will be at the tail of the preference list
of $s^c_0$ and the order among all these nodes is not important.
Similarly, all the nodes $x'_1,\ldots,x'_{n_0}$ along with $p^{c_i}_2,p^{c_i}_5,p^{c_i}_8$ for all clauses $c_i$
will be at the tail of the preference list of $t^c_0$ and the order among all these nodes is also not important.
There are four more nodes in $G$. These are $a_0,z' \in A$ and $b_0,z \in B$.
Thus we have
\begin{align*}
A & = \cup_c\{a^c_i: 1 \le i \le 6\} \cup_i \{x_i,x'_i\} \cup_c\{p^c_i: 0 \le i \le 8\} \cup_c\{s^c_i: 0 \le i \le 3\} \cup \{a_0,z'\}\\
B & = \cup_c\{b^c_i: 1 \le i \le 6\} \cup_i \{y_i,y'_i\} \cup_c\{q^c_i: 0 \le i \le 8\} \cup_c\{t^c_i: 0 \le i \le 3\} \cup \{b_0,z\}.
\end{align*}
The neighbors of $a_0$ are $b_0,z$ and the neighbors of $b_0$ are $a_0,z'$.
The node $a_0$'s preference list is $b_0 \succ z$ and the node $b_0$'s preference list is $a_0 \succ z'$.
The set of neighbors of $z$ is $\{a_0\} \cup_i \{x_i\} \cup_c \{a^c_1,a^c_3,a^c_5\} \cup_c \{p^c_1,p^c_4,p^c_7\}$ and the set of neighbors of $z'$ is
$\{b_0\} \cup_i \{y_i\} \cup_c \{b^c_1,b^c_3,b^c_5\} \cup_c \{q^c_0,q^c_3,q^c_6\}$.
The preference lists of $z$ and $z'$ are as follows: (here $k$ is the number of clauses in $\phi$)
\begin{align*}
z &\colon\ x_1 \succ \cdots \succ x_{n_0} \ \succ \ p^{c_1}_1 \succ \cdots \succ p^{c_k}_7 \ \succ \ a_0 \succ \cdots \\
z' &\colon\ y_1 \succ \cdots \succ y_{n_0} \ \succ \ q^{c_1}_0 \succ \cdots \succ q^{c_k}_6 \ \succ \ b_0 \succ \cdots
\end{align*}
Thus $z$ prefers neighbors in level~1 to neighbors in level~2, then comes $a_0$, and then neighbors in level~0.
Analogously, for $z'$ (with $b_0$ replacing $a_0$).
The order among neighbors in level~$i$ (for $i = 0,1,2$) in the preference lists of $z$ and $z'$ does not matter.
\subsection{Some stable/dominant matchings in $G$}
It would be helpful to see some stable matchings and dominant matchings in the above instance $G$.
\begin{itemize}
\item The men-optimal stable matching $S$ in $G$ includes $(a_0,b_0)$ and in the level~0 gadgets, for all clauses $c$,
the edges $(a^c_i,b^c_i)$ for $1 \le i \le 6$.
\begin{itemize}
\item In the level~1 gadgets, the edges $(x_i,y_i)$ and $(x'_i,y'_i)$ are included for all $i \in [n_0]$.
\item In the level~2 gadgets, for all clauses $c$, the edges $(p^c_i,q^c_i)$ for $0 \le i \le 8$ are included.
\item In the level~3 gadgets, for all clauses $c$, the edges $(s^c_i,t^c_i)$ for $1 \le i \le 3$ are included.
\item The nodes $z,z'$ and $s^c_0,t^c_0$ for all clauses $c$ are left unmatched in $S$.
\end{itemize}
\item The women-optimal stable matching $S'$ in $G$ includes $(a_0,b_0)$ and the same edges as $S$ in all level~1, 2, 3 gadgets. In
level~0, $S'$ includes for all clauses $c$, the edges $(a^c_1,b^c_2), (a^c_2,b^c_1), (a^c_3,b^c_4), (a^c_4,b^c_3), (a^c_5,b^c_6), (a^c_6,b^c_5)$.
\item The dominant matching $M^*$ as computed by the algorithm in \cite{Kav12} will be as follows:
\begin{itemize}
\item $M^*$ contains the edges $(a_0,z), (z',b_0)$, and in the level~0 gadgets, for all clauses $c$,
the edges $(a^c_1,b^c_2), (a^c_2,b^c_1), (a^c_3,b^c_4), (a^c_4,b^c_3), (a^c_5,b^c_6), (a^c_6,b^c_5)$.
\item In the level~1 gadgets, the edges $(x_i,y'_i)$ and $(x'_i,y_i)$ are included for all $i \in [n_0]$.
\item In the level~2 gadgets, for each clause $c$, the edges $(p^c_0,q^c_2),(p^c_1,q^c_1),(p^c_2,q^c_0)$ are included from the leftmost gadget
(see Fig.~\ref{level2:example}). Analogous edges (two blue ones and the middle red edge) are included from the other two level~2 gadgets corresponding to $c$.
\item In the level~3 gadgets, the edges $(s^c_0,t^c_1), (s^c_1,t^c_0), (s^c_2,t^c_2), (s^c_3,t^c_3)$ for all clauses $c$ are included.
\end{itemize}
\end{itemize}
Note that $M^*$ is a perfect matching as it matches all nodes in $G$. We can show the above matching $M^*$ to be popular by the following witness
$\vec{\alpha} \in \{\pm 1\}^n$ to $M^*$:
\begin{itemize}
\item $\alpha_{a_0} = \alpha_{b_0} = 1$ while $\alpha_{z} = \alpha_{z'} = -1$.
\item $\alpha_{a^c_i} = 1$ and $\alpha_{b^c_i} = -1$ for $1 \le i \le 6$ and all clauses $c$.
\item $\alpha_{x_i} = \alpha_{y_i} = 1$ while $\alpha_{x'_i} = \alpha_{y'_i} = -1$ for all $i \in [n_0]$.
\item $\alpha_{p^c_0} = \alpha_{q^c_0} = \alpha_{p^c_1} = 1$ while $\alpha_{q^c_1} = \alpha_{p^c_2} = \alpha_{q^c_2} = -1$ for all clauses $c$. Similarly
for the other 2 level~2 gadgets corresponding to $c$ and all other clauses.
\item $\alpha_{s^c_1} = \alpha_{t^c_1} = \alpha_{s^c_2} = \alpha_{s^c_3} = 1$ while $\alpha_{s^c_0} = \alpha_{t^c_0} = \alpha_{t^c_2} = \alpha_{t^c_3} = -1$ for all clauses $c$.
\end{itemize}
It can be checked that we have $\alpha_u + \alpha_v = 0$ for every edge $(u,v) \in M^*$. We also have $\alpha_u + \alpha_v \ge \mathsf{wt}_{M^*}(u,v)$ for every edge $(u,v)$
in the graph. In particular, the endpoints of every blocking edge to $M^*$, such as $(a_0,b_0)$, $(x_i,y_i)$ for all $i$,
$(p^c_{3j}, q^c_{3j})$ for all $c$ and $j \in \{0,1,2\}$, and $(s^c_1, t^c_1)$ for all $c$, have their $\alpha$-value equal to 1.
There are many other dominant matchings in this instance $G$:
\begin{itemize}
\item The edges $(a^c_i,b^c_i)$ may be included for $i \in \{1,\ldots,6\}$ and all clauses $c$.
\item The top red edge and two green ones (such as the edges $(p^c_0,q^c_0),(p^c_1,q^c_2),(p^c_2,q^c_1)$
in the leftmost level~2 gadget corresponding to $c$) may be included from a level~2 gadget.
\item From the level~3 gadget corresponding to $c$,
the edges $(s^c_0,t^c_2), (s^c_1,t^c_1), (s^c_2,t^c_0), (s^c_3,t^c_3)$ or the edges $(s^c_0,t^c_3), (s^c_1,t^c_1), (s^c_2,t^c_2), (s^c_3,t^c_0)$ may be included.
\end{itemize}
\subsection{The popular subgraph of $G$}
Recall the popular subgraph $F_G$ from Section~\ref{prelims}, whose edge set is the set of popular edges in $G$.
\begin{lemma}
\label{lem:conn-comp}
Let $C$ be any level~$i$ gadget in $G$, where $i \in \{0,1,2,3\}$. All the nodes in $C$ belong to the same connected component in $F_G$.
\end{lemma}
\begin{proof}
Consider a level~0 gadget in $G$, say on $a^c_1,b^c_1,a^c_2,b^c_2$ (see Fig.~\ref{level0:example}). The men-optimal stable matching $S$ in $G$ contains
the edges $(a^c_1,b^c_1)$ and $(a^c_2,b^c_2)$ while the women-optimal stable matching $S'$ contains the edges $(a^c_1,b^c_2)$ and $(a^c_2,b^c_1)$.
Thus there are popular edges among these 4 nodes and so these 4 nodes belong to the same connected component in $F_G$.
Consider a level~1 gadget in $G$, say on $x_i,y_i,x'_i,y'_i$ (see Fig.~\ref{level1:example}). Every stable matching in $G$ contains $(x_i,y_i)$ and $(x'_i,y'_i)$
while the dominant matching $M^*$ contains $(x_i,y'_i)$ and $(x'_i,y_i)$. Thus there are popular edges among these 4 nodes
and so these 4 nodes belong to the same connected component in $F_G$.
Consider a level~2 gadget in $G$, say on $p^c_i,q^c_i$ for $i = 0,1,2$ (see Fig.~\ref{level2:example}).
The dominant matching $M^*$ contains the edges $(p^c_0,q^c_2)$ and $(p^c_2,q^c_0)$.
There is also another dominant matching in $G$ that contains the edges $(p^c_1,q^c_2)$ and $(p^c_2,q^c_1)$.
Thus there are popular edges among these 6 nodes and so these 6 nodes belong to the same connected component in $F_G$.
Consider a level~3 gadget in $G$, say on $s^c_i,t^c_i$ for $i = 0,1,2,3$.
The dominant matching $M^*$ contains $(s^c_0,t^c_1)$, and $(s^c_1,t^c_0)$.
There is another dominant matching in $G$ that contains $(s^c_0,t^c_2)$ and $(s^c_2,t^c_0)$.
There is yet another dominant matching in $G$ that contains $(s^c_0,t^c_3)$ and $(s^c_3,t^c_0)$.
Thus there are popular edges among these 8 nodes and so these 8 nodes belong to the same connected component in $F_G$. \qed
\end{proof}
The following theorem will be important for us and we will prove it in Section~\ref{sec:proof-separate}.
\begin{theorem}
\label{thm:separate}
Every level~$i$ gadget, for $i \in \{0,1,2,3\}$, forms a distinct connected component in the graph $F_G$. The four nodes $a_0,b_0,z$, and $z'$
belong to their own connected component in $F_G$.
\end{theorem}
\subsection{Popular matchings in $G$}
Let $M$ be any popular matching in $G$. Note that $M$ either matches {\em both} $z$ and $z'$ or leaves both these nodes unmatched.
This is because both $z$ and $z'$ are unstable nodes in the same connected component in $F_G$ (by Theorem~\ref{thm:separate}),
so either both are matched or both are unmatched in $M$ (by Lemma~\ref{prop1}). The same holds for the nodes $s^c_0$ and $t^c_0$
for any clause $c$: any popular matching either matches both $s^c_0$ and $t^c_0$ or leaves both these nodes unmatched.
\begin{lemma}
\label{bipartite:lemma2}
Suppose $M$ is a popular matching in $G$ that matches $z$ and $z'$. Then $M$ is a dominant matching in $G$.
\end{lemma}
\begin{proof}
Let $\vec{\alpha} \in \{0,\pm 1\}^n$ be a witness of $M$. It follows from
Theorem~\ref{thm:separate} that $(a_0,z)$ and $(z',b_0)$ are in $M$.
Since $z$ and $z'$ prefer their neighbors in level~1
to $a_0$ and $b_0$ respectively while these neighbors prefer their partners in $M$ to $z$ and $z'$ (by Theorem~\ref{thm:separate}),
we have $\mathsf{wt}_M(x_i,z) = \mathsf{wt}_M(z',y_i) = 0$.
The nodes $z$ and $z'$ are unstable in $G$ and $M$ matches them, so $\alpha_z = \alpha_{z'} = -1$ (by Lemma~\ref{prop0}).
Since $\alpha_{x_i} + \alpha_z \ge 0$ and $\alpha_{z'} + \alpha_{y_i} \ge 0$, it follows that $\alpha_{x_i} = \alpha_{y_i} = 1$ for all $i$.
Thus all nodes in level~1 have $\alpha$-values equal to $\pm 1$ (by Lemma~\ref{prop1}).
In particular, $\alpha_{x'_i} = \alpha_{y'_i} = -1$. This is due to the fact that $\mathsf{wt}_M(x_i,y'_i) = 0$ (by Theorem~\ref{thm:separate})
and $\alpha_{x_i} + \alpha_{y'_i} = \mathsf{wt}_M(x_i,y'_i)$ (by Lemma~\ref{prop0}) as $(x_i,y'_i)$ is a popular edge.
Similarly, with $(x'_i,y_i)$.
Suppose $s_0^c,t_0^c$ are unmatched in $M$. Then $\mathsf{wt}_M(s^c_0,y'_i) = \mathsf{wt}_M(x'_i,t^c_0) = 0$. This is because
$s^c_0$ and $t^c_0$ prefer to be matched to any neighbor than be unmatched while (by Theorem~\ref{thm:separate})
$y'_i$ and $x'_i$ prefer their partners in $M$ to $s^c_0$ and $t^c_0$, respectively. Since we assumed $s_0^c,t_0^c$ to be unmatched in $M$,
$\alpha_{s^c_0} = \alpha_{t^c_0} = 0$ (by Lemma~\ref{prop0}). So this implies that $\alpha_{s^c_0} + \alpha_{y'_i} = 0 - 1 = -1 < \mathsf{wt}_M(s^c_0,y'_i)$, i.e.,
the edge $(s^c_0,y'_i)$ (similarly, $(x'_i,t^c_0)$) is not covered by the sum of $\alpha$-values of its endpoints, a contradiction.
Thus $s_0^c,t_0^c$ are forced to be matched in $M$.
Thus $s^c_0$ and $t^c_0$ for all clauses $c$
are matched in $M$. The other nodes in $G$ are stable and hence they have to be matched in $M$.
Thus $M$ is a perfect matching and also popular, so it is a dominant matching in $G$. \qed
\end{proof}
\begin{lemma}
\label{bipartite:lemma3}
Suppose $M$ is a popular matching in $G$ that leaves $s^c_0$ and $t^c_0$ unmatched for some $c$. Then $M$ is a stable matching
in $G$.
\end{lemma}
\begin{proof}
We will repeatedly use Theorem~\ref{thm:separate} here.
Let $\vec{\alpha} \in \{0,\pm 1\}^n$ be a witness of $M$. Since the nodes $s^{c}_0$ and $t^{c}_0$ are unmatched in $M$, we have
$\alpha_{s^c_0} = \alpha_{t^c_0} = 0$ (by Lemma~\ref{prop0}). Also $\mathsf{wt}_M(s^c_0,q^c_2) = \mathsf{wt}_M(p^c_2,t^c_0) = 0$ since both $s^c_0$ and
$t^c_0$ prefer to be matched than be unmatched while $q^c_2$ and $p^c_2$ prefer their partners in $M$ to $s^c_0$ and $t^c_0$,
respectively (by Theorem~\ref{thm:separate}). So $\alpha_{p^c_2} \ge 0$ and $\alpha_{q^c_2} \ge 0$ for all $c$.
We also have $\alpha_{p^c_2} + \alpha_{q^c_2} = \mathsf{wt}_M(p^c_2, q^c_2) \le 0$ since any popular matching
matches $p^c_2$ to a partner at least as good as $q^c_2$ and similarly, $q^c_2$ to a partner at least as good as $p^c_2$
(by Theorem~\ref{thm:separate}).
This means that $\alpha_{p^c_2} = \alpha_{q^c_2} = 0$. The same argument can be used for every $p^{c_i}_{3j+2}$ and $q^{c_i}_{3j+2}$
(for any clause $c_i$ and $j = 0,1,2$) to show that $\alpha_{p^{c_i}_{3j+2}} = \alpha_{q^{c_i}_{3j+2}} = 0$. Thus all level~2 nodes
have $\alpha$-values equal to 0 (by Lemmas~\ref{prop1} and \ref{lem:conn-comp}).
The fact that all level~2 nodes have $\alpha$-values equal to 0 immediately implies that all level~3 nodes also have
$\alpha$-values equal to 0. This is because if $M$ matches $s^{c_i}_0$ and $t^{c_i}_0$ for some clause $c_i$ then at least one of
$s^{c_i}_0,t^{c_i}_0$ is not matched to its top choice
neighbor in its gadget. So either $\mathsf{wt}_M(s^{c_i}_0, q^{c_i}_0) = 0$ or $\mathsf{wt}_M(p^{c_i}_7,t^{c_i}_0) = 0$.
Since $\alpha_{q^{c_i}_0} = \alpha_{p^{c_i}_7} = 0$ (these are level~2 nodes) and $\alpha_{s^{c_i}_0} = \alpha_{t^{c_i}_0} = -1$
(by Lemma~\ref{prop0}), we have a
contradiction. Thus $\alpha_u = 0$ for every level~3 node $u$ (by Lemma~\ref{prop1}).
Similarly, $\alpha_{x'_i} \ge 0$ and $\alpha_{y'_i} \ge 0$ for all $i$ as the edges $(x'_i,t^c_0)$ and
$(s_0^c,y'_i)$ would not be covered otherwise. Also, $\alpha_{x'_i} + \alpha_{y'_i} = \mathsf{wt}_M(x'_i, y'_i)$ (by Lemma~\ref{prop0}) as
$(x'_i,y'_i)$ is a popular edge and $\mathsf{wt}_M(x'_i, y'_i) \le 0$ since $M$ matches $x'_i$ to either $y_i$ or $y'_i$
and similarly, $y'_i$ to either $x_i$ or $x'_i$ (by Theorem~\ref{thm:separate}). Thus $\alpha_{x'_i} = \alpha_{y'_i} = 0$.
This means that all level~1 nodes have $\alpha$-values equal to 0 (by Lemma~\ref{prop1}).
Since all level~1 nodes have $\alpha$-values equal to 0, we have $\alpha_z = \alpha_{z'} = 0$; otherwise
the edges $(x_i,z)$ and $(z',y_i)$ would not be covered. This is because $\mathsf{wt}_M(x_i,z) = \mathsf{wt}_M(z',y_i) = 0$
(by Theorem~\ref{thm:separate}). So in order to cover the edges $(x_i,z)$ and $(z',y_i)$, we need to have $\alpha_z \ge 0$
and $\alpha_{z'} \ge 0$, i.e., $\alpha_z = \alpha_{z'} = 0$ (by Lemma~\ref{prop0}).
Thus $\alpha_{a_0} = \alpha_{b_0} = 0$ (by Theorem~\ref{thm:separate} and Lemma~\ref{prop1}).
Moreover, $\alpha_z = \alpha_{z'} = 0$ also implies that all their neighbors in level~0 have their $\alpha$-values at least 0. For instance,
consider $a^c_1$ and $b^c_1$: in order to cover the edges $(a^c_1,z)$ and $(z',b^c_1)$, we have $\alpha_{a^c_1} \ge 0$ and $\alpha_{b^c_1} \ge 0$.
Since either $(a^c_1,b^c_1) \in M$ or $(a^c_1,b^c_2),(a^c_2,b^c_1)$ are in $M$ (by Theorem~\ref{thm:separate}), we have
$\mathsf{wt}_M(a^c_1,b^c_1) = 0$. Because $(a^c_1,b^c_1)$ is a popular edge, this means $\alpha_{a^c_1} + \alpha_{b^c_1} = 0$ (by Lemma~\ref{prop0}).
Thus $\alpha_{a^c_1} = \alpha_{b^c_1} = 0$.
Similarly, $\alpha_{a^c_{2i-1}} = \alpha_{b^c_{2i-1}} = 0$ for $i = 1,2,3$ and all clauses $c$.
Thus all level~0 nodes have $\alpha$-values equal to 0.
So $\vec{\alpha} = \vec{0}$, i.e., $\mathsf{wt}_M(e) \le 0$ for all edges $e$. In other words, there is no blocking edge to $M$.
Thus $M$ is a stable matching. \qed
\end{proof}
It follows from Lemmas~\ref{bipartite:lemma2} and \ref{bipartite:lemma3} that if $M$ is a popular matching in $G$ that is
neither stable nor dominant then $M$ has to match nodes $s^c_0,t^c_0$ for all $c$ and leave $z$ and $z'$ unmatched.
Equivalently, $M$ has to match all nodes except $z$ and $z'$.
Conversely, if $M$ is a popular matching in $G$ that matches all nodes except $z$ and $z'$ then $M$ is neither a max-size popular matching
nor a min-size popular matching, i.e., $M$ is neither dominant nor stable. Thus we can conclude the following theorem.
\begin{theorem}
\label{bipartite:theorem}
The graph $G$ admits a popular matching that is neither stable nor dominant if and only if $G$ admits a popular matching
that matches all nodes except $z$ and $z'$.
\end{theorem}
\subsection{Desired popular matchings in $G$}
We will call a matching $M$ in $G$ that matches all nodes except $z$ and $z'$ a {\em desired popular matching}
here. Let $M$ be such a matching and let $\vec{\alpha} \in\{0,\pm 1\}^n$ be a witness of $M$, where $n$ is the number of
nodes in $G$.
Recall Definition~\ref{def:stab-domn} from Section~\ref{prelims}.
We say a gadget is in {\em unit} (similarly, {\em zero}) state in $\vec{\alpha}$ if for any node $u$ in this gadget,
we have $\alpha_u \in \{\pm 1\}$ (resp., $\alpha_u = 0$). The following two observations will be important here.
\begin{itemize}
\item[1.] All level~3 gadgets have to be in {\em unit} state in $\vec{\alpha}$.
\item[2.] All level~0 gadgets have to be in {\em zero} state in $\vec{\alpha}$.
\end{itemize}
The nodes $s^c_0$ and $t^c_0$, for all clauses $c$, are left unmatched in any stable matching in $G$.
Since $M$ has to match the unstable nodes $s^c_0$ and $t^c_0$ for all clauses $c$,
$\alpha_{s^c_0} = \alpha_{t^c_0} = -1$ for all $c$ (by Lemma~\ref{prop0}). Thus the first observation follows
from Lemmas~\ref{prop1} and \ref{lem:conn-comp}. We prove the second observation below.
\begin{new-claim}
Any level~0 gadget has to be in zero state in $\vec{\alpha}$.
\end{new-claim}
\begin{proof}
Consider any level~0 gadget, say on nodes $a^c_1,b^c_1,a^c_2,b^c_2$. Since $M$ is a popular matching, we have $\alpha_{a^c_1} + \alpha_z \ge \mathsf{wt}_M(a^c_1,z)$ and
$\alpha_{z'} + \alpha_{b^c_1} \ge \mathsf{wt}_M(z',b^c_1)$. Since $z$ and $z'$ are unmatched in $M$, $\alpha_{z} = \alpha_{z'} = 0$ (by Lemma~\ref{prop0}).
Since $\mathsf{wt}_M(a^c_1,z) = 0$ and $\mathsf{wt}_M(z',b^c_1) = 0$, we have $\alpha_{a^c_1} \ge 0$ and $\alpha_{b^c_1} \ge 0$.
The edge $(a^c_1,b^c_1)$ is a popular edge. Thus $\alpha_{a^c_1} + \alpha_{b^c_1} = \mathsf{wt}_M(a^c_1,b^c_1)$ (by Lemma~\ref{prop0}) and we have
$\mathsf{wt}_M(a^c_1,b^c_1) = 0$ (as seen in the last part of the proof of Lemma~\ref{bipartite:lemma3}).
Thus $\alpha_{a^c_1} + \alpha_{b^c_1} = 0$. So $\alpha_{a^c_1} = \alpha_{b^c_1} = 0$.
Thus this gadget is in zero state and this holds for every level~0 gadget.
$\lozenge$
\end{proof}
Lemmas~\ref{lemma1}-\ref{lemma3} are easy to show and are crucial to our NP-hardness proof. Let $c = X_i \vee X_j \vee X_k$ be any clause
in $\phi$. In our proofs below, we are omitting the superscript $c$ from node names for the sake of readability. Recall that
$\vec{\alpha} \in \{0, \pm 1\}^n$ is a witness of our ``desired popular matching'' $M$.
\begin{lemma}
\label{lemma1}
For every clause $c$ in $\phi$, at least two of the three level~2 gadgets corresponding to $c$ have to be in unit state in $\vec{\alpha}$.
\end{lemma}
\begin{proof}
Let $c$ be any clause in $\phi$.
We know from observation~1 above that the level~3 gadget corresponding to $c$ is in {\em unit} state in $\vec{\alpha}$.
So $\alpha_{s_0} = \alpha_{t_0} = -1$.
Also, one of the following three cases holds: (1)~$(s_0,t_1)$ and $(s_1,t_0)$ are in $M$,
(2)~$(s_0,t_2)$ and $(s_2,t_0)$ are in $M$,
(3)~$(s_0,t_3)$ and $(s_3,t_0)$ are in $M$.
\begin{itemize}
\item In case~(1), the node $t_0$ prefers $p_4$ and $p_7$ to its partner $s_1$ in $M$. Thus $\mathsf{wt}_M(p_4,t_0) = \mathsf{wt}_M(p_7,t_0) = 0$.
Since $\alpha_{t_0} = -1$, we need to have $\alpha_{p_4} = \alpha_{p_7} = 1$ so that $\alpha_{p_4} + \alpha_{t_0} \ge \mathsf{wt}_M(p_4,t_0)$ and
$\alpha_{p_7} + \alpha_{t_0} \ge \mathsf{wt}_M(p_7,t_0)$. Thus the middle and rightmost level~2 gadgets corresponding to $c$
(see Fig.~\ref{level2:example}) have to be in unit state in $\vec{\alpha}$.
\item In case~(2), the node $t_0$ prefers $p_7$ to its partner $s_2$ in $M$ and the node $s_0$ prefers $q_0$ to its partner $t_2$
in $M$. Thus $\alpha_{p_7} = \alpha_{q_0} = 1$ so that $\alpha_{p_7} + \alpha_{t_0} \ge \mathsf{wt}_M(p_7,t_0)$ and
$\alpha_{s_0} + \alpha_{q_0} \ge \mathsf{wt}_M(s_0,q_0)$. Thus the leftmost and rightmost level~2 gadgets corresponding to $c$
(see Fig.~\ref{level2:example}) have to be in unit state in $\vec{\alpha}$.
\item In case~(3), the node $s_0$ prefers $q_0$ and $q_3$ to its partner $t_3$ in $M$.
Thus $\alpha_{q_0} = \alpha_{q_3} = 1$ so that $\alpha_{s_0} + \alpha_{q_0} \ge \mathsf{wt}_M(s_0,q_0)$ and
$\alpha_{s_0} + \alpha_{q_3} \ge \mathsf{wt}_M(s_0,q_3)$. Thus the leftmost and middle level~2 gadgets corresponding to $c$
(see Fig.~\ref{level2:example}) have to be in unit state in $\vec{\alpha}$. \qed
\end{itemize}
\end{proof}
\begin{lemma}
\label{lemma2}
For any clause $c$ in $\phi$, {\em at least one} of the level~1 gadgets
corresponding to variables in $c$ is in unit state in $\vec{\alpha}$.
\end{lemma}
\begin{proof}
We showed in Lemma~\ref{lemma1} that at least two of the three level~2 gadgets corresponding to $c$ are in unit state in $\vec{\alpha}$.
We have three cases here (see Fig.~\ref{level2:example}): (i)~the leftmost and middle gadgets are in unit state in $\vec{\alpha}$,
(ii)~the leftmost and rightmost gadgets are in unit state in $\vec{\alpha}$, and
(iii)~the middle and rightmost gadgets are in unit state in $\vec{\alpha}$.
Let us consider case~(i) first.
It follows from the proof of Lemma~\ref{lemma1} that $\alpha_{q_0} = \alpha_{q_3} = 1$. This also forces
$\alpha_{p_1} = \alpha_{p_4} = 1$. This is because $\alpha_{p_1}$ and $\alpha_{p_4}$ have to be non-negative since $p_1$ and $p_4$ are
neighbors of the unmatched node $z$. And so by Lemma~\ref{prop1}, $\alpha_{p_1} = 1$ and $\alpha_{p_4} = 1$.
As $q_0$ and $p_1$ are the most preferred neighbors of $p_2$ and $q_2$ while $p_2$ and $q_2$ are the least preferred neighbors of $q_0$
and $p_1$ in $M$ (by Theorem~\ref{thm:separate}), we have $\mathsf{wt}_M(p_2,q_0) = \mathsf{wt}_M(p_1,q_2) = 0$.
Since $(p_2,q_0)$ and $(p_1,q_2)$ are popular edges, it follows from Lemma~\ref{prop0} that $\alpha_{p_2} + \alpha_{q_0} = 0$ and
$\alpha_{p_1} + \alpha_{q_2} = 0$. Thus $\alpha_{p_2} = \alpha_{q_2} = -1$ and so $(p_2,q_2) \notin M$. So either $(p_2,q_0),(p_0,q_2)$
are in $M$ or $(p_2,q_1),(p_1,q_2)$ are in $M$ (by Theorem~\ref{thm:separate}). This means that either $\mathsf{wt}_M(p_2,y_j) = 0$ or
$\mathsf{wt}_M(x_k,q_2) = 0$. That is, either $\alpha_{y_j} = 1$ or $\alpha_{x_k} = 1$.
Similarly, $\mathsf{wt}_M(p_5,q_3) = \mathsf{wt}_M(p_4,q_5) = 0$ and we can conclude that $\alpha_{p_5} = \alpha_{q_5} = -1$.
Thus $(p_5,q_5) \notin M$ and either $(p_5,q_3),(p_3,q_5)$ are in $M$ or $(p_5,q_4),(p_4,q_5)$ are in $M$ (by Theorem~\ref{thm:separate}).
This means that either $\mathsf{wt}_M(p_5,y_k) = 0$ or $\mathsf{wt}_M(x_i,q_5) = 0$. That is, either $\alpha_{y_k} = 1$ or $\alpha_{x_i} = 1$.
Thus either (1)~the gadgets corresponding to variables $X_i$ and $X_j$ are in unit state or
(2)~the gadget corresponding to $X_k$ is in unit state in $\vec{\alpha}$.
Thus in this case at least {\em one} of the level~1 gadgets corresponding to variables in $c$ is in unit state in $\vec{\alpha}$.
The proofs of case~(ii) and case~(iii) are quite similar. Let us consider case~(ii) next.
It follows from the proof of Lemma~\ref{lemma1} that $\alpha_{q_0} = \alpha_{p_7} = 1$.
This also forces $\alpha_{p_1} = \alpha_{q_6} = 1$ and $\alpha_{p_2} = \alpha_{q_2} = -1$.
By the same reasoning as in case~(i), we have either $\mathsf{wt}_M(p_2,y_j) = 0$ or $\mathsf{wt}_M(x_k,q_2) = 0$.
That is, either $\alpha_{y_j} = 1$ or $\alpha_{x_k} = 1$. Similarly, $\alpha_{p_8} = \alpha_{q_8} = -1$ and
either $\mathsf{wt}_M(p_8,y_i) = 0$ or $\mathsf{wt}_M(x_j,q_8) = 0$, i.e., $\alpha_{y_i} = 1$ or $\alpha_{x_j} = 1$.
So either (1)~the gadgets corresponding to variables $X_i$ and $X_k$ are in unit state or (2)~the gadget corresponding to $X_j$
is in unit state in $\vec{\alpha}$. Thus in this case also at least one of the level~1 gadgets corresponding to variables in $c$
is in unit state in $\vec{\alpha}$.
In case~(iii), it follows from the proof of Lemma~\ref{lemma1} that $\alpha_{p_4} = \alpha_{p_7} = 1$.
This forces $\alpha_{q_3} = \alpha_{q_6} = 1$ and $\alpha_{p_5} = \alpha_{q_5} = -1$.
By the same reasoning as in case~(i), we have either $\mathsf{wt}_M(p_5,y_k) = 0$ or $\mathsf{wt}_M(x_i,q_5) = 0$.
That is, either $\alpha_{y_k} = 1$ or $\alpha_{x_i} = 1$. Similarly, either $\alpha_{y_i} = 1$ or $\alpha_{x_j} = 1$.
Thus either (1)~the gadgets corresponding to variables $X_j$ and $X_k$ are in unit state or (2)~the gadget corresponding to $X_i$
is in unit state in $\vec{\alpha}$. Thus in this case also at least one of the level~1 gadgets corresponding to variables in $c$
is in unit state in $\vec{\alpha}$. \qed
\end{proof}
\begin{lemma}
\label{lemma3}
For any clause $c$ in $\phi$, {\em at most one} of the level~1 gadgets corresponding to variables in $c$ is
in unit state in $\vec{\alpha}$.
\end{lemma}
\begin{proof}
We know from observation~2 made at the start of this section that all the three level~0 gadgets corresponding to $c$ are in zero state
in $\vec{\alpha}$. So $\alpha_{a_t} = \alpha_{b_t} = 0$ for $1 \le t \le 6$. We know from Theorem~\ref{thm:separate} that
either $(a_1,b_1),(a_2,b_2)$ are in $M$ or $(a_1,b_2),(a_2,b_1)$ are in $M$. So either $\mathsf{wt}_M(a_1,y'_j) = 0$ or $\mathsf{wt}_M(x'_k,b_1) = 0$.
So either $\alpha_{y'_j} \ge 0$ or $\alpha_{x'_k} \ge 0$.
Consider any variable $X_r$. We know from Theorem~\ref{thm:separate} that either $\{(x_r,y'_r),(x'_r,y_r)\} \subseteq M$ or
$\{(x_r,y_r),(x'_r,y'_r)\} \subseteq M$. It follows from Lemma~\ref{prop0} that $\alpha_{x_r} + \alpha_{y'_r} = \mathsf{wt}_M(x_r,y'_r) = 0$ and
$\alpha_{x'_r} + \alpha_{y_r} = \mathsf{wt}_M(x'_r,y_r) = 0$.
Also due to the nodes $z$ and $z'$, we have $\alpha_{x_r} \ge 0$ and $\alpha_{y_r} \ge 0$.
Thus $\alpha_{y'_r} \le 0$ and $\alpha_{x'_r} \le 0$.
Hence we can conclude that either $\alpha_{y'_j} = 0$ or $\alpha_{x'_k} = 0$. In other words, either the gadget corresponding to $X_j$
or the gadget corresponding to $X_k$ is in zero state. Similarly, by analyzing the level~0 gadget on nodes $a^c_t,b^c_t$ for
$t = 3,4$, we can show that either the gadget corresponding to $X_k$ or the gadget corresponding to $X_i$ is in zero state.
Also, by analyzing the level~0 gadget on nodes $a^c_t,b^c_t$ for $t = 5,6$, either the gadget corresponding to $X_i$ or the gadget
corresponding to $X_j$ is in zero state.
Thus at least 2 of the 3 gadgets corresponding to variables in clause $c$ are in zero state in $\vec{\alpha}$. Hence at most 1 of
the 3 gadgets corresponding to variables in $c$ is in unit state in $\vec{\alpha}$. \qed
\end{proof}
\begin{theorem}
\label{thm1}
If $G$ admits a desired popular matching then $\phi$ has a 1-in-3 satisfying assignment.
\end{theorem}
\begin{proof}
Let $M$ be a desired popular matching in $G$. That is, $M$ matches all nodes except $z$ and $z'$. Let $\vec{\alpha} \in \{0,\pm 1\}^n$
be a witness of $M$.
We will now define a $\mathsf{true}$/$\mathsf{false}$ assignment for the variables in $\phi$.
For each variable $X_r$ in $\phi$ do:
\begin{itemize}
\item If the level~1 gadget corresponding to $X_r$ is in {\em unit} state in $\vec{\alpha}$, i.e., if $\alpha_{x_r} = \alpha_{y_r} = 1$
and $\alpha_{x'_r} = \alpha_{y'_r} = -1$ or equivalently, if $(x_r,y'_r)$ and $(x'_r,y_r)$ are in $M$, then set $X_r$ to $\mathsf{true}$.
\item Else set $X_r$ to $\mathsf{false}$, i.e., the level~1 gadget corresponding to $X_r$ is in {\em zero} state in $\vec{\alpha}$ or
equivalently, $(x_r,y_r)$ and $(x'_r,y'_r)$ are in $M$.
\end{itemize}
Since $M$ is our desired popular matching,
it follows from Lemmas~\ref{lemma2} and \ref{lemma3} that for every clause $c$ in $\phi$, {\em exactly one} of the three level~1 gadgets
corresponding to variables in $c$ is in unit state in $\vec{\alpha}$.
When the gadget corresponding to $X_r$ is in unit state, we have
$\alpha_{x_r} = \alpha_{y_r} = 1$ and $\alpha_{x'_r} = \alpha_{y'_r} = -1$. This is due to the fact that
$\alpha_{x_r} \ge 0$ and $\alpha_{y_r} \ge 0$ due to the nodes $z$ and $z'$, respectively.
Thus $X_r$ is in unit state in $\vec{\alpha}$ if and only if the edges $(x_r,y'_r)$ and $(x'_r,y_r)$ are in $M$.
Hence for each clause $c$ in $\phi$, exactly one of the
variables in $c$ is set to $\mathsf{true}$. Hence this is a 1-in-3 satisfying assignment for $\phi$. \qed
\end{proof}
\subsection{The converse}
\label{converse}
Suppose $\phi$ admits a 1-in-3 satisfying assignment. We will now use this assignment to construct a desired popular matching $M$ in $G$.
The edge $(a_0,b_0)$ is in $M$. For each variable $X_r$ in $\phi$ do:
\begin{itemize}
\item if $X_r = \mathsf{true}$ then include the edges $(x_r,y'_r)$ and $(x'_r,y_r)$ in $M$;
\item else include the edges $(x_r,y_r)$ and $(x'_r,y'_r)$ in $M$.
\end{itemize}
Consider a clause $c = X_i \vee X_j \vee X_k$. We know that exactly one of $X_i,X_j,X_k$ is set to $\mathsf{true}$ in our assignment.
Assume without loss of generality that $X_j = \mathsf{true}$.
We will include the following edges in $M$ from all the gadgets corresponding to $c$.
Corresponding to the level~0 gadgets for $c$ (see Fig.~\ref{level0:example}):
\begin{itemize}
\item Add the edges $(a^c_1,b^c_1), (a^c_2,b^c_2)$ from the leftmost gadget and $(a^c_5,b^c_6),(a^c_6,b^c_5)$
from the rightmost gadget to $M$.
We will select $(a^c_3,b^c_3),(a^c_4,b^c_4)$ from the middle gadget. (Note that we could also have selected
$(a^c_3,b^c_4),(a^c_4,b^c_3)$ from the middle gadget.)
\end{itemize}
Corresponding to the level~2 gadgets for $c$ (see Fig.~\ref{level2:example}):
\begin{itemize}
\item Add the edges $(p^c_0,q^c_0),(p^c_2,q^c_1),(p^c_1,q^c_2)$ from the leftmost gadget,
$(p^c_3,q^c_3),(p^c_4,q^c_4),(p^c_5,q^c_5)$ from the middle gadget, and
$(p^c_6,q^c_8),(p^c_7,q^c_7),(p^c_8,q^c_6)$ from the rightmost gadget to $M$.
\end{itemize}
Since the leftmost and rightmost level~2 gadgets (see Fig.~\ref{level2:example}) are dominant, we will include $(s^c_0,t^c_2)$ and $(s^c_2,t^c_0)$ in $M$. Hence
\begin{itemize}
\item Add the edges $(s^c_0,t^c_2), (s^c_1,t^c_1),(s^c_2,t^c_0),(s^c_3,t^c_3)$ to $M$.
\end{itemize}
Thus $M$ matches all nodes except $z$ and $z'$. We will show the following theorem now.
\begin{theorem}
The matching $M$ described above is a popular matching in $G$.
\end{theorem}
\begin{proof}
We will prove $M$'s popularity by describing a witness $\vec{\alpha} \in \{0,\pm 1\}^n$. That is, $\sum_{u\in A\cup B} \alpha_u$ will be 0 and every edge will be
covered by the sum of $\alpha$-values of its endpoints, i.e., $\alpha_u + \alpha_v \ge \mathsf{wt}_M(u,v)$ for all edges $(u,v)$ in $E$. We will also have
$\alpha_u \ge \mathsf{wt}_M(u,u)$ for all nodes $u$.
Set $\alpha_{a_0} = \alpha_{b_0} = \alpha_z = \alpha_{z'} = 0$. Also set $\alpha_u = 0$ for all nodes $u$ in the gadgets with {\em no} ``blocking edges''.
This includes all level~0 gadgets,
and the gadgets in level~1 that correspond to variables set to $\mathsf{false}$, and also the level~2 gadgets
such as the gadget with nodes $p^c_3,q^c_3,p^c_4,q^c_4,p^c_5,q^c_5$ (the middle gadget in Fig.~\ref{level2:example}) since we assumed $X_j = \mathsf{true}$.
For every variable $X_r$ assigned to $\mathsf{true}$: set $\alpha_{x_r} = \alpha_{y_r} = 1$ and $\alpha_{x'_r} = \alpha_{y'_r} = -1$.
For every clause, consider the level~2 gadgets corresponding to this clause with ``blocking edges'':
for our clause $c$, these are the leftmost and rightmost gadgets in Fig.~\ref{level2:example} (since we assumed $X_j = \mathsf{true}$).
Recall that we included in $M$ the edges $(p^c_0,q^c_0),(p^c_2,q^c_1),(p^c_1,q^c_2)$ from the leftmost gadget.
We will set $\alpha_{q^c_0} = \alpha_{p^c_1} = \alpha_{q^c_1} = 1$ and $\alpha_{p^c_0} = \alpha_{p^c_2} = \alpha_{q^c_2} = -1$.
We also included in $M$ the edges $(p^c_6,q^c_8),(p^c_7,q^c_7),(p^c_8,q^c_6)$ from the rightmost gadget.
We will set $\alpha_{p^c_6} = \alpha_{q^c_6} = \alpha_{p^c_7} = 1$ and $\alpha_{q^c_7} = \alpha_{p^c_8} = \alpha_{q^c_8} = -1$.
In the level~3 gadget corresponding to $c$, we included in $M$ the edges $(s^c_0,t^c_2), (s^c_1,t^c_1),(s^c_2,t^c_0)$, $(s^c_3,t^c_3)$.
We will set $\alpha_{t^c_1} = \alpha_{s^c_2} = \alpha_{t^c_2} = \alpha_{s^c_3} = 1$ and $\alpha_{s^c_0} = \alpha_{t^c_0} = \alpha_{s^c_1} = \alpha_{t^c_3} = -1$.
The claim below shows that $\vec{\alpha}$ is indeed a valid witness for $M$. Thus $M$ is a popular matching. \qed
\end{proof}
\begin{new-claim}
The vector $\vec{\alpha}$ defined above is a witness to $M$'s popularity.
\end{new-claim}
\begin{proof}
For any edge $(u,v) \in M$, we have $\alpha_u + \alpha_v = 0$, also $\alpha_z = \alpha_{z'} = 0$. Thus $\sum_{u \in A \cup B}\alpha_u = 0$.
For any neighbor $v$ of $z$ or $z'$, we have
$\alpha_v \ge 0$. Thus all edges incident to $z$ or $z'$ are covered by the sum of $\alpha$-values of their endpoints.
It is also easy to see that for every intra-gadget edge $(u,v)$, we have $\alpha_u + \alpha_v \ge \mathsf{wt}_M(u,v)$.
In particular, the endpoints of
every blocking edge to $M$ have their $\alpha$-value set to 1. When $X_j = \mathsf{true}$, the edge
$(x_j,y_j)$ is a blocking edge to $M$ and so are $(p^c_1,q^c_1),(p^c_6,q^c_6),(s^c_2,t^c_2)$ in the gadgets involving clause $c$.
We will now check that the edge covering constraint holds for all edges $(u,v)$ where $u$ and $v$ belong to different levels.
\begin{itemize}
\item Consider edges in $G$ between a
level~0 gadget and a level~1 gadget. When $X_j = \mathsf{true}$, the edges $(a^c_1,y'_j)$ and $(x'_j,b^c_5)$ are the most interesting as they have one endpoint
in a gadget with $\alpha$-values 0 and another endpoint in a gadget with $\alpha$-values equal to $\pm 1$.
Observe that both these edges are labeled $(-,-)$. This is because $a^c_1$ prefers its partner $b^c_1$
to $y'_j$ and symmetrically, $y'_j$ prefers its partner $x_j$ to $a^c_1$. Thus $\mathsf{wt}_M(a^c_1,y'_j) = -2 < \alpha_{a^c_1} + \alpha_{y'_j} = 0 - 1$.
Similarly, $b^c_5$ prefers its partner $a^c_6$ to $x'_j$ and symmetrically, $x'_j$ prefers its partner $y_j$ to $b^c_5$.
Thus $\mathsf{wt}_M(x'_j,b^c_5) = -2 < \alpha_{x'_j} + \alpha_{b^c_5} = - 1 + 0$.
\item We will now consider edges in $G$ between a level~1 gadget and a level~2 gadget.
We have $\mathsf{wt}_M(p^c_2,y_j) = 0$ since $p^c_2$ prefers $y_j$ to its partner $q^c_1$ while $y_j$ prefers its partner $x'_j$ to $p^c_2$. We have
$\alpha_{p^c_2} + \alpha_{y_j} = -1 + 1 = \mathsf{wt}_M(p^c_2,y_j) = 0$. The edge $(x_k,q^c_2)$ is labeled $(-,-)$ and we have $\alpha_{x_k} = 0$ and
$\alpha_{q_2} = -1$.
Similarly, the edge $(p^c_8,y_i)$ is labeled $(-,-)$ and so this is covered by the sum of $\alpha$-values of its endpoints.
We have $\mathsf{wt}_M(x_j,q^c_8) = 0 = 1 - 1 = \alpha_{x_j} + \alpha_{q^c_8}$.
We also have $\mathsf{wt}_M(p^c_5,y_k) = 0$ and $\alpha_{p^c_5} = \alpha_{y_k} = 0$. Similarly,
$\mathsf{wt}_M(x_i,q^c_5) = 0$ and $\alpha_{x_i} = \alpha_{q^c_5} = 0$. Thus all these edges are covered.
\item We will now consider edges in $G$ between a level~2 gadget and a level~3 gadget. First, consider the edges
$(s^c_0,q^c_0), (s^c_0,q^c_3), (p^c_7,t^c_0), (p^c_4,t^c_0)$.
We have $\mathsf{wt}_M(s^c_0,q^c_0) = 0$ and $\alpha_{s^c_0} = -1, \alpha_{q^c_0} = 1$, so this edge is covered. Similarly, $\mathsf{wt}_M(p^c_7,t^c_0) = 0$ and
$\alpha_{p^c_7} = 1,\alpha_{t^c_0} = -1$. The edges $(s^c_0,q^c_3)$ and $(p^c_4,t^c_0)$ are labeled $(-,-)$, so they are also covered.
Next consider the edges $(s^c_0,q^{c_i}_{3j+2})$ and $(p^{c_i}_{3j+2},t^c_0)$ for any clause $c_i$ and $j \in \{0,1,2\}$.
It is easy to see that these edges are labeled $(-,-)$, so these edges are also covered.
\item Finally consider the edges between a level~1 gadget and a level~3 gadget. Corresponding to clause $c$, these edges are $(s^c_0,y'_i)$ and $(x'_i,t^c_0)$
for any $i \in [n_0]$.
It is again easy to see that these edges are labeled $(-,-)$ and so they are covered. Thus it follows that $\vec{\alpha}$ is a witness to $M$'s popularity.
$\lozenge$
\end{itemize}
\end{proof}
\subsection{Proof of Theorem~\ref{thm:separate}}
\label{sec:proof-separate}
Let $c = X_i \vee X_j \vee X_k$ be a clause in $\phi$. We will show in the following claims that no edge between 2 different gadgets can be popular.
\begin{new-claim}
No edge between a level~0 node and a level~1 node can be popular.
\end{new-claim}
\begin{proof}
Consider any such edge in $G$, say $(a^c_1,y'_j)$. In order to show this edge cannot be present in any popular matching, we will show a popular matching $S$ along
with a witness $\vec{\alpha}$ such that $\alpha_{a^c_1} + \alpha_{y'_j} > \mathsf{wt}_S(a^c_1,y'_j)$. Then it will immediately follow from the {\em slackness} of this edge that
$(a^c_1,y'_j)$ is not used in any popular matching (by Lemma~\ref{prop0}).
Let $S$ be the men-optimal stable matching in $G$. The vector $\vec{\alpha} = \vec{0}$ is a witness to $S$. The edges $(a^c_1,b^c_1)$
and $(x'_j,y'_j)$ belong to $S$, so we have $\mathsf{wt}_S(a^c_1,y'_j) = -2$ while $\alpha_{a^c_1} = \alpha_{y'_j} = 0$. Thus $(a^c_1,y'_j)$ is not a popular edge.
We can similarly show that $(x'_k,b^c_1)$ is not a popular edge by considering the women-optimal stable matching $S'$.
$\lozenge$
\end{proof}
\begin{new-claim}
No edge between a level~1 node and a level~2 node is popular.
\end{new-claim}
\begin{proof}
Consider any such edge in $G$, say $(p^c_2,y_j)$.
Consider the dominant matching $M^*$ that contains the edges $(p^c_0,q^c_2),(p^c_2,q^c_0)$ and also
$(x_j,y'_j),(x'_j,y_j)$. Note that $\mathsf{wt}_{M^*}(p^c_2,y_j) = -2$.
A witness $\vec{\beta}$ to $M^*$ sets $\beta_{p^c_2} = \beta_{q^c_2} = -1$ and $\beta_{x_j} = \beta_{y_j} = 1$. This is because $(x_j,y_j)$ and $(p^c_0,q^c_0)$
are blocking edges to $M^*$, so $\beta_{x_j} = \beta_{y_j} = 1$ and similarly, $\beta_{p^c_0} = \beta_{q^c_0} = 1$ (this makes $\beta_{p^c_2} = \beta_{q^c_2} = -1$).
So $\beta_{p^c_2} + \beta_{y_j} = 0$ while $\mathsf{wt}_{M^*}(p^c_2,y_j) = -2$. Thus this edge
is slack and so it cannot be a popular edge.
We can similarly show that the edge $(x_k,q^c_2)$ is not popular by considering the dominant matching
that includes the edges $(p^c_1,q^c_2)$ and $(p^c_2,q^c_1)$.
$\lozenge$
\end{proof}
\begin{new-claim}
No edge between a node in level~3 and a node in levels~1 or 2 is popular.
\end{new-claim}
\begin{proof}
Consider the edge $(s^c_0,q^c_0)$. The dominant matching $M^*$ includes the edges $(s^c_0,t^c_1),(s^c_1,t^c_0)$, and $(p^c_2,q^c_0)$.
So we have $\mathsf{wt}_{M^*}(s^c_0,q^c_0) = -2$ while $\beta_{s^c_0} = -1$ and $\beta_{q^c_0} = 1$, where
$\vec{\beta}$ is a witness to $M^*$. Hence $(s^c_0,q^c_0)$ is not a popular edge. It can similarly be shown for any edge
$e \in \{(s^c_0,q^c_3),(p^c_4,t^c_0),(p^c_7,t^c_0)\}$ that $e$ is not a popular edge.
Suppose the edge $(s^c_0,u)$ for some $u \in \{y'_i: i\in[n_0]\} \cup \{q^{c_i}_2,q^{c_i}_5,q^{c_i}_8: c_i$ is a clause$\}$ belongs to a popular matching $M$.
In order to show a contradiction,
consider the edge $(s^c_0,t^c_1)$. We have $\mathsf{wt}_M(s^c_0,t^c_1) = 0$ since $s^c_0$ prefers $t^c_1$ to $u$ while $t^c_1$ prefers its partner in $M$
(this is $s^c_1$) to $s^c_0$. Since $\alpha_{s^c_0} = -1$, it has to be the case that $\alpha_{t^c_1} = 1$.
Since $s^c_0$ is matched to a node outside $\{t^c_1,t^c_2,t^c_3\}$, the node $t^c_0$ also has to be matched to a node outside $\{s^c_1,s^c_2,s^c_3\}$ ---
otherwise one of the 3 stable nodes $t^c_1,t^c_2,t^c_3$ would be left unmatched in $M$; however as $M$ is popular, every stable node
has to be matched in $M$. We have also seen
earlier that neither $(p^c_4,t^c_0)$ nor $(p^c_7,t^c_0)$ is popular. Thus $t^c_0$ has to be matched to a neighbor worse than $s^c_1$.
Thus $\mathsf{wt}_M(s^c_1,t^c_0) = 0$ and so $\alpha_{s^c_1} = 1$. Since $(s^c_1,t^c_1)$ is a stable edge, it follows from Lemma~\ref{prop0} that
$\alpha_{s^c_1} + \alpha_{t^c_1} = \mathsf{wt}_M(s^c_1,t^c_1)$. However $\mathsf{wt}_M(s^c_1,t^c_1) = 0$ since $(s^c_1,t^c_1) \in M$ and we have just shown that
$\alpha_{s^c_1} = \alpha_{t^c_1} = 1$. This is a contradiction and thus $(s^c_0,u) \notin M$.
$\lozenge$
\end{proof}
It is easy to see that the nodes $z$ and $z'$ are in the same connected component of $F_G$ as the dominant matching $M^*$ contains the edges
$(a_0,z)$ and $(z',b_0)$ while any stable matching in $G$ contains $(a_0,b_0)$.
We will now show that any popular matching that matches $z$ and $z'$ has to match these nodes to $a_0$ and $b_0$, respectively.
\begin{lemma}
\label{bipartite:lemma1}
If $M$ is a popular matching in $G$ that matches $z$ and $z'$ then $\{(a_0,z),(z',b_0)\} \subseteq M$.
\end{lemma}
\begin{proof}
Suppose $(x_i,z) \in M$ for some $i \in [n_0]$. We know from the above claims that there is no popular edge
between $x_i$'s gadget and any neighbor in levels~0, 2, or 3. So $(x_i,z) \in M$ implies that $(z',y_i) \in M$ since all the 4 nodes $x_i,y_i,x'_i,y'_i$
have to be matched in $M$ and there is no other possibility of a popular edge incident to either $y_i$ or $y'_i$. Hence $(x'_i,y'_i) \in M$.
Thus $(x_i,y'_i)$ and $(x'_i,y_i)$ are blocking edges to $M$.
This means that $\alpha_{x_i} = \alpha_{y_i} = \alpha_{x'_i} = \alpha_{y'_i} = 1$. Note that $(x'_i,y'_i)$ is a stable edge and so
$\alpha_{x'_i} + \alpha_{y'_i} = \mathsf{wt}_M(x'_i,y'_i)$. However $\mathsf{wt}_M(x'_i,y'_i) = 0$ while $\alpha_{x'_i} = \alpha_{y'_i} = 1$. This is a contradiction and hence
$(x_i,z) \notin M$ for any $i \in [n_0]$. We can similarly show that neither $(p^c_{3j+1},z)$ nor $(z',q^c_{3j})$ is in $M$, for any $j \in \{0,1,2\}$ and any clause $c$.
So if $z$ and $z'$ are matched in $M$ then it has to be either with $a_0,b_0$ or with some level~0 neighbors.
Observe that if $(a^c_{2i-1},z) \in M$ then $(z',b^c_{2i-1}) \in M$ as the 4 nodes $a^c_{2i-1},b^c_{2i-1},a^c_{2i},b^c_{2i}$ have to be matched in $M$
and there is no other possibility of a popular edge incident to any of these 4 nodes (by our first claim in this section).
Suppose $(a^c_{2i-1},z)$ and $(z',b^c_{2i-1})$ are in $M$ for some $c$ and $i \in \{1,2,3\}$. Since $z$ prefers $a_0$ to any level~0
neighbor, we have $\mathsf{wt}_M(a_0,z) = 0$. Similarly, $\mathsf{wt}_M(z',b_0) = 0$. Since $\mathsf{wt}_M(a_{2i-1}^c, b_{2i-1}^c) = 2$, this implies that $\alpha_{a_{2i-1}^c} = \alpha_{b_{2i-1}^c} = 1$. Hence, $\alpha_z = \alpha_{z'} = -1$ and we have
$\alpha_{a_0} = \alpha_{b_0} = 1$. Note that $\alpha_{a_0} + \alpha_{b_0}$ has to be equal to $\mathsf{wt}_M(a_0,b_0)$ as $(a_0,b_0)$
is a popular edge.
If $a_0$ is not matched to $z$ (and so $b_0$ is not matched to $z'$), then
$(a_0,b_0) \in M$. So $\mathsf{wt}_M(a_0,b_0) = 0$, however $\alpha_{a_0} + \alpha_{b_0} = 2$, a contradiction.
Thus if $M$ matches $z$ and $z'$ then $\{(a_0,z),(z',b_0)\} \subseteq M$. \qed
\end{proof}
Thus the above claims and Lemma~\ref{bipartite:lemma1} show that every level~$i$ gadget (for $i = 0,1,2,3$) forms a distinct connected component
in the graph $F_G$ and the 4 nodes $a_0,b_0,z$, and $z'$ belong to their own connected component.
This finishes the proof of Theorem~\ref{thm:separate}.
We have shown a polynomial time reduction from 1-in-3 SAT to the problem of deciding if $G = (A \cup B, E)$ admits a popular matching that
matches all nodes except $z$ and $z'$. That is, we have shown the following theorem.
\begin{theorem}
\label{thm:section3}
The instance $G = (A \cup B, E)$ admits a popular matching that matches all nodes except $z$ and $z'$ if and only if $\phi$ is in 1-in-3 SAT.
\end{theorem}
Theorem~\ref{bipartite:theorem} showed that $G$ admits a popular matching that is neither stable nor dominant if and only if $G$ admits a popular matching
that matches all nodes except $z$ and $z'$. Hence Theorem~\ref{final-thm} follows from Theorem~\ref{thm:section3}.
\section{Related hardness results}\label{sec:consequences}
\subsection{Popular matchings with forced/forbidden elements}\label{sec:forbidden-forced}
We now consider the \emph{popular matching problem in $G = (A \cup B, E)$ with forced/forbidden elements} (\texttt{pmffe}), which is the variant of the popular matching problem in a bipartite graph $G = (A \cup B, E)$, where our input also consists of some forced (resp., forbidden) edges $E_1$ (resp., $E_0$), and/or some forced (resp. forbidden) nodes $U_1$ (resp. $U_0$). The goal is to compute a popular matching $M$ in $G$ where all forced elements are included in $M$
and no forbidden element is included in $M$.
The following result is immediate.
\begin{corollary}
\label{cor:forced-forb1}
The popular matching problem in $G = (A \cup B, E)$ with a given forced node set $U_1$ and a given forbidden node set $U_0$ is NP-hard for $|U_0| = |U_1| = 1$.
\end{corollary}
The proof of Corollary~\ref{cor:forced-forb1} follows from our instance $G$ in Section~\ref{sec:hardness} with $U_0 = \{z\}$ and $U_1 = \{s^c_0\}$. It follows from the proof of
Lemma~\ref{bipartite:lemma3} that the nodes $s^c_0,t^c_0$ for all clauses $c$ have to be matched in such a popular matching $M$. So $M$ has to match all nodes
in $G$ except $z$ and $z'$. Theorem~\ref{thm:section3} showed that finding such a popular matching $M$ is NP-hard.
The variant of the popular matching problem in $G = (A \cup B, E)$ with a given forced node set $U_1$ where $|U_1| = 1$ and a given forced/forbidden edge set
$E_0 \cup E_1$ where $|E_0 \cup E_1| = 1$ is also NP-hard. To show this, consider $U_1 = \{s^c_0\}$ and either $E_1 = \{(a_0,b_0)\}$ or $E_0 = \{(a_0,z)\}$.
The forced node set forces the nodes $s^c_0,t^c_0$ for all clauses $c$ to be matched in our popular matching $M$ while $E_1$ (similarly, $E_0$)
forces $z$ and $z'$ to be unmatched in $M$.
In order to show the NP-hardness of the variant with $|E_0| = 2$ or $|E_1| = 2$, we will augment our instance $G$ in Section~\ref{sec:hardness} with an extra
level~1 gadget $X_0$ (see Fig.~\ref{level1:example}). Call the new instance $G_0$. The gadget $X_0$ has 4 nodes $x_0,x'_0,y_0,y'_0$ with the following preferences:
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&x_0\colon \, y_0 \succ y'_0 \qquad\qquad && y_0\colon \, x_0 \succ x'_0 \\
&x'_0\colon \, y_0 \succ y'_0 \succ t^{c_1}_0 \succ \cdots \succ t^{c_k}_0 \qquad\qquad && y'_0\colon \, x_0 \succ x'_0 \succ s^{c_1}_0 \succ \cdots \succ s^{c_k}_0\\
\end{align*}
\end{minipage}
Thus nodes in the gadget $X_0$ are not adjacent to $z$ or $z'$ or to any node in levels~0, 1, or 2 --- however $x'_0$ is adjacent to $t^c_0$ for all clauses $c$
and $y'_0$ is adjacent to $s^c_0$ for all clauses $c$. For each clause $c$, the node $y'_0$ is at the bottom of $s^c_0$'s
preference list and the node $x'_0$ is at the bottom of $t^c_0$'s preference list.
A stable matching in the instance $G_0$ includes the edges $(x_0,y_0)$ and $(x'_0,y'_0)$ while there is a dominant matching in this instance
with the edges $(x_0,y'_0)$ and $(x'_0,y_0)$. It is easy to extend Theorem~\ref{thm:separate} to show that the popular subgraph for the
instance $G_0$ is the popular subgraph $F_G$ along with an extra connected component with 4 nodes $x_0,y_0,x'_0,y'_0$.
We are now ready to show the following result.
\begin{lemma}
\label{lem:forced-forb2}
The popular matching problem in $G = (A \cup B, E)$ with a given forced edge set $E_1$ is NP-hard for $|E_1| = 2$.
\end{lemma}
The proof of Lemma~\ref{lem:forced-forb2} follows from the instance $G_0$ with $E_1 = \{(a_0,b_0), (x_0,y'_0)\}$.
Let $M$ be a popular matching in $G_0$ that includes the edges $(a_0,b_0)$ and $(x_0,y'_0)$.
Since $M$ has to contain $(a_0,b_0)$, it means that the nodes $z$ and $z'$ are unmatched in $M$.
Since $(x_0,y'_0) \in M$, it means that
$(x'_0,y_0) \in M$. So $(x_0,y_0)$ is a blocking edge to $M$ and we have $\alpha_{x_0} = \alpha_{y_0} = 1$ and $\alpha_{x'_0} = \alpha_{y'_0} = -1$.
This forces $s^c_0,t^c_0$ for all $c$ to be matched in $M$. The argument is the same as in Lemma~\ref{bipartite:lemma3} since the edges $(s^c_0,y'_0)$
and $(x'_0,t^c_0)$ would not be covered otherwise. This is because $\alpha_{x'_0} = \alpha_{y'_0} = -1$ and if $s^c_0,t^c_0$ are unmatched then
$\alpha_{s^c_0} = \alpha_{t^c_0} = 0$ and $\mathsf{wt}_M(s^c_0,y'_0) = \mathsf{wt}_M(x'_0,t^c_0) = 0$. This would make $\alpha_{s^c_0} + \alpha_{y'_0} < \mathsf{wt}_M(s^c_0,y'_0)$
and similarly, $\alpha_{x'_0} + \alpha_{t^c_0} < \mathsf{wt}_M(x'_0,t^c_0)$.
Thus $M$ has to be a popular matching that matches all nodes in $G_0$ except $z$ and $z'$. It is easy to see that the proof of Theorem~\ref{thm:section3} implies
that finding such a popular matching in $G_0$ is NP-hard.
We can similarly show that the popular matching problem in $G_0$ with a given forbidden edge set $E_0$ is NP-hard for $|E_0| = 2$.
For this, we will take $E_0 = \{(a_0,z),(x_0,y_0)\}$. This is equivalent to setting $E_1 = \{(a_0,b_0),(x_0,y'_0)\}$.
We can similarly show that this problem with a given forced edge set $E_1$ and a given forbidden edge set $E_0$ is NP-hard for $|E_0| = |E_1| = 1$.
For this, we will take $E_0 = \{(a_0,z)\}$ and $E_1 = \{(x_0,y'_0)\}$. This will force $s^c_0,t^c_0$ for all $c$ to be matched in $M$ while $z,z'$ are unmatched in $M$.
Finally, the variant with $|U_0| = 1$ and $|E_0\cup E_1| = 1$ follows by taking $U_0 = \{z\}$ and $E_0 = \{(x_0,y_0)\}$ or $E_1 = \{(x_0,y'_0)\}$.
We now put all those observations together in the following theorem.
\begin{theorem}
\label{thm:forced-forb3}
The popular matching problem in $G = (A \cup B, E)$ with forced/forbidden element set $\langle E_0, E_1, U_0, U_1\rangle$ is NP-hard
when (i)~$|E_0| = 2$, (ii)~$|E_1| = 2$, (iii)~$|E_0| = |E_1| = 1$, (iv)~$|U_0| = |U_1| = 1$, (v)~$|U_0| = 1$ and $|E_0\cup E_1| = 1$,
and (vi)~$|U_1| = 1$ and $|E_0\cup E_1| = 1$.
\end{theorem}
Note that when $|E_1|=1$ and $E_0=U_0=U_1=\emptyset$, \texttt{pmffe} reduces to the \emph{popular edge problem}, that can be solved in polynomial time~\cite{CK16}. Now suppose $|E_0|=1$ and $E_1=U_0=U_1=\emptyset$: we show below that a polynomial-time algorithm for \texttt{pmffe} follows from the algorithm for the popular edge problem. Define $E_s$ and $\overline{E}_s$ as:
$$\begin{array}{lll}
E_s &= \{ e \in E: \exists \text{ stable matching } M \text{ s.t. } e \in M\},\\
\overline{E}_s &= \{ e \in E: \exists \text{ stable matching } M \text{ s.t. } e \notin M\}.
\end{array}$$
$E_d$, $\overline{E}_d$ (resp., ${E_p}$, $\overline{E}_p$) are defined similarly, by replacing ``stable'' with ``dominant'' (resp., popular). It was proved in~\cite{CK16} that $E_p = E_s \cup E_d$. We now argue that $\overline{E}_p = \overline{E}_s \cup \overline{E}_d$.
Consider any $e \in \overline{E}_s \cup \overline{E}_d$. Since there is a stable or dominant matching that does not contain $e$, it follows that $e \in \overline{E}_p$. We conclude that $\overline{E}_p \supseteq \overline{E}_s \cup \overline{E}_d$. We will now show that $\overline{E}_p \subseteq \overline{E}_s \cup \overline{E}_d$.
Consider any $e=(i,j) \in \overline{E}_p$. There is a popular matching $M$ s.t. $e \notin M$, and $i$ or $j$ is matched (if $i$ and $j$ are unmatched then $(i,j) = (+,+)$, and $M$ is not popular). Without loss of generality assume $(j,k) \in M$. It follows from $E_p = E_s \cup E_d$ that there exists a stable or dominant matching $M'$ s.t. $(j,k) \in M'$, hence $(i,j) \notin M'$. Thus $\overline{E}_p \subseteq \overline{E}_s \cup \overline{E}_d$.
Since $\overline{E}_p = \overline{E}_s \cup \overline{E}_d$, we can solve the forbidden edge problem by checking if $e \in \overline{E}_s$ or $e \in \overline{E}_d$. For stable matchings, this can be done in polynomial time, see e.g.~\cite{GI}. For dominant matchings, it immediately follows from results from~\cite{CK16}.
\subsection{Weighted popular matching problems}
In this section, we are given, together with the usual bipartite graph and the rankings, a nonnegative edge weight vector $c\geq 0$. First, consider the problem of finding a popular matching of \emph{minimum} weight wrt $c$ (\texttt{min-wp}). Recall that \texttt{pmffe} with $|E_0|=2$, $U_1=U_0=E_1=\emptyset$ is NP-hard, as shown by the instance with graph $G_0$ and $E_0=\{(a_0,z),(x_0,y_0)\}$ (see Section \ref{sec:forbidden-forced}). Let the weights of $(a_0,z)$ and $(x_0,y_0)$ be equal to $1$, and let all other weights be $0$. We can conclude the following.
\begin{corollary}\label{cor:miwp}
\texttt{min-wp} is NP-hard.
\end{corollary}
Now consider the problem of finding a popular matching of \emph{maximum} weight wrt $c$. We denote this problem by \texttt{max-wp}. It was shown in \cite{Kav18} that this problem is NP-hard. We show here a tight inapproximability result.
\begin{theorem}\label{thr:mwp}
Unless P=NP, \texttt{max-wp} cannot be approximated in polynomial time to a factor better than $\frac{1}{2}$. On the other hand, there is a polynomial-time algorithm that computes a $\frac{1}{2}$-approximation to \texttt{max-wp}.
\end{theorem}
\begin{proof}
Consider the instance $G_0$ again. The hardness of approximating \texttt{max-wp} to a factor better than $\frac{1}{2}$ immediately follows from Lemma~\ref{lem:forced-forb2} by setting the weights of edges $(a_0,b_0)$ and $(x_0,y_0')$ to $1$ and all other edge weights to $0$.
We will now show that a popular matching in $G$ of weight at least $c(M^*)/2$ can be computed in polynomial time, where
$M^*$ is a max-weight popular matching in $G$.
It was shown in \cite{CK16} that any popular matching $M$ in $G = (A \cup B, E)$ can be
partitioned into $M_0 \cup M_1$ such that $M_0 \subseteq S$ and $M_1 \subseteq D$,
where $S$ is a stable matching and $D$ is a dominant matching in $G$.
Consider the following algorithm.
\begin{enumerate}
\item Compute a max-weight stable matching $S^*$ in $G$.
\item Compute a max-weight dominant matching $D^*$ in $G$.
\item Return the matching in $\{S^*, D^*\}$ with larger weight.
\end{enumerate}
Since all edge weights are non-negative, either the max-weight stable matching in $G$ or the max-weight dominant matching in $G$ has weight at least $c(M^*)/2$.
Thus Steps~1-3 compute a $\frac{1}{2}$-approximation for max-weight popular matching in $G = (A \cup B,E)$.
Regarding the implementation of this algorithm, both $S^*$ and $D^*$ can be computed in polynomial time~\cite{Rot92,CK16}.
Thus our algorithm runs in polynomial time. \qed
\end{proof}
\subsection{Popular and Dominant matchings in non-bipartite graphs}
In this section, we consider the popular (similarly, dominant) problem in general graphs. The only modification to the input with respect to the previous paragraphs, is that our input graph $G$ is not required to be bipartite. As usual, together with $G$, we are given a collection of rankings, one per node of $G$, with each node ranking its neighbors in a strict order of preference. The goal is to decide if $G$ admits a popular (resp., dominant) matching.
Theorem~\ref{thm:section3} showed that the problem of deciding if a bipartite instance $G = (A \cup B, E)$ admits a popular matching
that matches {\em exclusively} a given set $S \subset A \cup B$ is NP-hard (i.e., the nodes outside $S$ have to be left unmatched).
Let us call this the {\em exclusive popular set} problem.
We will now use the hardness of the exclusive popular set problem in the instance $G$ from Section~\ref{sec:hardness}
to show that the dominant problem in non-bipartite graphs is NP-hard. In order to show this, {\em merge} the nodes $z$ and $z'$ in the instance $G$
from Section~\ref{sec:hardness} into a single node $z$. Call the new graph $G'$.
The preference list of the node $z$ in $G'$ is all its level~1 neighbors in some order of preference, followed by
all its level~2 neighbors in some order of preference, followed by $a_0,b_0$, and then level~0 neighbors. The order among level~$i$ neighbors (for $i = 0,1,2$)
in this list does not matter.
\begin{lemma}
A popular matching $N$ in $G'$ is dominant if and only if $N$ matches all nodes in $G'$ except $z$.
\end{lemma}
\begin{proof}
Let $N$ be any popular matching in $G'$. Any popular matching has to match all stable nodes in $G'$~\cite{HK11},
thus $N$ matches all stable nodes in $G'$. Suppose some unstable node other than $z$ (say, $s^c_0$) is left unmatched in $N$.
We claim that $t^c_0$ also has to be left unmatched in $N$. Since $s^c_1$ and $t^c_1$ have no other neighbors, the edge $(s^c_1,t^c_1) \in N$
and so there is an augmenting path
$\rho = s^c_0$-$t^c_1$-$s^c_1$-$t^c_0$ with respect to $N$. Observe that $N$ is {\em not} more popular than $N \oplus \rho$, a larger matching.
Thus $N$ is not a dominant matching in $G'$.
In order to justify that $t^c_0$ also has to be left unmatched in $N$,
let us view $N$ as a popular matching in $G$.
We know that $s^c_0$ and $t^c_0$ belong to the same connected component in the popular
subgraph $F_G$ (by Lemma~\ref{lem:conn-comp}). So if $s^c_0$ is left unmatched in $N$, then $t^c_0$ is also unmatched in $N$ (by Lemma~\ref{prop1}).
Conversely, suppose $N$ is a popular matching in $G'$ that matches all nodes except $z$. Then there is no larger matching than $N$ in $G'$,
thus $N$ is a dominant matching in $G'$. \qed
\end{proof}
Thus a dominant matching exists in $G'$ if and only if there is a popular matching in $G'$ that matches all nodes except $z$.
This is equivalent to deciding if there exists a popular matching in $G$ that matches all nodes in $G$ except $z$ and $z'$. Thus we
have shown the following theorem.
\begin{theorem}
\label{second-thm}
Given a graph $G = (V,E)$ with strict preference lists, the problem of deciding if $G$ admits a dominant matching or not
is NP-hard. Moreover, this hardness holds even when $G$ admits a stable matching.
\end{theorem}
We will now show that the popular matching problem in non-bipartite graphs is also NP-hard.
For this, we will augment the graph $G'$ with the gadget $D$ given in Fig.~\ref{D:example}. Call the new graph $H$.
\begin{figure}
\caption{Each of $d_1,d_2,d_3$ is a top choice neighbor for another node here and $d_0$ is the last choice of $d_1,d_2,d_3$.}
\label{D:example}
\end{figure}
\noindent{\em The gadget $D$.}
There will be 4 nodes $d_0,d_1,d_2,d_3$ that form the gadget $D$ (see Fig.~\ref{D:example}).
The preferences of nodes in $D$ are given below.
\begin{minipage}[c]{0.45\textwidth}
\centering
\begin{align*}
&d_1\colon \, d_2 \succ d_3 \succ d_0 \qquad\qquad && d_2\colon \, d_3 \succ d_1 \succ d_0\\
&d_3\colon \, d_1 \succ d_2 \succ d_0 \qquad\qquad && d_0\colon \, d_1 \succ d_2 \succ d_3 \succ \cdots \\
\end{align*}
\end{minipage}
The node $d_0$ will be adjacent to all nodes in $H$, except $z$. The neighbors of $d_0$ that are not in $D$ are in the ``$\cdots$''
part of $d_0$'s preference list and the
order among these nodes does not matter. The node $d_0$ will be at the bottom of preference lists of all its neighbors.
\begin{lemma}
\label{new-lemma1}
For any popular matching $M$ in $H$, the following properties hold:
\begin{itemize}
\item[(1)] either $\{(d_0,d_1), (d_2,d_3)\} \subset M$ or $\{(d_0,d_2), (d_1,d_3)\} \subset M$.
\item[(2)] $M$ matches all nodes in $H$ except $z$.
\end{itemize}
\end{lemma}
\begin{proof}
Since each of $d_1,d_2,d_3$ is a top choice neighbor for some node in $H$, a popular matching in $H$ cannot leave any of these 3 nodes unmatched.
Since these 3 nodes have no neighbors outside themselves other than $d_0$,
a popular matching has to match $d_0$ to one of $d_1,d_2,d_3$.
Thus $d_0,d_1,d_2,d_3$ are matched among themselves in $M$.
The only possibilities for $M$ when restricted to $d_0,d_1,d_2,d_3$ are the pair of edges $(d_0,d_1), (d_2,d_3)$ and $(d_0,d_2), (d_1,d_3)$.
The third possibility
$(d_0,d_3),(d_1,d_2)$ is ``less popular than'' $(d_0,d_1),(d_2,d_3)$ as $d_0,d_2$, and $d_3$ prefer the latter to the former.
This proves part~(1) of the lemma.
Consider any node $v \ne z$ outside the gadget $D$. If $v$ is left unmatched in $M$ then we either have an alternating path $\rho_1 = (v,d_0)$-$(d_0,d_1)$-$(d_1,d_3)$
or an alternating path $\rho_2 = (v,d_0)$-$(d_0,d_2)$-$(d_2,d_1)$ with respect to $M$: the middle edge in each of these alternating paths belongs to $M$
and the third edge is a {\em blocking edge} with respect to $M$. Both $\rho_1$ and $\rho_2$ are $M$-alternating paths in $G_M$ that start from an $M$-exposed
node and end with a $(+,+)$ edge --- this is a forbidden structure for a popular matching (see Theorem~\ref{thr:characterize-popular}, path~(iii)).
Hence every node $v \ne z$ in $H$ has to be matched in $M$. This proves part~(2). \qed
\end{proof}
Since the total number of nodes in $H$ is odd, at least 1 node has to be left unmatched in any matching in $H$.
Lemma~\ref{new-lemma1} shows that the node $z$ will be left unmatched in any popular matching in $H$.
For any popular matching $M$ in $H$,
the matching $M$ restricted to $G'$ (recall that $G'$ is $H \setminus D$) has to be popular in $G'$, otherwise it would
contradict the popularity of $M$ in $H$. We will now show the following converse of Lemma~\ref{new-lemma1}.
\begin{lemma}
\label{new-lemma2}
If $G'$ admits a popular matching that matches all its nodes except $z$ then $H$ admits a popular matching.
\end{lemma}
\begin{proof}
Let $N$ be a popular matching in $G'$ that matches all its nodes except $z$.
Let $G'_N$ be the subgraph obtained by removing all edges labeled $(-,-)$ with respect to $N$ from $G'$.
Since $N$ is popular in $G'$, it satisfies the
necessary and sufficient conditions for popularity given in Theorem \ref{thr:characterize-popular}.
We claim $M = N \cup \{(d_0,d_1),(d_2,d_3)\}$ is a popular matching in $H$.
We will now show that $M$ obeys those conditions in the subgraph $H_M$ obtained by deleting edges labeled $(-,-)$ with respect to $M$.
The graph $H_M$ is the graph $G'_N$ along with some edges within the gadget $D$.
There is no edge in $H_M$ between $D$ and any node in $G'$ since every edge in $H$ between $D$ and a node in $G'$ is $(-,-)$.
This is because for any such edge $(d_0,v)$, the node $d_0$ prefers $d_1$ (its partner in $M$) to $v$ and similarly, $v$ prefers each
of its neighbors in $G'$ to $d_0$. Since $v \ne z$, we know that $N$ (and thus $M$) matches $v$ to one of its neighbors in $G'$.
It is easy to check that $\{(d_0,d_1),(d_2,d_3)\}$ satisfies the 3 conditions from Theorem \ref{thr:characterize-popular} in the subgraph of $D$
obtained by pruning $(-,-)$ edges.
We know that $N$ satisfies the 3 conditions from Theorem \ref{thr:characterize-popular} in $G'_N$.
Thus $M$ satisfies all these 3 conditions in $H_M$.
Hence $M$ is popular in $H$. \qed
\end{proof}
Thus we have shown that $H$ admits a popular matching if and only if $G'$ admits a popular matching that matches all nodes except $z$.
Since the latter problem is NP-hard, so is the former problem. Thus we have shown the following result.
\begin{theorem}
\label{main-thm}
Given a graph $G = (V,E)$ with strict preference lists, the problem of deciding if $G$ admits a popular matching or not is NP-hard.
\end{theorem}
\section{Popular matchings of minimum cost in bounded treewidth graphs}\label{sec:treewidth}
In this section, we show a polynomial time algorithm to compute a minimum cost popular matching in a roommates instance $G = (V,E)$ with arbitrary edge costs
under the assumption that $G$ has bounded treewidth.
Bounded treewidth is a classical assumption that often turns intractable problems into tractable ones. A typical example is Maximum Independent (Stable) Set (\texttt{MIS}), for which a polynomial-time algorithm exists in bounded treewidth graphs \cite{Bod}. \texttt{MIS} enjoys two nice properties, often shared by problems for which the bounded treewidth approach is successful. The first is \emph{monotonicity}: if $S$ is
an independent set in a graph $G$ and $G'$ is a subgraph of $G$, then the solution induced by $S$ on $G'$ is also feasible. The
second is \emph{locality}: in order to check if $S$ is an independent set, it suffices to verify, for each node of $S$, if any node of its neighborhood also belongs to $S$.
Interestingly, similar properties do not hold for popular matchings. Indeed, popularity is not a local condition, since it may depend on how nodes far away in the graph are matched. Moreover, if we take a graph $G$ and a popular matching $M$, the subset of $M$ contained in an induced subgraph of $G$ may not be popular. Examples with both those features can be easily constructed by building on the characterization of popular matchings given by Theorem \ref{thr:characterize-popular}.
Our technique to prove the tractability of the minimum cost popular matching problem in the bounded treewidth case is as follows. We assume wlog that all matchings have different costs. Given a vertex separator $S$ of $G$ and a connected component $X$, we define the family of \emph{$(S,X)$-locally popular} matchings. This contains all matchings that may potentially be extended to a popular matching in the whole graph by adding edges not incident to $X\cup S$ (a formal definition is given in Section~\ref{sec:wpm}). As $(S,X)$-locally popular matchings can be exponentially many even in graphs of bounded treewidth
, we cannot store all of them. Instead, we divide them in classes (which we call \emph{tipping points}), and show that, if a matching $M$ of a class can be completed to a $(S',X')$-locally popular matching for some sets with $X'\supseteq X$ and $X'\cup S' \supseteq X \cup S$ by adding some matching $M'$ not incident to $X\cup S$, then all matchings of the same class can be extended via the same matching $M'$ to a $(S',X')$-locally popular matching. Hence, it is enough to keep only a representative for each class --- the one of minimum cost, which we call the \emph{leader}. Finally, we show how to iteratively construct tipping points and their leaders by building on a tree decomposition of the graph (see Algorithm \ref{Algo}). In particular, if the graph has bounded treewidth, a tree decomposition of bounded width can be computed in linear time~\cite{Bo96}, there will be only polynomially many tipping points and leaders, and they can be built in polynomial time, see Theorem \ref{thr:main}.
\subsection{Additional definitions}
For $k \in {\mathbb{N}}$, we write $[k]:=\{1,\dots,k\}$. Recall that our input graph is $G = (V,E)$.
For $S\subseteq V$, we denote by $G\setminus S$ the subgraph of $G$ induced by $V\setminus S$. If $G\setminus S$ is disconnected, $S$ is said to be a \emph{vertex separator}. Given a matching $M$ on $G$ and a set $U\subseteq V$, the matching \emph{induced} by $M$ on $U$ is given by $E[U]\cap M$, and it is denoted by $M[U]$. For $u \in V$, $M(u)$ is the unique $v \in V$ such that $(u,v) \in M$ if such a $v$ exists, and $\emptyset$ otherwise.
Now also fix $U\subseteq V$. Given a matching $M$ of $G$, we say that $M$ is a \emph{$U$-matching} if for all edges $(u,v) \in M$, we have $\{u,v\}\cap U \neq \emptyset$. Given a matching $M$ of $G$, the \emph{$U$-matching induced by $M$} is the set $M_U=\{(u,v) \in M : \{u,v\}\cap U \neq \emptyset\}$.
Since all graphs we deal with are simple, throughout this section we represent paths and cycles as ordered sequences of nodes, with the first node of a cycle coinciding with the last. This also allows us to distinguish between the \emph{first} and \emph{last} node of a path. We will say that \emph{$e$ is an edge of $P$} if it is an edge between two consecutive nodes of $P$. A $U$-path $P$ is a path in $G$ where either the first or the last node of $P$ (or possibly both)
belongs to $U$, and all other vertices of $P$ do not lie in $U$ (i.e., if $P=(v_1,\dots,v_k)$, then $\emptyset \neq P\cap U \subseteq \{v_1,v_k\}$).
Two paths $P=(v_1,\dots,v_k)$, $P'=(v_1',\dots,v_{k'}')$ are called \emph{$U$-disjoint} if $P\cap P'\cap U=\emptyset$. $P$, $P'$ are said to be \emph{internally vertex-disjoint} if, for all $i \in [k]$ and $j \in [k']$, $v_i\neq v_j'$ with possibly the exception of $(i,j)\in \{(1,1),(1,k'),(k,1),(k,k')\}$.
Consider paths $P_1,P_2,\dots,P_q$ of $G$, $q\geq 2$, whose union contains at least two distinct vertices and with the following properties:
\begin{itemize}
\item $P_i \cap P_j= \emptyset$, unless $|i-j| \leq 1$ or $i,j \in \{1,q\}$;
\item If $q\geq 3$, for $i=1,\dots,q-1$, the last node of $P_i$ is the first node of $P_{i+1}$, and $P_i, P_{i+1}$ are otherwise disjoint. Moreover, $P_1$ and $P_q$ are disjoint, with possibly the exception of the last node of $P_q$ coinciding with the first node of $P_1$;
\item If $q=2$, the last node of $P_1$ coincides with the first node of $P_2$, the last node of $P_2$ may coincide with the first node of $P_1$, and paths $P_1$ and $P_2$ are otherwise disjoint.
\end{itemize}
The \emph{juxtaposition} of $P_1,\dots,P_q$ is the path (if the first node of $P_1$ and the last of $P_q$ are different) or cycle (otherwise) defined from $(P_1,P_2,\dots, P_q)$ as above by removing consecutive repeated vertices.
We assume that no two matchings of $G$ have the same cost. This can be achieved efficiently by standard perturbation techniques.
\subsection{Locally popular matchings, Configurations, and Tipping points}\label{sec:wpm}
In this section, we fix a graph $G = (V,E)$ together with a strict ranking of the neighbors of each node in $V$, a vertex separator $S$ of $G$, and a connected component $X$ of $G\setminus S$ (possibly $S=\emptyset$ and $X=V$).
Let $M$ be an $(X\cup S)$-matching of $G$. We will extensively work with graph $G_M[X \cup S]$ -- that is, the subgraph of $G_M$ induced by $X\cup S$. For a node $v$ in $S$ that is matched in $M$ to a neighbor outside $X \cup S$, the labels of edges of $G_M[X\cup S]$ incident to $v$ are a function of the edge $(v,M(v))$, however note that the edge $(v,M(v))$ is not in $G[X \cup S]$.
We say that $M$ is \emph{$(S,X)$-locally popular} if none of the structures (i), (ii), and (iii) from Theorem~\ref{thr:characterize-popular} is a subgraph of $G_M[X \cup S]$. We remark that a node that is not matched in $G_M[X\cup S]$ may still be $M$-covered (hence not $M$-exposed). If this happens, such a node cannot be the $M$-exposed node in the path (iii) from Theorem \ref{thr:characterize-popular}. The following simple proposition relates the definitions of popularity and $(S,X)$-local popularity.
\begin{prop}\label{prop:popular-will-be-weak}
Let $G,S,X$ be as above, $S'$ be a vertex separator of $G$, and $X'$ be one of the connected components of $G\setminus S'$ such that $X' \cup S' \supseteq X \cup S$. Then:
\begin{enumerate}
\item Let $M$ be a matching in $G$. Then $M$ is $(\emptyset,V)$-locally popular if and only if it is popular.
\item Let $M'$ be a $(S',X')$-locally popular matching. Let $M:=M'_{X \cup S}$. Then $M$ is an $(S,X)$-locally popular matching.
\end{enumerate}
\end{prop}
\begin{proof}
1. It follows by definition and from the fact that $G_M[\emptyset\cup V]=G_M$.
2. Suppose $M$ is not $(S,X)$-locally popular. Then, one of the structures from Theorem \ref{thr:characterize-popular} is a subgraph of $G_M[X \cup S]$ --- call it $P$. We claim that $P$ is a forbidden structure in $G_{M'}[X' \cup S']$ (again, in the sense of Theorem \ref{thr:characterize-popular}), hence $M'$ is not $(S',X')$-locally popular, a contradiction.
Indeed, $X\cup S \subseteq X' \cup S'$, and none of the edges of $M'\setminus M$ is incident to $X\cup S$ by definition. We deduce $G_M[X\cup S]=G_{M'}[X \cup S]$ and a node of $X\cup S$ is $M$-exposed if and only if it is $M'$-exposed. This concludes the proof. \qed
\end{proof}
We now introduce the concepts of \emph{configuration} and \emph{tipping point} in order to partition the family of $(S,X)$-locally popular matchings into a (small) number of classes. These definitions are related to the existence of certain structures that are not forbidden in a popular matching, but restrict the capability of extending locally popular matchings to popular matchings of the whole graph.
From now on, we also fix $M$ to be an $(X \cup S)$-matching of $G$.
Let ${\cal P}_M$ be the set of $M$-alternating paths of $G_M[X \cup S]$. We associate to each $P\in {\cal P}_M$ a two-dimensional \emph{parity} vector $\pi$, where the first component of $\pi$ is defined to be $0$ if the first edge of $P$ is a matching edge, $1$ otherwise. Similarly, the second component of the parity vector $\pi$ takes values $0$ or $1$, depending on whether the last edge of the path is a matching edge or not. We also associate to $P$ a two-dimensional \emph{level} vector $\ell=(e,p)$, where $e\in\{0,1,2\}$ is the number of $M$-exposed nodes of $P$, and $p \in \{0,1\}$ denotes the number of $(+,+)$ edges of $P$. If $P$ contains two or more $(+,+)$ edges, then we say that $P$ is of level $\infty$. Note that not all parity-level combinations are possible. Moreover, if $P$ is of level $\infty$, then $M$ is not $(S,X)$-locally popular. Let
\begin{equation}
\label{eq:U}
U=((u_1,v_1),(u_2,v_2),\dots,(u_k,v_k)),
\end{equation}
for some $k \in {\mathbb{N}}$, where $u_1,v_1,u_2,\dots,v_k \in S \cup \{\emptyset\}$, under the additional condition that $u_i\neq v_i$ for all $i \in [k]$, all pairs are different, and each node of $S$ can appear at most twice in the collection. Moreover, let $L=(\ell_1,\dots, \ell_k)$, $\Pi=(\pi_1,\pi_2,\dots,\pi_k)$ and, for $i \in [k]$, $\ell_i \in \{0,1,2\} \times \{0,1\}$ and $\pi_i \in \{0,1\}\times \{0,1\}$. The triple ${\cal C}=(U,L,\Pi)$ is called an \emph{$(S,X)$-configuration}.
We say that $M$ is \emph{active at ${\cal C}$} if there exist pairwise $X$-disjoint $S$-paths $P^1,\dots,P^k \in {\cal P}_M$, such that, for $i \in [k]$, $P^i$: is of level $\ell_i$ and parity $\pi_i$; starts at $u_i \in S$ if $u_i\neq \emptyset$ and at some node of $X$ otherwise; ends at $v_i \in S$ if $v_i \neq \emptyset$ and at some node of $X$ otherwise. We call those paths the \emph{certificate of ${\cal C }$ at $M$}\footnote{Note that, if $M$ is active at ${\cal C}$, it is also active at the configurations e.g. obtained by permuting entries of $U$ (and of $L$ and $\Pi$ accordingly). This causes some redundancy, yet this will not affect our analysis, so we do not eliminate it.}.
Let $\{(U_i,L_i,\Pi_i)\}_{i=1,\dots,q}$ be the collection of all $(S,X)$-configurations at which $M$ is active. We call
$$(\{(U_i,L_i,\Pi_i)\}_{i=1,\dots,q},M_S)$$ the \emph{$(S,X)$-tipping point of $M$}.
\begin{prop}
\label{prop:poly-many-tipping}
Let $G,X,S$ be as above. Let $M$ be an $(S,X)$-locally popular matching. The $(S,X)$-tipping point of $M$ is uniquely defined. On the other hand, there exists a function $g: {\mathbb{N}} \rightarrow {\mathbb{N}}$ such that the number of $(S,X)$-configurations is bounded by $g(|S|)$, and the collection of $(S,X)$-tipping points of $M$, where $M$ ranges over all $(S,X)$-locally popular matchings, has size at most $g(|S|)|V|^{|S|}$.
\end{prop}
\begin{proof}
The first statement follows by definition. For the second and third: the number of $(S,X)$-configurations $(U,L,\Pi)$ is a function of the size of $S$ only, since all pairs from $U$ are different, and each node of $S$ can appear in at most two pairs from $U$. Moreover, the number of $S$-matchings of $G$ is upper bounded by $|V|^{|S|}$. \qed
\end{proof}
\begin{prop}\label{pro:match-same}
Let $G,X,S$ be as above. Let $M$ be an $(S,X)$-locally popular matching and ${\cal T}$ be the $(S,X)$-tipping point of $M$. Let $N$ be another $(S,X)$-locally popular matching whose $(S,X)$-tipping point is also ${\cal T}$. Let $e\in \delta(v)$ for some $v \in S$. Then:
\begin{enumerate}
\item $e \in M$ if and only if $e \in N$.
\item Suppose $e \notin M$ and let $\star \in \{+,-\}$ be the label of $e$ at $v$ wrt $M$. Then $e \notin N$ and $\star$ is also the label of $e$ at $v$ wrt $N$.
\end{enumerate}
\end{prop}
\begin{proof}
Both statements immediately follow from $M_S = N_S$, which holds by definition of tipping point. \qed
\end{proof}
\subsection{Leaders}
Fix $G,S,X,M$ as in the previous section, and let ${\cal T}$ be the $(S,X)$-tipping point of $M$. $M$ is said to be the \emph{${\cal T}$-leader} if it is the one of minimum cost among all $(S,X)$-locally popular matchings whose $(S,X)$-tipping point is ${\cal T}$.
Due to the initial perturbation of costs, note that there is at most one ${\cal T}$-leader for each tipping point ${\cal T}$.
The following crucial lemma shows that, in order to find a min cost popular matching, it suffices to consider matchings that induce $(X\cup S)$-matchings that are ${\cal T}$-leaders, for any $(S,X)$-tipping point ${\cal T}$. The proof of Lemma~\ref{lem:only-leaders-left-alive} is given in Section~\ref{sec:lemma15-proof}.
\begin{lemma}\label{lem:only-leaders-left-alive}
Suppose we are given $G,X,S,M$ as above. Let ${\cal T}$ be the $(S,X)$-tipping point of $M$, and assume that $M$ is not the ${\cal T}$-leader. Let $S'$ be a vertex separator of $G$, $X'$ a connected component of $G\setminus S'$ with the property that $X'\supseteq X$ and $S' \cup X' \supseteq S \cup X$ (possibly $S'=\emptyset$ and $X'=V$). Let $M'$ be a
$(S',X')$-locally popular matching such that $M'_{X\cup S}=M$. Let ${\cal T}'$ be the $(S',X')$-tipping point of $M'$. Then $M'$ is not a ${\cal T}'$-leader.
\end{lemma}
\subsection{Tree decomposition}
Let $G = (V,E)$ be a graph. A \emph{tree decomposition} of $G$ is a pair $(T, {\cal B})$ where $T$ is a tree and ${\cal B}=\{B(i):i\in V(T)\}$ is a family of subsets of $V(G)$, called \emph{bags}, one for each vertex of $T$, satisfying the following:
\begin{enumerate}
\item For each $(u,v) \in E$, there is at least one bag $B \in {\cal B}$ such that $u,v\in B$.
\item If $i\neq j \neq k \in V(T)$ are such that $k$ is on the unique path from $i$ to $j$ in $T$ then $B(i)\cap B(j) \subseteq B(k)$.
\end{enumerate}
Note that the second property implies that, for any vertex $u\in V(G)$, the bags which contain $u$ form a subtree of $T$. We will sometimes abuse notation and denote by $B$ both a vertex of $V(T)$ and the bag corresponding to it.
The \emph{width} of a tree decomposition $(T, {\cal B})$ is $\max\{|B|-1:B\in {\cal B}\}$. The \emph{treewidth} of $G$ is the minimum integer $\omega$ such that there is a tree decomposition of $G$ of width $\omega$.
Let $G = (V,E)$ be a graph and $(T, {\cal B})$ be a tree decomposition of $G$ of width $\omega$. Wlog we can assume that, for each pair of bags $B,B'$ adjacent in $T$, $B\cap B'$ is a vertex separator of $G$. Form a directed rooted tree by picking an arbitrary vertex as the root and orienting the remaining edges towards the root, and call this a \emph{directed tree decomposition}. In the directed tree, each node $B$ other than the root node has exactly one \emph{successor} $S(B)$, i.e., there exists exactly one node $S(B)$ such that $(B,S(B))$ is an arc of the directed tree decomposition. If $B$ is the root, we set $S(B)=\emptyset$. We also say that $B$ is a \emph{predecessor} of $S(B)$ and notice that a bag may have multiple predecessors, and has none if and only if it is a leaf of $T$.
\noindent{\em Dichotomic tree decomposition.}
Let $G$ be a graph and $(T,{\cal B})$ a directed tree decomposition of $G$ of width $\omega$. We can transform the directed tree decomposition of $G$ into a directed tree decomposition of the same graph and width where every node has indegree at most $2$. We call such a tree decomposition \emph{dichotomic}. Indeed, suppose edges $(B_1,B), (B_2,B), \dots, (B_t,B)$ with $t\geq 3$ are part of the directed tree decomposition. Then we can create a copy $\bar B$ of $B$, add edge $(\bar B,B)$, and split the edges entering $B$ as follows: $(B_1,\bar B), \dots, (B_{\lceil t/2\rceil},\bar B)$ and $(B_{\lceil t/2\rceil+1},B), \dots, (B_{t},B)$. The indegree of $B$ and $\bar B$ is at most $\lfloor t/2\rfloor+1<t$. Notice that the digraph we obtain is still a tree decomposition of $G$, since each edge of $G$ is still covered by a bag, and the bags which contain any vertex $v\in G$ still form a continuous subtree.
If we repeat the above operation for a non-dichotomic tree once for each original node of indegree at least $3$, the maximum indegree over all nodes of the tree goes from $t>2$ to $\lfloor t/2\rfloor+1$, while the number of nodes is at most doubled. Hence, we can iterate the operation so as to obtain a tree decomposition with at most a quadratic number of vertices, all of which have indegree at most $2$.
From here on, we assume without loss of generality that our directed tree decomposition is dichotomic. Let $T'$ be a subtree of $T$, and $V(T')$ the set of nodes contained in at least one bag of $T'$. We say that $T'$ is a \emph{closed subtree} of $T$ if, whenever $B \in V(T')$ and $B'$ is a predecessor of $B$, then $B'$ also belongs to $T'$. By connectivity, for each node $B$ of a closed subtree $T'$, $S(B)$ also belongs to $T'$, with the exception of at most one node that we call the \emph{head} of $T'$ and denote by $H(T')$. If $T'\neq T$, the successor of $H(T')$ exists and it is also called the \emph{successor of $T'$} and denoted by $S(T')$. Note that each bag $B$ is the head of exactly one closed subtree of $T$, that we denote by $T_B$.
\begin{remark}\label{rem:tree-decomposition}
Let $(T,{\cal B})$ be a dichotomic directed tree decomposition. Then the following holds:
let $T'$ be a closed subtree of $T$ and $B$ be the head of $T'$. The removal of $B$ partitions $T'\setminus B$ into at most $2$ closed subtrees. The successor of each of those subtrees is $B$.
\end{remark}
\subsection{The algorithm}
We now give our algorithm for computing a minimum weight popular matching, see Algorithm~\ref{Algo}. Note that Algorithm~\ref{Algo} relies on the subroutine \texttt{Update} described in Algorithm~\ref{algo:update}, and the implementations of some other subroutines are not completely defined. We give a formal description of those together with a complexity analysis in the proof of Theorem \ref{thr:main}.
Algorithm~\ref{Algo} takes as input a graph $G$ with strict preference lists as usual, and a dichotomic directed tree decomposition $(T,{\cal B})$ of $G$. It iteratively constructs the sequence of closed subtrees $T_B$ of $T$, with the first $T_B$ corresponding to a leaf $B$ of $T$, and the last to the root. For each $T_B$, let $S=B \cap S(B)$ and $X=V(T_B)\setminus S$. The algorithm constructs and stores in ${\cal L}_B$ all the pairs $(M,{\cal T})$, where ${\cal T}$ is an $(S,X)$-tipping point and $M$ is the ${\cal T}$-leader. This set can be found by building on the corresponding sets for the (at most two) predecessors of $B$. Finally, of all matchings $M$ such that $(M,{\cal T}) \in {\cal L}_{B}$ -- with $B$ being the root -- the one of minimum cost is output. If at the end of any iteration when a bag $B$ is flagged, we have ${\cal L}_B=\emptyset$, we deduce that $G$ has no popular matching.
With a little abuse of notation, we will write $M \in {\cal L}_B$ if $(M,{\cal T}) \in {\cal L}_B$ for some ${\cal T}$. Similarly, we write ${\cal L}_B={\cal L}_B \cup \{M\}$ to mean that $(M,{\cal T})$ is added to ${\cal L}_B$ for an appropriate ${\cal T}$, and similarly for ${\cal L}_B \setminus \{M\}$.
\begin{algorithm}[h!]
\caption{ }
\label{Algo}
\begin{algorithmic} [1]
\REQUIRE A graph $G$, together with, for each node $v \in V$, a strict ranking of the neighbors of $v$. A dichotomic directed tree decomposition $(T,{\cal B})$ of $G$.
\ENSURE A popular matching of minimum cost in $G$.
\STATE {For all $B \in {\cal B}$, label $B$ as \emph{unflagged}.}
\STATE{Choose an unflagged bag $B$ whose predecessors are flagged, and flag $B$.}\label{st:pick-a-bag}
\STATE{Let $T_B$ be the closed subtree of $T$ whose head is $B$, and set ${\cal L}_B=\emptyset$.}
\STATE{Let $S=B\cap S(B)$, $X=V(T_B)\setminus S$.}
\IF{$B$ has no predecessor in $T$}\label{step:if-leaf}
\FOR{all $B$-matchings $M^*$ of $G$}
\IF{$M^*$ is an $(S,X)$-locally popular matching}\label{step:if1}
\STATE{\texttt{Update}($M^*$, ${\cal L}_B$)}
\ENDIF
\ENDFOR
\ELSE
\STATE{Let $S_1=B\cap B_1$ and (possibly) $S_2=B\cap B_2$, where $B_1$ and (possibly) $B_2$ are the predecessors of $B$.}
\FOR{all $B$-matchings $M$ of $G$, all $M_1\in {\cal L}_{B_1}$ and (possibly) $M_2 \in {\cal L}_{B_2}$}
\IF{$(M_1)_{S_1}=M_{S_1}$ and (possibly) $(M_2)_{S_2}=M_{S_2}$}\label{st:same-induced}
\STATE{Let $M^*=M \cup M_1 \cup M_2$}\label{st:M-star}
\IF{$M^*$ is an $(S,X)$-locally popular matching of $G$}
\STATE{\texttt{Update}($M^*$, ${\cal L}_B$)}
\ENDIF
\ENDIF
\ENDFOR
\ENDIF
\IF{${\cal L}_B=\emptyset$}
\STATE{output: $G$ has no popular matching.}
\ELSIF{there is a bag $B \in {\cal B}$ that is unflagged}
\STATE{Go to Step \ref{st:pick-a-bag}.}
\ENDIF
\STATE{Let $B$ be the head of $T$. Output the matching of minimum cost from ${\cal L}_B$.}
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[h]
\begin{algorithmic}
\caption{\texttt{Update}}\label{algo:update}
\REQUIRE $M^*$, ${\cal L}_B$
\STATE{Let ${\cal T}$ be the $(S,X)$-tipping point of $M^*$.}\label{st:find-T}
\IF{there exists $M' \in {\cal L}_B$ whose tipping point is ${\cal T}$}
\IF{$c(M^*)<c(M')$}
\STATE{Set ${\cal L}_B={\cal L}_B \setminus \{M'\}\cup \{M^*\}$.}
\ENDIF
\ELSE
\STATE{Set ${\cal L}_B={\cal L}_B \cup \{M^*\}$.}
\ENDIF
\end{algorithmic}
\end{algorithm}
\subsection{Analysis}
The goal of this section is to prove the following result.
\begin{theorem}\label{thr:main}
Algorithm \ref{Algo} is correct. If the treewidth of $G = (V,E)$ is upper bounded by a constant~$\omega$, then it can be implemented to run in time $O(|V|^{3\omega +7})$.
\end{theorem}
We assume throughout the proof that every bag $B$ that is not a leaf has two predecessors, as the (simpler) case where some $B$ has only one predecessor follows in a similar fashion. For a bag $B$ we write $S:=B\cap S(B)$ and $X:=V(T_B)\setminus S$; if $B$ is not a leaf, we denote by $B_1$ and $B_2$ its predecessors, and write $S_i:=B\cap B_i$, $X_i=V(T_{B_i})\setminus S_i$ for $i=1,2$. We start by proving the correctness of Algorithm \ref{Algo}. We show that, at the end of the iteration where bag $B$ is flagged,
\begin{itemize}
\item[$(*)$] ${\cal L}_B$ contains exactly all ${\cal T}$-leaders, for all $(S,X)$-tipping points ${\cal T}$ for which a ${\cal T}$-leader exists.
\end{itemize} Suppose $(*)$ is proved. Then, when $B$ is the root, ${\cal L}_B$ contains only popular matchings (by Proposition~\ref{prop:popular-will-be-weak}), and the one of minimum cost among those is the popular matching of minimum cost. Again by Proposition \ref{prop:popular-will-be-weak}, if ${\cal L}_B=\emptyset$ at the end of the iteration where $B$ is flagged, then $G$ has no popular matching, and the output of Algorithm \ref{Algo} is again correct.
The proof of $(*)$ is by induction on the number of nodes $n_B$ of $T_B$. If $n_B=1$, then the condition from the {\bf if} statement in Step \ref{step:if-leaf} is verified. In this case, the statement is immediate, since we enumerate over all possible $B$-matchings of $G$, check those that are $(S,X)$-locally popular, and for each $(S,X)$-tipping point ${\cal T}$, keep the $(S,X)$-locally popular matching of minimum cost active at ${\cal T}$.
Now suppose $n_B > 1$. By induction hypothesis, for $i=1,2$, all matchings that are stored in ${\cal L}_{B_i}$ are exactly all ${\cal T}$-leaders, for all $(S_i,X_i)$-tipping points ${\cal T}$ for which a ${\cal T}$-leader exists (i.e., there is at least a $(S_i,X_i)$-locally popular matching active at ${\cal T}$). Now let ${\cal T}$ be an $(S,X)$-tipping point for which a ${\cal T}$-leader $\hat M$ exists. It suffices to show that one of the matchings $M^*$ constructed at Step \ref{st:M-star} is indeed $\hat M$.
Since $\hat M$ is $(S,X)$-locally popular and $X_i \cup S_i \subseteq X \cup S$, matching $M_i:=\hat M_{X_i \cup S_i}$ is also $(S_i,X_i)$-locally popular by Proposition \ref{prop:popular-will-be-weak}. By Lemma \ref{lem:only-leaders-left-alive}, $M_i$ is a $(S_i,X_i)$-leader. By induction, $M_i \in {\cal L}_{B_i}$. On the other hand, $M:=\hat M_{B}$ is a $B$-matching of $G$ such that $M_{S_i}=(M_{i})_{S_i}$ for $i=1,2$. Since we enumerate all $B$-matchings of $G$, as well as all matchings from ${\cal L}_{B_1}$ and ${\cal L}_{B_2}$, matching $\hat M$ is eventually enumerated.
\noindent{\em Running time analysis.}
We now bound the running time of Algorithm~\ref{Algo}. We will use the following general fact proved in the claim below: if we are given an $(X\cup S)$-matching $M$ in a graph $H$ and $|X\cup S|$ is bounded, then one can check $(S,X)$-local popularity of $M$. If $M$ is $(S,X)$-locally popular, then we can efficiently find the $(S,X)$-tipping point at which $M$ is active.
\begin{new-claim}\label{cl:everything-is-checkable}
Let $H = (U,F)$ be a graph, $S$ a vertex separator of $H$, $X$ a connected component of $H\setminus S$. Let $M$ be a $(X \cup S)$-matching of $H$. Assume $|X\cup S|$ is upper bounded by a constant. Then in $O(|F|)$ time one can:
\begin{enumerate}
\item check if $M$ is $(S,X)$-locally popular in $H$ and, if it is,
\item find the $(S,X)$-tipping point (in $H$) at which $M$ is active.
\end{enumerate}
\end{new-claim}
\begin{proof}
Building graph $H_M[X\cup S]$ takes $O(|F|)$ time. Since $|X\cup S|$ is upper bounded by a constant, it takes constant time to enumerate all paths and cycles of $H_M[X \cup S]$, and to check if any of these violates the definition of $(S,X)$-local popularity. In case $M$ is indeed $(S,X)$-locally popular, for any family of paths ${\cal P}$ in $H_M[X\cup S]$ and $(S,X)$-configurations ${\cal C}$, one can check in constant time if ${\cal P}$ is a certificate for $M$ at ${\cal C}$. Since by Proposition \ref{prop:poly-many-tipping} the number of $(S,X)$-configurations is upper bounded by $g(\omega+1)$, which is a constant by hypothesis, the claim follows.
$\lozenge$
\end{proof}
Observe that $|B|\leq \omega +1$, so enumerating all $B$-matchings takes time $O(|V|^{\omega+1})$.
Because of Claim \ref{cl:everything-is-checkable} and Step \ref{step:if1} (in Algorithm~\ref{Algo}), finding the tipping point of any
$B$-matching $M^*$ can be performed in time $O(|E|)=O(|V|^2)$. Moreover, once the tipping point of ${M^*}$ is computed, the \texttt{Update} function can be implemented to run in time $O(|V|^{\omega+1})$, since ${\cal L}_B$ at each step will contain at most one matching per tipping point, and by Proposition \ref{prop:poly-many-tipping}, there are at most $g(\omega+1)|V|^{\omega +1}$ many tipping points.
We conclude that, when $n_B=1$, the iteration when $B$ is flagged runs in time $O(|V|^{2\omega +2})$. We now prove, by induction on $n_B$, that the iteration when any $B$ is flagged can also be implemented to run in time $O(|V|^{3\omega +5})$, the base case having been just proved. Multiplying this by $O(|V|^2)$ (the number of bags of the tree decomposition), we obtain the desired bound.
Assume $n_B>1$. Because of what was discussed above, given an $(S,X)$-configuration ${\cal C}$ and a matching $M^*=M\cup M_1 \cup M_2$,
where $M$ is a $B$-matching of $G$ and for $i=1,2$ $M_i \in {\cal L}_{B_i}$,
it is enough to give an upper bound on the time needed to decide if $M^*$ is $(S,X)$-locally popular and if it is active at ${\cal C}$.
Recall that the number of $B$-matchings is $O(|V|^{\omega+1})$, while $|{\cal L}_{B_i}|=O(|V|^{\omega+1})$ and the number of $(S,X)$-configuration
is at most $g(\omega + 1)$ by Proposition \ref{prop:poly-many-tipping}.
The condition from Step \ref{st:same-induced} can be verified in time $O(|E|)$. Hence, we assume that $(M_1)_{S_1}=M_{S_1}$ and $(M_2)_{S_2}=M_{S_2}$. We start with a simple claim.
\begin{new-claim}\label{cl:all-paths-are-ivd}
For $i=1,2$, let ${\cal C}_i=(U_i,L_i,\Pi_i)$ be a configuration at which $M_i$ is active, and let ${\cal P}_i$ be a certificate of ${\cal C}_i$ at $M_i$. Then all paths from ${\cal P}_1,{\cal P}_2$ are pairwise internally vertex-disjoint.
\end{new-claim}
\begin{proof}
By definition of certificate, paths from ${\cal P}_1$ are pairwise internally vertex-disjoint, and similarly so are paths from ${\cal P}_2$. Now let $P_1 \in {\cal P}_1$, $P_2 \in {\cal P}_2$. For $i=1,2$, $P_i\subseteq V(T_{B_i})$ and $P_i$ is an $S_i$-path. Since $V(T_{B_1})\cap V(T_{B_2})\subseteq S_1 \cup S_2$, the claim follows.
$\lozenge$
\end{proof}
Now fix an $(S_1,X_1)$-configuration ${\cal C}_1=(U_1,L_1,\Pi_1)$ in $G$ at which $M_1$ is active, and an $(S_2,X_2)$-configuration ${\cal C}_2=(U_2,L_2,\Pi_2)$ in $G$ at which $M_2$ is active. Consider the graph $H$ and matching $M_H$ obtained as follows. Start from $H=G_M[B]$ and the corresponding matching $M_H=M[B]$, and let $(u,v)$ be the first pair from $U_1$. Assume $u,v$ are matched by $M_H$, $\ell_1=(0,1)$, $\pi_1=(0,0)$, the other cases following in a similar fashion.
Add to $H$ and $M_H$ new nodes $u',w',z',v'$, matching edges $(u',w')$, $(z',v')$, $(+,+)$ edge $(w',z')$, and $(+,-)$ edges $(u,u')$ and $(v,v')$. Note that $(u,u',w',z',v',v)$ is a $M_H$-alternating $S_1$-path of parity $\pi_1$ and level $\ell_1$ starting at $u$ and ending at $v$. We call it the \emph{shortcut} of the path with endpoints $u,v$. Repeat this for all pairs from $U_1$ and $U_2$, adding at each time new nodes and an appropriate path. Note that this adds to $H$ a bounded number of nodes. This means that the graph $H$ and matching $M_H$ can be constructed in time $O(|E|)$.
\begin{new-claim}\label{cl:almost-there}
Let $M^*$ be an $(S\cup X)$-matching. $M^*$ is not an $(S,X)$-locally popular matching in $G$ if and only if there exist an $(S_i,X_i)$-configuration ${\cal C}_i$ at which $M_i$ is active for $i=1,2$, such that, if we construct graph $H$ and matching $M_H$ as above, then $M_H$ is not $(S,V(H)\setminus S)$-locally popular in $H$.
\end{new-claim}
\begin{proof}
Consider the forbidden subgraph $P$ (from Theorem \ref{thr:characterize-popular}) of $G_{M^*}[X \cup S]$, whose existence certifies that $M^*$ is not $(S,X)$-locally popular in $G$, and take $P$ that is minimal with this property. Using an immediate modification to Algorithm \ref{algo:decompose} (given in Section~\ref{sec:lemma15-proof}), we can write $P$ as the juxtaposition of $(P_1,P_2,\dots,P_k)$ where each $P_j$ is: either an $S_i$-path contained in $G[X_i \cup S_i]$ for $i\in \{1,2\}$, or a path contained in $G[B]$. Collect all such $S_1$-paths from $(P_1,\dots,P_k)$ not contained in $G[B]$ in ${\cal P}_1$, and all $S_2$-paths from $(P_1,\dots,P_k)$ not contained in $G[B]$ in ${\cal P}_2$. One easily checks that ${\cal P}_i$ form the certificate of a certain $(S_i,X_i)$-configuration ${\cal C}_i$ at $M_i$.
Now consider the graph $H$ and matching $M_H$ obtained from configurations ${\cal C}_1$ and ${\cal C}_2$. By replacing each path from ${\cal P}_1$ and ${\cal P}_2$ with the corresponding shortcut, we deduce that $M_H$ is not an $(S,V(H)\setminus S)$-locally popular matching in $H$.
The opposite direction follows in a similar (inverse) fashion: start from a forbidden path in $H$ that certifies that $M_H$ is not $(S,V(H)\setminus S)$-locally popular in $H$, write it as $(P_1,\dots,P_k)$, replace each of those subpaths with the appropriate certificate paths so as to obtain an $M^*$-alternating forbidden path by Claim \ref{cl:all-paths-are-ivd}.
$\lozenge$
\end{proof}
The proof of the following claim follows in a similar fashion to the proof of Claim \ref{cl:almost-there}.
\begin{new-claim}\label{cl:there,there}
Let $M^*$ be an $(S,X)$-locally popular matching in $G$ and let ${\cal C}$ be an $(S,X)$-configuration. Then $M^*$ is active at ${\cal C}$ if and only if there exist a $(S_i,X_i)$-configuration ${\cal C}_i$ at which $M_i$ is active for $i=1,2$ such that if we construct graph $H$ and matching $M_H$, then $M_H$ is active at ${\cal C}$ (here we interpret ${\cal C}$ as an $(S,V(H)\setminus S)$-configuration in $H$, which is allowed since $S\subseteq V(H)$).
\end{new-claim}
Because of Claim \ref{cl:almost-there}, we can check if $M^*$ is an $(S,X)$-locally popular matching in $G$ by checking, for each pair of $(S_i,X_i)$-configurations ${\cal C}_i$ at which $M_i$ is active for $i=1,2$, if the corresponding $M_H$ is $(S,V(H)\setminus S)$-locally popular in $H$. For $i=1,2$, the number of $(S_i,X_i)$-configurations is at most $g(\omega+1)$. Since the number of nodes of $H$ is bounded (we start with a bounded set of nodes and we add a bounded number of new nodes), testing if $M_H$ is $(S,V(H)\setminus S)$-locally popular in $H$ can be done in time $O(|E|)=O(|V|^2)$ by Claim \ref{cl:everything-is-checkable}. Similarly, if $M^*$ is $(S,X)$-locally popular in $G$, we can find its tipping point in time $O(|V|^2)$ by repeatedly applying Claims~\ref{cl:there,there} and \ref{cl:everything-is-checkable}. Hence, the iteration when $B$ is flagged can be performed in time $O(|V|^{3\omega +5})$. This concludes the proof of Theorem~\ref{thr:main}.
\subsection{Proof of Lemma~\ref{lem:only-leaders-left-alive}}
\label{sec:lemma15-proof}
Let $N$ be the $(S,X)$-locally popular matching that is the ${\cal T}$-leader. This implies that $M_S=N_S$, and $N$ is an $(X\cup S)$-matching. Define $N':=N \cup (M'\setminus M)$, and notice this is a disjoint union. Hence, $N'_{X \cup S}=N$. We will now show that $N'$ is a $(S',X')$-locally popular matching whose $(S',X')$-tipping point is ${\cal T}'$. We then have:
$$c(N') = c(N) + c(N'\setminus N) < c(M) + c(M'\setminus M) = c(M'),$$ concluding the proof. We start with some claims, the first two of which immediately follow from construction.
\begin{new-claim}\label{cl:sameG}
$G_{M'}[X \cup S]=G_M[X\cup S]$ and $G_{N'}[X\cup S]=G_{N}[X \cup S]$.
\end{new-claim}
\begin{new-claim}\label{cl:reverseM'}
$M'=M\cup(N'\setminus N)$, and the latter is a disjoint union.
\end{new-claim}
\begin{new-claim}\label{cl:one}
Let $e \in E$ not be incident to a node of $X$.
\begin{enumerate}
\item[a)]\label{first} $e \in N'$ if and only if $e \in M'$.
\item[b)]\label{second} Let $e \notin N'$. Then $e \in E(G_{N'})$ if and only if $ e \in E(G_{M'})$. Moreover, if $e \in E(G_{N'})$, then it has the same labels in $G_{N'}$ and in $G_{M'}$.
\end{enumerate}
\end{new-claim}
\begin{proof}
a) Suppose $e \in N'$. If $e \in N$, then $e \in N_S = M_S \subseteq M \subseteq M'$, where equality holds by hypothesis and inclusions by definition. Else, $e \in (N'\setminus N)\subseteq M'$. The argument can be reversed to show the opposite direction.
b) Suppose $e=(u,v) \notin N'$. By part a), $e \notin M'$. If $u \notin S$, then $N'(u)=M'(u)$ by part a). We deduce that the label of $e$ at $u$ coincides in $M'$ and $N'$. If instead $u \in S$, then the label of $e$ at $u$ in $G_{M}$ and $G_{N}$ coincide by Proposition \ref{pro:match-same}. The claim follows since $M'_S=M_S=N_S=N'_S$.
$\lozenge$ \end{proof}
\begin{new-claim}\label{cl:matching,indeed}
$N'$ is an $(X' \cup S')$-matching of $G$.
\end{new-claim}
\begin{proof}
Let us first show that it is a matching of $G$. Recall that $M'_{X \cup S}=M$. Hence, $M'\setminus M$ has no edge incident to $X \cup S$. Since by hypothesis $N$ is an $(X \cup S)$-matching, $N$ has no edge incident to nodes other than $X \cup S$. Hence $N'$ is the union of two node-disjoint matchings of $G$, hence a matching.
Now by hypothesis, $S \cup X \subseteq S' \cup X'$. Hence, $N$ has no edge in $G\setminus (S' \cup X')$. Moreover, $M'$ is by hypothesis an $(S',X')$-locally popular matching, so it is an $(S'\cup X')$-matching. Hence, $N'$ is an $(S'\cup X')$-matching.
$\lozenge$\end{proof}
\begin{new-claim}\label{cl:fromPtoPhat}
Let $k \in {\mathbb{N}}$ and $P^1 ,\dots, P^k \in {\cal P}_{M'}$ be pairwise internally vertex-disjoint and $X$-disjoint. Assume that, for $j \in [k]$, path $P^j$ is of level $\ell_j \in \{0,1,2\} \times \{0,1\}$ and parity $\Pi_j \in\{0,1\}\times\{0,1\}$, that every node appears at most twice in $P^1,\dots,P^k$, no pair of paths have exactly the same endpoints, and no path is contained in $X$.
Then there exist pairwise internally vertex-disjoint and $X$-disjoint paths $Q^1,\dots,Q^k \in {\cal P}_{N'}$ with the following properties:
\begin{enumerate}
\item For $j \in [k]$, $Q^j$ is of level $\ell_j$ and parity $\Pi_j$, and $P^j \cap S'=Q^j \cap S'$.
\item For $j \in [k]$, let $u_j$ (resp. $v_j$) be the first (resp. last) node of $P^j$. If $u_j$ (resp. $v_j$) $\notin X$, then $u_j$ (resp. $v_j$) is the first (resp. last) node of $Q^j$. If $u_j$ (resp. $v_j$) $\in X$, the first (resp. last) node of $Q^j$ also belongs to $X$.
\end{enumerate}
Moreover,
\begin{enumerate}
\item[3.]
1,2 also hold if we switch the roles of $M'$ and $N'$.
\end{enumerate}
\end{new-claim}
\begin{proof}
The proof of the claim will only assume that $M$ and $N$ are $(S,X)$-locally popular matchings with the same $(S,X)$-tipping point. Recall that $N'=N\cup (M'\setminus M)$, $M'=M\cup(N'\setminus N)$ (by Claim~\ref{cl:reverseM'}), and that those unions are disjoint. Therefore, the roles of $M$ and $N$ (hence those of $M'$ and $N'$) can be exchanged and the conclusions preserved. This proves 3 (assuming 1,2).
Let us consider any path $P$ from $P^1,\dots,P^k$, where we omit the superscript for the sake of readability. If $P \cap X=\emptyset$, we write $P_\infty:=P$. Else, we write $P$ as the juxtaposition of $P_0,P_1,P_2,\dots, P_q,P_\infty$, defined through the procedure described in Algorithm \ref{algo:decompose}. Note that some of those paths may consist of a single node. We call them \emph{trivial}. Note that a trivial path can be removed from the juxtaposition without changing the resulting $P$. Hence, in the following, we assume that all paths are non-trivial (without changing the subscripts of the remaining non-trivial paths), and we let $q$ be the subscript of the last non-trivial path (other than $\infty$).
\begin{algorithm}[h!]
\caption{ }
\label{algo:decompose}
\begin{algorithmic} [1]
\STATE {Let $v$ be the first vertex of $P$.}
\IF{$v \in X$}\label{step:if-X}
\STATE{Let $P_0=\{v\}$ and $u=v$.}
\ELSE
\STATE{Let $u'$ be the first vertex of $P$ that is contained in $X$, and $u$ the predecessor of $u'$ in $P$. Note that $u \in S$. Let $P_0$ be the subpath of $P$ between $v$ and $u$ (possibly $u=v$ and $P_0=\{u\}$).}
\ENDIF
\STATE{Let $i=1$.}
\STATE{Starting from $u$, traverse $P$ until the first node $v \in S$, $v\neq u$ is encountered.}\label{step:iterative}
\IF{such $v$ does not exist}
\STATE{Set $q=i-1$ and go to Step \ref{step:output}.}
\ELSE{}
\STATE{Let $P_i$ be the subpath between $u$ and $v$. Set $i=i+1$ and $u=v$.}
\STATE{Starting from $u$, traverse $P$ until the first node $u' \in X$ is encountered.}
\IF{such $u'$ does not exist}
\STATE{Set $q=i-1$ and go to Step \ref{step:output}.}
\ELSE{} \STATE{Let $P_i$ be the subpath of $P$ between $u$ and the predecessor $v$ of $u'$ in $P$. Note that $v \in S$ and possibly $v=u$.}
\STATE{Set $u=v$, $i=i+1$, and go to Step \ref{step:iterative}.}
\ENDIF
\ENDIF
\STATE{Let $P_\infty$ be the subpath of $P$ between $u$ and the last node of $P$.}\label{step:output}
\STATE{Output $P_0,P_1,\dots,P_{q},P_\infty$.}
\end{algorithmic}
\end{algorithm}
The following fact immediately holds by construction.
\begin{fact}\label{fact:basic}
Let $\{P_i\}_{i\in [q]\cup\{0,\infty\}}$ be the output of Algorithm \ref{algo:decompose} on input $P=P^j$ for some $j \in [k]$. Then: (i) $P$ is the juxtaposition of $P_0,P_1,\dots,P_{q},P_\infty$. (ii) For all $i \in [q] \cup \{0,\infty\}$, $P_i \in {\cal P}_{M'}$. (iii) For $i\in {\mathbb{N}}$, $i \geq 2$, the first and last nodes of $P_i$ belong to $S$. (iv) For $i \in {\mathbb{N}}$, $i$ odd, $P_i$ is an $S$-path and $P_i\subseteq X \cup S$. (v) For $i$ even, $P_i \cap X=\emptyset$. (vi) Assume $q\geq 1$. Then $P_1$ ends at a node of $S$, and starts either at a node of $X$ (if the {\bf if} condition in Step \ref{step:if-X} of Algorithm \ref{algo:decompose} is satisfied), or at a node of $S$ (otherwise).
(vii) If $P_\infty \cap X \neq \emptyset$, then $P_\infty$ is an $S$-path of $G_{M'}[X\cup S]$ ending at some node of $X$.
\end{fact}
Let us call a subpath $P_i$ of $P$ \emph{hidden} if $i \equiv 1 \pmod 2$, or if $i=\infty$ and $P_\infty \cap X\neq \emptyset$ (i.e., condition (vii) above is verified). Now consider the collection of paths $\{P_i^j\}$ defined above, for $j \in [k]$\footnote{We introduced the superscripts back, to distinguish the paths produced by applying Algorithm \ref{algo:decompose} to $P^1,\dots,P^k$.}.
\begin{fact}\label{fact:paths}
Consider the family of paths $\{P_i^j\}$ with $j \in [k]$. Then:
\begin{enumerate}
\item $\{P_i^j\}$ is a collection of pairwise internally vertex-disjoint paths and each node appears at most twice in the collection.
\item The restriction of the collection $\{P_i^j\}$ to hidden paths is a family of pairwise $X$-disjoint $S$-paths from ${\cal P}_M$, and the level of each of those paths is not $\infty$.
\item If $P_i^j \cap P_{i'}^{j'} \neq \emptyset$, then either (a) $j=j'$ and ($|i-i'|\leq 1 $ or $i,i' \in\{q_j,\infty\}$), or (b) $i$ (resp. $i'$) is the first or last path of the juxtaposition leading to $P^j$ (resp. $P^{j'}$).
\end{enumerate}
\end{fact}
\begin{proof}
Part 1 follows from hypothesis, the fact that paths $P^j$, $j \in [k]$ form a collection of pairwise internally vertex-disjoint paths and Fact \ref{fact:basic}.
We now prove part 2. Restrict the collection $\{P_i^j\}$ to hidden paths. We already argued in Fact \ref{fact:basic} that those are $S$-paths. As they are subpaths of $M'$-alternating paths, they are also $M'$-alternating. Using again Fact \ref{fact:basic}, they are contained in $X \cup S$. Using Claim \ref{cl:sameG}, we deduce they are paths from ${\cal P}_{M}$. The fact that they are $X$-disjoint follows by hypothesis for $j\neq j'$, and by Fact \ref{fact:basic} for $j=j'$. The fact that $M'$ is $(S',X')$-locally popular implies that they cannot be of level $\infty$. This proves part 2.
We now prove part 3. Let $P_i^j \cap P_{i'}^{j'} \neq \emptyset$. If $j=j'$, by Fact \ref{fact:basic} the only possibility is that $P^j_i$, $P^{j}_{i'}$ are consecutive paths in the juxtaposition leading to $P$. This is case (a) in 3. Else, $j\neq j'$ and case~(b) follows from the fact that $P^j$ and $P^{j'}$ are internally vertex-disjoint, concluding the proof of 3.
$\lozenge$\end{proof}
Consider the collection of hidden paths $\{P_i^j\}$ for $j \in [k]$, and let $u_i^j$ (resp. $v_i^j$) be the first (resp. last) node of each of those paths. Consider ${\cal C}=(U,L,\Pi)$ defined as follows: $U$ is the ordered collection of pairs:
\begin{itemize}
\item $(u^j_1,v^j_1)$ if the first node of $P_1^j$ belongs to $S$, and $(\emptyset,v^j_1)$ otherwise.
\item $(u^j_i,v^j_i)$ for $i\geq 3$ odd and
\item $(u^j_\infty,\emptyset)$ if $P^j_\infty$ is hidden.
\end{itemize}
$L$ is the ordered collection of levels $\ell_i^j$ of the hidden paths $P_i ^j$, while $\Pi$ is the ordered collection of their parities. Using Fact \ref{fact:basic}, Fact \ref{fact:paths} and the hypothesis, one easily verifies the following.
\begin{fact}
${\cal C}$ is an $(S,X)$-configuration. $M$ is active at ${\cal C}$, and the collection of hidden paths $\{P^j_i\}$ for $j \in [k]$ is a certificate of ${\cal C}$ at $M$.
\end{fact}
By hypothesis, $N$ is also active at ${\cal C}$. Hence, for all pairs $(i,j)$ such that $P_i^j$ is hidden, we can find pairwise $X$-disjoint $S$-paths $Q_i^j \in {\cal P}_{N}$, with each $Q_i^j$ starting (resp. ending) at node $u_i^j$ (resp. $v_i^j$) if this belongs to $S$, and at a node of $X$ otherwise, of the appropriate level and parity.
For $j \in [k]$, recall that $P^j$ is the juxtaposition of $(P^j_0,P^j_1,\dots,P_{q_j}^j,P^j_\infty)$, from which we removed the trivial paths. Consider the path $Q^j$ obtained by replacing, in the juxtaposition above, each hidden path $P^j_i$ with the corresponding $Q^j_i$. We conclude the proof of the claim by showing that the collection of $Q^j$, $j \in [k]$, satisfies the claim.
Fix $j \in [k]$. Let us first argue that $Q^j$ is a path in $G$ that satisfies property 2 from the statement of the claim. Note that, by construction, for all hidden $P^j_i$, the first (resp. last) vertex of $P^j_i$ coincides with the first (resp. last) vertex of $Q_i^j$, with possibly the exception of the first vertex of $P_0^j$ (resp. last vertex of $P_\infty^j$) if it belongs to $X$. Moreover, since all $Q^j_i$ are pairwise $X$-disjoint $S$-paths with no edge in $G\setminus X$, each $Q^j$ is a path in $G$. The statement follows.
Let us now argue that $Q^j$ is $N'$-alternating. Since $N'[(X'\cup S') \setminus X]=M'[(X'\cup S') \setminus X]$ and all $P^j_i$ that are not hidden are $M'$-alternating and do not intersect $X$, we conclude that all such $P^j_i$ are also $N'$-alternating. For each hidden $P^j_i$, $Q^j_i$ is an ${N}$-alternating path in $G_{N}[X \cup S]$ by construction. Since $G_{N}[X \cup S]=G_{N'}[X \cup S]$ by Claim \ref{cl:sameG}, each $Q^j_i$ is also $N'$-alternating.
By construction, the parity of each hidden $P_i^j$ coincides with that of the corresponding $Q_i^j$. We conclude that each $Q^j$ is $N'$-alternating.
Let us now show that $Q^j \in {\cal P}_{N'}$, and it is of the same level and parity of $P^j$. Let $P^j_i$ be non-hidden. By Fact \ref{fact:basic}, it is contained in $G[X'\cup S']\setminus X$. By Claim \ref{cl:one}, $P^j_i\cap N'=P^j_i\cap M'$, and each edge from $P^j_i\setminus N'$ has the same labels in $G_{M'}[X' \cup S']$ and $G_{N'}[X' \cup S']$. Now consider a hidden path $P^j_i$. By construction, $Q^j_i$ is a path of $G_{N}[X\cup S]=G_{N'}[X \cup S]$ of the same parity and level of $P^j_i$. The statement follows.
Now observe that property 1 of the claim holds, since the hypothesis $X\subseteq X'$ implies $S' \cap X=\emptyset$, hence we have: $$Q^j \cap S'= (Q^j \cap (S' \cap S))\cup (Q^j \cap (S'\setminus S))= (P^j \cap (S' \cap S))\cup (P^j \cap (S'\setminus S)) = P^j \cap S'.$$
Finally, let us observe that $Q^1,\dots, Q^k$ are internally vertex-disjoint and $X$-disjoint. The $X$-disjointness follows from the fact that all $Q^j_i$ corresponding to hidden $P^j_i$ are $X$-disjoint, while other $P^j_i$ do not intersect $X$ by Fact \ref{fact:basic}. The fact that $Q^1,\dots, Q^k$ are pairwise internally vertex-disjoint follows from Fact \ref{fact:paths}.
$\lozenge$ \end{proof}
In order to conclude the proof of the lemma, we need to prove that $N'$ is a $(S',X')$-locally popular matching whose tipping point is ${\cal T}'$.
\noindent{\em $(S',X')$-local popularity.} Let us first show that $N'$ is $(S',X')$-locally popular. Suppose it is not, then there exists a forbidden path or cycle $P$ as in item (i), (ii), or (iii) from Theorem \ref{thr:characterize-popular} that is contained in $G_{N'}[S' \cup X']$. Since (by Claim \ref{cl:sameG}) $G_{N}[X\cup S]=G_{N'}[X\cup S]$ and $v \in X\cup S$ is $N$-exposed if and only if it is $N'$-exposed, we deduce $P\not\subseteq X\cup S$, else $N$ would not be $(S,X)$-locally popular, a contradiction.
Suppose $P$ is a path: take one that is inclusionwise minimal among all paths that certify that $N'$ is not $(S',X')$-locally popular. We can then write $P$ as the juxtaposition of $(P^1,P^2)$ of levels $\ell_1,\ell_2 \neq \infty$, so that the first node of $P^2$ (which coincides with the last node of $P^1$) does not belong to $X$. Clearly $P^1,P^2 $ satisfy the hypothesis of Claim \ref{cl:fromPtoPhat} with the roles of $N'$ and $M'$ exchanged and $k=2$. Let $Q^1, Q^2$ be the paths whose existence is guaranteed by the claim. We claim that the juxtaposition of $(Q^1,Q^2)$ is a forbidden path in $G_{M'}[X'\cup S']$, a contradiction.
First, observe that the last node of $Q^1$ coincides with the last node of $P^1$ (since the latter does not belong to $X$) which coincides with the first node of $P^2$, which coincides with the first node of $Q^2$ (again, since it does not belong to $X$). We now argue that $P^1$ and $P^2$ have no other node in common. We know they are internally vertex-disjoint and $X$-disjoint. Hence, if the first node of $P^1$ or the last node of $P^2$ belongs to $X$, we are done. Suppose not. Then, by Claim \ref{cl:fromPtoPhat}, the first node of $Q^1$ is the first node of $P^1$, which is different by hypothesis ($P$ is a path) from the last node of $P^2$, which coincides, again by Claim \ref{cl:fromPtoPhat}, with the last node of $Q^2$. Hence, the juxtaposition of $(Q^1,Q^2)$ is a path of $G$, that we denote by $Q$. Again by Claim \ref{cl:fromPtoPhat}, $Q^1$, $Q^2 \in {\cal P}_{M'}$. Moreover, the parity and level of $Q^1$ (resp. $Q^2$) coincide with the parity and level of $P^1$ (resp. $P^2$). Hence $Q \in {\cal P}_{M'}$ is the required forbidden path, obtaining the desired contradiction.
The argument when $P$ is a cycle follows in a similar fashion, writing $P$ as the juxtaposition of $(P^1,P^2,P^3)$ with $P^1\cap X = P^2\cap X = \emptyset$, and the endpoints of $P^3$ lying in $S$ (note that we can assume that $P$ contains exactly one $(+,+)$ edge, else it also contains a path with two $(+,+)$ edges and we are back to the previous case).
\noindent{\em Tipping point.}
Let us now show that ${\cal T}'$ is the tipping point of $N'$. First, observe that $X \subseteq X'$ implies $S'\cap X=\emptyset$. For $v \in S' \cap S$, we know that $N'(v)=N(v) = M(v)=M'(v)$. For $v \in S'\setminus S$, we have $N'(v)=M'(v)$ by construction. Hence $N'_{S'} = M'_{S'}$. Now let ${\cal C}=(U,L,\Pi)$ be a $(S',X')$-configuration at which $M'$ is active, with $U$ as in \eqref{eq:U}. We will show that $N'$ is also active at ${\cal C}$. A symmetric argument shows that, if ${N'}$ is active at ${\cal C}$, then so is ${M}'$, concluding the proof.
Take paths $P^1,\dots,P^k$ that are the certificate of ${\cal C}$ at $M'$. We claim that those paths satisfy the hypothesis of Claim \ref{cl:fromPtoPhat}. They are, by construction, $S'$-paths that are pairwise $X'$-disjoint. This implies that they are pairwise internally vertex-disjoint and $X$-disjoint (using again $X\cap S'=\emptyset$). The $X'$-disjointness implies that each node from $X'$ appears at most once. Moreover, again by construction, each node from $S'$ appears at most twice. Hence, every node appears at most twice in $P^1,\dots, P^k$. Moreover, by definition of $U$, no two paths have the same endpoints.
Let $Q^1,\dots,Q^k \in {\cal P}_{N'}$ be the paths whose existence is guaranteed by Claim \ref{cl:fromPtoPhat}. We show that $Q^1,\dots,Q^k$ are the certificate of ${\cal C}$ at $N'$, concluding the proof. Fix $j \in [k]$. The parity and level of $Q^j$ is the same as that of $P^j$. Hence, the parity of $Q^j$ is $\Pi_j$, and its level $\ell_j$. Recall that $Q^j$ is an $S'$-path. By property $2$ of Claim \ref{cl:fromPtoPhat}, $Q^j$ starts (resp. ends) at the same node as $P^j$ when this belongs to $S'$, and at some node of $X'$ otherwise. Using property 1 of Claim \ref{cl:fromPtoPhat}, we deduce that $Q^j$ is a $S'$-path that starts (resp. ends) at $u_j$ (resp. $v_j$) when $u_j\neq \emptyset$ (resp. $v_j\neq \emptyset$), and at a node $x \in X'$ otherwise.
We are left to show that paths $\{Q^j\}_{j=1,\dots,k}$ are $X'$-disjoint. Suppose not, then there exists $v \in X'$, $j\neq j'$ such that $v \in Q^j \cap Q^{j'}$. Note that $v$ cannot be an internal node of $Q^j$ or $Q^{j'}$, since those paths are pairwise internally vertex-disjoint by construction. On the other hand, if $v$ is an endpoint of both $Q^j$ and $Q^{j'}$, then the statement either follows from the $X$-disjointness of $Q^j$ and $Q^{j'}$, or by property $2$ of Claim \ref{cl:fromPtoPhat}.
\qed
\end{document} |
\begin{document}
\title[Wellposedness of the 2D water waves in a regime allowing for angled crests]{Wellposedness of the 2D full water wave equation in a regime that allows for non-$C^1$ interfaces}
\author{Sijue Wu
}
\address{Department of Mathematics, University of Michigan, Ann Arbor, MI}
\thanks{Financial support in part by NSF grants DMS-1101434, DMS-1361791 and a Simons fellowship.}
\begin{abstract}
We consider the two dimensional gravity water wave equation in a regime where the free interface is allowed to be non-$C^1$.
In this regime, only a degenerate Taylor inequality $-\frac{\partial P}{\partial\mathbf{n}}\ge 0$ holds, with degeneracy at the singularities. In \cite{kw} an energy functional $\mathcal E(t)$ was constructed
and an a-priori estimate was proved. The energy functional $\mathcal E(t)$ is not only finite for interfaces and velocities in Sobolev spaces, but also finite for a class of non-$C^1$ interfaces with angled crests.
In this paper we prove the existence, uniqueness and stability of the solution of the 2d gravity water wave equation in the class where $\mathcal E(t)<\infty$, locally in time,
for any given data satisfying $\mathcal E(0)<\infty$.
\end{abstract}
\maketitle
\baselineskip15pt
\section{Introduction}
A class of water wave problems concerns the
motion of the
interface separating an inviscid, incompressible, irrotational fluid,
under the influence of gravity,
from a region of zero density (i.e. air) in
$n$-dimensional space. It is assumed that the fluid region is below the
air region. Assume that
the density of the fluid is $1$, the gravitational field is
$-\mathbf{k}$, where $\mathbf{k}$ is the unit vector pointing in the upward
vertical direction, and at
time $t\ge 0$, the free interface is $\Sigma(t)$, and the fluid
occupies region
$\Omega(t)$. When surface tension is
zero, the motion of the fluid is described by
\begin{equation}\label{euler}
\begin{cases} \ \mathbf v_t + (\mathbf v\cdot \nabla) \mathbf v = -\mathbf k-\nabla P
\qquad \text{on } \Omega(t),\ t\ge 0,
\\
\ \text{div}\,\mathbf v=0 , \qquad \text{curl}\,\mathbf v=0, \qquad \text{on }
\Omega(t),\ t\ge
0,
\\
\ P=0, \qquad\qquad\qquad\qquad\qquad\text{on }
\Sigma(t) \\
\ (1, \mathbf v) \text{
is tangent to
the free surface } (t, \Sigma(t)),
\end{cases}
\end{equation}
where $\mathbf v$ is the fluid velocity, $P$ is the fluid
pressure.
There is an important condition for these problems:
\begin{equation}\label{taylor}
-\frac{\partial P}{\partial\mathbf n}\ge 0
\end{equation}
pointwise on the interface, where $\mathbf n$ is the outward unit normal to the fluid interface
$\Sigma(t)$ \cite{ta};
it is well known that when surface tension is neglected and the Taylor sign condition \eqref{taylor} fails, the water wave motion can be subject to the Taylor instability \cite{ ta, bi, bhl, ebi}.
The study on water waves dates back centuries. Early mathematical works include Newton \cite{newton}, Stokes \cite{st}, Levi-Civita \cite{le}, and G.I. Taylor \cite{ta}. Nalimov
\cite{na}, Yosihara \cite{yo} and Craig \cite{cr} proved local in time existence and uniqueness of solutions for the 2d water wave equation \eqref{euler} for small and smooth initial data. In \cite{wu1, wu2}, we showed that for dimensions $n\ge 2$, the strong Taylor sign condition
\begin{equation}\label{taylor-s}
-\frac{\partial P}{\partial\mathbf n}\ge c_0>0
\end{equation}
always holds for the
infinite depth water wave problem \eqref{euler}, as long as the interface is in $C^{1+\epsilon}$, $\epsilon>0$; and the initial value problem of equation \eqref{euler} is locally well-posed in Sobolev spaces $H^s$, $s\ge 4$ for arbitrary given data. Since then,
local wellposedness for water waves with additional effects such as the surface tension, bottom and non-zero vorticity, under the assumption \eqref{taylor-s},\footnote{When there is surface tension, or bottom, or vorticity, \eqref{taylor-s} does not always hold, it needs to be assumed.} were obtained, c.f. \cite{am, cl, cs, ig1, la, li, ot, sz, zz}. Alazard, Burq \& Zuily \cite{abz, abz14} proved local wellposedness of \eqref{euler} in low regularity Sobolev spaces where the interfaces are only in $C^{3/2}$. Hunter, Ifrim \& Tataru \cite{hit} obtained a low regularity result for the 2d water waves that improves on \cite{abz}.
The author \cite{wu3, wu4}, Germain, Masmoudi \& Shatah \cite{gms}, Ionescu \& Pusateri \cite{ip} and Alazard \& Delort \cite{ad} obtained almost global and global existence for two and three dimensional water wave equation \eqref{euler} for small, smooth and localized data; see \cite{hit, it, dipp, wang1, wang2, bmsw} for some additional developments. Furthermore in \cite{cf},
Castro, C\'ordoba, Fefferman, Gancedo and G\'omez-Serrano proved that for the 2d water wave equation \eqref{euler}, there exist initially non-self-intersecting interfaces that become self-intersecting at a later time;
and as was shown in \cite{cs1}, the same result holds in 3d.
All these work either prove or assume the strong Taylor sign condition \eqref{taylor-s}, and the lowest regularity
considered are $C^{3/2}$ interfaces.
A common phenomena we observe in the ocean are waves with angled crests, with the interface possibly non-$C^1$.
A natural question is:
is the water wave equation \eqref{euler} well-posed in any class that includes non-$C^1$ interfaces?
We focus on the two dimensional case in this paper.
As was explained in \cite{kw}, the main difficulty in allowing for non-$C^1$ interfaces with angled crests is that in this case, both the quantity $-\frac{\partial P}{\partial \mathbf{n}}$ and the Dirichlet-to-Neumann operator $\nabla_{\mathbf n}$ degenerate, with degeneracy at the singularities on the interface;\footnote{We assume the acceleration is finite.} and only a weak Taylor inequality $-\frac{\partial P}{\partial \mathbf{n}}\ge 0$ holds. From earlier work \cite{wu1, wu2, am, la, abz, sz}, we know
the problem of solving the water wave equation \eqref{euler} can be reduced to solving a quasilinear equation of the interface $z=z(\alpha,t)$, of type
\begin{equation}\label{quasi1}
\partial_t^2\mathfrak u+ a \nabla_{\mathbf n} \mathfrak u=f(\mathfrak u, \partial_t \mathfrak u)
\end{equation}
where $a=-\frac{\partial P}{\partial \mathbf{n}}$.
When the strong Taylor sign condition \eqref{taylor-s} holds and $\nabla_{\mathbf n}$ is non-degenerate, equation \eqref{quasi1} is of the hyperbolic type with the right hand side consisting of lower order terms, and the Cauchy problem can be solved using classical tools. In the case where the solution dependent quantity $a=-\frac{\partial P}{\partial \mathbf{n}}$ and the operator $\nabla_{\mathbf n}$
degenerate, equation \eqref{quasi1} loses its hyperbolicity, and classical tools do not apply.
New ideas are required to solve the problem.
In \cite{kw}, R. Kinsey and the author constructed an energy functional $\mathcal E(t)$ and proved an a-priori estimate,
which states that for solutions of the water wave equation \eqref{euler}, if $\mathcal E(0)<\infty$, then $\mathcal E(t)$ remains finite for a time period that depends only on $\mathcal E(0)$.
The energy functional $\mathcal E(t)$
is finite for interfaces and velocities in Sobolev classes, and most importantly, it is also finite for a class of non-$C^1$ interfaces with angled crests.\footnote{In particular, the class where $\mathcal E(t)<\infty$ allows for angled crest type interfaces with interior angles at the crest $<\frac\pi 2$, which coincides with the range of the angles of the self-similar solutions in \cite{wu5}. The Stokes extreme wave is not in the class where $\mathcal E(t)<\infty$. }
In this paper, we show that for any given data satisfying $\mathcal E(0)<\infty$, there is a $T>0$, depending only on $\mathcal E(0)$, such that
the 2d water wave equation
\eqref{euler} has a unique solution in the class where $\mathcal E(t)<\infty$ for time $0\le t\le T$, and the solution is stable. We will work on the free surface equations that were derived in \cite{wu1, wu2}. The novelty of this paper is that we study the degenerate case, and solve the equation in a broader class that includes non-$C^1$ interfaces.
\subsection{Outline of the paper} In \S\ref{notation1} we introduce some basic notations and conventions; further notations will be introduced throughout the paper. In \S\ref{prelim} we recall the results in \cite{wu1, wu2}, and derive the free surface equation and its quasi-linearization,
from system \eqref{euler}, in both the Lagrangian and Riemann mapping variables, for interfaces and velocities in Sobolev spaces.
We derived the quasilinear equation in terms of the horizontal component in the Riemann mapping variable in \cite{wu1}, and in terms of full components in the Lagrangian coordinates in \cite{wu2}.
Here we re-derive the equations for the sake of coherence. In \S\ref{general-soln} we will recover the water wave equation \eqref{euler} from the interface equation \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, showing the equivalence of the two systems for smooth and non-self-intersecting interfaces. In \S\ref{a priori},
we present the energy functional $\mathcal E(t)$ constructed and the a-priori estimate proved in \cite{kw}.
In \S\ref{prelim-result}, we give a blow-up criteria in terms of the energy functional $\mathcal E(t)$ and
a stability inequality for solutions of the interface equation \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b} with a bound depending only on $\mathcal E(t)$. In \S\ref{main} we present the main result, that is,
the local in time wellposedness of the Cauchy problem for the water wave equation \eqref{euler} in the class where $\mathcal E(t)<\infty$.
In \S\ref{proof}, we give the proof for the blow-up criteria, Theorem~\ref{blow-up} and in \S\ref{proof3}, the stability inequality, Theorem~\ref{unique}. For the sake of completeness, we will also provide a proof for the a-priori estimate of \cite{kw} in the current setting in \S\ref{proof}.
In \S\ref{proof2}, we will prove the main result, Theorem~\ref{th:local}.
Some basic preparatory results are given in Appendix~\ref{ineq}; various identities that are useful for the paper are derived in Appendix~\ref{iden}. And in Appendix~\ref{quantities}, we list the quantities that are controlled by $\mathcal E$. A majority of these are already shown in \cite{kw}.
\textbf{Remark}: The blow-up criteria and the proof for the existence part of Theorem~\ref{th:local} are from the unpublished manuscript of the author \cite{wu7}, with some small modifications.
\subsection{Notation and convention}\label{notation1}
We consider solutions of the water wave equation \eqref{euler} in the setting where the fluid domain $\Omega(t)$ is simply connected, with the free interface $\Sigma(t):=\partial\Omega(t)$ being a Jordan curve,\footnote{That is, $\Sigma(t)$ is homeomorphic to the line $\mathbb R$.}
\[ \mathbf v(z, t)\to 0,\qquad\text{as } |z|\to\infty \]
and the interface $\Sigma(t)$ tending to horizontal lines at infinity.\footnote{The problem with velocity $\mathbf v(z,t)\to (c,0)$ as $|z|\to\infty$ can be reduced to the one with $\mathbf v\to 0$ at infinity by studying the solutions in a moving frame. $\Sigma(t)$ may tend to two different lines at $+\infty$ and $-\infty$.}
We use the following notations and conventions: $[A, B]:=AB-BA$ is the commutator of operators $A$ and $B$. $H^s=H^s(\mathbb R)$ is the Sobolev space with norm $\|f\|_{H^s}:=(\int (1+|\xi|^2)^s|\hat f(\xi)|^2\,d\xi)^{1/2}$, $\dot H^{s}=\dot H^{s}(\mathbb R)$ is the Sobolev space with norm $\|f\|_{\dot H^{s}}:= c(\int |\xi|^{2s} |\hat f(\xi)|^2\,d\xi)^{1/2}$, $L^p=L^p(\mathbb R)$ is the $L^p$ space with $\|f\|_{L^p}:=(\int|f(x)|^p\,dx)^{1/p}$ for $1\le p<\infty$, and $f\in L^\infty$ if $\|f\|_{L^\infty}:=\sup|f(x)|<\infty$. When not specified, all the
norms $\|f\|_{H^s}$, $\|f\|_{\dot H^{s}}$, $\|f\|_{L^p}$, $1\le p\le\infty$ are in terms of the spatial variable only, and $\|f\|_{H^s(\mathbb R)}$, $\|f\|_{\dot H^{s}(\mathbb R)}$, $\|f\|_{L^p(\mathbb R)}$, $1\le p\le\infty$ are in terms of the spatial variables. We say $f\in C^j([0, T], H^s)$ if the mapping $f=f(t):=f(\cdot, t): t\in [0, T]\to H^s$ is $j$-times continuously differentiable, with $\sup_{[0, T], \ 0\le k\le j}\|\partial_t^k f(t)\|_{H^s}<\infty$; we say $f\in L^\infty([0, T], H^s)$ if $\sup_{[0, T]}\|f(t)\|_{H^s}<\infty$.
$C^j(X)$ is the space of $j$-times continuously differentiable functions on the set $X$; $C^j_0(\mathbb R)$ is the space of $j$-times continuously differentiable functions that decay at infinity.
Compositions are always in terms of the spatial variables and we write for $f=f(\cdot, t)$, $g=g(\cdot, t)$, $f(g(\cdot,t),t):=f\circ g(\cdot, t):=U_gf(\cdot,t)$.
We identify $(x,y)$ with the complex number $x+iy$; $\Re z$, $\Im z$ are the real and imaginary parts of $z$; $\bar z=\Re z-i\Im z$ is the complex conjugate of $z$. $\overline \Omega$ is the closure of the domain $\Omega$, $\partial\Omega$ is the boundary of $\Omega$, ${\mathscr P}_-:=\{z\in \mathbb C: \Im z<0\}$ is the lower half plane. We write
\begin{equation}\label{eq:comm}
[f,g; h]:=\frac1{\pi i}\int\frac{(f(x)-f(y))(g(x)-g(y))}{(x-y)^2}h(y)\,dy.
\end{equation}
We use $c$, $C$ to denote universal constants. $c(a_1, \dots )$, $C(a_1, \dots)$, $M(a_1, \dots)$ are constants depending on $a_1, \dots $; constants appearing in different contexts need not be the same. We write $f\lesssim g$ if there is a universal constant $c$, such that $f\le cg$.
\section{Preliminaries}\label{prelim}
Equation \eqref{euler} is a nonlinear equation defined on moving domains, it is difficult to study it directly. A classical approach is to reduce from \eqref{euler} to an equation on the interface, and study solutions of the interface equation. Then use the incompressibility and irrotationality of the velocity field to recover the velocity in the fluid domain by solving a boundary value problem for the Laplace equation.
In what follows we derive the interface equations from \eqref{euler}, and vice versa;
we assume that the interface, velocity and acceleration are in Sobolev spaces.
\subsection{The equation for the free surface in Lagrangian variable}\label{surface-equation-l}
Let the free interface $\Sigma(t): z=z(\alpha, t)$, $\alpha\in\mathbb R$ be given by Lagrangian parameter $\alpha$, so $z_t(\alpha, t)={\mathbf v}(z(\alpha,t);t)$ is the velocity of the fluid particles on the interface, $z_{tt}(\alpha,t)={\mathbf v_t + (\mathbf v\cdot \nabla) \mathbf v}(z(\alpha,t); t)$ is the acceleration.
Notice that $P=0$ on $\Sigma(t)$ implies that $\nabla P$ is normal to $\Sigma(t)$, therefore $\nabla P=-i\mathfrak a z_\alpha$, where
\begin{equation}\label{frak-a}
\mathfrak a =-\frac1{|z_\alpha|}\frac{\partial P}{\partial {\mathbf n}};
\end{equation}
and the first and third equation of \eqref{euler} gives
\begin{equation}\label{interface-l}
z_{tt}+i=i\mathfrak a z_\alpha.
\end{equation}
The second equation of \eqref{euler}: $\text{div } \mathbf v=\text{curl } \mathbf v=0$ implies that $\bar {\mathbf v}$ is holomorphic in the fluid domain $\Omega(t)$, hence $\bar z_t$ is the boundary value of a holomorphic function in $\Omega(t)$.
Let $\Omega\subset \mathbb C$ be a domain with boundary $\Sigma: z=z(\alpha)$, $\alpha\in I$, oriented clockwise. Let $\mathfrak H$ be the Hilbert transform associated to $\Omega$:
\begin{equation}\label{hilbert-t}
\mathfrak H f(\alpha)=\frac1{\pi i}\, \text{pv.}\int\frac{z_\beta(\beta)}{z(\alpha)-z(\beta)}f(\beta)\,d\beta
\end{equation}
We have the following characterization of the trace of a holomorphic function on $\Omega$.
\begin{proposition}\cite{jour}\label{prop:hilbe}
a. Let $g \in L^p$ for some $1<p <\infty$.
Then $g$ is the boundary value of a holomorphic function $G$ on $\Omega$ with $G(z)\to 0$ at infinity if and only if
\begin{equation}
\label{eq:1571}
(I-\mathfrak H) g = 0.
\end{equation}
b. Let $ f \in L^p$ for some $1<p<\infty$. Then $ \frac12(I+\mathfrak H) f$ is the boundary value of a holomorphic function $\mathfrak G$ on $\Omega$, with $\mathfrak G(z)\to 0$ at infinity.
c. $\mathfrak H1=0$.
\end{proposition}
By Proposition~\ref{prop:hilbe} the second equation of \eqref{euler} is equivalent to
$\bar z_t=\mathfrak H {\bar z_t}$.
So the motion of the fluid interface $\Sigma(t): z=z(\alpha,t)$ is given by
\begin{equation}\label{interface-e}
\begin{cases}
z_{tt}+i=i\mathfrak a z_\alpha\\
\bar z_t=\mathfrak H \bar z_t.
\end{cases}
\end{equation}
\eqref{interface-e} is a fully nonlinear equation. In \cite{wu1}, Riemann mapping was introduced to analyze equation \eqref{interface-e} and to derive the quasilinear equation.
\subsection{The free surface equation in Riemann mapping variable}\label{surface-equation-r}
Let $\Phi(\cdot, t): \Omega(t)\to {\mathscr P}_-$ be the Riemann mapping taking $\Omega(t)$ to the lower half plane ${\mathscr P}_-$,
satisfying $\Phi(z(0,t),t)=0$ and $\lim_{z\to\infty}\Phi_z(z,t)=1$. Let
\begin{equation}\label{h}
h(\alpha,t):=\Phi(z(\alpha,t),t),
\end{equation}
so $h(0,t)=0$ and $h:\mathbb R\to\mathbb R$ is a homeomorphism. Let $h^{-1}$ be defined by
\[ h(h^{-1}(\alpha',t),t)=\alpha',\quad \alpha'\in \mathbb R; \]
and
\begin{equation}\label{1001}
Z(\alpha',t):=z\circ h^{-1}(\alpha',t),\quad Z_t(\alpha',t):=z_t\circ h^{-1}(\alpha',t),\quad Z_{tt}(\alpha',t):=z_{tt}\circ h^{-1}(\alpha',t)
\end{equation}
be the reparametrization of the position, velocity and acceleration of the interface in the Riemann mapping variable $\alpha'$. Let
\begin{equation}\label{1002}
Z_{,\alpha'}(\alpha', t):=\partial_{\alpha'}Z(\alpha', t),\quad Z_{t,\alpha'}(\alpha', t):=\partial_{\alpha'}Z_t(\alpha',t), \quad Z_{tt,\alpha'}(\alpha', t):=\partial_{\alpha'}Z_{tt}(\alpha',t), \text{ etc.}
\end{equation}
Notice that ${\bar {\mathbf v}}\circ \Phi^{-1}: {\mathscr P}_-\to \mathbb C$ is holomorphic in the lower half plane ${\mathscr P}_-$ with ${\bar {\mathbf v}}\circ \Phi^{-1}(\alpha', t)={\bar Z}_t(\alpha',t)$. Precomposing \eqref{interface-l} with $h^{-1}$ and applying Proposition~\ref{prop:hilbe} to $
{\bar {\mathbf v}}\circ \Phi^{-1}$ in ${\mathscr P}_-$, we have the free surface equation in the Riemann mapping variable:
\begin{equation}\label{interface-r}
\begin{cases}
Z_{tt}+i=i\mathcal AZ_{,\alpha'}\\
\bar{Z}_t=\mathbb H \bar{Z}_t
\end{cases}
\end{equation}
where $\mathcal A\circ h=\mathfrak a h_\alpha$ and $\mathbb H$ is the Hilbert transform associated with the lower half plane ${\mathscr P}_-$:
\begin{equation}\label{ht}
\mathbb H f(\alpha')=\frac1{\pi i}\text{pv.}\int\frac1{\alpha'-\beta'}\,f(\beta')\,d\beta'.
\end{equation}
Observe that $\Phi^{-1}(\alpha', t)=Z(\alpha', t)$ and $(\Phi^{-1})_{z'}(\alpha',t)=Z_{,\alpha'}(\alpha',t)$. So
$Z_{,\alpha'}$, $\dfrac1{Z_{,\alpha'}}$ are boundary values of the holomorphic functions $(\Phi^{-1})_{z'}$ and $\dfrac1{(\Phi^{-1})_{z'}}$, tending to 1 at the spatial infinity. By Proposition~\ref{prop:hilbe}, \footnote{We work in the regime where $\frac1{Z_{,\alpha'}}-1\in L^2(\mathbb R)$. }
\begin{equation}\label{interface-holo}
\frac1{Z_{,\alpha'}}-1=\mathbb H\paren{\frac1{Z_{,\alpha'}}-1}. \end{equation}
By the chain rule, we know for any function $f$, $U_h^{-1}\partial_t U_h f =(\partial_t+ b \partial_{\alpha'})f$, where
\[ b:=h_t\circ h^{-1}. \]
So $Z_{tt}=(\partial_t+b\partial_{\alpha'})Z_t$, and $Z_{t}=(\partial_t+b\partial_{\alpha'})Z$.
\subsubsection{Some additional notations}
We will often use the fact that $\mathbb H$ is purely imaginary, and decompose a function into the sum of its holomorphic and antiholomorphic parts. We define
the projections to the space of holomorphic functions in the lower, and respectively, upper half planes by
\begin{equation}\label{proj}
\mathbb P_H :=\frac12(I+\mathbb H),\qquad\text{and }\quad \mathbb P_A:=\frac12(I-\mathbb H).
\end{equation}
We also define \begin{equation}\label{da-daa}
D_\alpha =\dfrac {1}{z_\alpha}\partial_\alpha ,\quad \text{ and } \quad D_{\alpha'} = \dfrac { 1}{Z_{,\alpha'}}\partial_{\alpha'} .
\end{equation}
We know by the chain rule that $\paren{D_\alpha f} \circ h^{-1}= D_{\alpha'} \paren{f \circ h^{-1}}$; and for any holomorphic
function $G$ on $\Omega(t)$ with boundary value $g(\alpha,t):=G(z(\alpha,t),t)$, $D_\alpha g=G_z\circ z$, and $D_{\alpha'} (g\circ h^{-1})=G_z\circ Z$. Hence $D_\alpha$, $D_{\alpha'}$ preserve the holomorphicity of $g$, $g\circ h^{-1}$.
\subsubsection{The formulas for $A_1$ and $b$.}
Let $A_1:=\mathcal A |Z_{,\alpha'}|^2$. Notice that $\mathcal A\circ h=\frak a h_\alpha=-\frac{\partial P}{\partial\vec n}\frac{h_\alpha}{\abs{z_\alpha}}$, so
$A_1$ is related to the important quantity $-\frac{\partial P}{\partial\vec n}$ by
$$-\frac{\partial P}{\partial\vec n}\circ Z=\frac{A_1}{\abs{Z_{,\alpha'}}}.$$
Using Riemann mapping, we analyzed the quantities $A_1$ and $b$ in \cite{wu1}.
Here we re-derive the formulas for the sake of completeness; we will carefully note the a-priori assumptions made in the derivation. We mention that the same derivation can also be found in \cite{wu6}. We also mention that in \cite{hit}, using the formulation of Ovsjannikov \cite{ovs}, the authors also re-derived the formulas \eqref{b} and \eqref{a1}.
Assume that
\begin{equation}\label{assume1}\lim_{z'\in {\mathscr P}_-, z'\to\infty} \Phi_t\circ \Phi^{-1}(z',t)=0,\qquad \lim_{z'\in {\mathscr P}_-,z'\to\infty} \braces{i(\Phi^{-1})_{z'} (z',t)-(\Phi^{-1})_{z'} \overline{ \vec{v}}_t\circ \Phi^{-1}(z',t)}=i. \ \footnote{ It was shown in \cite{wu1} that the water wave equation \eqref{euler} is well-posed in this regime.}\end{equation}
\begin{proposition}[Lemma 3.1 and (4.7) of \cite{wu1}, or Proposition 2.2 and (2.18) of \cite{wu6}]\label{prop:a1}
We have \begin{equation}\label{b}
b:=h_t\circ h^{-1}=\Re (I-\mathbb H)\paren{\frac{Z_t}{Z_{,\alpha'}}};
\end{equation}
\begin{equation}\label{a1}
A_1=1-\Im [Z_t,\mathbb H]{\bar Z}_{t,\alpha'}=1+\frac1{2\pi }\int \frac{|Z_t(\alpha', t)-Z_t(\beta', t)|^2}{(\alpha'-\beta')^2}\,d\beta'\ge 1;
\end{equation}
\begin{equation}\label{taylor-formula}
-\frac{\partial P}{\partial\bold n}\Big |_{Z=Z(\cdot,t)}= \frac{A_1}{|Z_{,\alpha'}|}.
\end{equation}
In particular, if the interface $\Sigma(t)\in C^{1+\epsilon}$ for some $\epsilon>0$, then the strong Taylor sign condition \eqref{taylor-s} holds.
\end{proposition}
\begin{proof}
Taking complex conjugate of the first equation in \eqref{interface-r}, then multiplying by $Z_{,\alpha'}$ yields
\begin{equation}\label{interface-a1}
Z_{,\alpha'}({\bar Z}_{tt}-i)=-i\mathcal A|Z_{,\alpha'}|^2:=-i A_1.
\end{equation}
The left hand side of \eqref{interface-a1} is almost holomorphic since $Z_{,\alpha'}$ is the boundary value of the holomorphic function $(\Phi^{-1})_{z'}$ and $\bar z_{tt}$ is the time derivative of the holomorphic function $\bar z_t$. We explore the almost holomorphicity of $\bar z_{tt}$ by expanding. Let $F=\bar {\bold v}$, we know $F$ is holomorphic in $\Omega(t)$ and $\bar z_t=F(z(\alpha, t),t)$, so
\begin{equation}\label{eq:1}
\bar z_{tt}=F_t(z(\alpha, t),t)+F_z(z(\alpha, t),t) z_t(\alpha, t),\qquad \bar z_{t\alpha}=F_z(z(\alpha, t),t) z_\alpha(\alpha, t)
\end{equation}
therefore \begin{equation}\label{eq:2}
\bar z_{tt}= F_t\circ z+ \frac{\bar z_{t\alpha}}{z_\alpha} z_t.
\end{equation}
Precomposing with $h^{-1}$, subtracting $-i$, then multiplying by $Z_{,\alpha'}$, we have
$$Z_{,\alpha'}({\bar Z}_{tt}-i)= Z_{,\alpha'} F_t\circ Z+ Z_t {\bar Z}_{t,\alpha'}-i Z_{,\alpha'}=-iA_1 $$
Apply $(I-\mathbb H)$ to both sides of the equation. Notice that $F_t\circ Z$ is the boundary value of the holomorphic function $F_t\circ \Phi^{-1}$. By assumption \eqref{assume1} and Proposition~\ref{prop:hilbe}, $(I-\mathbb H)(Z_{,\alpha'} F_t\circ Z-iZ_{,\alpha'})=-i$; therefore
$$-i(I-\mathbb H) A_1= (I-\mathbb H)(Z_t {\bar Z}_{t,\alpha'})-i$$
Taking imaginary parts on both sides and using the fact $(I-\mathbb H){\bar Z}_{t,\alpha'}=0$ \footnote{Because $(I-\mathbb H){\bar Z}_{t}=0$.} to rewrite $(I-\mathbb H)(Z_t {\bar Z}_{t,\alpha'})$ as $[Z_t,\mathbb H]{\bar Z}_{t,\alpha'}$ yields
\begin{equation}\label{A_1}
A_1=1-\Im [Z_t,\mathbb H]{\bar Z}_{t,\alpha'}.
\end{equation}
The identity \begin{equation}
-\Im[Z_t,\mathbb H]{\bar Z}_{t,\alpha'}
=\frac1{2\pi }\int \frac{|Z_t(\alpha', t)-Z_t(\beta', t)|^2}{(\alpha'-\beta')^2}\,d\beta'
\end{equation}
is obtained by integration by parts.
The quantity $b:=h_t\circ h^{-1}$ can be calculated similarly.
Recall $h(\alpha,t)=\Phi(z(\alpha,t),t)$, so
$$h_t=\Phi_t\circ z+(\Phi_z\circ z) z_t,\qquad h_\alpha=(\Phi_z\circ z) z_\alpha$$
hence
$h_t= \Phi_t\circ z+ \frac{h_\alpha}{z_\alpha} z_t$. Precomposing with $h^{-1}$ yields
\begin{equation}\label{b1}
h_t\circ h^{-1}=\Phi_t\circ Z+ \frac{Z_t}{Z_{,\alpha'}}.
\end{equation}
Now $\Phi_t\circ Z$ is the boundary value of the holomorphic function $\Phi_t\circ \Phi^{-1}$. By assumption \eqref{assume1} and Proposition~\ref{prop:hilbe}, $(I-\mathbb H)\Phi_t\circ Z=0$. Apply $(I-\mathbb H)$ to both sides of \eqref{b1} then take the real parts, we get
$$b=h_t\circ h^{-1}= \Re (I-\mathbb H)\paren{\frac{Z_t}{Z_{,\alpha'}}}.$$
A classical result in complex analysis states that if the interface is in $C^{1+\epsilon}$, $\epsilon>0$, tending to lines at infinity, then
$c_0\le \abs{Z_{,\alpha'}}\le C_0$, for some constants $c_0, C_0>0$. So in this case, the strong Taylor sign condition \eqref{taylor-s} holds.
\end{proof}
\subsection{The quasilinear equation}\label{surface-quasi-r} In \cite{wu1, wu2} we showed that the quasi-linearization of the free surface equation \eqref{interface-e} can be accomplished by just taking one time derivative to equation \eqref{interface-l}.
Taking derivative to $t$ to \eqref{interface-l} we get
\begin{equation}\label{quasi-l}
{\bar z}_{ttt}+i\frak a {\bar z}_{t\alpha}=-i\frak a_t {\bar z}_{\alpha}=\frac{\frak a_t}{\frak a} ({\bar z}_{tt}-i).
\end{equation}
Precomposing
with $h^{-1}$ on both sides of \eqref{quasi-l}, we have the equation in the Riemann mapping variable
\begin{equation}\label{quasi-r1}
{\bar Z}_{ttt}+i\mathcal A {\bar Z}_{t,\alpha'}=\frac{\frak a_t}{\frak a}\circ h^{-1} ({\bar Z}_{tt}-i)
\end{equation}
We compute $\dfrac{\frak a_t}{\frak a}$ by the identities $\frak a h_\alpha=\mathcal A\circ h$, and $\mathcal A\circ h=\dfrac{A_1}{\abs{Z_{,\alpha'}}^2}\circ h=A_1\circ h \dfrac{ h_\alpha^2}{\abs{z_{\alpha}}^2}$, so
\begin{equation}\label{eq:frak a}
\frak a =A_1\circ h \dfrac{ h_\alpha}{\abs{z_{\alpha}}^2};
\end{equation}
and we obtain, by taking derivative to $t$ to \eqref{eq:frak a},
$$
\dfrac{\frak a_t}{\frak a}= \frac{\partial_t \paren{ A_1\circ h}}{A_1\circ h}+\frac{h_{t\alpha}}{h_\alpha}-2\Re \frac{z_{t\alpha}}{z_\alpha}.
$$
Notice that $\frac{h_{t\alpha}}{h_\alpha}\circ h^{-1}=(h_t\circ h^{-1})_{\alpha'}:=b_{\alpha'}$. So
\begin{equation}\label{at}
\dfrac{\frak a_t}{\frak a}\circ h^{-1}= \frac{(\partial_t +b\partial_{\alpha'}) A_1}{A_1}+b_{\alpha'} -2\Re D_{\alpha'} Z_t;
\end{equation}
where we calculate from \eqref{b} that
\begin{equation}\label{ba}
\begin{aligned}
b_{\alpha'}&= \Re \paren{ (I-\mathbb H) \frac{Z_{t,\alpha'}}{Z_{,\alpha'}}+ (I-\mathbb H) \paren{Z_t\partial_{\alpha'}\frac 1{Z_{,\alpha'}}}}\\& =2\Re D_{\alpha'} Z_t+ \Re \paren{ (-I-\mathbb H) \frac{Z_{t,\alpha'}}{Z_{,\alpha'}}+ (I-\mathbb H) \paren{Z_t\partial_{\alpha'} \frac 1{Z_{,\alpha'}}}}\\&
=2\Re D_{\alpha'} Z_t+\Re \paren{\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}+ \bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} },
\end{aligned}
\end{equation}
here in the last step we used the fact that $(I+\mathbb H)Z_{t,\alpha'}=0$ and $(I-\mathbb H)\partial_{\alpha'} \frac 1{Z_{,\alpha'}}=0$ \footnote{These follow from $(I-\mathbb H)\bar Z_t=0$ and \eqref{interface-holo}.} to rewrite the terms as commutators; and
we compute, by \eqref{A_1} and \eqref{eq:c14},
\begin{equation}\label{dta1}
(\partial_t +b\partial_{\alpha'}) A_1= -\Im \paren{\bracket{Z_{tt},\mathbb H}\bar Z_{t,\alpha'}+\bracket{Z_t,\mathbb H}\partial_{\alpha'} \bar Z_{tt}-[Z_t, b; \bar Z_{t,\alpha'}]}.
\end{equation}
We now sum up the above calculations and write the quasilinear system in the Riemann mapping variable.
We have
\begin{equation}\label{quasi-r}
\begin{cases}
(\partial_t+b\partial_{\alpha'})^2{\bar Z}_{t}+i\dfrac{A_1}{\abs{Z_{,\alpha'}}^2}\partial_{\alpha'} {\bar Z}_{t}=\dfrac{\frak a_t}{\frak a}\circ h^{-1} ({\bar Z}_{tt}-i)
\\ \bar Z_t=\mathbb H \bar Z_t
\end{cases}
\end{equation}
where
\begin{equation}\label{aux}
\begin{cases}
b:=h_t\circ h^{-1}=\Re (I-\mathbb H)\paren{\dfrac{Z_t}{Z_{,\alpha'}}}\\
A_1=1-\Im [Z_t,\mathbb H]{\bar Z}_{t,\alpha'}=1+\dfrac1{2\pi }\int \dfrac{|Z_t(\alpha', t)-Z_t(\beta', t)|^2}{(\alpha'-\beta')^2}\,d\beta'\\
\dfrac1{Z_{,\alpha'}}= i\dfrac{\bar Z_{tt}-i}{A_1}\\
\dfrac{\frak a_t}{\frak a}\circ h^{-1}= \dfrac{(\partial_t +b\partial_{\alpha'}) A_1}{A_1}+b_{\alpha'} -2\Re D_{\alpha'} Z_t
\end{cases}
\end{equation}
Here the third equation in \eqref{aux} is obtained by rearranging the terms of the equation \eqref{interface-a1}.
Using it to replace $\frac1{Z_{,\alpha'}}$ by $i\frac{\bar Z_{tt}-i}{A_1}$, we get a system for the complex conjugate velocity and acceleration $(\bar Z_t, \bar Z_{tt})$. The initial data for the system \eqref{quasi-r}-\eqref{aux} is set up as follows.
\subsubsection{The initial data}\label{id-r}
Without loss of generality, we choose the parametrization of the initial interface $\Sigma(0): Z(\cdot,0):=Z(0)$ by the Riemann mapping variable, so $h(\alpha,0)=\alpha$ for $\alpha\in\mathbb R$;
we take the initial
velocity $Z_t(0)$,
such that it satisfies $\bar Z_t(0)=\mathbb H \bar Z_t(0)$.
And we take the initial acceleration $Z_{tt}(0)$ so that it solves the equation
\eqref{interface-a1} or the third equation in \eqref{aux}.
\subsection{Local wellposedness in Sobolev spaces}
By \eqref{taylor-formula} and \eqref{a1}, if $Z_{,\alpha'}\in L^\infty$, then the strong Taylor stability criterion \eqref{taylor-s} holds.
In this case, the system \eqref{quasi-r}-\eqref{aux} is quasilinear of the hyperbolic type, with the left hand side of the first equation in \eqref{quasi-r} consisting of the higher order terms.\footnote{ $i\partial_{\alpha'} =|\partial_{\alpha'}|$ when acting on holomorphic functions. The Dirichlet-to-Neumann operator $\nabla_{\bf n}= \frac1{|Z_{,\alpha'}|}|\partial_{\alpha'}|$.} In \cite{wu1} we showed that the Cauchy problem of \eqref{quasi-r}-\eqref{aux}, and equivalently of \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1},
is uniquely solvable in Sobolev spaces $H^s$, $s\ge 4$.
Let the initial data be given as in \S\ref{id-r}.
\begin{theorem}[Local wellposedness in Sobolev spaces, cf. Theorem 5.11, \S6 of \cite{wu1}]\label{prop:local-s} Let $s\ge 4$. Assume that
$Z_t(0)\in H^{s+1/2}(\mathbb R)$, $Z_{tt}(0)\in H^s(\mathbb R)$ and $Z_{,\alpha'}(0)\in L^\infty(\mathbb R)$.
Then there is $T>0$, such that on $[0, T]$, the initial value problem of \eqref{quasi-r}-\eqref{aux}, or equivalently of \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, has a unique solution
$Z=Z(\cdot, t)$, satisfying
$(Z_{t}, Z_{tt})\in C^l([0, T], H^{s+1/2-l}(\mathbb R)\times H^{s-l}(\mathbb R))$, and $Z_{,\alpha'}-1\in C^l([0, T], H^{s-l}(\mathbb R))$, for $l=0,1$.
Moreover if $T^*$ is the supremum over all such times $T$, then either $T^*=\infty$, or $T^*<\infty$, but
\begin{equation}\label{eq:1-1}
\sup_{[0, T^*)}(\|Z_{,\alpha'}(t)\|_{L^\infty}+\|Z_{tt}(t)\|_{H^3}+\| Z_t(t)\|_{H^{3+1/2}})=\infty. \end{equation}
\end{theorem}
\begin{remark} 1. Let $h=h(\alpha, t)$ be the solution of the ODE
\begin{equation}\label{b1-1}
\begin{cases}
h_t=b(h,t),\\
h(\alpha, 0)=\alpha
\end{cases}
\end{equation}
where $b$ is as given by \eqref{b}. Then $z=Z\circ h$ satisfies equation
\eqref{interface-l}, cf. \S6 of \cite{wu1}.
2.
\eqref{quasi-r}-\eqref{aux} is a system for the complex conjugate velocity and acceleration $(\bar Z_t, \bar Z_{tt})$; the interface doesn't appear explicitly,
so a solution can exist even if $Z=Z(\cdot, t)$ becomes self-intersecting. Similarly, equation \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1} makes sense even if $Z=Z(\cdot, t)$ self-intersects. To obtain the solution of the water wave equation \eqref{euler} from the solution of the quasilinear equation \eqref{quasi-r}-\eqref{aux} as given in Theorem~\ref{prop:local-s} above, in \S 6 of \cite{wu1},
an additional chord-arc condition is assumed for the initial interface, and it was shown that the solution $Z=Z(\cdot, t)$ remains non-self-intersecting for a time period depending only on the initial chord-arc constant and the initial Sobolev norms. \footnote{\eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1} is equivalent to the water wave equation \eqref{euler} only when the interface is non-self-intersecting, see \S\ref{general-soln}.}
3.
Observe that we arrived at
\eqref{quasi-r}-\eqref{aux} from \eqref{euler} using only the following properties of the domain: 1. there is a conformal mapping taking the fluid region $\Omega(t)$ to ${\mathscr P}_-$; 2. $P=0$ on $\Sigma(t)$. We note that $z\to z^{1/2}$ is a conformal map that takes the region $\mathbb C\setminus \{z=x+i0, x>0\}$ to the upper half plane; so a domain with its boundary self-intersecting at the positive real axis can be mapped conformally onto the lower half plane ${\mathscr P}_-$. Taking such a domain as the initial fluid domain,
assuming $P=0$ on $\Sigma(t)$ even when $\Sigma(t)$ self-intersects,\footnote{We note that when $\Sigma(t)$ self-intersects, the condition $P=0$ on $\Sigma(t)$ is unphysical.} one can still solve equation \eqref{quasi-r}-\eqref{aux} for a short time, by Theorem~\ref{prop:local-s}. Indeed this is one of the main ideas in the work of \cite{cf}. Using this idea and the time reversibility of the water wave equation, by choosing an appropriate initial velocity field that pulls the initial domain apart, Castro, Cordoba et al.\ \cite{cf} proved the existence of ``splash'' and ``splat'' singularities starting from a smooth non-self-intersecting fluid interface.
\end{remark}
\subsection{Recovering the water wave equation \eqref{euler} from the interface equations}\label{general-soln} In this section, we derive the equivalent system in the lower half plane ${\mathscr P}_-$ for the interface equations
\eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, and show how to recover from here the water wave equation \eqref{euler}. Although the derivation is quite straightforward, to the best knowledge of the author, it has not been done before.
Let $Z=Z(\cdot, t)$ be a solution of \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b},
satisfying the regularity properties of Theorem~\ref{prop:local-s}; let $U(\cdot, t): \mathscr P_-\to \mathbb C$, $\Psi(\cdot, t): \mathscr P_-\to \mathbb C$ be the holomorphic functions, continuous on $\bar {\mathscr P}_-$, such that
\begin{equation}\label{eq:270}
U(\alpha',t)=\bar Z_t(\alpha', t),\qquad \Psi(\alpha',t)=Z(\alpha',t),\qquad \Psi_{z'}(\alpha',t)=Z_{,\alpha'}(\alpha', t),
\end{equation}
and $\lim_{z'\to\infty} U(z',t)=0$, $\lim_{z'\to\infty}\Psi_{z'}(z',t)=1$.\footnote{We know $U(z', t)=K_{y'}\ast \bar Z_t$, $\Psi_{z'}=K_{y'}\ast Z_{,\alpha'}$ and by the Maximum principle, $\frac1{\Psi_{z'}}=K_{y'}\ast \frac1{Z_{,\alpha'}}$, here $K_{y'}$ is the Poisson kernel defined by \eqref{poisson}. By \eqref{interface-a1}, $\frac1{Z_{,\alpha'}}-1\in C([0, T], H^s(\mathbb R))$ for $s\ge 4$, so $\Psi_{z'}\ne 0$ on $\bar {\mathscr P}_-$. }
From $Z_t=(\partial_t+b\partial_{\alpha'})Z=\Psi_t (\alpha', t)+b \Psi_{z'}(\alpha',t)$, we have
\begin{equation}\label{eq:271}
b=\frac{Z_t}{ \Psi_{z'} }-\frac{\Psi_t}{\Psi_{z'}}=\frac{\bar U}{\Psi_{z'}}-\frac{\Psi_t}{\Psi_{z'}},\qquad \text{on }\partial {\mathscr P}_- ,
\end{equation}
and substituting in we get
$$\bar Z_{tt}=(\partial_t+b\partial_{\alpha'}) \bar Z_t=U_t+\paren{\frac{\bar U}{\Psi_{z'}}-\frac{\Psi_t}{\Psi_{z'}} } U_{z'}\qquad \text{on }\partial {\mathscr P}_- ;$$
so $\bar Z_{tt}$ is the trace of the function $U_t-\frac{\Psi_t}{\Psi_{z'}}U_{z'}+\frac{\bar U}{\Psi_{z'}}U_{z'} $ on $\partial {\mathscr P}_-$; and $Z_{,\alpha'}(\bar Z_{tt}-i)$ is then the trace of the function $\Psi_{z'}
U_t- {\Psi_t}U_{z'}+{\bar U}U_{z'} -i\Psi_{z'}$ on $\partial {\mathscr P}_-$. This gives, from \eqref{interface-a1} that
\begin{equation}\label{eq:272}
\Psi_{z'} U_t- {\Psi_t}U_{z'}+{\bar U}U_{z'}-i\Psi_{z'}=-iA_1,\qquad \text{on }\partial {\mathscr P}_- .
\end{equation}
Observe that on the left hand side of \eqref{eq:272}, $\Psi_{z'} U_t- {\Psi_t}U_{z'}-i\Psi_{z'}$ is holomorphic on ${\mathscr P}_-$,
while
${\bar U}U_{z'}=\partial_{z'}(\bar U U)$.
So there is a real valued function $\frak P: {\mathscr P}_-\to \mathbb R$, such that
\begin{equation}\label{eq:273}
\Psi_{z'} U_t- {\Psi_t}U_{z'}+{\bar U}U_{z'}-i\Psi_{z'}=-2\partial_{z'}\frak P=-(\partial_{x'}-i\partial_{y'})\frak P,\qquad \text{on }{\mathscr P}_-;
\end{equation}
and by \eqref{eq:272}, because $iA_1$ is purely imaginary,
\begin{equation}\label{eq:274}
\frak P=c,\qquad \text{on }\partial {\mathscr P}_-,
\end{equation}
where $c\in \mathbb R$ is a constant.
Applying $\partial_{x'}+i\partial_{y'}:=2\overline{ \partial}_{z'}$ to \eqref{eq:273} yields
\begin{equation}\label{eq:275}
\Delta \frak P= -2|U_{z'}|^2\qquad\text{on }{\mathscr P}_-.
\end{equation}
It is easy to check that for $y'\le 0$ and $t\in [0, T]$, $\paren{U, U_t, U_{z'}, \Psi_{z'}-1, \frac1{\Psi_{z'}}-1, \Psi_t}(\cdot+iy', t)\in L^2(\mathbb R)\cap L^\infty(\mathbb R)$, and $(U, \Psi, \frac1{\Psi_{z'}}, \frak P)\in C^1( \overline{\mathscr P}_-\times [0, T])$.
It is clear that the above process is reversible. From a solution $(U, \Psi, \frak P)\in C^1( \overline{\mathscr P}_-\times [0, T])$ of the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274}-\eqref{eq:271}, with $\paren{U, U_t, U_{z'}, \Psi_{z'}-1, \frac1{\Psi_{z'}}-1, \Psi_t}(\cdot+iy', t)\in L^2(\mathbb R)\cap L^\infty(\mathbb R)$ for $y'\le 0,\ t\in [0, T]$, $U(\cdot, t)$, $\Psi(\cdot, t)$ holomorphic in ${\mathscr P}_-$, and $b$ real valued, the boundary value $(Z(\alpha',t), Z_t(\alpha', t)):=(\Psi(\alpha',t), \bar U(\alpha', t))$ satisfies the interface equation \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}. Therefore the systems \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1} and \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274}-\eqref{eq:271}, with $U(\cdot, t)$, $\Psi(\cdot, t)$ holomorphic in ${\mathscr P}_-$, and $\Psi_{z'}(\cdot, t)\ne 0$, $b$ real valued, are equivalent in the smooth regime.
Assume $(U, \Psi)\in C( \overline{\mathscr P}_-\times [0, T])\cap C^1( {\mathscr P}_-\times (0, T))$ is a solution of the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274}, with $U(\cdot, t)$, $\Psi(\cdot, t)$ holomorphic in ${\mathscr P}_-$, assume in addition that $\Sigma(t)=\{Z=Z(\alpha',t):=\Psi(\alpha',t)\ | \ \alpha'\in\mathbb R\}$ is a Jordan curve with $$\lim_{|\alpha'|\to\infty} Z_{,\alpha'}(\alpha',t)=1.$$ Let $\Omega(t)$ be the domain bounded by $Z=Z(\cdot,t)$ from the above, then $Z=Z(\alpha',t) $, $\alpha'\in\mathbb R$ winds the boundary of $\Omega(t)$ exactly once. By the argument principle, $\Psi: \bar {\mathscr P}_-\to \bar \Omega(t)$ is one-to-one and onto, $\Psi^{-1}:\Omega(t)\to {\mathscr P}_-$
exists and is a holomorphic function; and by equation \eqref{eq:273} and the chain rule, \begin{equation}\label{eq:276}
(U\circ \Psi^{-1})_t+\bar U\circ \Psi^{-1}(U\circ \Psi^{-1})_{z}+(\partial_x-i\partial_y)(\frak P\circ \Psi^{-1})=i,\qquad \text{on }\Omega(t).
\end{equation}
Let $\bar {\bold v}=U\circ \Psi^{-1}$, $P=\frak P\circ \Psi^{-1}$. Observe that ${\bf v}\bar{\bf v}_{z}= ({\bold v}\cdot \nabla) \bar {\bold v}$. So $({\bf v}, P)$ satisfies the water wave equation \eqref{euler} in the domain $\Omega(t)$.
\subsection{Non-$C^1$ interfaces}
Assume that the interface $Z=Z(\cdot, t)$ has an angled crest at $\alpha'_0$ with interior angle $\nu$,
we know from the discussion in \S3.3.2 of \cite{kw} that if the acceleration is finite, then it is necessary that $\nu\le \pi$; and if $\nu<\pi$ then
$\frac 1{Z_{,\alpha'}}(\alpha'_0, t)=0$. We henceforth call those points at which $\frac 1{Z_{,\alpha'}}=0$ the singularities.
If the interface is allowed to be non-$C^1$ with interior angles at the crests $<\pi$, then the coefficient $\frac{A_1}{\abs{Z_{,\alpha'}}^2}$ of the second term on the left hand side of the first equation in \eqref{quasi-r} can be degenerate, and in this case
it is not clear if equation \eqref{quasi-r} is still hyperbolic.
In order to handle this situation, we need to understand how the singularities propagate. In what follows we derive the evolution equation for $\frac 1{Z_{,\alpha'}}$. We will also give the evolution equations for the basic quantities $\bar Z_t$ and $\bar Z_{tt}$.\footnote{In Lagrangian coordinates, the first equation in \eqref{quasi-r} is of the form $(\partial_t^2+a\nabla_{\bf n} )\bar z_t=f$, where $a=-\frac{\partial P}{\partial {\bf n}}=\frac{A_1}{|Z_{,\alpha'}|}\circ h$, and the Dirichlet--Neumann operator $\nabla_{\bf n}\circ h^{-1}=\frac{i}{|Z_{,\alpha'}|}\partial_{\alpha'}$. So at the singularities both $a$ and $\nabla_{\bf n}$ are degenerate.}
\subsection{Some basic evolution equations}
We begin with
$$\frac1{Z_{,\alpha'}}\circ h=\frac{h_\alpha}{z_\alpha},$$
taking derivative to $t$ yields,
$$\partial_t \paren{\frac1{Z_{,\alpha'}}\circ h}=\frac1{Z_{,\alpha'}}\circ h \paren{\frac{h_{t\alpha}}{h_\alpha}-\frac{z_{t\alpha}}{z_\alpha}};$$
precomposing with $h^{-1}$ gives
\begin{equation}\label{eq:dza}
(\partial_t+b\partial_{\alpha'})\paren{\frac1{Z_{,\alpha'}}}=\frac1{Z_{,\alpha'}} \paren{b_{\alpha'}-D_{\alpha'} Z_t}.
\end{equation}
The evolution equations for $\bar Z_t$ and $\bar Z_{tt}$ can be obtained from \eqref{interface-a1} and \eqref{quasi-l}.
We have, by \eqref{interface-a1},
\begin{equation}\label{eq:dzt}
(\partial_t+b\partial_{\alpha'})\bar Z_t:= \bar Z_{tt} =-i \frac {A_1}{Z_{,\alpha'}}+i.
\end{equation}
Using \eqref{interface-l} to replace $i\frak a$ by $-\frac{\bar z_{tt}-i} {\bar z_\alpha}$ in equation \eqref{quasi-l} yields
$$\bar z_{ttt}=(\bar z_{tt}-i) \paren{ \bar { D_\alpha z_t}+ \frac{\frak a_t}{\frak a}};$$
precomposing with $h^{-1}$ gives
\begin{equation}\label{eq:dztt}
(\partial_t+b\partial_{\alpha'})\bar Z_{tt}= (\bar Z_{tt}-i) \paren{ \bar { D_{\alpha'} Z_t}+ \frac{\frak a_t}{\frak a}\circ h^{-1}}.
\end{equation}
Equations \eqref{eq:dza}, \eqref{eq:dzt} and \eqref{eq:dztt} describe the time evolution of the basic quantities $\frac1{Z_{,\alpha'}}$, $\bar Z_t$ and $\bar Z_{tt}$. In fact, equations \eqref{eq:dza}-\eqref{eq:dzt} together with \eqref{b}, \eqref{a1} and \eqref{ba} give a complete evolutionary system for the holomorphic quantities $\frac1{Z_{,\alpha'}}$ and $\bar Z_t$, which characterize the fluid domain $\Omega(t)$ and the complex conjugate velocity $\bar {\vec{v}}$. We will explore this evolution system in our future work. These equations give a first indication that it is natural to study the water wave problem in a setting where bounds are only imposed on $\frac1{Z_{,\alpha'}}$, $\bar Z_t$ and their derivatives.
\subsection{An important equation} Here we record an important equation, which is obtained by rearranging the terms of \eqref{interface-a1}.
\begin{equation}\label{aa1}
\dfrac1{Z_{,\alpha'}}= i\dfrac{\bar Z_{tt}-i}{A_1}.
\end{equation}
\section{Well-posedness in a broader class that includes non-$C^1$ interfaces.}\label{main-results}
We are now ready to study the Cauchy problem for the
water wave equation \eqref{euler}
in a regime that allows for non-$C^1$ interfaces.
We begin with an a-priori estimate.
\subsection{A-priori estimate for water waves with angled crests}\label{a priori}
Motivated by the question of the interaction of the free interface with a fixed vertical boundary, in \cite{kw}, Kinsey and the author studied the water wave equation \eqref{euler} in a regime that includes non-$C^1$ interfaces with angled crests in a periodic setting,
constructed an energy functional and proved an a-priori estimate
which does not require a positive lower bound for $\frac1{\abs{Z_{,\alpha'}}}$.
A similar result holds for the whole line case. While a similar proof as that in \cite{kw} applies to the whole line, for the sake of completeness,
we will provide a slightly different argument in \S\ref{proof0}. In the first proof in \cite{kw}, we expanded and then re-organized the terms to ensure that there are no further cancellations and the estimates can be closed. Here instead we will rely on the estimates for the quantities $b_{\alpha'}$, $A_1$ and their derivatives.\footnote{These estimates become available in the work \cite{wu7}. The same results in the current paper hold in the periodic setting.
}
Let
\begin{equation} \label{eq:ea}
{\bf E}_a(t)=\int\frac1{A_1}|Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) D_{\alpha'}\bar Z_t |^2\,d\alpha'+ \nm{ D_{\alpha'}\bar Z_t(t)}_{\dot H^{1/2}}^2,
\end{equation}
and
\begin{equation} \label{eq:eb}
{\bf E}_b(t) = \int\frac1{A_1}\abs{Z_{,\alpha'}(\partial_t+b\partial_{\alpha'})\paren{ \frac1{Z_{,\alpha'}}D_{\alpha'}^2\bar Z_t }}^2\,d\alpha'+ \nm{\frac1{Z_{,\alpha'}}D_{\alpha'}^2\bar Z_t (t)}_{\dot{H}^{1/2}}^2.
\end{equation}
Let
\begin{equation}\label{energy}
\frak E(t)= {\bf E}_a(t)+{\bf E}_b(t)+ \|\bar{Z}_{t,\alpha'}(t)\|_{L^2}^2+\| D_{\alpha'}^2 \bar{Z}_t(t)\|_{L^2}^2+\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}(t)}^2_{L^2} + \abs{\frac1{Z_{,\alpha'}}(0, t)}^2.
\end{equation}
\begin{theorem}[cf. Theorem 2 of \cite{kw} for the periodic version]\label{prop:a priori}
Let
$Z=Z(\cdot,t)$, $t\in [0, T]$ be a solution of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, satisfying
$(Z_{t}, Z_{tt})\in C^l([0, T], H^{s+1/2-l}(\mathbb R)\times H^{s-l}(\mathbb R))$, $l=0,1$ for some $s\ge 4$. There is a polynomial $C$ with universal nonnegative coefficients, such that
\begin{equation}\label{a priori-eq}
\frac{d}{dt}\frak E(t)\le C(\frak E(t)),\qquad \text{for } t\in [0, T].
\end{equation}
\end{theorem}
For the sake of completeness we will give a proof of Theorem~\ref{prop:a priori} in \S\ref{proof}.
\begin{remark}\label{remark3.2}
It appears that there is an $\infty\cdot 0$ ambiguity in the definition of ${\bf E}_a$ and ${\bf E}_b$. This can be resolved by replacing the ambiguous quantities by the right hand sides of \eqref{2008-1} and \eqref{2010-2}. The same remark applies to Lemmas~\ref{basic-e}, ~\ref{basic-4-lemma},~\ref{dlemma1}. We opt for the current version for the clarity of the origins of the definitions and the more intuitive proofs.\footnote{The assumptions in Theorems~\ref{prop:a priori},~\ref{prop:a-priori},~\ref{unique} and Proposition~\ref{prop:energy-eq} are consistent with the completeness of the evolutionary equations \eqref{eq:dza}-\eqref{eq:dzt}. We mention that to obtain the well-posedness result, Theorem~\ref{th:local}, we only apply Theorems~\ref{prop:a priori},~\ref{prop:a-priori},~\ref{unique} and Proposition~\ref{prop:energy-eq} to solutions that satisfy in addition that $Z_{,\alpha'}\in L^\infty$.}
By \eqref{eq:dza} and product rules,
\begin{equation}\label{2008-1}
Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) D_{\alpha'}\bar Z_t=(b_{\alpha'}-D_{\alpha'} Z_t) \bar Z_{t,\alpha'}+(\partial_t+b\partial_{\alpha'}) \bar Z_{t,\alpha'}=
\bar Z_{tt,\alpha'}- (D_{\alpha'} Z_t) \bar Z_{t,\alpha'},
\end{equation}
and
\begin{equation}\label{2010-2}
Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) \paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}=(b_{\alpha'}-D_{\alpha'} Z_t)D^2_{\alpha'}\bar Z_t+ (\partial_t+b\partial_{\alpha'}) D^2_{\alpha'}\bar Z_t.
\end{equation}
Let
\begin{equation}\label{2010-3}
\begin{aligned}
\frak e(t)= \nm{\bar Z_{tt,\alpha'}(t)}_{L^2}^2&+\nm{D_{\alpha'} \bar Z_t(t)}_{\dot H^{1/2}}^2+ \nm{ D^2_{\alpha'}\bar Z_{tt}(t)}_{L^2}^2+\nm{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t (t)}_{\dot H^{1/2}}^2\\&+
\|\bar{Z}_{t,\alpha'}(t)\|_{L^2}^2+\| D_{\alpha'}^2 \bar{Z}_t(t)\|_{L^2}^2+\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}(t)}^2_{L^2} + \abs{\frac1{Z_{,\alpha'}}(0, t)}^2.
\end{aligned}
\end{equation}
It is easy to check that the argument in \S\ref{basic-quantities} gives
\begin{equation}\label{equi-1}
\frak E(t)\lesssim c_1( \frak e(t) ),\qquad \text{and } \qquad \frak e(t)\lesssim c_2(\frak E(t)),
\end{equation}
for some universal polynomials $c_1=c_1(x)$ and $c_2=c_2(x)$.
\end{remark}
In fact, as was shown in \S10 in \cite{kw}, we have the following characterization, which is essentially a consequence of \eqref{equi-1} and equation \eqref{aa1}, of the energy functional $\frak E$ in terms of the holomorphic quantities $\frac1{Z_{,\alpha'}}$ and $\bar Z_t$. Since the proof in \cite{kw} applies to the current setting,
we omit the proof.
Let \begin{equation}\label{energy1}
\begin{aligned}
\mathcal E(t)=\|\bar Z_{t,\alpha'}(t)\|_{L^2}^2&+ \|D_{\alpha'}^2\bar Z_t(t)\|_{L^2}^2+\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}(t)}_{L^2}^2+\nm{D_{\alpha'}^2\frac1{Z_{,\alpha'}}(t)}_{L^2}^2\\&+\nm{\frac1{Z_{,\alpha'} }D_{\alpha'}^2\bar Z_t (t) }_{\dot H^{1/2}}^2+\| D_{\alpha'}\bar Z_t (t) \|_{\dot H^{1/2}}^2+\abs{\frac1{Z_{,\alpha'}}(0,t)}^2.
\end{aligned}
\end{equation}
\begin{proposition}[A characterization of $\frak E$ via $\mathcal E$, cf. \S10 of \cite{kw}] \label{prop:energy-eq}
There are polynomials $C_1=C_1(x)$ and $C_2=C_2(x)$,
with nonnegative universal coefficients, such that for any solution $Z$ of \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, satisfying the assumption of Theorem~\ref{prop:a priori},
\begin{equation}\label{energy-equiv}
\mathcal E(t)\le C_1(\frak E(t)),\qquad\text{and}\quad \frak E(t)\le C_2(\mathcal E(t)).
\end{equation}
\end{proposition}
A corollary of Theorem~\ref{prop:a priori} and Proposition~\ref{prop:energy-eq} is the following
\begin{theorem}[A-priori estimate \cite{kw}]\label{prop:a-priori}
Let
$Z=Z(\cdot,t)$, $t\in [0, T']$ be a solution of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, satisfying the assumption of Theorem~\ref{prop:a priori}.
There are constants $T=T(\mathcal E(0))>0$, $C=C(\mathcal E(0))>0$ that depend only on $\mathcal E(0)$, and with $-T(e)$, $C(e)$ increasing with respect to $e$, such that
\begin{equation}\label{a priori-e}
\sup_{[0, \min\{T, T'\}]}\mathcal E(t)\le C(\mathcal E(0))<\infty.
\end{equation}
\end{theorem}
\begin{remark}
1. Let $t$ be fixed, $s\ge 2$, and assume $Z_t(t)\in H^s({\mathbb{R}})$. By Proposition~\ref{B2} and Sobolev embeddings, $A_1(t)-1=-\Im[Z_t,\mathbb H]\bar Z_{t,\alpha'}\in H^s({\mathbb{R}})$; and by \eqref{aa1}, $Z_{tt}(t)\in H^s(\mathbb R)$ is equivalent to $\frac1{Z_{,\alpha'}}(t)-1\in H^s(\mathbb R)$.
2. Assume that $\paren{Z_t(t), \frac1{Z_{,\alpha'}}(t)-1}\in (H^{s+1/2}(\mathbb R), H^s(\mathbb R))$, $s\ge 2$, or equivalently $(Z_t(t), Z_{tt}(t))\in (H^{s+1/2}(\mathbb R), H^s(\mathbb R))$.
It is easy to check
that $\mathcal E(t)<\infty$. So the class where $\mathcal E(t)<\infty$ allows for interfaces and velocities in Sobolev classes; it is clear that the class where $\mathcal E(t)<\infty$ also allows for $\frac1{Z_{,\alpha'}}=0$, that is, singularities on the interface.
\end{remark}
\subsubsection{A description of the class $\mathcal E<\infty$ in $\mathscr P_-$}\label{e-1}
We give here an equivalent description of the class $\mathcal E<\infty$ in the lower half plane $\mathscr P_-$.
Let $1< p\le \infty$, and
\begin{equation}\label{poisson}
K_y(x)=\frac{-y}{\pi(x^2+y^2)},\qquad y<0
\end{equation}
be the Poisson kernel.
We know for any holomorphic function $G$ on $P_-$,
$$\sup_{y<0}\|G(x+iy)\|_{L^p(\mathbb R,dx)}<\infty$$
if and only if there exists $g\in L^p(\mathbb R)$ such that $G(x+iy)=K_y\ast g(x)$. In this case,
$\sup_{y<0}\|G(x+iy)\|_{L^p(\mathbb R,dx)}=\|g\|_{L^p}$. Moreover, if $g\in L^p(\mathbb R)$, $1<p<\infty$, then $\lim_{y\to 0-} K_y\ast g(x)=g(x)$ in $L^p(\mathbb R)$ and if $g\in L^\infty\cap C(\mathbb R)$, then $\lim_{y\to 0-} K_y\ast g(x)=g(x)$ for all $x\in\mathbb R$.
Let $Z=Z(\cdot, t)$ be a solution of \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, satisfying the assumption of Theorem~\ref{prop:a priori};
let $\Psi$, $U$ be the holomorphic functions as given in \S\ref{general-soln}, so
$$U(x'+iy',t)=K_{y'}\ast \bar Z_t(x', t),\qquad \frac1{\Psi_{z'}}(x'+iy',t)=K_{y'}\ast \frac1{Z_{,\alpha'}}(x',t),\qquad \text{for }y'< 0.$$
Let $z'=x'+iy'$. We have
\begin{equation}\label{domain-energy1}
\mathcal E(t)=\mathcal E_1(t)+\abs{\frac1{Z_{,\alpha'} }(0, t) }^2,
\end{equation}
where
\begin{equation}\label{domain-energy}
\begin{aligned}
\mathcal E_1(t)&:=\sup_{y'<0}\nm{U_{z'}(\cdot+iy', t)}_{L^2(\mathbb R)}^2+\sup_{y'<0}\nm{\frac1{\Psi_{z'}}\partial_{z'}\paren{\frac1{\Psi_{z'} }U_{z'}}(\cdot+iy',t)}_{L^2(\mathbb R)}^2
\\&+\sup_{y'<0}\nm{\frac1{\{\Psi_{z'}\}^2}\partial_{z'}\paren{\frac1{\Psi_{z'} }U_{z'}}(\cdot+iy',t)}_{\dot H^{1/2}(\mathbb R)}^2+\sup_{y'<0}\nm{\frac1{\Psi_{z'} }U_{z'}(\cdot+iy',t)}_{\dot H^{1/2}(\mathbb R)}^2
\\&+\sup_{y'<0}\nm{ \frac1{\Psi_{z'} }\partial_{z'}\paren{\frac1{\Psi_{z'} }\partial_{z'}\paren{\frac1{\Psi_{z'} }}}(\cdot+iy',t) }_{L^2(\mathbb R)}^2+\sup_{y'<0}\nm{ \partial_{z'}\paren{\frac1{\Psi_{z'} }}(\cdot+iy',t) }_{L^2(\mathbb R)}^2.
\end{aligned}
\end{equation}
\subsection{A blow-up criterion and a stability inequality}\label{prelim-result}
The main objective of this paper is to show the unique solvability of the Cauchy problem for the water wave equation \eqref{euler} in the class where $\mathcal E<\infty$. We will build on the existing result, Theorem~\ref{prop:local-s}, by mollifying the initial data, constructing an approximating sequence and passing to the limit. However the existence time of the solution as given in Theorem~\ref{prop:local-s} depends on the Sobolev norm of the initial data. In order to have an approximating sequence defined on a time interval that has a uniform positive lower bound, we need a blow-up criterion; a uniqueness and stability theorem will allow us to prove the convergence of the sequence, and the uniqueness and stability of the solutions obtained by this process.
Let the initial data be as given in \S\ref{id-r}.
\begin{theorem}[A blow-up criterion via $\mathcal E$]\label{blow-up}
Let $s\ge 4$. Assume $Z_{,\alpha'}(0)\in L^\infty (\mathbb R)$, $Z_t(0)\in H^{s+1/2}(\mathbb R)$ and $Z_{tt}(0)\in H^s(\mathbb R)$. Then there is $T>0$, such that on $[0, T]$, the initial value problem of \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1} has a unique solution
$Z=Z(\cdot, t)$, satisfying
$(Z_{t}, Z_{tt})\in C^l([0, T], H^{s+1/2-l}(\mathbb R)\times H^{s-l}(\mathbb R))$ for $l=0,1$, and $Z_{,\alpha'}-1\in C([0, T], H^s(\mathbb R))$.
Moreover if $T^*$ is the supremum over all such times $T$, then either $T^*=\infty$, or $T^*<\infty$, but
\begin{equation}\label{eq:30}
\sup_{[0, T^*)}\mathcal E(t)=\infty.
\end{equation}
\end{theorem}
The proof for Theorem~\ref{blow-up} will be given in \S\ref{proof}. We now give the uniqueness and stability theorem.
Let $Z=Z(\alpha',t)$, $\mathfrak{Z}=\mathfrak{Z}(\alpha',t)$ be solutions of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, with $z=z(\alpha,t)$, $\mathfrak{z}=\mathfrak{z}(\alpha,t)$ being their re-parametrizations in Lagrangian coordinates, and their initial data as given in \S\ref{id-r}; let $$Z_t,\ Z_{tt},\ Z_{,\alpha'},\ z_\alpha,\ h,\ A_1,\ \mathcal A, \ b,\ \mathfrak a, \ D_{\alpha'},\ D_\alpha, \ \mathfrak H,\ \mathfrak E(t), \ \mathcal E(t),\quad etc. $$ be the quantities associated with $Z$, $z$ as defined in \S\ref{prelim}, \S\ref{a priori}, and
$$\mathfrak{Z}_t,\ \mathfrak{Z}_{tt},\ \mathfrak{Z}_{,\alpha'},\ \mathfrak{z}_\alpha,\ \tilde h,\ \tilde {A_1},\ \tilde {\mathcal A}, \ \tilde b,\ \tilde{\mathfrak{a}},\ \tilde D_{\alpha'},\ \tilde D_\alpha, \ \tilde{\mathfrak H}, \ \tilde{\mathfrak E}(t), \ \tilde{\mathcal E}(t),\quad etc.$$
be the corresponding quantities for $\mathfrak{Z}$, $\mathfrak{z}$. Define
\begin{equation}\label{def-l}
l= \tilde h\circ h^{-1},
\end{equation}
so $l(\alpha', 0)=\alpha'$, for $\alpha'\in \mathbb{R}$.
\begin{theorem}[Uniqueness and Stability in $\mathcal E<\infty$]\label{unique}
Assume that $Z$, $\mathfrak Z$ are solutions of equation \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, satisfying
$(Z_t, Z_{tt}), (\mathfrak Z_t, \mathfrak Z_{tt})\in C^l([0, T], H^{s+1/2-l}(\mathbb R)\times H^{s-l}(\mathbb R))$ for $l=0,1$, $s\ge 4$.
There is a constant $C$, depending only on $T$, $\sup_{[0, T]} \mathcal E(t)$ and $\sup_{[0, T]}\tilde{\mathcal E}(t)$, such that
\begin{equation}\label{stability}
\begin{aligned}
&\sup_{[0, T]}\paren{\|\paren{\bar Z_t-\bar {\mathfrak{Z}}_t\circ l}(t)\|_{\dot{H}^{1/2}}+\|\paren{\bar Z_{tt}-\bar {\mathfrak{Z}}_{tt}\circ l}(t)\|_{\dot{H}^{1/2}}+\nm{\paren{\frac1{ Z_{,\alpha'}}-\frac 1{ {\mathfrak{Z}}_{,\alpha'}}\circ l}(t)}_{\dot{H}^{1/2}}}+\\&\sup_{[0, T]}\paren{\|\paren{l_{\alpha'}-1}(t)\|_{L^2}+\|D_{\alpha'} Z_t-(\tilde D_{\alpha'} {\mathfrak{Z}}_t)\circ l\|_{L^2}
+\|(A_1-\tilde {A_1}\circ l)(t)\|_{L^2}+\|(b_{\alpha'}-\tilde b_{\alpha'}\circ l)(t)\|_{L^2}}\\&\le C\paren{ \|\paren{\bar Z_t-\bar {\mathfrak{Z}}_t}(0)\|_{\dot{H}^{1/2}}+\|\paren{\bar Z_{tt}-\bar {\mathfrak{Z}}_{tt}}(0)\|_{\dot{H}^{1/2}}+\nm{\paren{\frac1{ Z_{,\alpha'}}-\frac 1{ {\mathfrak{Z}}_{,\alpha'}}}(0)}_{\dot{H}^{1/2}}}\\&+C\paren{\|\paren{D_{\alpha'} Z_t-(\tilde D_{\alpha'} {\mathfrak{Z}}_t)}(0)\|_{L^2}
+\nm{\paren{\frac1{ Z_{,\alpha'}}-\frac 1{ {\mathfrak{Z}}_{,\alpha'}}}(0)}_{L^\infty}}.
\end{aligned}
\end{equation}
\end{theorem}
By precomposing with $h$, we see that inequality \eqref{stability} effectively gives control of the differences, $z_t-{\mathfrak{z}}_t$, $z_{tt}-{\mathfrak{z}}_{tt}$ etc., in Lagrangian coordinates.
Notice that in the stability inequality \eqref{stability}, we control the $\dot{H}^{1/2}$ norms of the differences of $Z_t$ and $\mathfrak{Z}_t\circ l$, $Z_{tt}$ and $\mathfrak{Z}_{tt}\circ l$, and $\frac1{ Z_{,\alpha'}}$ and $\frac 1{ \mathfrak{Z}_{,\alpha'}}\circ l$, and the $L^2$ norms of the differences of $D_{\alpha'} Z_t$ and $(\tilde D_{\alpha'} \mathfrak{Z}_t)\circ l$, and $A_1$ and $\tilde {A_1}\circ l$, while the energy functional $\mathfrak E(t)$, or equivalently $\mathcal E(t)$, gives us control of the $L^2$ norms of $Z_{t,\alpha'}$, $Z_{tt,\alpha'}$ and $\partial_{\alpha'}\frac1{ Z_{,\alpha'}}$, and the $L^\infty$ and $\dot H^{1/2}$ norms \footnote{see \S\ref{basic-quantities} and \S\ref{hhalf-norm}.} of $D_{\alpha'} Z_t$ and $A_1$. Indeed, because the coefficient $\frac {A_1}{|Z_{,\alpha'}|^2}$ in equation \eqref{quasi-r} is solution dependent and possibly degenerative, for given solutions $Z=Z(\alpha', t)$, $\mathfrak Z=\mathfrak Z(\alpha', t)$ of equation \eqref{quasi-r}-\eqref{aux}, the sets of zeros in $\frac1{Z_{,\alpha'}}(t)$ and $\frac1{\mathfrak Z_{,\alpha'}}(t)$ are different and move with the solutions, hence one cannot simply subtract the two solutions and perform energy estimates, as is usually done in classical cases.
Our approach is to first get a good understanding of the evolution of the degenerative factor $\frac1{Z_{,\alpha'}}$ via equation \eqref{eq:dza}; this allows us to construct a series of model equations that capture the key degenerative features of the equation \eqref{quasi-r} to get some ideas of what would work. We then tailor the ideas to the specific structure of our equations. We give the proof for Theorem~\ref{unique} in \S\ref{proof3}.
\subsection{The wellposedness of the water wave equation \eqref{euler} in $\mathcal E<\infty$}\label{main}
Since it can be tricky to define solutions for the interface equation \eqref{interface-r} when the interface is allowed to have singularities, we will directly solve the water wave equation \eqref{euler} via the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274}-\eqref{eq:271}.
As we know from the discussions in \S\ref{general-soln} and \S\ref{e-1}, equation \eqref{euler} is equivalent to \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274}, for $(U, \Psi)\in C(\overline{\mathscr P}_-\times [0, T])\cap C^1({\mathscr P}_-\times (0, T))$ with $U(\cdot, t)$, $\Psi(\cdot, t)$ holomorphic, provided $\Psi(\cdot, t)$ is a Jordan curve; and the energy functionals $\mathcal E=\mathcal E_1+|\frac1{Z_{,\alpha'}}(0,t)|^2$.
Observe that the energy functional $\mathcal E(t)$ does not give direct control of the lower order norms $\|Z_t(t)\|_{L^2}$, $\|Z_{tt}(t)\|_{L^2}$ and $\nm{\frac1{Z_{,\alpha'}}(t)-1}_{L^2(\mathbb R)}$;
in the class where we want to solve the water wave equation we require in addition that $Z_t(t)\in {L^2(\mathbb R)}$ and $\frac1{Z_{,\alpha'}}(t)-1\in {L^2(\mathbb R)}$. This is consistent with the decay assumption made in \S\ref{notation1}.
\subsubsection{The initial data}\label{id}
Let $\Omega(0)$ be the initial fluid domain, with the interface $\Sigma(0):=\partial\Omega(0)$ being a Jordan curve that tends to horizontal lines at infinity, and let $\Psi(\cdot, 0):{\mathscr P}_-\to \Omega(0)$
be the Riemann mapping such that $\lim_{z'\to\infty} \partial_{z'}\Psi(z', 0)=1$. We know $\Psi(\cdot, 0) :{\mathscr P}_-\to \Omega(0)$ is a homeomorphism. Let $Z(\alpha', 0):=\Psi(\alpha', 0)$ for $\alpha'\in\mathbb R$, so $Z=Z(\cdot, 0):\mathbb R\to\Sigma(0)$ is the parametrization of $\Sigma(0)$ in the Riemann mapping variable. Let $\mathbf v(\cdot, 0):\Omega(0)\to \mathbb C$ be the initial velocity field, and $U(z', 0)=\bar{\mathbf v}(\Psi(z', 0),0)$. Assume $\bar{\mathbf v}(\cdot, 0)$ is holomorphic on $\Omega(0)$, so $U(\cdot, 0)$ is holomorphic on ${\mathscr P}_-$. Assume that the energy functional $\mathcal E_{1}(0)$ for $(U(\cdot, 0),\Psi(\cdot, 0))$ as given in \eqref{domain-energy} satisfies $\mathcal E_1(0)<\infty$.
Assume in addition that \footnote{This is equivalent to $\|U(\cdot+i0, 0)\|_{L^2(\mathbb R)}+ \nm{\frac1{Z_{,\alpha'}} (0)-1}_{L^2(\mathbb R)} <\infty$, see \S\ref{e-1}.}
\begin{equation}\label{iid}
c_0:=\sup_{y'<0}\|U(\cdot+iy', 0)\|_{L^2(\mathbb R)}+\sup_{y'<0}\nm{\frac1{\Psi_{z'}(\cdot+iy',0)}-1}_{L^2(\mathbb R)}<\infty.
\end{equation}
In light of the discussion in \S\ref{general-soln} and the uniqueness and stability Theorem~\ref{unique}, we define solutions for the Cauchy problem of the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274} as follows.
\begin{definition}\label{de}
Let the data be as given in \S\ref{id}, and
$(U, \Psi, \mathfrak P)\in C(\bar{\mathscr P}_-\times [0, T])$, with $(U, \Psi)\in C^1(\mathscr P_-\times (0, T))$, $\lim_{z'\to\infty} (U, \Psi_{z'}-1)(z',t)=(0,0)$ and
$U(\cdot, t)$, $\Psi(\cdot, t)$ holomorphic in the lower half plane ${\mathscr P}_-$ for $t\in [0, T]$. We say $(U, \Psi, \mathfrak P)$ is a solution of the Cauchy problem of the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274}, if it satisfies the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274} on $\mathscr P_-\times [0, T]$, and if there is a sequence $Z_n=Z_n(\alpha',t)$, $(\alpha',t)\in \mathbb R\times[0, T]$, which are solutions of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, satisfying $(Z_{n,t}, \frac1{\partial_{\alpha'} Z_{n}}-1, \partial_{\alpha'} Z_{n} -1 )\in C^l([0, T], H^{s+1/2-l}(\mathbb R)\times H^{s-l}(\mathbb R)\times H^{s-l}(\mathbb R) )$ for some $s\ge 4$, $l=0,1$, $\sup_{n, t\in [0, T]} \mathcal E_n(t)<\infty$ and $\sup_{n, t\in [0, T]}(\|Z_{n,t}(t)\|_{L^2}+ \nm{\frac1{\partial_{\alpha'} Z_{n}}(t)-1}_{L^2})<\infty$, and the holomorphic extension $(U_n, \Psi_n)$ in ${\mathscr P}_-$ of $(\bar Z_{n,t}, Z_n)$, with $\lim_{z'\to\infty} (U_n, \partial_{z'}\Psi_{n}-1)(z',t)=(0,0)$, and the function $\mathfrak P_n$ defined by \eqref{eq:273}-\eqref{eq:274}-\eqref{eq:275},
such that $\lim_{n\to \infty} U_n=U$, $\lim_{n\to \infty} \Psi_n=\Psi$, $\lim_{n\to \infty} \mathfrak P_n=\mathfrak P$ and $\lim_{n\to\infty}\frac1{\partial_{z'}\Psi_n}= \frac1{\partial_{z'}\Psi}$, uniformly on compact subsets of $\bar{\mathscr P}_-\times [0, T]$, and the data $(Z_n(\cdot, 0), Z_{n,t}(\cdot,0))$ converges in the topology of the right hand side of the inequality \eqref{stability} to the trace $(\Psi(\cdot+i0, 0), \bar U(\cdot+i0, 0))$.
\end{definition}
Let $\mathcal E(0)=\mathcal E_1(0)+|\frac1{Z_{,\alpha'}}(0,0)|^2$.
\begin{theorem}[Local wellposedness in the $\mathcal E<\infty$ regime]\label{th:local}
1. There exists $T>0$, depending only on $\mathcal E(0)$, such that on $[0,T]$, the initial value problem of the system \eqref{eq:273}-\eqref{eq:275}-\eqref{eq:274} has a unique solution $(U, \Psi, \mathfrak P)$, with the properties that $U(\cdot, t),\Psi(\cdot, t)$ are holomorphic on ${\mathscr P}_-$ for each fixed $t\in [0, T]$, $U, \Psi, \frac1{\Psi_{z'}}, \mathfrak P$ are continuous on $\bar {\mathscr P}_-\times [0, T]$, $U, \Psi, \mathfrak P$ are continuously differentiable on ${\mathscr P}_-\times [0, T]$,
$\sup_{[0, T]}\mathcal E_1(t)<\infty$ and
\begin{equation}\label{iidt}
\sup_{[0, T]}\sup_{y'<0}\paren{\|U(\cdot+iy', t)\|_{L^2(\mathbb R)}+\|\frac1{\Psi_{z'}(\cdot+iy',t)}-1\|_{L^2(\mathbb R)}}<\infty.
\end{equation}
The solution $(U, \Psi, \mathfrak P)$ gives rise to a solution $(\bar{\mathbf v}, P)=(U\circ \Psi^{-1}, \mathfrak P\circ \Psi^{-1})$ of the water wave equation \eqref{euler} so long as $\Sigma(t)=\{Z=\Psi(\alpha',t)\ | \ \alpha'\in \mathbb R\}$ is a Jordan curve.
2. If in addition the initial interface is chord-arc, that is, $Z_{,\alpha'}(\cdot,0)\in L^1_{loc}(\mathbb R)$ and there is $0<\delta<1$, such that
$$\delta \int_{\alpha'}^{\beta'} |Z_{,\alpha'}(\gamma,0)|\,d\gamma\le |Z(\alpha', 0)-Z(\beta', 0)|\le \int_{\alpha'}^{\beta'} |Z_{,\alpha'}(\gamma,0)|\,d\gamma,\quad \forall -\infty<\alpha'< \beta'<\infty,$$
then there is $T>0, T_1>0$, $T, T_1$ depending only on $\mathcal E(0)$, such that on $[0, \min\{T, \frac{\delta}{T_1}\}]$, the initial value problem of the water wave equation \eqref{euler} has a unique solution, satisfying $\mathcal E_1(t)<\infty$ and \eqref{iidt}, and the interface $Z=Z(\cdot, t)$ remains chord-arc.
\end{theorem}
We prove Theorem~\ref{th:local} in \S\ref{proof2}.
\section{The proof of Theorem~\ref{prop:a priori} and Theorem~\ref{blow-up}}\label{proof}
We need the following basic inequalities in the proof of Theorems~\ref{prop:a priori} and \ref{blow-up}.
The basic energy inequality in Lemma~\ref{basic-e} has already appeared in \cite{wu3}. We give a proof nevertheless.
\begin{lemma}[Basic energy inequality I, cf. \cite{wu3}, Lemma 4.1]\label{basic-e}
Assume $\Theta=\Theta(\alpha',t)$, $\alpha'\in \mathbb R$, $t\in [0, T)$ is smooth, decays fast at the spatial infinity, satisfying $(I-\mathbb H)\Theta=0$ and
\begin{equation}\label{eq:40}
(\partial_t+b\partial_{\alpha'})^2\Theta+i\mathcal A\partial_{\alpha'} \Theta=G_\Theta.
\end{equation}
Let
\begin{equation}\label{eq:41}
E_\Theta(t):=\int\frac1{\mathcal A}|(\partial_t+b\partial_{\alpha'})\Theta|^2\,d\alpha'+ i\int(\partial_{\alpha'}\Theta) \bar\Theta\,d\alpha'.
\end{equation}
Then
\begin{equation}\label{eq:42}
\frac d{dt} E_\Theta(t)\le \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty} E_\Theta(t)+2 E_\Theta(t)^{1/2}\paren{\int\frac{|G_\Theta|^2}{\mathcal A}\,d\alpha'}^{1/2}.
\end{equation}
\end{lemma}
\begin{remark}
By $\Theta=\mathbb H\Theta$ and \eqref{def-hhalf},
\begin{equation}\label{hhalf}
i\int(\partial_{\alpha'}\Theta) \bar\Theta\,d\alpha'=\int(i\partial_{\alpha'}\mathbb H \Theta) \bar\Theta\,d\alpha' =\|\Theta\|_{\dot{H}^{1/2}}^2\ge 0.
\end{equation}
\end{remark}
\begin{proof}
By a change of the variables in \eqref{eq:40}, we have
$$(\partial_t^2+i\mathfrak a \partial_\alpha)(\Theta\circ h)=G_\Theta\circ h,$$
where $\mathfrak a h_\alpha=\mathcal A\circ h$; and in \eqref{eq:41},
$$E_\Theta(t)=\int\frac1{\mathfrak a}|\partial_t(\Theta\circ h)|^2\,d\alpha+\int i\partial_\alpha (\Theta\circ h) \bar{\Theta\circ h}\,d\alpha.$$
So
\begin{equation}\label{eq:43}
\begin{aligned}
\frac d{dt} E_\Theta(t)&=\int 2\Re\braces{\frac1{\mathfrak a}\partial_t^2(\Theta\circ h) \partial_t(\bar{\Theta\circ h})}-\frac{\mathfrak a_t}{\mathfrak a^2}|\partial_t(\Theta\circ h)|^2+2\Re \braces{i\int\partial_\alpha(\Theta\circ h) \partial_t(\bar{\Theta\circ h}) \,d\alpha }
\\&= 2\Re \int \frac1{\mathfrak a} G_\Theta\circ h\, \partial_t(\bar{\Theta\circ h})\,d\alpha-\int \frac{\mathfrak a_t}{\mathfrak a^2}|\partial_t(\Theta\circ h)|^2\,d\alpha,
\end{aligned}
\end{equation}
where we used integration by parts in the first step. Changing back to the Riemann mapping variable, applying the Cauchy-Schwarz inequality and \eqref{hhalf} yields \eqref{eq:42}.
\end{proof}
We also need the following simple energy inequality.
\begin{lemma}[Basic energy inequality II]\label{basic-e2} Assume $\Theta=\Theta(\alpha',t)$ is smooth and decays fast at the spatial infinity. And assume
\begin{equation}\label{evolution-equation}
(\partial_t+b\partial_{\alpha'})\Theta=g_\Theta.
\end{equation}
Then
\begin{equation}\label{basic-2}
\frac{d}{dt}\nm{\Theta(t)}_{L^2}^2\le 2\nm{g_\Theta(t)}_{L^2}\nm{\Theta(t)}_{L^2}+\|b_{\alpha'}(t)\|_{L^\infty}\nm{\Theta(t)}_{L^2}^2.
\end{equation}
\end{lemma}
\begin{proof}
We have, upon changing variables,
$$\int |\Theta(\alpha',t)|^2\,d\alpha'=\int |\Theta( h(\alpha, t),t)|^2h_\alpha \,d\alpha,$$
so
\begin{equation}
\begin{aligned}
\frac{d}{dt}\int |\Theta(\alpha',t)|^2\,d\alpha' &=\int 2\Re \partial_t(\Theta\circ h)\bar{\Theta\circ h}\ h_\alpha+ |\Theta\circ h|^2h_{t\alpha} \,d\alpha\\&= \int 2\Re \paren{(\partial_t+b\partial_{\alpha'})\Theta}\bar\Theta (\alpha',t) + b_{\alpha'} |\Theta(\alpha',t)|^2\,d\alpha';
\end{aligned}
\end{equation}
here in the second step we changed back to the Riemann mapping variable, and used the fact that $\frac{h_{t\alpha}}{h_\alpha}=b_{\alpha'}\circ h$. Inequality \eqref{basic-2} follows from the Cauchy-Schwarz inequality.
\end{proof}
Let \begin{equation}\label{P}
\mathcal P=(\partial_t+b\partial_{\alpha'})^2+i\mathcal A\partial_{\alpha'}.
\end{equation} We need two more basic inequalities.
\begin{lemma}[Basic inequality III]\label{basic-3-lemma} Assume that $\Theta=\Theta(\alpha',t)$ is smooth and decays fast at the spatial infinity, and assume $\Theta=\mathbb H\Theta$. Then
\begin{equation}\label{basic-3}
\begin{aligned}
&\nm{(I-\mathbb H)\paren{
\mathcal P\Theta}(t)
}_{L^2}\le \nm{\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})b}_{L^\infty}\nm{\Theta(t)}_{L^2}\\&\qquad\qquad\qquad+\nm{b_{\alpha'}}_{L^\infty}\nm{(\partial_t+b\partial_{\alpha'})\Theta(t)}_{L^2}+\nm{ b_{\alpha'}}_{L^\infty}^2\nm{\Theta(t)}_{L^2}+\nm{\mathcal A_{\alpha'}}_{L^\infty}\nm{\Theta(t)}_{L^2}.
\end{aligned}
\end{equation}
\end{lemma}
\begin{proof}
Because $\Theta=\mathbb H\Theta$, we have
$$(I-\mathbb H)(\mathcal P\Theta)=\bracket{\mathcal P, \mathbb H}\Theta;$$
and by \eqref{eq:c25},
$$\bracket{\mathcal P, \mathbb H}\Theta=\bracket{(\partial_t+b\partial_{\alpha'})b,\mathbb H}\partial_{\alpha'} \Theta+2\bracket{b,\mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\Theta-[b,b; \partial_{\alpha'} \Theta]+\bracket{i\mathcal A,\mathbb H}\partial_{\alpha'} \Theta.
$$
Inequality \eqref{basic-3} follows from \eqref{3.20}.
\end{proof}
\begin{lemma}[Basic inequality IV]\label{basic-4-lemma}
Assume $f$ is smooth and decays fast at the spatial infinity. Then
\begin{equation}\label{basic-4}
\begin{aligned}
\nm{Z_{,\alpha'}\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}f}_{L^2}&\lesssim \nm{ (\partial_t+b\partial_{\alpha'})(b_{\alpha'}-D_{\alpha'} Z_t)}_{L^\infty}\nm{f}_{L^2}\\&+ \nm{(b_{\alpha'}-D_{\alpha'} Z_t)}^2_{L^\infty}\nm{f}_{L^2}+ \nm{ (b_{\alpha'}-D_{\alpha'} Z_t)}_{L^\infty}\nm{(\partial_t+b\partial_{\alpha'})f}_{L^2}\\&+ \nm{A_1}_{L^\infty}\nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^\infty}\nm{f}_{L^2}.
\end{aligned}
\end{equation}
\end{lemma}
\begin{proof}
Lemma~\ref{basic-4-lemma} is straightforward from the commutator relation \eqref{eq:c16}, identities
\eqref{eq:c26}, \eqref{eq:c27} and the definition $A_1:=\mathcal A\abs{Z_{,\alpha'}}^2$.
\end{proof}
Let $Z=Z(\cdot, t)$ be a solution of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, satisfying the assumption of Theorem~\ref{prop:a priori}. By \eqref{quasi-r1} and \eqref{eq:c10}, we have
\begin{equation}\label{base-eq}
\mathcal P \bar Z_{t,\alpha'}=-(\partial_t+b\partial_{\alpha'})(b_{\alpha'} \partial_{\alpha'}\bar Z_{t})-b_{\alpha'}\partial_{\alpha'} \bar Z_{tt}-i\mathcal A_{\alpha'} \partial_{\alpha'} \bar Z_t+\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} (\bar Z_{tt}-i)}.
\end{equation}
Equation \eqref{base-eq} is our base equation in the proof of Theorems~\ref{prop:a priori} and \ref{blow-up}.
\subsection{The proof of Theorem~\ref{prop:a priori}.}\label{proof0}
We begin with computing a few evolutionary equations. We have
\begin{equation}\label{eq-dt}
\mathcal P D_{\alpha'}\bar Z_t=\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}\bar Z_{t,\alpha'}+\frac1{Z_{,\alpha'}}\mathcal P \bar Z_{t,\alpha'};
\end{equation}
\begin{equation}\label{eq-ddt}
\mathcal P \paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}=\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}D_{\alpha'}^2\bar Z_{t}+\frac1{Z_{,\alpha'}}\bracket{\mathcal P, D_{\alpha'}^2}\bar Z_{t}+\frac1{Z_{,\alpha'}}D_{\alpha'}^2\mathcal P\bar Z_t.\end{equation}
And, by the commutator identity \eqref{eq:c7} and the fact that $(\partial_t+b\partial_{\alpha'})\bar Z_{t}=\bar Z_{tt}$,
\begin{equation}\label{eq-zta}
(\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'}=\bar Z_{tt,\alpha'}-b_{\alpha'} \bar Z_{t,\alpha'};
\end{equation}
and by \eqref{eq:dza} and \eqref{eq:c7},
\begin{equation}\label{eq-ddza}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}&=\partial_{\alpha'}(\partial_t+b\partial_{\alpha'}) \frac{1}{ Z_{,\alpha'}}+[(\partial_t+b\partial_{\alpha'}), \partial_{\alpha'}] \frac{1}{ Z_{,\alpha'}}\\&
=\paren{\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}}\paren{b_{\alpha'}-D_{\alpha'} Z_t}+D_{\alpha'} \paren{b_{\alpha'}-D_{\alpha'} Z_t} -b_{\alpha'}\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}\\&
=-D_{\alpha'} Z_t\paren{\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}}+D_{\alpha'} \paren{b_{\alpha'}-D_{\alpha'} Z_t}.
\end{aligned}
\end{equation}
We know from the definition of $\mathbf{E}_a(t)$, $\mathbf{E}_b(t)$, and $A_1:=\mathcal A |Z_{,\alpha'}|^2$,
$$\mathbf{E}_a(t):=E_{D_{\alpha'}\bar Z_t}(t),\qquad\text{and }\quad \mathbf{E}_b(t):=E_{ \frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}(t),$$
where $E_\Theta(t)$ is the basic energy as defined in \eqref{eq:41}. Notice that the quantities $D_{\alpha'}\bar Z_t$ and $
\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t$ are holomorphic. So the energy functional
\begin{equation}\label{energy-functional}
\mathfrak E(t)=E_{D_{\alpha'}\bar Z_t}(t)+ E_{ \frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}(t) +\|\bar Z_{t,\alpha'}(t)\|_{L^2}^2+\|D_{\alpha'}^2\bar Z_t(t)\|_{L^2}^2+\nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}(t)}_{L^2}^2+\abs{\frac{1}{ Z_{,\alpha'}}(0,t)}^2.
\end{equation}
Our goal is to show that there is a universal polynomial $C=C(x)$, such that
\begin{equation}\label{energy-ineq}
\frac d{dt}\mathfrak E(t)\le C(\mathfrak E(t)).
\end{equation}
We begin with a list of quantities controlled by $\mathfrak E(t)$. \subsubsection{Quantities controlled by $\mathfrak E(t)$.}\label{basic-quantities} It is clear that $\mathfrak E(t)$ controls the following quantities:
\begin{equation}\label{list1}
\nm{ D_{\alpha'}\bar Z_t}_{\dot H^{1/2}}, \quad\nm{\frac{1}{ Z_{,\alpha'}} D_{\alpha'}^2\bar Z_t}_{\dot H^{1/2}}, \quad \|\bar Z_{t,\alpha'}\|_{L^2},\quad \|D_{\alpha'}^2\bar Z_t\|_{L^2},\quad \nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2},\quad \abs{\frac{1}{ Z_{,\alpha'}}(0,t)}.
\end{equation}
By \eqref{eq:b13} and \eqref{a1},
\begin{equation}\label{2000}
1\le A_1,\qquad\text{and}\quad \nm{ A_1}_{L^\infty}\lesssim 1+\|\bar Z_{t,\alpha'}\|_{L^2}^2\le 1+\mathfrak E.
\end{equation}
We also have, by \eqref{ba} and \eqref{eq:b13}, that
\begin{equation}\label{2001}
\|b_{\alpha'}-2\Re D_{\alpha'} Z_t\|_{L^\infty}\lesssim \nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2}\|\bar Z_{t,\alpha'}\|_{L^2}\le \mathfrak E.
\end{equation}
We now estimate $\nm{D_{\alpha'} Z_t}_{L^\infty}$. We have, by the fundamental theorem of calculus,
\begin{equation}\label{2002}\paren{D_{\gamma'} \bar Z_t}^2-\int_0^1 \paren{D_{\beta '} \bar Z_t}^2\,d{\beta '}=2\int_0^1\int_{\beta '}^{\gamma'}
D_{\alpha'} \bar Z_t\partial_{\alpha'} D_{\alpha'} \bar Z_t\,d\alpha' d{\beta '}=2\int_0^1\int_{\beta '}^{\gamma'}
\partial_{\alpha'} \bar Z_t D_{\alpha'}^2 \bar Z_t\,d\alpha' d{\beta '},
\end{equation}
where in the last equality, we moved $\frac1{Z_{,\alpha'}}$ from the first to the second factor. So for any $\gamma'\in \mathbb R$,
\begin{equation}\label{2003}
\abs{\paren{D_{\gamma'} \bar Z_t(\gamma',t)}^2-\int_0^1 \paren{D_{\beta '} \bar Z_t({\beta '},t)}^2\,d{\beta '}}\le 2\|\bar Z_{t,\alpha'}\|_{L^2} \|D_{\alpha'}^2\bar Z_t\|_{L^2}\le 2\mathfrak E.
\end{equation}
Now by the fundamental theorem of calculus and the Cauchy-Schwarz inequality we have, for ${\beta '}\in [0, 1]$,
\begin{equation}\label{2004}\abs{\frac{1}{ Z_{,{\beta '}}}({\beta '}, t)-\frac{1}{ Z_{,\alpha'}}(0,t)}\le \int_0^1\abs{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}\,d\alpha'\le \nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2};
\end{equation}
so
$$\abs{\int_0^1 \paren{D_{\beta '} \bar Z_t}^2\,d{\beta '}}\le \nm{\frac{1}{ Z_{,\alpha'}}}_{L^\infty[0,1]}^2\|\bar Z_{t,\alpha'}\|_{L^2}^2\le \paren{\abs{\frac{1}{ Z_{,\alpha'}}(0,t)}+ \nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2}}^2\|\bar Z_{t,\alpha'}\|_{L^2}^2\lesssim \mathfrak E^2.
$$
Combining the above argument, we get
\begin{equation}\label{2005}
\nm{D_{\alpha'} Z_t}_{L^\infty}=\nm{D_{\alpha'} \bar Z_t}_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
This together with \eqref{2001} gives us
\begin{equation}\label{2006}
\nm{b_{\alpha'}}_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
We now explore the remaining terms in $\mathbf{E}_a(t)$ and $\mathbf{E}_b(t)$.
We know
\begin{equation}\label{2007}\mathbf{E}_a(t)=\int\frac1{A_1}|Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) D_{\alpha'}\bar Z_t |^2\,d\alpha'+ \nm{ D_{\alpha'}\bar Z_t}_{\dot H^{1/2}}^2.\end{equation}
Now by \eqref{eq:dza}, product rules and \eqref{eq-zta}, \footnote{One can also compute by changing to the Lagrangian coordinate and using the commutator relation \eqref{eq:c1}.}
\begin{equation}\label{2008}
Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) D_{\alpha'}\bar Z_t=(b_{\alpha'}-D_{\alpha'} Z_t) \bar Z_{t,\alpha'}+(\partial_t+b\partial_{\alpha'}) \bar Z_{t,\alpha'}=
\bar Z_{tt,\alpha'}- (D_{\alpha'} Z_t) \bar Z_{t,\alpha'};
\end{equation}
so
\begin{equation}\label{2009}
\begin{aligned}
\|Z_{tt,\alpha'}\|_{L^2}\le &\|D_{\alpha'} Z_t\|_{L^\infty}\|Z_{t,\alpha'}\|_{L^2}+\|Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) D_{\alpha'}\bar Z_t\|_{L^2}\\&\le \|D_{\alpha'} Z_t\|_{L^\infty}\|Z_{t,\alpha'}\|_{L^2} + \paren{\|A_1\|_{L^\infty}\mathbf{E}_a}^{1/2}\lesssim C(\mathfrak E).
\end{aligned}
\end{equation}
Similarly,
\begin{equation}\label{2010}
\mathbf{E}_b(t)=\int\frac1{A_1}\abs{Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) \paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t} }^2\,d\alpha'+ \nm{ \frac1{Z_{,\alpha'}} D^2_{\alpha'}\bar Z_t}_{\dot H^{1/2}}^2,
\end{equation}
and by the product rule and \eqref{eq:dza},
\begin{equation}\label{2010-1}
Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) \paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}=(b_{\alpha'}-D_{\alpha'} Z_t)D^2_{\alpha'}\bar Z_t+ (\partial_t+b\partial_{\alpha'}) D^2_{\alpha'}\bar Z_t;
\end{equation}
so
\begin{equation}\label{2011}
\nm{(\partial_t+b\partial_{\alpha'}) D^2_{\alpha'}\bar Z_t}_{L^2}\le \nm{Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) \paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}}_{L^2}+\|b_{\alpha'}-D_{\alpha'} Z_t\|_{L^\infty}\|D^2_{\alpha'}\bar Z_t\|_{L^2}\lesssim C(\mathfrak E).
\end{equation}
Now from
\begin{align}
D_{\alpha'}^2 Z_t=\partial_{\alpha'} \frac1{Z_{,\alpha'}}D_{\alpha'} Z_{t}+\frac1{Z_{,\alpha'}^2}\partial_{\alpha'}^2 Z_t,\label{2012-1}\\
D_{\alpha'}^2 \bar Z_t=\partial_{\alpha'} \frac1{Z_{,\alpha'}}D_{\alpha'} \bar Z_{t}+\frac1{Z_{,\alpha'}^2}\partial_{\alpha'}^2 \bar Z_t,\label{2012-2}
\end{align}
we have
\begin{equation}\label{2012}
\|D_{\alpha'}^2 Z_t\|_{L^2}\le 2 \nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}\|D_{\alpha'} Z_t\|_{L^\infty}+\|D_{\alpha'}^2 \bar Z_t\|_{L^2}\lesssim
C(\mathfrak E).
\end{equation}
Commuting $\partial_t+b\partial_{\alpha'}$ with $D^2_{\alpha'}$ by \eqref{eq:c2-1}, we get
\begin{equation}\label{2013}
D^2_{\alpha'}\bar Z_{tt}= (\partial_t +b\partial_{\alpha'}) D^2_{\alpha'} \bar Z_t+2(D_{\alpha'} Z_t) D_{\alpha'}^2\bar Z_t +(D_{\alpha'}^2 Z_t) D_{\alpha'}\bar Z_t;
\end{equation}
by \eqref{list1}, \eqref{2005}, \eqref{2012} and \eqref{2011}, we have
\begin{equation}\label{2014}
\|D^2_{\alpha'}\bar Z_{tt}\|_{L^2}\le C(\mathfrak E).
\end{equation}
From \eqref{2014} and \eqref{2009}, we can work through the same argument as from \eqref{2002} to \eqref{2005} and get
\begin{equation}\label{2015}
\|D_{\alpha'} Z_{tt}\|_{L^\infty}=\|D_{\alpha'} \bar Z_{tt}\|_{L^\infty}\lesssim C(\mathfrak E);
\end{equation}
and then by
a similar calculation as in \eqref{2012-1}-\eqref{2012-2} and \eqref{2014}, \eqref{2015},
\begin{equation}\label{2016}
\|D^2_{\alpha'} Z_{tt}\|_{L^2}\le C(\mathfrak E).
\end{equation}
Additionally, by \eqref{eq-zta},
\begin{equation}\label{2017}
\|(\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'}\|_{L^2}\le \|Z_{tt,\alpha'}\|_{L^2}+\|b_{\alpha'}\|_{L^\infty} \|Z_{t,\alpha'}\|_{L^2}\lesssim C(\mathfrak E).
\end{equation}
Summing up the estimates from \eqref{list1} through \eqref{2017}, we have that the following quantities are controlled by $\mathfrak E$:
\begin{equation}\label{2020}
\begin{aligned}
&\nm{ D_{\alpha'}\bar Z_t}_{\dot H^{1/2}}, \quad\nm{\frac{1}{ Z_{,\alpha'}} D_{\alpha'}^2\bar Z_t}_{\dot H^{1/2}}, \quad \|\bar Z_{t,\alpha'}\|_{L^2},\quad \|D_{\alpha'}^2\bar Z_t\|_{L^2},\quad \nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2},\quad \abs{\frac{1}{ Z_{,\alpha'}}(0,t)},\\&
\|A_1\|_{L^\infty}, \quad \|b_{\alpha'}\|_{L^\infty}, \quad \|D_{\alpha'} Z_t\|_{L^\infty},\quad \|D_{\alpha'} Z_{tt}\|_{L^\infty},
\quad \|(\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'}\|_{L^2}, \\&
\|Z_{tt,\alpha'}\|_{L^2}, \quad \|D_{\alpha'}^2 \bar Z_{tt}\|_{L^2},\quad \|D_{\alpha'}^2 Z_{tt}\|_{L^2},\quad \|(\partial_t+b\partial_{\alpha'})D_{\alpha'}^2\bar Z_t\|_{L^2},\quad \|D_{\alpha'}^2 Z_t\|_{L^2}.
\end{aligned}
\end{equation}
We will use Lemmas~\ref{basic-e}-\ref{basic-4-lemma} to do the estimates. Hence we need to control
the quantities that appear on the right hand sides of the inequalities in these lemmas.
\subsubsection{Controlling $\nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty}$ and
$\nm{(\partial_t+b\partial_{\alpha'})A_1}_{L^\infty}$}\label{ata-da1}
By \eqref{at},
$$\dfrac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}= \frac{(\partial_t +b\partial_{\alpha'}) A_1}{A_1}+b_{\alpha'} -2\Re D_{\alpha'} Z_t.
$$
We have controlled $\|b_{\alpha'}\|_{L^\infty}$ and $\|D_{\alpha'} Z_t\|_{L^\infty}$ in \S\ref{basic-quantities}. We are left with the quantity $\nm{(\partial_t+b\partial_{\alpha'})A_1}_{L^\infty}$. By \eqref{dta1},
$$
(\partial_t +b\partial_{\alpha'}) A_1= -\Im \paren{\bracket{Z_{tt},\mathbb H}\bar Z_{t,\alpha'}+\bracket{Z_t,\mathbb H}\partial_{\alpha'} \bar Z_{tt}-[Z_t, b; \bar Z_{t,\alpha'}]}.
$$
Applying \eqref{eq:b13} to the first two terms and \eqref{eq:b15} to the last we get
\begin{equation}\label{2021}
\nm{(\partial_t+b\partial_{\alpha'})A_1}_{L^\infty}\lesssim \|Z_{tt,\alpha'}\|_{L^2}\|Z_{t,\alpha'}\|_{L^2}+\|b_{\alpha'}\|_{L^\infty}\|Z_{t,\alpha'}\|^2_{L^2}\lesssim C(\mathfrak E);
\end{equation}
consequently
\begin{equation}\label{2022}
\nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty}\le \nm{(\partial_t+b\partial_{\alpha'})A_1}_{L^\infty}+
\|b_{\alpha'}\|_{L^\infty}+2\|D_{\alpha'} Z_t\|_{L^\infty}\lesssim
C(\mathfrak E).
\end{equation}
\subsubsection{Controlling $\nm{\mathcal A_{\alpha'}}_{L^\infty}$ and $\nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^\infty}$}\label{aa-zdz}
By \eqref{interface-r}, we have
\begin{equation}\label{2028}
i\mathcal A=\frac{Z_{tt}+i}{Z_{,\alpha'}}.
\end{equation}
Differentiating with respect to $\alpha'$ yields
\begin{equation}\label{2029}
i\mathcal A_{\alpha'}=(Z_{tt}+i)\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}+D_{\alpha'} Z_{tt}.
\end{equation}
Applying $I-\mathbb H$ to both sides of the equation and using the fact that $\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}=\mathbb H\paren{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}$ to rewrite the first term on the right hand side as a commutator, we get
\begin{equation}\label{2030}
i(I-\mathbb H)\mathcal A_{\alpha'}=\bracket{Z_{tt}, \mathbb H}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}+(I-\mathbb H)D_{\alpha'} Z_{tt}.
\end{equation}
Notice that $\mathcal A_{\alpha'}$ is purely real, so $\Im \paren{i(I-\mathbb H)\mathcal A_{\alpha'}}=\mathcal A_{\alpha'}$, and $|\mathcal A_{\alpha'}|\le |\paren{i(I-\mathbb H)\mathcal A_{\alpha'}}|$. Therefore,
\begin{equation}\label{2031}
|\mathcal A_{\alpha'}|\le \abs{\bracket{Z_{tt}, \mathbb H}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}+ 2|D_{\alpha'} Z_{tt}|+|(I+\mathbb H)D_{\alpha'} Z_{tt}|.
\end{equation}
We estimate the first term by \eqref{eq:b13},
$$\nm{\bracket{Z_{tt}, \mathbb H}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^\infty}\lesssim \|Z_{tt,\alpha'}\|_{L^2}\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^2},$$
and the second term has been controlled in \S\ref{basic-quantities}. We are left with the third term, $(I+\mathbb H)D_{\alpha'} Z_{tt}$. We rewrite it by commuting out $\frac{1}{Z_{,\alpha'}}$:
\begin{equation}\label{2032}
(I+\mathbb H)D_{\alpha'} Z_{tt}=D_{\alpha'} (I+\mathbb H) Z_{tt} -\bracket{ \frac{1}{Z_{,\alpha'}},\mathbb H} Z_{tt,\alpha'},
\end{equation}
where we can estimate the second term by \eqref{eq:b13}. For the first term, we know $(I+\mathbb H)Z_t=0$ because $(I-\mathbb H)\bar Z_t=0$ and $\mathbb H$ is purely imaginary;
and $Z_{tt}=(\partial_t+b\partial_{\alpha'})Z_t$. So
\begin{equation}\label{2033}
(I+\mathbb H) Z_{tt}=-[\partial_t+b\partial_{\alpha'}, \mathbb H]Z_t=-[b, \mathbb H]Z_{t,\alpha'}.
\end{equation}
We further rewrite it by \eqref{b}:
\begin{equation}\label{bb}
b=\mathbb P_A\paren{\frac{Z_t}{Z_{,\alpha'}}}+\mathbb P_H\paren{\frac{\bar Z_t}{\bar Z_{,\alpha'}}}=\frac{\bar Z_t}{\bar Z_{,\alpha'}}+\mathbb P_A\paren{\frac{Z_t}{Z_{,\alpha'}}-\frac{\bar Z_t}{\bar Z_{,\alpha'}}},
\end{equation}
Prop~\ref{prop:comm-hilbe}, the fact that $(I+\mathbb H)Z_{t,\alpha'}=0$ and $(I+\mathbb H)\bar{D_{\alpha'} \bar Z_{t}}=0$. We have
\begin{equation}\label{2034}
(I+\mathbb H) Z_{tt}=-\bracket{\frac{\bar Z_t}{\bar Z_{,\alpha'}} , \mathbb H}Z_{t,\alpha'}=-\bracket{\bar Z_t , \mathbb H}\bar{D_{\alpha'} \bar Z_{t}}.
\end{equation}
We have reduced the task of estimating $D_{\alpha'} (I+\mathbb H) Z_{tt}$ to estimating $D_{\alpha'} \bracket{\bar Z_t , \mathbb H}\bar{D_{\alpha'} \bar Z_{t}}$.
We compute, for general functions $f$ and $g$,
\begin{equation}\label{2026}
\partial_{\alpha'} [f,\mathbb H]g= f_{\alpha'} \mathbb H g-\frac1{\pi i}\int\frac{(f(\alpha')-f({\beta'}))}{(\alpha'-{\beta'})^2}g({\beta'})\,d{\beta'};
\end{equation}
therefore
\begin{equation}\label{2027}
\begin{aligned}
D_{\alpha'} & [f,\mathbb H]g= \frac1{Z_{,\alpha'}} f_{\alpha'} \mathbb H g\\&-\frac1{\pi i}\int\frac{\paren{f(\alpha')-f({\beta'})}\paren{\frac1{Z_{,\alpha'}}-\frac1{Z_{,{\beta'}}}}}{(\alpha'-{\beta'})^2}g({\beta'})\,d{\beta'}-\frac1{\pi i}\int\frac{(f(\alpha')-f({\beta'}))}{(\alpha'-{\beta'})^2} \frac1{Z_{,{\beta'}}}g({\beta'})\,d{\beta'}.
\end{aligned}
\end{equation}
Now using \eqref{2034}, \eqref{2027}, and the fact that $(I+\mathbb H)\bar{D_{\alpha'} \bar Z_{t}}=0$, we have
\begin{equation}\label{2035}
\begin{aligned}
D_{\alpha'} & (I+\mathbb H) Z_{tt}=\abs{D_{\alpha'} \bar Z_t}^2
\\&+\frac1{\pi i}\int\frac{\paren{\bar Z_t(\alpha')-\bar Z_t({\beta'})}\paren{\frac1{Z_{,\alpha'}}-\frac1{Z_{,{\beta'}}}}}{(\alpha'-{\beta'})^2}\bar{D_{\beta'} \bar Z_{t}}\,d{\beta'}+\frac1{\pi i}\int\frac{(\bar Z_t(\alpha')-\bar Z_t({\beta'}))}{(\alpha'-{\beta'})^2}\frac1{Z_{,{\beta'}}}\bar{D_{\beta'} \bar Z_{t}}\,d{\beta'},
\end{aligned}
\end{equation}
where we rewrite the third term further:
\begin{equation}\label{2036}
\begin{aligned}
\frac1{\pi i}\int\frac{(\bar Z_t(\alpha')-\bar Z_t({\beta'}))}{(\alpha'-{\beta'})^2}\frac1{Z_{,{\beta'}}}\bar{D_{\beta'} \bar Z_{t}}\,d{\beta'}=&
\frac1{\pi i}\int\frac{(\bar Z_t(\alpha')-\bar Z_t({\beta'}))}{(\alpha'-{\beta'})^2}\paren{\frac1{Z_{,{\beta'}}}\bar{D_{\beta'} \bar Z_{t}}-\frac1{Z_{,\alpha'}}\bar{D_{\alpha'} \bar Z_{t}}}\,d{\beta'}\\&+\frac1{Z_{,\alpha'}}\bar{D_{\alpha'} \bar Z_{t}}\,\bar Z_{t,\alpha'};
\end{aligned}
\end{equation}
here we simplified the second term on the right hand side by the fact that $\bar Z_{t}=\mathbb H\bar Z_t$.
We can now estimate $\nm{D_{\alpha'} (I+\mathbb H) Z_{tt}}_{L^\infty}$. We apply \eqref{eq:b16} to the second term on the right side of \eqref{2035}; for the third term we use \eqref{2036}, and apply \eqref{eq:b16} to the first term on the right hand side of \eqref{2036}, and notice that
\begin{equation}\label{2037}
\partial_{\alpha'} \paren{\frac1{Z_{,\alpha'}}\bar{D_{\alpha'} \bar Z_{t}}}=\partial_{\alpha'} \paren{\frac1{Z_{,\alpha'}}}\bar{D_{\alpha'} \bar Z_{t}}+D_{\alpha'} \bar{D_{\alpha'} \bar Z_{t}};
\end{equation}
we have
\begin{equation}\label{2038}
\nm{D_{\alpha'} (I+\mathbb H) Z_{tt}}_{L^\infty}\lesssim \nm{D_{\alpha'} \bar Z_t}^2_{L^\infty}+\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}\nm{Z_{t,\alpha'}}_{L^2}\nm{D_{\alpha'} \bar Z_t}_{L^\infty}+\nm{Z_{t,\alpha'}}_{L^2}\nm{D_{\alpha'}^2 \bar Z_t}_{L^2}.
\end{equation}
Summing up the calculations from \eqref{2031} through \eqref{2038}, and using the estimates in \S\ref{basic-quantities}, we conclude
\begin{equation}\label{2039}
\nm{\mathcal A_{\alpha'}}_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
Observe that the same argument also gives, by taking the real parts in \eqref{2030},
\begin{equation}\label{2039-1}
\nm{\mathbb H\mathcal A_{\alpha'}}_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
Now from \eqref{2029} and \eqref{aa1},
\begin{equation}
\frac{iA_1}{\bar Z_{,\alpha'}}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}} =i\mathcal A_{\alpha'}- D_{\alpha'} Z_{tt}.
\end{equation}
Because $A_1\ge 1$, we have
\begin{equation}\label{2040}
\nm{\frac{1}{ Z_{,\alpha'}}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}} }_{L^\infty}\le \nm{\mathcal A_{\alpha'}}_{L^\infty}+\| D_{\alpha'} Z_{tt}\|_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
\subsubsection{Controlling $\nm{\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}}_{L^2}$ and $\nm{ (\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}$ }\label{dadtza}
We begin with \eqref{eq-ddza}, and rewrite the second term on the right hand side to get
\begin{equation}\label{ddza}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}
&=-D_{\alpha'} Z_t\paren{\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}}+D_{\alpha'} \paren{b_{\alpha'}-D_{\alpha'} Z_t}\\&
=-D_{\alpha'} Z_t\paren{\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}}+D_{\alpha'} \paren{b_{\alpha'}-2\Re D_{\alpha'} Z_t}+D_{\alpha'} \bar {D_{\alpha'} Z_t}.
\end{aligned}
\end{equation}
We control the first and third terms by
\begin{equation}\label{2024}
\nm{ D_{\alpha'} Z_t\paren{\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}}}_{L^2}\le \nm{ D_{\alpha'} Z_t}_{L^\infty}\nm{\partial_{\alpha'}\frac{1}{ Z_{,\alpha'}}}_{L^2}\lesssim C(\mathfrak E)
\end{equation}
and
\begin{equation}\label{2025}
\nm{D_{\alpha'} \bar {D_{\alpha'} Z_t}}_{L^2}= \nm{ {D^2_{\alpha'} Z_t}}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
We are left with the term $D_{\alpha'} \paren{b_{\alpha'}-2\Re D_{\alpha'} Z_t}$. We begin with \eqref{ba}:
\begin{equation}\label{ba-1}
b_{\alpha'}-2\Re D_{\alpha'} Z_t=\Re \paren{\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}+ \bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} }.
\end{equation}
Notice that the right hand side consists of $\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}$,
$\bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}}$ and their complex conjugates. We use \eqref{2027} to compute
\begin{equation}\label{2041}
\begin{aligned}
D_{\alpha'} &\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}= - \partial_{\alpha'} \frac1{Z_{,\alpha'}} D_{\alpha'} Z_{t}\\&-\frac1{\pi i}\int\frac{\paren{\frac1{Z_{,\alpha'}}-\frac1{Z_{,{\beta'}}}}^2}{(\alpha'-{\beta'})^2} Z_{t,{\beta'}} \,d{\beta'}-\frac1{\pi i}\int\frac{\paren{\frac1{Z_{,\alpha'}}-\frac1{Z_{,{\beta'}}}} }{(\alpha'-{\beta'})^2} D_{\beta'} Z_t \,d{\beta'}.
\end{aligned}
\end{equation}
Applying \eqref{eq:b12} to the second term and \eqref{3.17} to the third term yields
\begin{equation}\label{2042}
\nm{D_{\alpha'} \bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}}_{L^2}\lesssim \nm{\partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}\nm{ D_{\alpha'} Z_{t}}_{L^\infty}+ \nm{\partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}^2\nm{ Z_{t,\alpha'}}_{L^2}.
\end{equation}
Similarly
\begin{equation}\label{2043}
\begin{aligned}
D_{\alpha'} &\bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} = Z_{t,\alpha'} \frac1{Z_{,\alpha'}} \partial_{\alpha'} \frac1{Z_{,\alpha'}} \\&-\frac1{\pi i}\int\frac{\paren{Z_t(\alpha')-Z_t({\beta'})}\paren{\frac1{Z_{,\alpha'}}-\frac1{Z_{,{\beta'}}}}}{(\alpha'-{\beta'})^2} \partial_{\beta'} \frac1{Z_{,{\beta'}}} \,d{\beta'}-\frac1{\pi i}\int\frac{ \paren{Z_t(\alpha')-Z_t({\beta'})} }{(\alpha'-{\beta'})^2} \frac1{Z_{,{\beta'}}} \partial_{\beta'} \frac1{Z_{,{\beta'}}} \,d{\beta'},
\end{aligned}
\end{equation}
and applying \eqref{eq:b12} to the second term and \eqref{3.17} to the third term yields
\begin{equation}\label{2044}
\nm{ D_{\alpha'} \bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}\lesssim \nm{\partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}^2\nm{ Z_{t,\alpha'}}_{L^2}+ \nm{Z_{t,\alpha'}}_{L^2}\nm{ \frac1{Z_{,\alpha'}}\partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^\infty} .
\end{equation}
The estimate of the complex conjugate terms is similar, so we omit it. This concludes, with an application of the results in \S\ref{basic-quantities} and \S\ref{aa-zdz}, that
\begin{equation}\label{2045}
\nm{D_{\alpha'}\paren{b_{\alpha'}-2\Re D_{\alpha'} Z_t}}_{L^2}\lesssim C(\mathfrak E),
\end{equation}
therefore
\begin{equation}\label{2046}
\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
Now by
\begin{equation}\label{2047}
\partial_{\alpha'} (\partial_t+b\partial_{\alpha'}) \frac{1}{ Z_{,\alpha'}}=(\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}+b_{\alpha'} \partial_{\alpha'} \frac{1}{ Z_{,\alpha'}},
\end{equation}
we also have
\begin{equation}\label{2048}
\nm{\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\frac{1}{ Z_{,\alpha'}}}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
\subsubsection{Controlling $\nm{\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})b}_{L^\infty}$, $\nm{(\partial_t+b\partial_{\alpha'})b_{\alpha'}}_{L^\infty}$ and $\nm{(\partial_t+b\partial_{\alpha'})D_{\alpha'} Z_t}_{L^\infty}$}\label{dtdab}
We apply \eqref{eq:c14} to \eqref{ba-1} and get
\begin{equation}\label{dba-1}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})\paren{b_{\alpha'}-2\Re D_{\alpha'} Z_t}&=\Re \paren{\bracket{ (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}+ \bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{tt,\alpha'}-\bracket{ \frac1{Z_{,\alpha'}}, b; Z_{t,\alpha'} } }
\\&+\Re\paren{ \bracket{Z_{tt}, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} + \bracket{Z_{t}, \mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}} -\bracket{ Z_{t}, b; \partial_{\alpha'} \frac1{Z_{,\alpha'}} } };
\end{aligned}
\end{equation}
using \eqref{eq:b13}, \eqref{eq:b15} and results from the previous subsections we obtain
\begin{equation}\label{2023}
\begin{aligned}
\nm{(\partial_t+b\partial_{\alpha'})\paren{b_{\alpha'}-2\Re D_{\alpha'} Z_t}}_{L^\infty}&\lesssim \nm{\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}}_{L^2}\nm
{ Z_{t,\alpha'}}_{L^2}\\&+ \nm{\partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^2}\nm{ Z_{tt,\alpha'}}_{L^2}+ \nm{ \partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^2}\|b_{\alpha'}\|_{L^\infty}\| Z_{t,\alpha'} \|_{L^2}
\\&\lesssim C(\mathfrak E).
\end{aligned}
\end{equation}
We now compute $(\partial_t+b\partial_{\alpha'})D_{\alpha'} Z_t$. By \eqref{eq:c1-1},
\begin{equation}\label{2049}
(\partial_t+b\partial_{\alpha'}) D_{\alpha'} Z_t=D_{\alpha'} Z_{tt}-\paren{D_{\alpha'} Z_t}^2.
\end{equation}
So by the estimates in \S\ref{basic-quantities}, we have
\begin{equation}\label{2050}
\nm{(\partial_t+b\partial_{\alpha'}) D_{\alpha'} Z_t}_{L^\infty}\le \nm{D_{\alpha'} Z_{tt}}_{L^\infty}+\nm{D_{\alpha'} Z_t}^2_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
This combined with \eqref{2023} yields
\begin{equation}\label{2051}
\nm{(\partial_t+b\partial_{\alpha'})b_{\alpha'}}_{L^\infty}\lesssim C(\mathfrak E).
\end{equation}
From $\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})b=(\partial_t+b\partial_{\alpha'})b_{\alpha'}+(b_{\alpha'})^2$,
\begin{equation}\label{2052}
\nm{\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})b}_{L^\infty}\le \nm{(\partial_t+b\partial_{\alpha'})b_{\alpha'}}_{L^\infty}+ \nm{b_{\alpha'}}_{L^\infty}^2\lesssim C(\mathfrak E).
\end{equation}
We are now ready to estimate $\frac{d}{dt}\mathfrak E$.
\subsubsection{Controlling $\frac d{dt} \nm{\bar Z_{t,\alpha'}}_{L^2}^2$, $\frac d{dt} \nm{D_{\alpha'}^2 \bar Z_{t}}_{L^2}^2$, $\frac d{dt} \nm{ \partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}^2$ and $\frac d{dt} \abs{ \frac1{Z_{,\alpha'}} (0,t) }^2$}\label{ddtlower} We use Lemma~\ref{basic-e2} to control $\frac d{dt} \nm{\bar Z_{t,\alpha'}}_{L^2}^2$, $\frac d{dt} \nm{D_{\alpha'}^2 \bar Z_{t}}_{L^2}^2$ and $\frac d{dt} \nm{ \partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}^2$. Notice that when we substitute
$$\Theta=Z_{t,\alpha'},\qquad \Theta= D_{\alpha'}^2 \bar Z_{t},\qquad \text{and}\quad \Theta= \partial_{\alpha'} \frac1{Z_{,\alpha'}} $$
in \eqref{basic-2}, all the terms on the right hand sides are already controlled in subsections \S\ref{basic-quantities} and \S\ref{dadtza}. So we have
\begin{equation}\label{2053}
\frac d{dt} \nm{\bar Z_{t,\alpha'}}_{L^2}^2+\frac d{dt} \nm{D_{\alpha'}^2 \bar Z_{t}}_{L^2}^2+\frac d{dt} \nm{ \partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^2}^2\lesssim C(\mathfrak E).
\end{equation}
To estimate $\frac d{dt} \abs{ \frac1{Z_{,\alpha'}} (0,t) }^2$, we start with \eqref{eq:dza} and compute
\begin{equation}\label{2054}
(\partial_t+b\partial_{\alpha'})\abs{\frac1{Z_{,\alpha'}}}^2=2\Re \paren{\frac1{\bar Z_{,\alpha'}}(\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}}= \abs{\frac1{Z_{,\alpha'}} }^2\paren{2b_{\alpha'}-2\Re D_{\alpha'} Z_t}.
\end{equation}
Recall that we chose the Riemann mapping so that $h(0,t)=0$ for all $t$. So $h_t\circ h^{-1}(0,t)=b(0,t)=0$ and
\begin{equation}\label{2055}
\frac{d}{dt}\abs{\frac1{Z_{,\alpha'}}(0,t)}^2= \abs{\frac1{Z_{,\alpha'}}(0,t)}^2\paren{2b_{\alpha'}(0,t)-2\Re D_{\alpha'} Z_t(0,t)}\lesssim C(\mathfrak E).
\end{equation}
We use Lemma~\ref{basic-e} to estimate the two main terms $\frac{ d}{dt} {\mathbf E}_a(t)$ and $\frac{ d}{dt}{\mathbf E}_b(t)$.
\subsubsection{Controlling $\frac{ d}{dt} {\mathbf E}_a(t)$}\label{ddtea} We begin with $\frac{ d}{dt} {\mathbf E}_a(t)$. Applying Lemma~\ref{basic-e} to $\Theta=D_{\alpha'} \bar Z_t$ we get
\begin{equation}\label{2056}
\frac d{dt} {\mathbf E}_a(t)\le \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty} {\mathbf E}_a(t)+2 {\mathbf E}_a(t)^{1/2}\paren{\int\frac{|\mathcal P D_{\alpha'} \bar Z_t |^2}{\mathcal A}\,d\alpha'}^{1/2}.
\end{equation}
By \eqref{2022}, we know the first term is controlled by $C(\mathfrak E)$. We need to estimate the factor $\paren{\int\frac{|\mathcal P D_{\alpha'} \bar Z_t |^2}{\mathcal A}\,d\alpha'}^{1/2}$ in the second term. By \eqref{eq-dt}:
\begin{equation}\label{2057}
\mathcal P D_{\alpha'}\bar Z_t=\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}\bar Z_{t,\alpha'}+\frac1{Z_{,\alpha'}}\mathcal P \bar Z_{t,\alpha'},
\end{equation}
and we have
\begin{equation}\label{2058}
\int\frac{| \bracket{\mathcal P, \frac1{Z_{,\alpha'}}}\bar Z_{t,\alpha'} |^2}{\mathcal A}\,d\alpha'=
\int\frac{|Z_{,\alpha'} \bracket{\mathcal P, \frac1{Z_{,\alpha'}}}\bar Z_{t,\alpha'} |^2}{ A_1}\,d\alpha'
\le
\nm{Z_{,\alpha'}\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}\bar Z_{t,\alpha'}}_{L^2}^2\lesssim C(\mathfrak E);
\end{equation}
here in the last step we used \eqref{basic-4}; notice that all the terms on the right hand side of \eqref{basic-4} with $f=\bar Z_{t,\alpha'}$ are controlled in subsections \S\ref{basic-quantities}--\S\ref{dtdab}. We are left with the term
$\int\frac{| \frac1{Z_{,\alpha'}}\mathcal P \bar Z_{t,\alpha'} |^2}{\mathcal A}\,d\alpha'$. Because $A_1\ge 1$,
$$\int\frac{| \frac1{Z_{,\alpha'}}\mathcal P \bar Z_{t,\alpha'} |^2}{\mathcal A}\,d\alpha'\le \int |\mathcal P \bar Z_{t,\alpha'} |^2\,d\alpha'.$$
By the base equation \eqref{base-eq},
\begin{equation}\label{2059}
\mathcal P \bar Z_{t,\alpha'}=-(\partial_t+b\partial_{\alpha'})(b_{\alpha'} \partial_{\alpha'}\bar Z_{t})-b_{\alpha'}\partial_{\alpha'} \bar Z_{tt}-i\mathcal A_{\alpha'} \partial_{\alpha'} \bar Z_t+\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} (\bar Z_{tt}-i)};
\end{equation}
we expand the last term by the product rule,
\begin{equation}\label{2061}
\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} (\bar Z_{tt}-i)}=\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} \bar Z_{tt,\alpha'}+\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} (\bar Z_{tt}-i).
\end{equation}
It is clear that the first three terms in \eqref{2059} are controlled by $\mathfrak E$, by the results of \S\ref{basic-quantities} - \S\ref{dtdab}:
\begin{equation}\label{2060}
\|-(\partial_t+b\partial_{\alpha'})(b_{\alpha'} \partial_{\alpha'}\bar Z_{t})-b_{\alpha'}\partial_{\alpha'} \bar Z_{tt}-i\mathcal A_{\alpha'} \partial_{\alpha'} \bar Z_t\|_{L^2}\lesssim C(\mathfrak E),
\end{equation}
and the first term in \eqref{2061} satisfies
\begin{equation}\label{2062}
\nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} \bar Z_{tt,\alpha'}}_{L^2}\le \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty}\| \bar Z_{tt,\alpha'}\|_{L^2}\lesssim C(\mathfrak E).
\end{equation}
We are left with one last term, $\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} (\bar Z_{tt}-i)$, in \eqref{2059}.
We write
\begin{equation}\label{2063}
\mathcal P\bar Z_{t,\alpha'}=\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} (\bar Z_{tt}-i)+\mathcal R,
\end{equation}
where $\mathcal R= -(\partial_t+b\partial_{\alpha'})(b_{\alpha'} \partial_{\alpha'}\bar Z_{t})-b_{\alpha'}\partial_{\alpha'} \bar Z_{tt}-i\mathcal A_{\alpha'} \partial_{\alpha'} \bar Z_t+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} \bar Z_{tt,\alpha'}$.
We want to take advantage of the fact that $\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} $ is purely real; notice that we have control of $\|(I-\mathbb H)\mathcal P \bar Z_{t,\alpha'}\|_{L^2}$ and $\|\mathcal R\|_{L^2}$,
by Lemma~\ref{basic-3-lemma} and \S\ref{basic-quantities} - \S\ref{dtdab}, and by \eqref{2060} and \eqref{2062}.
Applying $(I-\mathbb H)$ to both sides of equation \eqref{2063}, we get
\begin{equation}\label{2065}
\begin{aligned}
(I-\mathbb H)\mathcal P\bar Z_{t,\alpha'}&=(I-\mathbb H)\paren{\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} (\bar Z_{tt}-i)}+(I-\mathbb H)\mathcal R\\&
= (\bar Z_{tt}-i)(I-\mathbb H)\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} +\bracket{\bar Z_{tt}, \mathbb H} \partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} +(I-\mathbb H)\mathcal R,
\end{aligned}
\end{equation}
where we commuted $\bar Z_{tt}-i$ out in the second step. Now because $\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} $ is purely real,
\begin{equation}\label{2066}
\abs{ (\bar Z_{tt}-i) \partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} } \le \abs{ (\bar Z_{tt}-i)(I-\mathbb H)\partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} },
\end{equation}
so by \eqref{2065},
\begin{equation}\label{2067}
\abs{ (\bar Z_{tt}-i) \partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} } \le \abs{(I-\mathbb H)\mathcal P\bar Z_{t,\alpha'}}+ \abs{\bracket{\bar Z_{tt}, \mathbb H} \partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }+ \abs{(I-\mathbb H)\mathcal R}.
\end{equation}
We estimate the $L^2$ norm of the first term by Lemma~\ref{basic-3-lemma}, the second term by \eqref{3.21}, and the third term by \eqref{2060} and \eqref{2062}. We obtain
\begin{equation}\label{2068}
\nm{ (\bar Z_{tt}-i) \partial_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }_{L^2}\lesssim C(\mathfrak E)+ \nm{Z_{tt,\alpha'}}_{L^2}\nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty}+C(\mathfrak E)\lesssim C(\mathfrak E).
\end{equation}
This concludes
\begin{equation}\label{2069}
\frac d{dt}{\mathbf E}_a(t)\lesssim C(\mathfrak E(t)).
\end{equation}
We record here the following estimate that will be used later. By \eqref{2068}, $\bar Z_{tt}-i=-\frac{iA_1}{Z_{,\alpha'}}$ and $A_1\ge 1$, we have
\begin{equation}\label{2068-1}
\nm{ D_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }_{L^2}\lesssim C(\mathfrak E).
\end{equation}
\subsubsection{Controlling $\frac d{dt}{\mathbf E}_b(t)$}\label{ddteb} Taking $\Theta= \frac1{Z_{,\alpha'}}D_{\alpha'}^2\bar Z_t$ in Lemma~\ref{basic-e}, we have
\begin{equation}\label{2070}
\frac d{dt} {\mathbf E}_b(t)\le \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty} {\mathbf E}_b(t)+2 {\mathbf E}_b(t)^{1/2}\paren{\int\frac{|\mathcal P\paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'} \bar Z_t} |^2}{\mathcal A}\,d\alpha'}^{1/2}.
\end{equation}
By \eqref{2022}, the first term is controlled by $\mathfrak E$. We consider the second term. We know
\begin{equation}\label{2071}
\mathcal P \paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'}\bar Z_t}=\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}D_{\alpha'}^2\bar Z_{t}+\frac1{Z_{,\alpha'}}\bracket{\mathcal P, D_{\alpha'}^2}\bar Z_{t}+\frac1{Z_{,\alpha'}}D_{\alpha'}^2\mathcal P\bar Z_t,\end{equation}
and because $A_1\ge 1$,
\begin{equation}\label{2072}
\int\frac{|\mathcal P\paren{\frac1{Z_{,\alpha'}}D^2_{\alpha'} \bar Z_t} |^2}{\mathcal A}\,d\alpha'\lesssim \int \abs{Z_{,\alpha'}\bracket{\mathcal P, \frac1{Z_{,\alpha'}}}D_{\alpha'}^2\bar Z_{t}}^2\,d\alpha'+\int\abs{ \bracket{\mathcal P, D_{\alpha'}^2}\bar Z_{t}}^2\,d\alpha'+\int \abs{D_{\alpha'}^2\mathcal P\bar Z_t}^2\,d\alpha'.
\end{equation}
Now by Lemma~\ref{basic-4-lemma} and the results of \S\ref{basic-quantities} - \S\ref{dtdab},
the first term on the right hand side of \eqref{2072} is controlled by $\mathfrak E$. For the second term, we compute, using \eqref{eq:c4-1},
\begin{equation} \label{2074}
\begin{aligned}
\bracket{\mathcal P,D_{\alpha'}^2}\bar Z_t & =-4(D_{\alpha'} Z_{tt}) D_{\alpha'}^2\bar Z_t + 6(D_{\alpha'} Z_t)^2 D_{\alpha'}^2\bar Z_t - (2D_{\alpha'}^2 Z_{tt}) D_{\alpha'}\bar Z_t\\&
+ 6(D_{\alpha'} Z_t) (D_{\alpha'}^2 Z_t) D_{\alpha'}\bar Z_t - 2(D_{\alpha'}^2 Z_t) D_{\alpha'} \bar Z_{tt} - 4(D_{\alpha'} Z_t) D_{\alpha'}^2 \bar Z_{tt}.
\end{aligned}
\end{equation}
By the results in \S\ref{basic-quantities}, we have
\begin{equation}\label{2075}
\| \bracket{\mathcal P,D_{\alpha'}^2}\bar Z_t\|_{L^2}\lesssim C(\mathfrak E).
\end{equation}
We are left with the term $\int \abs{D_{\alpha'}^2\mathcal P\bar Z_t}^2\,d\alpha'$, where
$$\mathcal P\bar Z_t:=\bar Z_{ttt}+i\mathcal A \bar Z_{t,\alpha'}=\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} (\bar Z_{tt}-i).$$
We expand $D_{\alpha'}^2 \mathcal P\bar Z_t$ by the product rule,
\begin{equation}\label{2076}
D_{\alpha'}^2 \mathcal P\bar Z_t=D_{\alpha'}^2\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}(\bar Z_{tt}-i)+2D_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}D_{\alpha'} \bar Z_{tt}+\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} D_{\alpha'}^2\bar Z_{tt}.
\end{equation}
We know how to handle the second and third terms, thanks to the work in the previous subsections.
We want to use the same idea as in the previous subsection to control the first term; however, $D_{\alpha'}$ is not purely real, so we go through the following slightly convoluted process.
First, we have
\begin{equation}\label{2077}
\begin{aligned}
&\nm{2D_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}D_{\alpha'} \bar Z_{tt}+\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} D_{\alpha'}^2\bar Z_{tt}}_{L^2}\\& \qquad\lesssim \|D_{\alpha'} \bar Z_{tt}\|_{L^\infty}\nm{D_{\alpha'}\paren{ \frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}_{L^2}+\nm{ \frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty}\nm{D_{\alpha'}^2 \bar Z_{tt}}_{L^2}\lesssim C(\mathfrak E);
\end{aligned}
\end{equation}
and by Lemma~\ref{basic-3-lemma} and \eqref{2075},
\begin{equation}\label{2084}
\nm{(I-\mathbb H)D_{\alpha'}^2 \mathcal P\bar Z_t}_{L^2}\le \nm{(I-\mathbb H)\mathcal P D_{\alpha'}^2 \bar Z_t}_{L^2}+\nm{(I-\mathbb H)\bracket{D_{\alpha'}^2, \mathcal P}\bar Z_t}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
So
\begin{equation}\label{2078}
\nm{(I-\mathbb H)\paren{D_{\alpha'}^2\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}(\bar Z_{tt}-i)}}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
This gives, from
\begin{equation}\label{2079}
\begin{aligned}
(I-\mathbb H)\paren{D_{\alpha'}^2\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}(\bar Z_{tt}-i)}&= \frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}(I-\mathbb H)\paren{\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}\\&+\bracket{\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}, \mathbb H}\paren{\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}},
\end{aligned}
\end{equation}
and \eqref{3.20}, that
\begin{equation}\label{2080}
\nm{\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}(I-\mathbb H)\paren{\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
Now we move the factor $\frac{(\bar Z_{tt}-i)}{|Z_{,\alpha'}|}$ back into $(I-\mathbb H)$ to get
\begin{equation}\label{2081}
\begin{aligned}
\frac{(\bar Z_{tt}-i)}{|Z_{,\alpha'}|}(I-\mathbb H)\paren{\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}&=- \bracket{\frac{(\bar Z_{tt}-i)}{|Z_{,\alpha'}|}, \mathbb H}\paren{\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}\\&+(I-\mathbb H) \paren{\frac{(\bar Z_{tt}-i)}{|Z_{,\alpha'}|}\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}};
\end{aligned}
\end{equation}
and observe that
\begin{equation}\label{2082}
\begin{aligned}
&\frac{(\bar Z_{tt}-i)}{|Z_{,\alpha'}|}\partial_{\alpha'} D_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}=\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}\partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|}\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}\\&+\frac{(\bar Z_{tt}-i)}{|Z_{,\alpha'}|}\partial_{\alpha'}\paren{\frac1{Z_{,\alpha'}} }\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}-\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}\partial_{\alpha'}\paren{\frac1{|Z_{,\alpha'}|} }\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}.
\end{aligned}
\end{equation}
We know the $L^2$ norms of the last two terms on the right hand side of \eqref{2082} are controlled by $C(\mathfrak E)$; and by \eqref{3.20}, the $L^2$ norm of the commutator in \eqref{2081} is also controlled by $C(\mathfrak E)$; therefore by \eqref{2081}, \eqref{2080}, \eqref{2082},
\begin{equation}\label{2083}
\nm{(I-\mathbb H) \paren{\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}\partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|}\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} } }_{L^2}\lesssim C(\mathfrak E).
\end{equation}
Now we commute out the factor $\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}$ from $(I-\mathbb H)$ to get
\begin{equation}\label{2085}
\begin{aligned}
(I-\mathbb H) \paren{\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}\partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|}\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} }&= \frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}} (I-\mathbb H) \partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|}\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} \\&+\bracket{\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}}, \mathbb H}\partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|} \partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}.
\end{aligned}
\end{equation}
Observe that the quantity the operator $(I-\mathbb H)$ acts on in the first term on the right hand side of \eqref{2085} is purely real. Applying \eqref{3.20} again to the commutator in \eqref{2085} and using \eqref{2083} and the fact that $|f|\le |(I-\mathbb H)f|$ for $f$ real, we obtain
\begin{equation}\label{2086}
\begin{aligned}
&\nm{ \frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}} \partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|}\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} }_{L^2}\\&\qquad\le \nm{\frac{(\bar Z_{tt}-i)}{Z_{,\alpha'}} (I-\mathbb H) \partial_{\alpha'} \paren{\frac1{|Z_{,\alpha'}|}\partial_{\alpha'} \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} }_{L^2}\lesssim C(\mathfrak E).
\end{aligned}
\end{equation}
Applying \eqref{2086} to \eqref{2082} yields
$$\nm{D_{\alpha'}^2\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}(\bar Z_{tt}-i)}_{L^2}\lesssim C(\mathfrak E);$$
and by \eqref{2077}, \eqref{2076},
\begin{equation}\label{2088}
\nm{D_{\alpha'}^2\mathcal P\bar Z_t}_{L^2}\lesssim C(\mathfrak E).
\end{equation}
This finishes the proof of
\begin{equation}\label{2089}
\frac d{dt}{\mathbf E}_b(t)\lesssim C(\mathfrak E(t)).
\end{equation}
Summing up the results in subsections \S\ref{ddtlower} - \S\ref{ddteb}, we obtain
\begin{equation}\label{2090}
\frac d{dt}\mathfrak E(t)\lesssim C(\mathfrak E(t)).
\end{equation}
\subsection{The proof of Theorem~\ref{blow-up}}\label{proof1} Assume that the initial data satisfies the assumptions of Theorem~\ref{blow-up}. We know, by \eqref{interface-a1}, Proposition~\ref{B2} and Sobolev embedding, that $\frac1{Z_{,\aa}}(0)-1, Z_{,\aa}(0)-1\in H^s(\mathbb R)$, with
\begin{equation}\label{2000-1}
\begin{aligned}
\nm{\frac1{Z_{,\aa}}(0)}_{L^\infty}&\le \|Z_{tt}(0)\|_{L^\infty}+1\lesssim \|Z_{tt}(0)\|_{H^1}+1<\infty;\\
\nm{\frac1{Z_{,\aa}}(0)-1}_{H^s}&\lesssim C\paren{ \|Z_t(0)\|_{H^s}, \|Z_{tt}(0)\|_{H^s}};\\
\nm{Z_{,\aa}(0)-1}_{H^s}&\lesssim C\paren{ \|Z_t(0)\|_{H^s}, \|Z_{tt}(0)\|_{H^s}}.
\end{aligned}
\end{equation}
From Theorem~\ref{prop:local-s} and Proposition~\ref{prop:energy-eq}, we know that to prove the blow-up criterion, Theorem~\ref{blow-up}, it suffices to show that for any solution of \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1} satisfying the regularity properties in Theorem~\ref{blow-up}, and for any $T_0>0$,
\[\sup_{[0, T_0)} \frak E(t)<\infty \quad \text{implies} \quad \sup_{[0,T_0)}( \|Z_{,\aa}(t)\|_{L^\infty}+\|Z_t(t)\|_{H^{3+1/2}}+\|Z_{tt}(t)\|_{H^3})<\infty.\]
We begin with the lower order norms. We first show that, as a consequence of equation \eqref{eq:dza}, if $\|Z_{,\aa}(0)\|_{L^\infty} <\infty$, then \begin{equation}\label{2000-2}\sup_{[0, T_0)}\|Z_{,\aa}(t)\|_{L^\infty}<\infty\qquad \text{ as long as }\quad \sup_{[0, T_0)}\frak E(t)<\infty.\end{equation}
Solving equation \eqref{eq:dza} we get, because $\partial_t+b\partial_\aa=U_h^{-1}\partial_t U_h$,
\begin{equation}\label{2100}
\frac1{Z_{,\aa}}(h(\alpha,t),t)=\frac1{Z_{,\aa}}(\alpha,0)e^{\int_0^t (b_\aa\circ h(\alpha,\tau)-D_\alpha z_t(\alpha,\tau))\,d\tau};
\end{equation}
so by \eqref{2020} of \S\ref{basic-quantities},
\begin{equation}\label{2101}
\sup_{[0, T]}\nm{Z_{,\aa}(t)}_{L^\infty} \le \nm{Z_{,\aa}(0)}_{L^\infty}e^{\int_0^T \|b_\aa(\tau)-D_\aa Z_t(\tau)\|_{L^\infty}\,d\tau}\lesssim \nm{Z_{,\aa}(0)}_{L^\infty}e^{T\sup_{[0, T]}C(\frak E(t))},
\end{equation}
hence \eqref{2000-2} holds. Notice that from \eqref{2100}, we also have
\begin{equation}\label{2101-1}
\sup_{[0, T]}\nm{\frac1 {Z_{,\aa}}(t)}_{L^\infty} \lesssim \nm{\frac1{Z_{,\aa}}(0)}_{L^\infty}e^{T\sup_{[0, T]}C(\frak E(t))}.
\end{equation}
Now by Lemma~\ref{basic-e2},
\begin{align}\label{2102}
\frac d{dt} \|Z_t(t)\|^2_{L^2}&\lesssim \|Z_{tt}(t)\|_{L^2}\|Z_t(t)\|_{L^2} +\|b_\aa(t)\|_{L^\infty}\|Z_t(t)\|^2_{L^2},\\
\label{2103}\frac d{dt} \|Z_{tt}(t)\|^2_{L^2}&\lesssim \|Z_{ttt}(t)\|_{L^2}\|Z_{tt}(t)\|_{L^2} +\|b_\aa(t)\|_{L^\infty}\|Z_{tt}(t)\|^2_{L^2};
\end{align}
and from equations \eqref{eq:dztt} and \eqref{aa1},
\begin{equation}\label{2104}
\bar Z_{ttt}= (\bar Z_{tt}-i)\paren{\frac{\frak a_t}{\frak a}\circ h^{-1}+ \bar {D_\aa Z_t}}=-\frac{iA_1}{Z_{,\aa}}\paren{\frac{\frak a_t}{\frak a}\circ h^{-1}+ \bar {D_\aa Z_t}},
\end{equation}
so
\begin{equation}\label{2105}
\|\bar Z_{ttt}(t)\|_{L^2}\lesssim \|A_1(t)\|_{L^\infty}\nm{\frac{1}{Z_{,\aa}}(t)}_{L^\infty} \paren{ \nm{\frac{\frak a_t}{\frak a}\circ h^{-1}(t)}_{L^2}+ \nm{ D_\aa Z_t(t)}_{L^2}}.
\end{equation}
We want to show that $\nm{\frac{\frak a_t}{\frak a}\circ h^{-1}(t)}_{L^2}$ and $\nm{ D_\aa Z_t(t)}_{L^2}$ can be controlled by $\frak E$ and the initial data; by \eqref{at}, it suffices to control $\|b_\aa(t)\|_{L^2}$, $\nm{ D_\aa Z_t(t)}_{L^2}$ and $\|(\partial_t+b\partial_\aa)A_1(t)\|_{L^2}$.
Applying H\"older's inequality and \eqref{3.21} to \eqref{ba} yields
\begin{equation}\label{2106}
\|b_\aa(t)\|_{L^2}+\|D_\aa Z_t(t)\|_{L^2}\lesssim \nm{\frac1{Z_{,\aa}}(t)}_{L^\infty}\|Z_{t,\aa}(t)\|_{L^2},
\end{equation}
and applying H\"older's inequality, \eqref{3.21}, \eqref{eq:b12} to \eqref{dta1} gives
\begin{equation}\label{2106-1}
\|(\partial_t+b\partial_\aa)A_1(t)\|_{L^2}\lesssim \nm{Z_{tt}(t)}_{L^\infty}\|Z_{t,\aa}(t)\|_{L^2}+\|b_\aa(t)\|_{L^2}\|Z_{t,\aa}(t)\|_{L^2}^2;
\end{equation}
so by \eqref{at}, using the fact \eqref{aa1}, we have
\begin{equation}\label{2107}
\nm{\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^2}+\nm{ D_\aa Z_t}_{L^2} \lesssim \paren{\nm{A_1}_{L^\infty}\nm{\frac1{Z_{,\aa}}}_{L^\infty}+1}\|Z_{t,\aa}\|_{L^2}+\|b_\aa\|_{L^2}\|Z_{t,\aa}\|_{L^2}^2.
\end{equation}
This gives, by further applying the estimates \eqref{2020} in \S\ref{basic-quantities} and \eqref{2101-1}, that for $t\in [0, T]$,
\begin{equation}\label{2108}
\|Z_{ttt}(t)\|_{L^2}\lesssim C(T, \sup_{[0, T]}\frak E(t)) \paren{1+\nm{\frac1{Z_{,\aa}}(t)}_{L^\infty}^2}\lesssim C(T, \sup_{[0, T]}\frak E(t)) \paren{1+\nm{\frac1{Z_{,\aa}}(0)}_{L^\infty}^2}.
\end{equation}
We now apply Gronwall's inequality to \eqref{2103}. This yields
\begin{equation}\label{2109}
\sup_{[0, T]} \|Z_{tt}(t)\|_{L^2}\lesssim C\paren{T, \sup_{[0, T]}\frak E(t), \|Z_{tt}(0)\|_{L^2}, \nm{\frac1{Z_{,\aa}}(0)}_{L^\infty}}.
\end{equation}
We then apply Gronwall's inequality to \eqref{2102}, using \eqref{2109}. We obtain
\begin{equation}\label{2110}
\sup_{[0, T]} \|Z_{t}(t)\|_{L^2}\lesssim C\paren{T, \sup_{[0, T]}\frak E(t), \|Z_{t}(0)\|_{L^2}, \|Z_{tt}(0)\|_{L^2}, \nm{\frac1{Z_{,\aa}}(0)}_{L^\infty}}.
\end{equation}
Therefore the lower order norm $\sup_{[0, T]}(\|Z_t(t)\|_{L^2}+\|Z_{tt}(t)\|_{L^2})$ is controlled by $\sup_{[0, T]}\frak E(t)$, the $L^2$ norm of $(Z_t(0), Z_{tt}(0))$ and the $L^\infty$ norm of $\frac1{Z_{,\aa}(0)}$.\footnote{$\nm{\frac1{Z_{,\aa}(0)}}_{L^\infty}$ is controlled by the $H^1$ norm of $Z_{tt}(0)$, see \eqref{2000-1}.}
We are left with proving
\begin{equation}\label{2111}
\sup_{[0, T_0)}\frak E(t)<\infty \qquad\text{implies }\quad \sup_{[0, T_0)}(\|\partial_\aa^3Z_{t}(t)\|_{\dot H^{1/2}}+\|\partial_\aa^3 Z_{tt}(t)\|_{L^2})<\infty.
\end{equation}
We do so via two stronger results, Propositions~\ref{step1} and \ref{step2}.
Let
\begin{equation}\label{2114}
\begin{aligned}
E_{k}(t):&=E_{D_\aa \partial_\aa^{k-1}\bar Z_{t}}(t)+\|\partial_\aa^k \bar Z_t(t)\|_{L^2}^2
\\&:=\int \frac1{A_1}\abs{Z_{,\alpha'}(\partial_t+b\partial_\aa) \paren{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^k\bar Z_t}}^2\,d\alpha'+\nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^k\bar Z_t(t)}_{\dot H^{1/2}}^2+\|\partial_\aa^k \bar Z_t(t)\|_{L^2}^2,
\end{aligned}
\end{equation}
where $k=2, 3$. We have
\begin{proposition}\label{step1}
There exists a polynomial $p_1=p_1(x)$ with universal coefficients such that
\begin{equation}\label{2115}
\frac d{dt} E_2(t)\le p_1\paren{\frak E(t)} E_2(t).
\end{equation}
\end{proposition}
\begin{proposition}\label{step2}
There exists a polynomial $p_2=p_2(x,y, z)$ with universal coefficients such that
\begin{equation}\label{2116}
\frac d{dt} E_3(t)\le p_2\paren{\frak E(t), E_2(t), \nm{\frac1{Z_{,\aa}}(t)}_{L^\infty}} (E_3(t)+1).
\end{equation}
\end{proposition}
By Gronwall's inequality, we have from \eqref{2115} and \eqref{2116} that
\begin{equation}\label{step1-2}
\begin{aligned}
E_2(t)&\le E_2(0)e^{\int_0^t p_1(\frak E(s))\,ds};\qquad\text{and }\\
E_3(t)&\le \paren{E_3(0)+\int_0^t p_2\paren{\frak E(s), E_2(s), \nm{\frac1{Z_{,\aa}}(s)}_{L^\infty}}\,ds}e^{\int_0^t p_2\paren{\frak E(s), E_2(s), \nm{\frac1{Z_{,\aa}}(s)}_{L^\infty}}\,ds},
\end{aligned}
\end{equation}
so $\sup_{[0, T]} E_2(t)$ is controlled by $E_2(0)$ and $\sup_{[0, T]}\frak E(t)$; and $\sup_{[0, T]} E_3(t)$
is controlled by $E_3(0)$, $\sup_{[0, T]}\frak E(t)$, $\sup_{[0, T]} E_2(t)$ and $\sup_{[0, T]} \nm{\frac1{Z_{,\aa}}(t)}_{L^\infty}$. And by \eqref{2101-1}, $\sup_{[0, T]} E_3(t)$ is in turn controlled by $E_3(0)$, $\sup_{[0, T]}\frak E(t)$, $E_2(0)$ and $ \nm{\frac1{Z_{,\aa}}(0)}_{L^\infty}$.
We will prove Propositions~\ref{step1} and~\ref{step2} in the next two subsections.
In \S\ref{complete1} we will examine the relation between the energy functionals $E_2$, $E_3$ and the Sobolev norms $\| Z_t(t)\|_{H^{3+1/2}}$, $\| Z_{tt}(t)\|_{H^3}$ and
complete the proof of Theorem~\ref{blow-up}.
\subsection{The proof of Proposition~\ref{step1}}\label{proof-prop1}
We begin with a list of quantities controlled by $E_2(t)$.
\subsubsection{Quantities controlled by $E_2(t)$.}\label{quantities-e2}
It is clear by the definition that the following are controlled by $E_2(t)$.
\begin{equation}\label{2117}
\|\partial_{\alpha'}^2\bar Z_t\|_{L^2}^2\le E_2,\quad \nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}_{\dot H^{1/2}}^2 \le E_2,\quad \nm{Z_{,\alpha'}(\partial_t+b\partial_\aa)\paren{ \frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}}_{L^2}^2\le C(\frak E) E_2,
\end{equation}
because $1\le A_1\le C(\frak E)$ by \eqref{2000}. We compute, by product rules and \eqref{eq:dza}, that
\begin{equation}\label{2117-1}
Z_{,\alpha'}(\partial_t+b\partial_\aa) \paren{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}= (\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t+ (b_\aa-D_\aa Z_t)\partial_{\alpha'}^2\bar Z_t,
\end{equation}
therefore, by estimates \eqref{2020} in \S\ref{basic-quantities},
\begin{equation}\label{2118}
\abs{\nm{(\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t}_{L^2}-\nm{Z_{,\alpha'}(\partial_t+b\partial_\aa) \frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}_{L^2}}
\le C(\frak E)\|\partial_{\alpha'}^2\bar Z_t\|_{L^2},
\end{equation}
so
\begin{equation}\label{2119}
\nm{(\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t}_{L^2}^2\le C(\frak E)E_2.
\end{equation}
Now by \eqref{eq:c7},
\begin{equation}
\partial_{\alpha'}(\partial_t+b\partial_\aa)\bar Z_{t,\alpha'}
= (\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_{t}+b_{\alpha'}\partial_{\alpha'}^2\bar Z_{t},
\end{equation}
so by \eqref{2020},
\begin{equation}\label{2120}
\|\partial_{\alpha'}(\partial_t+b\partial_\aa)\bar Z_{t,\alpha'}\|_{L^2}^2\le C(\frak E)E_2.
\end{equation}
Using the Sobolev inequality \eqref{eq:sobolev} and \eqref{2020}, we obtain
\begin{align}\label{2121}
\|Z_{t,\alpha'}\|_{L^\infty}^2&\le 2\|Z_{t,\alpha'}\|_{L^2}\|\partial_{\alpha'}^2Z_{t}\|_{L^2}\le C(\frak E)E_2^{1/2};\qquad\qquad\text{and}\\
\label{2122}
\|(\partial_t+b\partial_\aa)Z_{t,\alpha'}\|_{L^\infty}^2&\le 2\|(\partial_t+b\partial_\aa)Z_{t,\alpha'}\|_{L^2}\|\partial_{\alpha'}(\partial_t+b\partial_\aa)Z_{t,\aa}\|_{L^2}\le C(\frak E)E_2^{1/2}.
\end{align}
We need estimates for some additional quantities, which we give in the following subsections.
\subsubsection{Controlling the quantity $\nm{\partial_{\alpha'}(b_\aa-2\Re D_\aa Z_t)}_{L^2}$.} \label{ddb}
We begin with equation \eqref{ba} and differentiate with respect to $\aa$. We get
\begin{equation}\label{2122-1}
\begin{aligned}
\partial_\aa(b_\aa-2\Re D_\aa Z_t)&=\Re \paren{\bracket{ \partial_\aa \frac1{Z_{,\aa}}, \mathbb H} Z_{t,\alpha'}+ \bracket{Z_{t,\aa}, \mathbb H}\partial_\aa \frac1{Z_{,\aa}} }\\&+\Re \paren{\bracket{ \frac1{Z_{,\aa}}, \mathbb H} \partial_\aa^2 Z_{t}+ \bracket{Z_t, \mathbb H}\partial_\aa^2 \frac1{Z_{,\aa}} };
\end{aligned}
\end{equation}
using $\mathbb H Z_{t,\aa}=-Z_{t,\aa}$ to rewrite the first term,
\begin{equation}\label{2129-1}
\bracket{\partial_\aa \frac1{Z_{,\aa}}, \mathbb H} Z_{t,\alpha'}=-(I+\mathbb H)\paren{\partial_\aa \frac1{Z_{,\aa}} Z_{t,\alpha'}}
\end{equation}
and then applying \eqref{3.21} and \eqref{3.20} to the last two terms. We get, by \eqref{2121} and \eqref{2020},
\begin{equation}\label{2123}
\nm{\partial_\aa(b_\aa-2\Re D_\aa Z_t)}_{L^2}\lesssim \|Z_{t,\alpha'}\|_{L^\infty}\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}\le C(\frak E) E_2^{1/4}.
\end{equation}
\subsubsection{Controlling $\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2}$}\label{ddzt}
We start with $(\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t$ and commute $\partial_t+b\partial_\aa$ with $\partial_{\alpha'}^2$; by \eqref{eq:c11}, we have
\begin{equation}\label{2124}
\begin{aligned}
\partial_{\alpha'}^2\bar Z_{tt}-(\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t
&= [\partial_{\alpha'}^2, (\partial_t+b\partial_\aa)]\bar Z_t
\\&=2b_{\alpha'} \partial_{\alpha'}^2\bar Z_t +\paren{\partial_{\alpha'}b_{\alpha'}} \bar Z_{t,\alpha'}.
\end{aligned}
\end{equation}
We further expand the second term
\begin{equation}\label{2125}
\paren{\partial_{\alpha'}b_{\alpha'}} \bar Z_{t,\alpha'}=\paren{\partial_{\alpha'}(b_{\alpha'}-2\Re D_\aa Z_t)} \bar Z_{t,\alpha'}+2\Re\paren{\partial_\aa\frac1{Z_{,\aa}}Z_{t,\aa}}\bar Z_{t,\aa}+2\Re \paren{\frac1{Z_{,\aa}}\partial_\aa^2 Z_{t}}\bar Z_{t,\aa};
\end{equation}
we get, by \eqref{2124} and \eqref{2125}, that
\begin{equation}\label{2126}
\begin{aligned}
\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2}&\lesssim \|(\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t\|_{L^2}+
\|b_\aa\|_{L^\infty}\|\partial_{\alpha'}^2\bar Z_t\|_{L^2}+\nm{\partial_\aa(b_\aa-2\Re D_\aa Z_t)}_{L^2}\|Z_{t,\alpha'}\|_{L^\infty}\\&+\nm{ \partial_\aa\frac1{Z_{,\aa}}}_{L^2}\|Z_{t,\alpha'}\|_{L^\infty}^2+\|D_\aa Z_t\|_{L^\infty}\|\partial_{\alpha'}^2\bar Z_t\|_{L^2}.
\end{aligned}
\end{equation}
Therefore, by the estimates in \S\ref{quantities-e2}, \S\ref{basic-quantities} and \eqref{2123},
\begin{equation}\label{2127}
\|\partial_{\alpha'}^2\bar Z_{tt}\|^2_{L^2}\lesssim C(\frak E)E_2.
\end{equation}
As a consequence of the Sobolev inequality \eqref{eq:sobolev} and estimates \eqref{2020} in \S\ref{basic-quantities},
\begin{equation}\label{2128}
\|\partial_{\alpha'}\bar Z_{tt}\|^2_{L^\infty}\le 2\|\partial_{\alpha'}\bar Z_{tt}\|_{L^2}\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2} \lesssim C(\frak E)E_2^{1/2}.
\end{equation}
We also have, by the $L^2$ boundedness of $\mathbb H$,
\begin{equation}\label{2128-1}
\|\mathbb H\bar Z_{tt,\aa}\|^2_{L^\infty}\le 2\|\partial_{\alpha'}\mathbb H\bar Z_{tt}\|_{L^2}\|\partial_{\alpha'}^2\mathbb H\bar Z_{tt}\|_{L^2}\lesssim \|\partial_{\alpha'}\bar Z_{tt}\|_{L^2}\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2} \lesssim C(\frak E)E_2^{1/2}.
\end{equation}
\subsubsection{Controlling the quantity $\nm{(\partial_t+b\partial_\aa)\partial_{\alpha'}(b_\aa-2\Re D_\aa Z_t)}_{L^2}$.}\label{dtdabl2}
We begin with \eqref{2122-1}, replacing the first term by \eqref{2129-1},
then use \eqref{eq:c21} on the first term and use \eqref{eq:c14} to compute the remaining three terms,
\begin{equation}\label{2129}
\begin{aligned}
(\partial_t&+b\partial_\aa)\partial_\aa(b_\aa-2\Re D_\aa Z_t)\\&=-\Re \paren{[b,\mathbb H]\partial_\aa \paren{ \partial_\aa \frac1{Z_{,\aa}} Z_{t,\alpha'}} +(I+\mathbb H) (\partial_t+b\partial_\aa)\paren{ \partial_\aa \frac1{Z_{,\aa}} Z_{t,\alpha'}}}\\&
+\Re\paren{ \bracket{(\partial_t+b\partial_\aa) Z_{t,\aa}, \mathbb H}\partial_\aa \frac1{Z_{,\aa}} +\bracket{ Z_{t,\aa}, \mathbb H}\partial_\aa (\partial_t+b\partial_\aa)\frac1{Z_{,\aa}}-\bracket{ Z_{t,\aa}, b; \partial_\aa \frac1{Z_{,\aa}} }}\\&+\Re \paren{\bracket{ \frac1{Z_{,\aa}}, \mathbb H} \partial_\aa (\partial_t+b\partial_\aa) Z_{t,\aa} +\bracket{ (\partial_t+b\partial_\aa) \frac1{Z_{,\aa}}, \mathbb H} \partial_\aa^2 Z_{t}-\bracket{ \frac1{Z_{,\aa}}, b; \partial_\aa^2 Z_{t} } } \\&+\Re \paren{\bracket{Z_{tt}, \mathbb H}\partial_\aa^2 \frac1{Z_{,\aa}} + \bracket{Z_t, \mathbb H}\partial_\aa(\partial_t+b\partial_\aa)\partial_\aa \frac1{Z_{,\aa}} - \bracket{Z_t, b; \partial_\aa^2 \frac1{Z_{,\aa}} } }.
\end{aligned}
\end{equation}
We have, by \eqref{3.20}, \eqref{3.16}, \eqref{3.21}, \eqref{3.17}, and the estimates in \S\ref{basic-quantities}, \S\ref{dadtza}, \S\ref{quantities-e2}--\S\ref{ddzt},
\begin{equation}\label{2130}
\begin{aligned}
&\nm{(\partial_t+b\partial_\aa)\partial_{\alpha'}(b_\aa-2\Re D_\aa Z_t)}_{L^2}\lesssim \|b_\aa\|_{L^\infty}\nm{ \partial_\aa \frac1{Z_{,\aa}}}_{L^2}\| Z_{t,\alpha'}\|_{L^\infty}\\&+\nm{ (\partial_t+b\partial_\aa)\partial_\aa \frac1{Z_{,\aa}}}_{L^2}\| Z_{t,\alpha'}\|_{L^\infty}+\nm{ \partial_\aa \frac1{Z_{,\aa}}}_{L^2}\| (\partial_t+b\partial_\aa)Z_{t,\alpha'}\|_{L^\infty}\\&
+\nm{ \partial_\aa(\partial_t+b\partial_\aa) \frac1{Z_{,\aa}}}_{L^2}\| Z_{t,\alpha'}\|_{L^\infty}+\nm{ \partial_\aa \frac1{Z_{,\aa}}}_{L^2}\| Z_{tt,\alpha'}\|_{L^\infty}\\&
\lesssim C(\frak E) E_2^{1/4}.
\end{aligned}
\end{equation}
\subsubsection{Controlling the quantity $\partial_\aa \mathcal A_\aa$}
We begin with equation \eqref{2029},
\begin{equation}\label{2131}
i \mathcal A_\aa= \frac1{Z_{,\aa}}\partial_{\alpha'} Z_{tt}+(Z_{tt}+i)\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}
\end{equation}
and differentiate with respect to $\aa$. We get
\begin{equation}\label{2132}
i\partial_{\alpha'}\mathcal A_\aa=\frac{\partial_{\alpha'}^2Z_{tt}}{Z_{,\alpha'}}+2\partial_{\alpha'} Z_{tt}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}+(Z_{tt}+i)\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}.
\end{equation}
Applying $(I-\mathbb H)$ yields
\begin{equation}\label{2133}
i(I-\mathbb H) \partial_{\alpha'}\mathcal A_\aa=(I-\mathbb H)(\frac{\partial_{\alpha'}^2Z_{tt}}{Z_{,\alpha'}})+2(I-\mathbb H)(\partial_{\alpha'} Z_{tt}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}})+(I-\mathbb H)((Z_{tt}+i)\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}).
\end{equation}
We rewrite the first term on the right by commuting out $\frac1{Z_{,\alpha'}}$,
and use $(I-\mathbb H) \partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}=0$ to rewrite the third term on the right of \eqref{2133} as a commutator.
We have
\begin{equation}\label{2136}
i(I-\mathbb H) \partial_{\alpha'}\mathcal A_\aa-\frac1{Z_{,\aa}}(I-\mathbb H)\partial_{\alpha'}^2Z_{tt}=
[\frac{1}{Z_{,\alpha'}}, \mathbb H]{\partial_{\alpha'}^2Z_{tt}}
+2(I-\mathbb H)(\partial_{\alpha'} Z_{tt}\partial_{\alpha'}\frac{1}{Z_{,\alpha'}})+ [Z_{tt}, \mathbb H]\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}.
\end{equation}
Taking imaginary parts, then applying \eqref{3.20}, \eqref{3.21} and H\"older's inequality gives
\begin{equation}\label{2137}
\nm{\partial_{\alpha'}\mathcal A_\aa-\Im\braces{\frac{1}{Z_{,\alpha'}}(I-\mathbb H)({\partial_{\alpha'}^2Z_{tt}})}}_{L^2}\lesssim \nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^2}\|Z_{tt,\alpha'}\|_{L^\infty}\lesssim C(\frak E)E_2^{1/4}.
\end{equation}
\subsubsection{Controlling $\nm{\partial_\aa\paren{\frac{\frak a_t}{\frak a}\circ h^{-1}}}_{L^2}$}\label{data}
We begin with \eqref{at}. We have controlled $\nm{\partial_\aa(b_\aa-2\Re D_\aa Z_t)}_{L^2}$ in \eqref{2123}; we are left with $\nm{\partial_\aa \paren{\frac{(\partial_t+b\partial_\aa)A_1}{A_1}}}_{L^2}$.
We proceed with computing $\partial_\aa A_1$, using \eqref{a1}. We have
\begin{equation}\label{2138}
\begin{aligned}
\partial_\aa A_1&=-\Im\paren{ [Z_{t,\aa},\mathbb H]\bar Z_{t,\aa}+[Z_t, \mathbb H]\partial_\aa^2\bar Z_t}\\&=-\Im (-\mathbb H |\bar Z_{t,\aa}|^2+[Z_t, \mathbb H]\partial_\aa^2\bar Z_t);
\end{aligned}
\end{equation}
here we used the fact $\mathbb H \bar Z_{t,\aa}=\bar Z_{t,\aa}$ to expand the first term, then removed the term $\Im |Z_{t,\aa}|^2=0$. Applying \eqref{3.20}, \eqref{2121} and \eqref{2020} gives
\begin{equation}\label{2139}
\|\partial_\aa A_1\|_{L^2}\lesssim \|Z_{t,\aa}\|_{L^\infty}\|Z_{t,\aa}\|_{L^2}\lesssim C(\frak E) E_2^{1/4}.
\end{equation}
Now taking the derivative $\partial_t+b\partial_\aa$ of \eqref{2138}, using \eqref{eq:c21} and \eqref{eq:c14}, yields
\begin{equation}\label{2140}
\begin{aligned}
(\partial_t+b\partial_\aa)\partial_\aa A_1&=\Im \paren{[b, \mathbb H]\partial_\aa |\bar Z_{t,\aa}|^2+2 \mathbb H \Re\braces{Z_{t,\aa} (\partial_t+b\partial_\aa)\bar Z_{t,\aa} }}\\&
-\Im ([Z_{tt}, \mathbb H]\partial_\aa^2\bar Z_t+[Z_t,\mathbb H]\partial_\aa (\partial_t+b\partial_\aa )\bar Z_{t,\aa}-[Z_t, b; \partial_\aa^2\bar Z_t ]);
\end{aligned}
\end{equation}
by \eqref{3.20}, and then using \eqref{2020}, \eqref{2121}, \eqref{2128}, we get
\begin{equation}\label{2141}
\begin{aligned}
\nm{(\partial_t+b\partial_\aa)\partial_\aa A_1}_{L^2}&\lesssim \|b_\aa\|_{L^\infty}\|Z_{t,\aa}\|_{L^2}\|Z_{t,\aa}\|_{L^\infty}+ \|Z_{tt,\aa}\|_{L^\infty}\|Z_{t,\aa}\|_{L^2}\\&+\|Z_{t,\aa}\|_{L^\infty}\|(\partial_t+b\partial_\aa)\bar Z_{t,\aa}\|_{L^2}\lesssim C(\frak E) E_2^{1/4}.
\end{aligned}
\end{equation}
Commuting $\partial_\aa$ with $\partial_t+b\partial_\aa$ gives
\begin{equation}\label{2142}
\nm{\partial_\aa (\partial_t+b\partial_\aa)A_1}_{L^2}\lesssim \nm{(\partial_t+b\partial_\aa)\partial_\aa A_1}_{L^2}+\nm{b_\aa \partial_\aa A_1}_{L^2}\lesssim C(\frak E) E_2^{1/4}.
\end{equation}
Combining \eqref{2142} with \eqref{2139} and \eqref{2123}, and using \eqref{2000}, \eqref{2021}, we obtain
\begin{equation}\label{2143}
\nm{\partial_\aa\paren{\frac{\frak a_t}{\frak a}\circ h^{-1}}}_{L^2} \lesssim C(\frak E) E_2^{1/4}.
\end{equation}
Summing up the estimates obtained in \S\ref{quantities-e2}--\S\ref{data}, we have that the following quantities are controlled by $C(\frak E)E_2^{1/2}$:
\begin{equation}\label{2144}
\begin{aligned}
&\|\partial_{\alpha'}^2\bar Z_t\|_{L^2}, \quad \nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}_{\dot H^{1/2}},\quad \nm{Z_{,\alpha'}(\partial_t+b\partial_\aa)\paren{ \frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}}_{L^2}, \\& \quad \nm{(\partial_t+b\partial_\aa)\partial_{\alpha'}^2\bar Z_t}_{L^2}, \quad
\nm{\partial_{\alpha'}(\partial_t+b\partial_\aa)\bar Z_{t,\aa}}_{L^2},\quad \|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2},\\&
\|Z_{t,\alpha'}\|_{L^\infty}^2,\quad
\|(\partial_t+b\partial_\aa)Z_{t,\alpha'}\|_{L^\infty}^2,\quad \|\partial_{\alpha'}\bar Z_{tt}\|^2_{L^\infty},
\quad \|\mathbb H\bar Z_{tt,\aa}\|^2_{L^\infty},\\&
\nm{\partial_\aa(b_\aa-2\Re D_\aa Z_t)}_{L^2}^2, \quad \nm{(\partial_t+b\partial_\aa)\partial_\aa(b_\aa-2\Re D_\aa Z_t)}_{L^2}^2, \\&\quad \nm{\partial_{\alpha'}\mathcal A_\aa-\Im\braces{\frac{1}{Z_{,\alpha'}}(I-\mathbb H)({\partial_{\alpha'}^2Z_{tt}})}}_{L^2}^2,\quad \nm{\partial_\aa\paren{\frac{\frak a_t}{\frak a}\circ h^{-1}}}_{L^2}^2,\quad \|\partial_\aa A_1\|_{L^2}^2.
\end{aligned}
\end{equation}
\subsubsection{Controlling $\frac d{dt}E_2(t)$} We are now ready to estimate $\frac d{dt}E_2(t)$.
We know
\[E_2(t)=E_{D_\aa \partial_\aa \bar Z_{t}}(t)+\|\partial_\aa^2 \bar Z_t(t)\|_{L^2}^2,\]
where $E_{D_\aa \partial_\aa \bar Z_{t}}(t)$ is as defined in \eqref{eq:41}. We use Lemma~\ref{basic-e} on $E_{D_\aa \partial_\aa \bar Z_{t}}(t)$ and Lemma~\ref{basic-e2} on $\|\partial_\aa^2 \bar Z_t(t)\|_{L^2}^2$.
We start with $\|\partial_\aa^2 \bar Z_t(t)\|_{L^2}^2$. We know by Lemma~\ref{basic-e2} that
\begin{equation}\label{2144-1}
\frac d{dt} \|\partial_\aa^2 \bar Z_t(t)\|_{L^2}^2\lesssim \|(\partial_t+b\partial_\aa)\partial_\aa^2 \bar Z_t(t)\|_{L^2}\|\partial_\aa^2 \bar Z_t(t)\|_{L^2}+\|b_\aa\|_{L^\infty}\|\partial_\aa^2 \bar Z_t(t)\|_{L^2}^2.
\end{equation}
We have controlled $\|(\partial_t+b\partial_\aa)\partial_\aa^2 \bar Z_t(t)\|_{L^2}$ in \eqref{2119}, and $\|b_\aa\|_{L^\infty}$ in \S\ref{basic-quantities}; therefore
\begin{equation}\label{2145}
\frac d{dt} \|\partial_\aa^2 \bar Z_t(t)\|_{L^2}^2\lesssim C(\frak E(t))E_2(t).
\end{equation}
We now estimate $\frac d{dt} E_{D_\aa \partial_\aa \bar Z_{t}}(t)$. Taking $\Theta=D_\aa \bar Z_{t,\aa}$ in Lemma~\ref{basic-e}, we have
\begin{equation}\label{2149}
\frac d{dt} E_{D_\aa \partial_\aa \bar Z_{t}}(t) \le \nm{\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^\infty} E_{D_\aa \partial_\aa \bar Z_{t}}(t)+2 E_{D_\aa \partial_\aa \bar Z_{t}}(t)^{1/2}\paren{\int\frac{|\mathcal P D_\aa \bar Z_{t,\aa}|^2}{\mathcal A}\,d\aa}^{1/2}.
\end{equation}
We have controlled $\nm{\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^\infty}$ in \eqref{2022}. We need to control
$\int\frac{|\mathcal P D_\aa \bar Z_{t,\aa}|^2}{\mathcal A}\,d\aa$.
We know $\mathcal A=\frac{A_1}{|Z_{,\aa}|^2}$ and $A_1\ge 1$, so
\begin{equation}\label{2150}
\int\frac{|\mathcal P D_\aa \bar Z_{t,\aa}|^2}{\mathcal A}\,d\aa\le \int |Z_{,\aa}\mathcal P D_\aa \bar Z_{t,\aa}|^2 \,d\aa.
\end{equation}
We compute
\begin{equation}\label{2112}
\mathcal P D_\aa \bar Z_{t,\aa}= \bracket{\mathcal P, D_\aa} \bar Z_{t,\aa}+ \frac1{Z_{,\aa}}\partial_\aa\mathcal P \bar Z_{t,\aa};
\end{equation}
further expanding $\bracket{\mathcal P, D_\aa} \bar Z_{t,\aa}$ by \eqref{eq:c5-1} yields
\begin{equation}\label{2146}
\bracket{\mathcal P, D_\aa} \bar Z_{t,\aa}= (-2D_\aa Z_{tt}) D_\aa \bar Z_{t,\aa} -2(D_\aa Z_t)(\partial_t +b\partial_\aa)D_\aa \bar Z_{t,\aa};
\end{equation}
and by \eqref{2020} and \eqref{2117},
\begin{equation}\label{2146-1}
\begin{aligned}
\|Z_{,\aa}\bracket{\mathcal P, D_\aa} \bar Z_{t,\aa}\|_{L^2}&\lesssim \|D_\aa Z_{tt}\|_{L^\infty}\|\partial_\aa^2 \bar Z_{t}\|_{L^2}+\|D_\aa Z_t\|_{L^\infty}\nm{Z_{,\aa}(\partial_t +b\partial_\aa)D_\aa \bar Z_{t,\aa}}_{L^2}\\&\lesssim C(\frak E) E_2^{1/2}.
\end{aligned}
\end{equation}
We are left with controlling $\|\partial_\aa \mathcal P \bar Z_{t,\aa}\|_{L^2}$.
Taking the derivative in $\aa$ of \eqref{base-eq} yields
\begin{equation}\label{2147}
\begin{aligned}
\partial_\aa \mathcal P \bar Z_{t,\aa}&=-(\partial_t+b\partial_\aa)(\paren{\partial_\aa b_\aa} \partial_{\aa}\bar Z_{t})-\paren{\partial_\aa b_\aa}\partial_\aa \bar Z_{tt}-i\paren{\partial_\aa \mathcal A_\aa} \partial_\aa \bar Z_t\\&
-(\partial_t+b\partial_\aa)( b_\aa \partial_{\aa}^2\bar Z_{t})- b_\aa\partial_\aa^2 \bar Z_{tt}-i \mathcal A_\aa \partial_\aa^2 \bar Z_t-b_\aa^2 \partial_\aa^2 \bar Z_{t}- b_\aa (\partial_\aa b_\aa) \partial_{\aa}\bar Z_{t}\\&
+\frac{\frak a_t}{\frak a}\circ h^{-1} \partial_\aa^2\bar Z_{tt}+2\paren{ \partial_\aa\frac{\frak a_t}{\frak a}\circ h^{-1} }\partial_\aa \bar Z_{tt}+\paren{\partial_\aa^2 \frac{\frak a_t}{\frak a}\circ h^{-1}} (\bar Z_{tt}-i);
\end{aligned}
\end{equation}
we further expand the terms in the first line and the last term in the second line according to the available estimates in \S\ref{ddb}--\S\ref{data},
\begin{equation}\label{2148}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})&(\paren{\partial_{\alpha'} b_{\alpha'}} \partial_{\alpha'}\bar Z_{t})=(\partial_t+b\partial_{\alpha'})\braces{\partial_{\alpha'} \paren{ b_{\alpha'}-2\Re D_{\alpha'} Z_t} \partial_{\alpha'}\bar Z_{t}}\\&+2 \braces{\Re \partial_{\alpha'} \paren{ D_{\alpha'} Z_t}} (\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_{t}+2 \braces{\Re (\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \paren{ D_{\alpha'} Z_t}} \partial_{\alpha'}\bar Z_{t}
\end{aligned}
\end{equation}
we expand the factors in the second line further by product rules,
\begin{equation}\label{2151}
\Re \partial_{\alpha'} \paren{ D_{\alpha'} Z_t}=\Re \partial_{\alpha'} \frac1{Z_{,\alpha'}} \partial_{\alpha'} Z_t +\Re\frac {\partial_{\alpha'}^2 Z_t}{Z_{,\alpha'}},
\end{equation}
\begin{equation}\label{2152}
\begin{aligned}
\Re (\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \paren{ D_{\alpha'} Z_t}&= \Re (\partial_t+b\partial_{\alpha'})\partial_{\alpha'} \frac1{Z_{,\alpha'}} \partial_{\alpha'} Z_t +\Re \partial_{\alpha'} \frac1{Z_{,\alpha'}} (\partial_t+b\partial_{\alpha'})\partial_{\alpha'} Z_t \\&+\Re\frac {(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 Z_t}{Z_{,\alpha'}}+\Re\frac {\partial_{\alpha'}^2 Z_t}{Z_{,\alpha'}}(b_{\alpha'}-D_{\alpha'} Z_t),
\end{aligned}
\end{equation}
here we used \eqref{eq:dza} in the last term; and by \eqref{eq:c7},
\begin{equation}\label{2153}
(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_{t}=\bar Z_{tt,\alpha'}-b_{\alpha'} \bar Z_{t,\alpha'}.
\end{equation}
We are now ready to conclude, by \eqref{2123}, \eqref{2130}, \eqref{2121}, \eqref{2122}, \eqref{2020}, \eqref{2046}, \S\ref{quantities-e2} and the expansions \eqref{2148}--\eqref{2153} that
\begin{equation}\label{2154}
\|(\partial_t+b\partial_{\alpha'})(\paren{\partial_{\alpha'} b_{\alpha'}} \partial_{\alpha'}\bar Z_{t})\|_{L^2}\lesssim C(\frak E) E_2^{1/2}.
\end{equation}
Similarly we can conclude, after expanding if necessary, with a similar estimate for all the terms on the right hand side of \eqref{2147} except for $\paren{\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}} (\bar Z_{tt}-i)$. Let
\begin{equation}\label{2155}
\partial_{\alpha'} \mathcal P \bar Z_{t,\alpha'}=\mathcal R_1+\paren{\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}} (\bar Z_{tt}-i);
\end{equation}
where $\mathcal R_1$ is the sum of the remaining terms on the right hand side of \eqref{2147}. We have, by the argument above, that
\begin{equation}\label{2156}
\|\mathcal R_1\|_{L^2}\lesssim C(\frak E) E_2^{1/2}.
\end{equation}
We control the term $\paren{\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}} (\bar Z_{tt}-i)$ with a similar idea as that in \S\ref{ddtea}, by taking advantage of the fact that $\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}$ is purely real.
Applying $(I-\mathbb H)$ to both sides of \eqref{2155}, and commuting out $\bar Z_{tt}-i$ yields
\begin{equation}\label{2157}
(I-\mathbb H) \partial_{\alpha'} \mathcal P \bar Z_{t,\alpha'}=(I-\mathbb H)\mathcal R_1+\bracket{\bar Z_{tt}, \mathbb H}\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1} + (\bar Z_{tt}-i)(I-\mathbb H)\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1} .
\end{equation}
Because $\mathbb H$ is purely imaginary, we have $\abs{\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}}\le \abs{(I-\mathbb H)\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}}$, and
\begin{equation}\label{2158}
\abs{ (\bar Z_{tt}-i)\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}}\le \abs{(I-\mathbb H) \partial_{\alpha'} \mathcal P \bar Z_{t,\alpha'}}+\abs{(I-\mathbb H)\mathcal R_1}+\abs{\bracket{\bar Z_{tt}, \mathbb H}\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}}.
\end{equation}
Now by \eqref{eq:c10},
\begin{equation}\label{2159}
\bracket{\mathcal P, \partial_{\alpha'}} \bar Z_{t,\alpha'}
=-(\partial_t+b\partial_{\alpha'})(b_{\alpha'}\partial_{\alpha'} \bar Z_{t,\alpha'})-b_{\alpha'}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'}-i\mathcal A_{\alpha'} \partial_{\alpha'} \bar Z_{t,\alpha'};
\end{equation}
so
\begin{equation}\label{2160}
\|\bracket{\mathcal P, \partial_{\alpha'}} \bar Z_{t,\alpha'}\|_{L^2}\lesssim C(\frak E) E_2^{1/2}
\end{equation}
by \eqref{2020}, \S\ref{aa-zdz} and \S\ref{dtdab}. By Lemma~\ref{basic-3-lemma}, and \eqref{2160},
\begin{equation}\label{2161}
\|(I-\mathbb H) \partial_{\alpha'} \mathcal P \bar Z_{t,\alpha'}\|_{L^2}\le \|(I-\mathbb H) \mathcal P \partial_{\alpha'} \bar Z_{t,\alpha'}\|_{L^2}+ \|(I-\mathbb H)\bracket{\mathcal P, \partial_{\alpha'}} \bar Z_{t,\alpha'}\|_{L^2}\lesssim C(\frak E) E_2^{1/2}.
\end{equation}
Now we apply \eqref{3.20} to the commutator on the right hand side of \eqref{2158}. By \eqref{2156} and \eqref{2161}, we have
\begin{equation}\label{2162}
\nm{ (\bar Z_{tt}-i)\partial_{\alpha'}^2 \frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^2}\lesssim C(\frak E) E_2^{1/2}+\nm{\bar Z_{tt,\alpha'}}_{L^\infty}\nm{\partial_{\alpha'} \frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^2}\lesssim C(\frak E) E_2^{1/2} .
\end{equation}
This together with \eqref{2156} and \eqref{2155} gives
\begin{equation}\label{2163}
\|\partial_{\alpha'} \mathcal P \bar Z_{t,\alpha'}\|_{L^2}\lesssim C(\frak E) E_2^{1/2}.
\end{equation}
We can now conclude, by \eqref{2150}, \eqref{2112}, \eqref{2146-1} and \eqref{2163} that
\begin{equation}\label{2164}
\int\frac{|\mathcal P D_{\alpha'} \bar Z_{t,\alpha'}|^2}{\mathcal A}\,d\alpha'\lesssim C(\frak E) E_2^{1/2};
\end{equation}
and consequently,
\begin{equation}\label{2165}
\frac d{dt} E_{D_{\alpha'} \partial_{\alpha'} \bar Z_t}(t) \lesssim C(\frak E) E_2.
\end{equation}
Combining \eqref{2145} and \eqref{2165} yields
\begin{equation}\label{2166}
\frac d{dt} E_2(t) \lesssim C(\frak E(t)) E_2(t).
\end{equation}
This concludes the proof of Proposition~\ref{step1}.
\subsection{The proof of Proposition~\ref{step2}}\label{proof-prop2}
We begin with discussing quantities controlled by $E_3$. Since the idea is similar to that in previous sections, when the estimates are straightforward, we do not always give the full details.
\subsubsection{Quantities controlled by $E_3$ and a polynomial of $\frak E$ and $E_2$}\label{quantities-e3}
By the definition of $E_3$, and the fact that $1\le A_1 \le C(\frak E)$, cf. \eqref{2000},
\begin{equation}\label{2200}
\|\partial_{\alpha'}^3\bar Z_t\|_{L^2}^2\le E_3,\quad \nm{Z_{,\alpha'}(\partial_t+b\partial_{\alpha'})\paren{ \frac1{Z_{,\alpha'}}\partial_{\alpha'}^3\bar Z_t}}_{L^2}^2\le C(\frak E) E_3,\quad \nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^3\bar Z_t}_{\dot H^{1/2}}^2\le E_3 .
\end{equation}
By \eqref{eq:dza} and product rules,
\begin{equation}\label{2201}
Z_{,\alpha'}(\partial_t+b\partial_{\alpha'})\paren{ \frac1{Z_{,\alpha'}}\partial_{\alpha'}^3\bar Z_t}=(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3\bar Z_t+(b_{\alpha'}-D_{\alpha'} Z_t)
\partial_{\alpha'}^3\bar Z_t
\end{equation}
so by \eqref{2020},
\begin{equation}\label{2202}
\abs{\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3\bar Z_t}_{L^2}-\nm{Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) \paren{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^3\bar Z_t}}_{L^2}}
\le C(\frak E)\|\partial_{\alpha'}^3\bar Z_t\|_{L^2},
\end{equation}
therefore
\begin{equation}\label{2203}
\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3\bar Z_t}_{L^2}^2\le C(\frak E)E_3.
\end{equation}
We commute out $\partial_{\alpha'}$, by \eqref{eq:c7},
\begin{equation}\label{2204}
\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\bar Z_t=(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3\bar Z_t+b_{\alpha'}\partial_{\alpha'}^3\bar Z_t,
\end{equation}
so
\begin{equation}\label{2205}
\|\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\bar Z_t\|_{L^2}^2\le C(\frak E)E_3.
\end{equation}
As a consequence of the Sobolev inequality \eqref{eq:sobolev}, and \eqref{2144},
\begin{align}\label{2206}
\|\partial_{\alpha'}^2\bar Z_t\|_{L^\infty}^2&\le 2\|\partial_{\alpha'}^2\bar Z_t\|_{L^2}\|\partial_{\alpha'}^3\bar Z_t\|_{L^2} \lesssim C(\frak E, E_2)E_3^{1/2},\\
\label{2207}
\|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\bar Z_t\|_{L^\infty}^2&\le 2\|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\bar Z_t\|_{L^2}\|\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\bar Z_t\|_{L^2} \lesssim C(\frak E, E_2)E_3^{1/2}.
\end{align}
Now we commute out $\partial_{\alpha'}^2$ by \eqref{eq:c11}, and get
\begin{equation}\label{2208}
\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_t=(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3\bar Z_t+\paren{\partial_{\alpha'}b_{\alpha'}}\partial_{\alpha'}^2\bar Z_t+2b_{\alpha'}\partial_{\alpha'}^3\bar Z_t.
\end{equation}
We expand the second term further according to the available estimate \eqref{2123}, as we did in \eqref{2125}; we get
\begin{equation}\label{2209}
\|\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_t\|_{L^2}^2\le C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3+1);
\end{equation}
and consequently by the Sobolev inequality \eqref{eq:sobolev} and \eqref{2144},
\begin{equation}\label{2210}
\|\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_t\|_{L^\infty}^2\le C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{equation}
We need to control some additional quantities.
\subsubsection{Controlling $\|\partial_{\alpha'}A_1\|_{L^\infty}$, $\|\partial_{\alpha'}^2A_1\|_{L^2}$ and $\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}$}
We begin with \eqref{2138}:
\begin{equation}\label{2211}
\partial_{\alpha'} A_1=-\Im\paren{ [Z_{t,\alpha'},\mathbb H]\bar Z_{t,\alpha'}+[Z_t, \mathbb H]\partial_{\alpha'}^2\bar Z_t}.
\end{equation}
By \eqref{eq:b13}, \eqref{2020}, \eqref{2144},
\begin{equation}\label{2212}
\nm{\partial_{\alpha'}A_1}_{L^\infty}\lesssim \|Z_{t,\alpha'}\|_{L^2}\|\partial_{\alpha'}^2Z_{t}\|_{L^2}\lesssim C(\frak E)E_2^{1/2}.
\end{equation}
Differentiating \eqref{2211} with respect to $\alpha'$, then applying \eqref{3.20}, \eqref{3.22} and using $\mathbb H \bar Z_{t,\alpha'}=\bar Z_{t,\alpha'}$ gives
\begin{equation}\label{2213}
\|\partial_{\alpha'}^2A_1\|_{L^2}\lesssim \|Z_{t,\alpha'}\|_{L^\infty}\|\partial_{\alpha'}^2Z_{t}\|_{L^2}\le C(\frak E, E_2),
\end{equation}
where in the last step we used \eqref{2144}. To estimate $\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}$ we begin with
\eqref{interface-a1}:
\[-i\frac 1{Z_{,\alpha'}}=\frac{\bar Z_{tt}-i}{A_1}.\]
Taking two derivatives with respect to $\alpha'$ gives
\begin{equation}\label{2214}
-i\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}=\frac{\partial_{\alpha'}^2\bar Z_{tt}}{A_1}-2\bar Z_{tt,\alpha'}\frac{\partial_{\alpha'}A_1}{A_1^2}+(\bar Z_{tt}-i)\paren{-\frac{\partial_{\alpha'}^2A_1}{A_1^2}+2\frac{(\partial_{\alpha'}A_1)^2}{A_1^3}};
\end{equation}
therefore, because $A_1\ge 1$, and \eqref{aa1}, \eqref{2020}, \eqref{2144}, \eqref{2212}, \eqref{2213},
\begin{equation}\label{2215}
\begin{aligned}
\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}\lesssim &\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2}+\|\partial_{\alpha'}\bar Z_{tt}\|_{L^2}\|\partial_{\alpha'}A_1\|_{L^\infty}\\&+\nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}(\|\partial_{\alpha'}^2A_1\|_{L^2}+\|\partial_{\alpha'}A_1\|_{L^2}\|\partial_{\alpha'}A_1\|_{L^\infty})\le C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}},
\end{aligned}
\end{equation}
and consequently by the Sobolev inequality \eqref{eq:sobolev}, and \eqref{2020},
\begin{equation}\label{2216}
\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^\infty} \le C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}.
\end{equation}
\subsubsection{Controlling $\|\partial_{\alpha'}^2 b_{\alpha'}\|_{L^2}$ and $\|\partial_{\alpha'}^3Z_{tt}\|_{L^2}$}
We are now ready to give the estimates for $ \|\partial_{\alpha'}^2b_{\alpha'}\|_{L^2} $ and $\| \partial_{\alpha'}^3\bar Z_{tt} \|_{L^2}$. We begin with \eqref{2122-1}, differentiating with respect to $\alpha'$, then use \eqref{3.20}, \eqref{3.21}, the fact that $\mathbb H Z_{t,\alpha'}=-Z_{t,\alpha'}$, $\mathbb H\frac1{Z_{,\alpha'}}=\frac1{Z_{,\alpha'}}$, and H\"older's inequality; we get
\begin{equation}\label{2217}
\begin{aligned}
\|\partial_{\alpha'}^2(b_{\alpha'}-2\Re D_{\alpha'} Z_t)\|_{L^2}&\lesssim \|\partial_{\alpha'}^2Z_{t}\|_{L^2}\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^\infty}+\|Z_{t,\alpha'}\|_{L^\infty}\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}\\& \le C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}} .
\end{aligned}
\end{equation}
It is easy to show, by product rules and H\"older's inequality, that
\begin{equation}\label{2217-1}
\begin{aligned}
\|\partial_{\alpha'}^2 D_{\alpha'} Z_t\|_{L^2}&\lesssim \|\partial_{\alpha'}^2Z_{t}\|_{L^2}\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^\infty}+\|Z_{t,\alpha'}\|_{L^\infty}\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}+\nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^3 Z_t}_{L^2}\\&\lesssim C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}+\nm{\frac1{Z_{,\alpha'}}}_{L^\infty}E_3^{1/2},
\end{aligned}
\end{equation}
so
\begin{equation}\label{2218}
\|\partial_{\alpha'}^2 b_{\alpha'}\|_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}+\nm{\frac1{Z_{,\alpha'}}}_{L^\infty}E_3^{1/2}.
\end{equation}
Now starting from \eqref{eq-zta} and taking two derivatives with respect to $\alpha'$ gives
\begin{equation}\label{2219}
\begin{aligned}
\partial_{\alpha'}^3\bar Z_{tt}&=\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_t+\partial_{\alpha'}^2 (b_{\alpha'}\bar Z_{t,\alpha'})
\\&=\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\bar Z_t+(\partial_{\alpha'}^2 b_{\alpha'})\bar Z_{t,\alpha'}+2(\partial_{\alpha'} b_{\alpha'})\partial_{\alpha'} \bar Z_{t,\alpha'}+ b_{\alpha'} \partial_{\alpha'}^3\bar Z_{t},
\end{aligned}
\end{equation}
so
\begin{equation}\label{2220}
\| \partial_{\alpha'}^3\bar Z_{tt} \|_{L^2}^2\le C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}(E_3+1),
\end{equation}
and as a consequence of \eqref{eq:sobolev},
\begin{equation}\label{2221}
\| \partial_{\alpha'}^2\bar Z_{tt} \|_{L^\infty}^2\le C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1) .
\end{equation}
\subsubsection{Controlling $\partial_{\alpha'}^2\mathcal A_{\alpha'}$.} We differentiate \eqref{2136} with respect to $\alpha'$, then take the imaginary parts and use H\"older's inequality, \eqref{3.20}, \eqref{3.21}. We have,
\begin{equation}\label{2222}
\begin{aligned}
\|\partial_{\alpha'}^2\mathcal A_{\alpha'}\|_{L^2}&\le \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}\|\partial_{\alpha'}^3\bar Z_{tt}\|_{L^2}+ \nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^\infty}\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2}\\&+\nm{\partial_{\alpha'}^2\frac1{Z_{,\alpha'}}}_{L^2}\|\partial_{\alpha'}\bar Z_{tt}\|_{L^\infty}\le C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}} ( E_3^{1/2}+1).
\end{aligned}
\end{equation}
\subsubsection{Controlling $\nm{\partial_{\alpha'}^2\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^2}$}
We begin with \eqref{at}, and take two derivatives with respect to $\alpha'$.
\begin{equation}
\begin{aligned}
\partial_{\alpha'}^2\frac{\frak a_t}{\frak a}\circ h^{-1}&=\frac{\partial_{\alpha'}^2 (\partial_t+b\partial_{\alpha'})A_1}{A_1}-2\frac{\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})A_1\partial_{\alpha'} A_1}{A_1^2}\\&+(\partial_t+b\partial_{\alpha'})A_1 \paren{\frac{-\partial_{\alpha'}^2 A_1}{A_1^2} +2\frac{(\partial_{\alpha'} A_1)^2}{A_1^3}}+ \partial_{\alpha'}^2 (b_{\alpha'}-2\Re D_{\alpha'} Z_t)
\end{aligned}
\end{equation}
We have controlled $\nm{\partial_{\alpha'}^2 (b_{\alpha'}-2\Re D_{\alpha'} Z_t)}_{L^2}$, $\nm{\partial_{\alpha'}^2 A_1}_{L^2}$, $\nm{\partial_{\alpha'} A_1}_{L^\infty}$, $\nm{\partial_{\alpha'} A_1}_{L^2}$ and $\nm{\partial_{\alpha'} (\partial_t+b\partial_{\alpha'}) A_1}_{L^2}$ etc. in \eqref{2217}, \eqref{2212}, \eqref{2213}, \eqref{2139}, \eqref{2142} and \eqref{2000}, \eqref{2021}. We are left with $\partial_{\alpha'}^2 (\partial_t+b\partial_{\alpha'})A_1$.
We begin with \eqref{a1}, taking two derivatives with respect to $\alpha'$, then applying $\partial_t+b\partial_{\alpha'}$. We have
\begin{equation}\label{2222-1}
(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 A_1=-\sum_{k=0}^2 C_2^k\Im(\partial_t+b\partial_{\alpha'}) \paren{\bracket{ \partial_{\alpha'}^k Z_t, \mathbb H}\partial_{\alpha'}^{2-k} \bar Z_{t,\alpha'}}
\end{equation}
where $C_2^0=1, C_2^1=2, C_2^2=1$. We use \eqref{eq:c14} to expand the right hand side, then use
\eqref{3.20}, \eqref{3.21} and \eqref{3.22} to do the estimates. We have
\begin{equation}
\|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 A_1\|_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac 1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/4}+1).
\end{equation}
Now we use \eqref{eq:c11} to compute
\begin{equation}\label{2223-1}
\partial_{\alpha'}^2 (\partial_t+b\partial_{\alpha'})A_1= (\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 A_1+\partial_{\alpha'} b_{\alpha'} \partial_{\alpha'} A_1+2 b_{\alpha'} \partial_{\alpha'}^2 A_1.
\end{equation}
Therefore
\begin{equation}
\|\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'}) A_1\|_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac 1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/4}+1),
\end{equation}
consequently
\begin{equation}\label{2224-1}
\nm{\partial_{\alpha'}^2\frac{\frak a_t}{\frak a}\circ h^{-1} }_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac 1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{equation}
\subsubsection{Controlling $\nm{(\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}}_{L^\infty}$ and $\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}$}
We begin with \eqref{eq:dza},
\begin{equation}\label{2225}
(\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}=\frac{1}{Z_{,\alpha'}}(b_{\alpha'}-D_{\alpha'} Z_t),
\end{equation}
differentiating twice with respect to $\alpha'$; we get
\begin{equation}\label{2226}
\begin{aligned}
&\partial_{\alpha'}^2 (\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}=\paren{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}( b_{\alpha'}-D_{\alpha'}Z_t)\\&+2\paren{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}( \partial_{\alpha'}b_{\alpha'}-\partial_{\alpha'}D_{\alpha'}Z_t)+\frac{1}{Z_{,\alpha'}}( \partial_{\alpha'}^2b_{\alpha'}-\partial_{\alpha'}^2D_{\alpha'}Z_t).
\end{aligned}
\end{equation}
We further expand
$\partial_{\alpha'}D_{\alpha'}Z_t$
and
$\partial_{\alpha'}^2D_{\alpha'}Z_t$
by product rules, then use H\"older's inequality, \eqref{2020}, \eqref{2144} and \eqref{2215}, \eqref{2216}, \eqref{2217-1}, \eqref{2218}. We have
\begin{equation}\label{2227}
\begin{aligned}
\nm{\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}}_{L^2}&\lesssim C(\frak E)\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}+\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^\infty}\| \partial_{\alpha'}b_{\alpha'}-\partial_{\alpha'}D_{\alpha'}Z_t\|_{L^2}\\&+\nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}\| \partial_{\alpha'}^2b_{\alpha'}-\partial_{\alpha'}^2D_{\alpha'}Z_t\|_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{aligned}
\end{equation}
Now by \eqref{eq:c11},
\begin{equation}\label{2228}
(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}=\partial_{\alpha'}^2 (\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}-(\partial_{\alpha'}b_{\alpha'})\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}-2b_{\alpha'}\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}
\end{equation}
so
\begin{equation}\label{2229}
\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}\lesssim
C\paren{\frak E, E_2, \nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{equation}
We apply \eqref{2020} to \eqref{2225}, and obtain
\begin{equation}\label{2230}
\nm{(\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}}_{L^\infty} \le C(\frak E)\nm{\frac{1}{Z_{,\alpha'}}}_{L^\infty}.
\end{equation}
\subsubsection{Controlling $\|\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'}) b_{\alpha'}\|_{L^2}$
}\label{da2dtba} By \eqref{eq:c11},
\begin{equation}\label{2223}
\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'}) b_{\alpha'}=(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 b_{\alpha'}+(\partial_{\alpha'} b_{\alpha'})^2+2 b_{\alpha'}\partial_{\alpha'}^2 b_{\alpha'}
\end{equation}
where by H\"older's and Sobolev inequalities \eqref{eq:sobolev},
\begin{equation}\label{2224}
\begin{aligned}
&\|(\partial_{\alpha'} b_{\alpha'})^2\|_{L^2}+\| b_{\alpha'}\partial_{\alpha'}^2 b_{\alpha'}\|_{L^2} \lesssim \|\partial_{\alpha'} b_{\alpha'}\|_{L^2}\|\partial_{\alpha'} b_{\alpha'}\|_{L^\infty}+\|\partial_{\alpha'}^2 b_{\alpha'}\|_{L^2}\| b_{\alpha'}\|_{L^\infty}\\&\lesssim \|\partial_{\alpha'} b_{\alpha'}\|_{L^2}^{3/2}\|\partial_{\alpha'}^2b_{\alpha'}\|_{L^2}^{1/2}+\|\partial_{\alpha'}^2 b_{\alpha'}\|_{L^2}\| b_{\alpha'}\|_{L^\infty}\\&\le C\paren{\frak E, E_2, \nm{\frac 1{Z_{,\alpha'}}}_{L^\infty}} (E_3^{1/2}+1).
\end{aligned}
\end{equation}
Now we consider $(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 b_{\alpha'}$. We begin with \eqref{ba-1},
differentiating twice with respect to $\alpha'$, then applying $\partial_t+b\partial_{\alpha'}$,
\begin{equation}\label{2232}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2(b_{\alpha'}-2\Re D_{\alpha'} Z_t)&=\sum_{k=0}^2 C_2^k\Re(\partial_t+b\partial_{\alpha'}) \paren{\bracket{ \partial_{\alpha'}^k \frac1{Z_{,\alpha'}}, \mathbb H}\partial_{\alpha'}^{2-k} Z_{t,\alpha'}}\\&+ \sum_{k=0}^2 C_2^k\Re(\partial_t+b\partial_{\alpha'}) \paren{\bracket{\partial_{\alpha'}^k Z_t, \mathbb H}\partial_{\alpha'}^{3-k} \frac1{Z_{,\alpha'}} },
\end{aligned}
\end{equation}
where $C_2^0=1, C_2^1=2, C_2^2=1$. We expand $(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2 D_{\alpha'} Z_t$ by product rules, and use \eqref{eq:c14} to further expand the right hand side of \eqref{2232};
we then use \eqref{3.20}, \eqref{3.21}, \eqref{3.22}, \eqref{eq:b12} and H\"older's inequality to do the estimates. We have
\begin{equation}\label{2233}
\begin{aligned}
&\|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2b_{\alpha'}\|_{L^2}\lesssim \|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3Z_{t}\|_{L^2}\nm{\frac1{Z_{,\alpha'}}}_{L^\infty}\\&+\|\partial_{\alpha'}^3Z_{t}\|_{L^2}\nm{(\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}}_{L^\infty}+ \|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2Z_{t}\|_{L^\infty}\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^2}\\&+\|\partial_{\alpha'}^2Z_{t}\|_{L^\infty}\paren{\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^2}+\nm{\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})\frac{1}{Z_{,\alpha'}}}_{L^2}}\\&
+\|\partial_{\alpha'}^2Z_{t}\|_{L^\infty}\|b_{\alpha'}\|_{L^\infty}\nm{\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}_{L^2}+\|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}Z_{t}\|_{L^\infty}\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}\\&+\|\partial_{\alpha'}Z_{t}\|_{L^\infty}\|b_{\alpha'}\|_{L^\infty}\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}+\|Z_{tt,\alpha'}\|_{L^\infty}\nm{\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}\\&+\|Z_{t,\alpha'}\|_{L^\infty}\nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\frac{1}{Z_{,\alpha'}}}_{L^2}.
\end{aligned}
\end{equation}
This, together with \eqref{2224}, gives
\begin{equation}\label{2234}
\|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2b_{\alpha'}\|_{L^2}+\|\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})b_{\alpha'}\|_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{equation}
\subsubsection{Controlling $\frac d{dt} E_3(t)$}\label{ddte3} We know $E_3(t)$ consists of $E_{D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t}$ and $\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}^2$. We apply Lemma~\ref{basic-e} to $E_{D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t}$ and Lemma~\ref{basic-e2} to $\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}^2$. We begin with $\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}^2$. We have, by Lemma~\ref{basic-e2},
\begin{equation}\label{2235}
\frac d{dt}\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}^2\lesssim \|(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^3 \bar Z_t\|_{L^2}\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}+\|b_{\alpha'}\|_{L^\infty}\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}^2
\end{equation}
We have controlled all the factors, in \eqref{2020} and \eqref{2203}. We have
\begin{equation}\label{2236}
\frac d{dt}\|\partial_{\alpha'}^3 \bar Z_t\|_{L^2}^2\lesssim C(\frak E) E_3(t).
\end{equation}
We now consider $E_{D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t}$. Applying Lemma~\ref{basic-e} to $\Theta=D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t$ yields
\begin{equation}\label{2237}
\frac d{dt} E_{D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t}(t)\le \nm{\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^\infty} E_{D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t}(t)+ 2E_{D_{\alpha'} \partial_{\alpha'}^2 \bar Z_t}(t)^{1/2}\paren{\int \frac{|\mathcal PD_{\alpha'} \partial_{\alpha'}^2 \bar Z_t|^2}{\mathcal A}\,d\alpha'}^{1/2}
\end{equation}
We have controlled the factor $\nm{\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^\infty} $ in \S\ref{ata-da1}; we are left with the second term. We know, by $\mathcal A |Z_{,\alpha'}|^2=A_1\ge 1$, that
\begin{equation}\label{2238}
\int \frac{|\mathcal PD_{\alpha'} \partial_{\alpha'}^2 \bar Z_t|^2}{\mathcal A}\,d\alpha'\le \int |Z_{,\alpha'}\mathcal PD_{\alpha'} \partial_{\alpha'}^2 \bar Z_t|^2\,d\alpha'.
\end{equation}
We compute
\begin{equation}
\label{2239}
\mathcal P D_{\alpha'} \partial_{\alpha'}^2 \bar Z_{t}= \bracket{\mathcal P, D_{\alpha'}} \partial_{\alpha'}^2 \bar Z_{t}+ D_{\alpha'}\bracket{\mathcal P,\partial_{\alpha'}} \bar Z_{t,\alpha'}+ D_{\alpha'}\partial_{\alpha'} [\mathcal P , \partial_{\alpha'}] \bar Z_{t}+D_{\alpha'} \partial_{\alpha'}^2\mathcal P \bar Z_t;
\end{equation}
and expand further by \eqref{eq:c5-1},
\begin{equation}\label{2240}
\bracket{\mathcal P, D_{\alpha'}} \partial_{\alpha'}^2 \bar Z_{t}= (-2D_{\alpha'} Z_{tt}) D_{\alpha'} \partial_{\alpha'}^2 \bar Z_{t}-2(D_{\alpha'} Z_t)(\partial_t+b\partial_{\alpha'}) D_{\alpha'} \partial_{\alpha'}^2 \bar Z_{t};
\end{equation}
by
\eqref{eq:c10} and product rules,
\begin{equation}\label{2241}
\begin{aligned}
\partial_{\alpha'}[\mathcal P, \partial_{\alpha'}]\bar Z_{t,\alpha'}& =-(\partial_t+b\partial_{\alpha'})(\partial_{\alpha'} b_{\alpha'}\partial_{\alpha'}^2 \bar Z_{t})-\partial_{\alpha'} b_{\alpha'}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'} -i\partial_{\alpha'}\mathcal A_{\alpha'} \partial_{\alpha'}^2 \bar Z_{t}\\&
-(\partial_t+b\partial_{\alpha'})( b_{\alpha'}\partial_{\alpha'}^3 \bar Z_{t})- b_{\alpha'}\partial_{\alpha'}^2 (\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'} -i\mathcal A_{\alpha'} \partial_{\alpha'}^3 \bar Z_{t}\\&- b_{\alpha'}^2\partial_{\alpha'}^3 \bar Z_{t}-b_{\alpha'}\partial_{\alpha'} b_{\alpha'} \partial_{\alpha'}^2 \bar Z_{t};
\end{aligned}
\end{equation}
and by \eqref{eq:c10},
\begin{equation}\label{2242}
\partial_{\alpha'}^2[\mathcal P,\partial_{\alpha'}]\bar Z_t=-\partial_{\alpha'}^2(\partial_t+b\partial_{\alpha'})(b_{\alpha'} \partial_{\alpha'}\bar Z_{t})-\partial_{\alpha'}^2(b_{\alpha'}\partial_{\alpha'} \bar Z_{tt})-i\partial_{\alpha'}^2(\mathcal A_{\alpha'} \partial_{\alpha'} \bar Z_t),
\end{equation}
and then expand \eqref{2242} by product rules.
We have controlled all the factors on the right hand sides of \eqref{2240}, \eqref{2241} and \eqref{2242} in \eqref{2020}, \eqref{2144} and \S\ref{quantities-e3}--\S\ref{da2dtba}. We have, by H\"older's inequality,
\begin{equation}\label{2243}
\int |Z_{,\alpha'} \bracket{\mathcal P, D_{\alpha'}} \partial_{\alpha'}^2 \bar Z_{t}|^2+|\partial_{\alpha'}[\mathcal P, \partial_{\alpha'}]\bar Z_{t,\alpha'}|^2+|\partial_{\alpha'}^2[\mathcal P,\partial_{\alpha'}]\bar Z_t|^2\,d\alpha'\lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3+1).
\end{equation}
We are left with the last term $\partial_{\alpha'}^3 \mathcal P\bar Z_{t}$. We expand by product rules, starting from \eqref{quasi-r1}; we have
\begin{equation}\label{2244}
\begin{aligned}
\partial_{\alpha'}^3\mathcal P \bar Z_{t}&=
\frac{\frak a_t}{\frak a}\circ h^{-1} \partial_{\alpha'}^3\bar Z_{tt}+3\partial_{\alpha'}\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }\partial_{\alpha'}^2\bar Z_{tt}+3\partial_{\alpha'}^2\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }\bar Z_{tt,\alpha'}\\&+\partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }(\bar Z_{tt}-i).
\end{aligned}
\end{equation}
Let
\begin{equation}\label{2245}
\mathcal R_2= \frac{\frak a_t}{\frak a}\circ h^{-1} \partial_{\alpha'}^3\bar Z_{tt}+3\partial_{\alpha'}\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }\partial_{\alpha'}^2\bar Z_{tt}+3\partial_{\alpha'}^2\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }\bar Z_{tt,\alpha'}.
\end{equation}
We have controlled all the factors of the terms in $\mathcal R_2$, with
\begin{equation}\label{2246}
\begin{aligned}
&\nm{\mathcal R_2}_{L^2} \le \nm{\frac{\frak a_t}{\frak a}\circ h^{-1}}_{L^\infty}\nm{ \partial_{\alpha'}^3\bar Z_{tt}}_{L^2}+3\nm{\partial_{\alpha'}\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }}_{L^2}\nm{\partial_{\alpha'}^2\bar Z_{tt}}_{L^\infty}\\&+3\nm{\partial_{\alpha'}^2\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }}_{L^2}\nm{\bar Z_{tt,\alpha'}}_{L^2}
\lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{aligned}
\end{equation}
We are left with controlling $\nm{ \partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }(\bar Z_{tt}-i)}_{L^2}$. We use a similar idea as that in \S\ref{ddtea}, that is, to take advantage of the fact that $
\partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }$ is purely real.
Applying $(I-\mathbb H)$ to both sides of \eqref{2244}, with the first three terms replaced by $\mathcal R_2$, and commuting out $\bar Z_{tt}-i$ yields
\begin{equation}\label{2247}
(I-\mathbb H)\partial_{\alpha'}^3\mathcal P \bar Z_{t}=(I-\mathbb H)\mathcal R_2+[\bar Z_{tt},\mathbb H]\partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }+ (\bar Z_{tt}-i)(I-\mathbb H)\partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }.
\end{equation}
Now
\begin{equation}\label{2248}
\partial_{\alpha'}^3\mathcal P \bar Z_{t}=\partial_{\alpha'}^2 [\partial_{\alpha'}, \mathcal P]\bar Z_t+ \partial_{\alpha'} [\partial_{\alpha'}, \mathcal P] \bar Z_{t,\alpha'}+[\partial_{\alpha'}, \mathcal P]\partial_{\alpha'}^2\bar Z_t+\mathcal P\partial_{\alpha'}^3 \bar Z_t,
\end{equation}
and by \eqref{eq:c10},
\begin{equation}\label{2249}
[\mathcal P, \partial_{\alpha'}]\partial_{\alpha'}^2\bar Z_t=-(\partial_t+b\partial_{\alpha'})(b_{\alpha'}\partial_{\alpha'} \partial_{\alpha'}^2\bar Z_t)-b_{\alpha'}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\partial_{\alpha'}^2\bar Z_t-i\mathcal A_{\alpha'} \partial_{\alpha'} \partial_{\alpha'}^2\bar Z_t
\end{equation}
so by \S\ref{basic-quantities}--\S\ref{dtdab}, \S\ref{quantities-e3} and H\"older's inequality,
\begin{equation}\label{2250}
\|[\mathcal P, \partial_{\alpha'}]\partial_{\alpha'}^2\bar Z_t\|_{L^2}\lesssim C(\frak E) E_3^{1/2}.
\end{equation}
We apply Lemma~\ref{basic-3-lemma} to the last term in \eqref{2248}. We have, by \eqref{2243}, \eqref{2250} and Lemma~\ref{basic-3-lemma}, that
\begin{equation}\label{2251}
\| (I-\mathbb H)\partial_{\alpha'}^3\mathcal P \bar Z_{t}\|_{L^2}\lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{equation}
This gives, by \eqref{2247}, that
\begin{equation}\label{2252}
\begin{aligned}
\nm{ \partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }(\bar Z_{tt}-i)}_{L^2}&\le \|(I-\mathbb H)\partial_{\alpha'}^3\mathcal P \bar Z_{t}\|_{L^2}+\|\mathcal R_2\|_{L^2} +\nm{[\bar Z_{tt},\mathbb H]\partial_{\alpha'}^3\paren{\frac{\frak a_t}{\frak a}\circ h^{-1} }}_{L^2}\\&
\lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1);
\end{aligned}
\end{equation}
here for the commutator, we used \eqref{3.20}, \eqref{2144}, and \eqref{2224-1}. Combining with \eqref{2244},
\eqref{2245}, \eqref{2246} yields
\begin{equation}
\nm{ \partial_{\alpha'}^3\mathcal P \bar Z_{t} }_{L^2} \lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3^{1/2}+1).
\end{equation}
Further combining with \eqref{2238}, \eqref{2239}, \eqref{2243} gives
\begin{equation}
\int \frac{|\mathcal PD_{\alpha'} \partial_{\alpha'}^2 \bar Z_t|^2}{\mathcal A}\,d\alpha'\lesssim C\paren{\frak E, E_2, \nm{\frac1{Z_{,\alpha'}}}_{L^\infty}}(E_3+1).
\end{equation}
By \eqref{2237}, this shows that Proposition~\ref{step2} holds.
\subsection{Completing the proof for Theorem~\ref{blow-up}}\label{complete1}
Now we continue the discussion in \S\ref{proof1},
assuming that the initial data satisfies the assumption of Theorem~\ref{blow-up}, and the solution $Z$ satisfies the regularity property in Theorem~\ref{blow-up}. By \eqref{step1-2} and the ensuing discussion, to complete the proof of Theorem~\ref{blow-up}, it suffices to show that for the given data, $E_2(0)<\infty$ and $E_3(0)<\infty$; and that $\sup_{[0, T]}(E_2(t)+E_3(t)+\mathfrak E(t))$ controls the higher order Sobolev norm $\sup_{[0, T]}(\|\partial_{\alpha'}^3 Z_t(t)\|_{\dot{H}^{1/2}}+\|\partial_{\alpha'}^3 Z_{tt}(t)\|_{L^2})$.
By \eqref{2117-1} and \eqref{2124},
\begin{equation}
Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) \paren{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}= \partial_{\alpha'}^2\bar Z_{tt}- (b_{\alpha'}+D_{\alpha'} Z_t)\partial_{\alpha'}^2\bar Z_t- (\partial_{\alpha'} b_{\alpha'})\bar Z_{t,\alpha'};
\end{equation}
expanding further according to the available estimates in \eqref{2001}, \eqref{2123}, and using H\"older's inequality, we have
\begin{equation}
\begin{aligned}
\nm{ Z_{,\alpha'}(\partial_t+b\partial_{\alpha'}) D_{\alpha'}\partial_{\alpha'}\bar Z_t}_{L^2}&\lesssim
\|\partial_{\alpha'}^2\bar Z_{tt}\|_{L^2}+ \|Z_{t,\alpha'}\|_{L^2}\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}\|\partial_{\alpha'}^2 Z_{t}\|_{L^2}+\|D_{\alpha'} Z_t\partial_{\alpha'}^2 \bar Z_t\|_{L^2}\\&+ \|Z_{t,\alpha'}\|_{L^\infty}^2\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}+\|(\partial_{\alpha'} D_{\alpha'} Z_t)\partial_{\alpha'} \bar Z_t\|_{L^2};
\end{aligned}
\end{equation}
it is clear that we also have
\begin{equation}
\nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^2\bar Z_t}_{\dot H^{1/2}}\le C\paren{\nm{\frac 1{Z_{,\alpha'}}-1}_{H^1}, \|Z_t\|_{H^{2+1/2}}};
\end{equation}
so for the given initial data, we have $E_2(0)<\infty$. A similar argument shows that we also have $E_3(0)<\infty$. This implies, by \eqref{step1-2} and the ensuing discussion, that $\sup_{[0, T_0)}(E_2(t)+E_3(t))<\infty$ provided $\sup_{[0, T_0)}\mathfrak E(t)<\infty$.
On the other hand, we have shown in \eqref{2220} that $\|\partial_{\alpha'}^3 \bar Z_{tt}(t)\|_{L^2}$ is controlled by $E_2(t)$, $E_3(t)$ and $\nm{\frac1{Z_{,\alpha'}}(t)}_{L^\infty}$; and by \eqref{Hhalf},
$$\|\partial_{\alpha'}^3 \bar Z_{t}\|_{\dot H^{1/2}}\lesssim \|Z_{,\alpha'}\|_{L^\infty}\paren{\nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'}^3 \bar Z_{t}}_{\dot H^{1/2}}+\nm{\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2}\|\partial_{\alpha'}^3 \bar Z_{t}\|_{L^2}},
$$
so $\|\partial_{\alpha'}^3 \bar Z_{t}\|_{\dot H^{1/2}}$ is controlled by $E_3(t)$, $\mathfrak E(t)$ and $\|Z_{,\alpha'}(t)\|_{L^\infty}$. With a further application of \eqref{2101} and \eqref{2101-1}, we have
\begin{equation}
\sup_{[0, T_0)} \|\partial_{\alpha'}^3 \bar Z_{tt}(t)\|_{L^2}+ \|\partial_{\alpha'}^3 \bar Z_{t}\|_{\dot H^{1/2}}<\infty,\quad\text{provided}\quad \sup_{[0, T_0)}\mathfrak E(t)<\infty.
\end{equation}
This, together with \eqref{2101}, \eqref{2109}, \eqref{2110} and Theorem~\ref{prop:local-s} shows
that Theorem~\ref{blow-up} holds.
\section{The proof of Theorem~\ref{unique}}\label{proof3}
\subsection{Some basic preparations}\label{prepare}
We begin with some basic preparatory analysis that will be used in the proof of Theorem~\ref{unique}.
In the first lemma we construct an energy functional for the difference of the solutions of an equation of the type \eqref{quasi-r}.
We will apply Lemma~\ref{dlemma1} to $\Theta=\bar Z_t,\ \frac1{Z_{,\alpha'}}-1$ and $\bar Z_{tt}$.
\begin{lemma}\label{dlemma1}
Assume $\Theta$, $\tilde{\Theta}$ are smooth and decay at the spatial infinity, and satisfy
\begin{equation}\label{eqdf}
\begin{aligned}
&(\partial_t+b\partial_{\alpha'})^2\Theta+i{\mathcal A}\partial_{\alpha'}\Theta=G,\\
&(\partial_t+\tilde b\partial_{\alpha'})^2\tilde{\Theta}+i\tilde {\mathcal A}\partial_{\alpha'}\tilde{\Theta}=\tilde G.\\
\end{aligned}
\end{equation}
Let
\begin{equation}
\mathfrak F(t)=\int \frac{\kappa}{A_1}\abs{ Z_{,\alpha'}\paren{(\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c}- \mathfrak{Z}_{,\alpha'}\circ l\paren{(\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c}}^2+i\partial_{\alpha'}(\Theta-\tilde{\Theta}\circ l)\bar{(\Theta-\tilde{\Theta}\circ l)}\,d\alpha',
\end{equation}
where $\kappa=\sqrt{\frac{A_1}{\tilde {A_1}\circ l}\, l_{\alpha'}}$, $\mathfrak c$ is a constant, and
\begin{equation}
{\bf F}(t)=\int \abs{ Z_{,\alpha'}\paren{(\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c}- \mathfrak{Z}_{,\alpha'}\circ l\paren{(\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c}}^2\ \ d\alpha'.
\end{equation}
Then
\begin{equation}\label{dlemma1-inq}
\begin{aligned}
&\mathfrak F'(t)\lesssim {\bf F}(t)^{\frac12} \nm{\frac{\kappa}{A_1}}_{L^\infty}\nm{ Z_{,\alpha'}G- \mathfrak{Z}_{,\alpha'}\circ l\,\tilde G\circ l}_{L^2}\\&\quad+{\bf F}(t)^{\frac12} \nm{\frac{\kappa}{A_1}}_{L^\infty}\nm{\frac{(\partial_t+b\partial_{\alpha'})\kappa }{\kappa}}_{L^2}\paren{\nm{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c) }_{L^\infty}+\nm{\mathfrak{Z}_{,\alpha'}((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}+\mathfrak c)}_{L^\infty}}\\&
+{\bf F}(t)^{\frac12} \nm{\frac{\kappa}{A_1}}_{L^\infty}\paren{\nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ h^{-1}}_{L^2}+\nm{D_{\alpha'} Z_t-\tilde D_{\alpha'}\mathfrak{Z}_t\circ l }_{L^2}}\nm{\mathfrak{Z}_{,\alpha'}((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}+\mathfrak c)}_{L^\infty}\\&+{\bf F}(t) \nm{\frac{\kappa}{A_1}}_{L^\infty}\paren{
\nm{\frac{\mathfrak a_t}{\mathfrak a}}_{L^\infty}+\nm{D_{\alpha'} Z_t}_{L^\infty}}
+\nm{1-\kappa}_{L^2}{\bf F}(t)^{\frac12}\paren{\nm{D_{\alpha'}\Theta}_{L^\infty}+\nm{\frac{l_{\alpha'}}{\kappa}}_{L^\infty}\nm{\tilde D_{\alpha'}\tilde{\Theta}}_{L^\infty}}\\&+2\Re i\int \bar{\paren{\frac1{Z_{,\alpha'}}-U_l\frac1{\mathfrak{Z}_{,\alpha'}} }} \paren{\bar{U_l(\mathfrak{Z}_{,\alpha'}((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}+\mathfrak c))}\Theta_{\alpha'}-\bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}(\tilde{\Theta}\circ l)_{\alpha'}}\,d\alpha'.
\end{aligned}
\end{equation}
\end{lemma}
\begin{remark}
By definition, $\frac{\kappa}{A_1}= \sqrt{\frac{l_{\alpha'}}{A_1\tilde {A_1}\circ l}}$. And in what follows, $\sqrt{\tilde a h_\alpha}\, \kappa\circ h=\frac {\sqrt {A_1}}{\mathfrak{Z}_{,\alpha'}\circ l}\circ h$, $\sqrt{a h_\alpha}=\frac {\sqrt {A_1}}{Z_{,\alpha'}}\circ h$.
\end{remark}
\begin{proof}
Let $\theta=\Theta\circ h$, and $\tilde\theta=\tilde{\Theta}\circ \tilde h$. We know $\theta$, $\tilde{\theta}$ satisfy
\begin{equation}\label{eqdfl}
\begin{aligned}
&\partial_t^2\theta+i\mathfrak{a} \partial_\alpha\theta=G\circ h,\\
&\partial_t^2\tilde{\theta}+i\tilde{\mathfrak{a}}\partial_\alpha\tilde{\theta}=\tilde G\circ \tilde h.\\
\end{aligned}
\end{equation}
Changing coordinate by $h$, we get
\begin{equation}
\mathfrak F(t)=\int\abs{ \sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c}}^2+i\partial_\alpha(\theta-\tilde{\theta})\bar{(\theta-\tilde{\theta})}\,d\alpha,
\end{equation}
where $k=\kappa\circ h$, $\sqrt{a}:= \frac{\sqrt{A_1\circ h\, h_\alpha}}{z_{\alpha}}$ and
$\sqrt{{\tilde a} }:= \frac{\sqrt{\tilde {A_1}\circ \tilde h\, \tilde h_\alpha}}{\mathfrak{z}_{\alpha}}$. Notice that here, $\sqrt{a}$ and
$\sqrt{{\tilde a} }$ are complex valued, and $|\sqrt{a}|^2=\mathfrak a$, $|\sqrt{\tilde a}|^2=\tilde{\mathfrak{a}}$. Differentiating with respect to $t$, integrating by parts, then applying equations \eqref{eqdfl}, we get
\begin{equation}\label{2300}
\begin{aligned}
&\mathfrak F'(t)=2\Re\int \bar{\braces{
\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c}}}\left\{\sqrt{\frac{k}{a}} G\circ h- \frac1{\sqrt{{\tilde a} k}}\tilde G\circ \tilde h\right\}\\&\qquad+\bar{\braces{
\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c}}}\left\{\frac12{\frac{k_t}k \paren{\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}+ \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c} } }\right\}\\&-\bar{\braces{
\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac{\tilde{\theta}_t+\mathfrak c}{\sqrt{{\tilde a} k}}}}\braces{\paren{\frac12\frac{\mathfrak{a}_t}{\mathfrak{a}}-i\Im D_\alpha z_t}\sqrt{\frac{k}{a}}\paren{\theta_t+\mathfrak c}-\paren{\frac12\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}-i\Im D_\alpha \mathfrak{z}_t}\frac{\tilde{\theta}_t+\mathfrak c}{\sqrt{{\tilde a} k}} }\,d\alpha \\&+
2\Re i\int \bar{(\theta_t-\tilde{\theta}_t)}(\theta_\alpha-\tilde{\theta}_\alpha)-\bar{\braces{
\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c}}}\braces{\sqrt k\bar{\sqrt{ a}}\theta_\alpha-\frac{\bar{\sqrt{{\tilde a}}}}{\sqrt{k}}\tilde{\theta}_\alpha } \,d\alpha\\&=I+II
\end{aligned}
\end{equation}
where $I$ consists of the terms in the first three lines and $II$ is the last line
$$II:=2\Re i\int \bar{(\theta_t-\tilde{\theta}_t)}(\theta_\alpha-\tilde{\theta}_\alpha)-\bar{\braces{
\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c}}}\braces{\sqrt k\bar{\sqrt{ a}}\theta_\alpha-\frac{\bar{\sqrt{{\tilde a}}}}{\sqrt{k}}\tilde{\theta}_\alpha } \,d\alpha.$$
Further regrouping terms in $II$ we get
\begin{equation}\label{2301}
\begin{aligned}
II=&2\Re i\int \frac{(1-k)}{\sqrt{k}}\bar{ \braces{
\sqrt{\frac{k}{a}} \paren{\theta_t+\mathfrak c}- \frac1{\sqrt{{\tilde a} k}}\paren{\tilde{\theta}_t+\mathfrak c}} }\braces{\bar{\sqrt{a}} \theta_\alpha+\bar{\sqrt{{\tilde a}}}\tilde{\theta}_\alpha} \,d\alpha\\&+
2\Re i\int \bar{(\sqrt{a}-\sqrt{{\tilde a}} k )}\paren{\bar{\frac1{\sqrt{{\tilde a}} k}(\tilde{\theta}_t+\mathfrak c)}\theta_\alpha-\bar{\frac1{\sqrt{a}}(\theta_t+\mathfrak c)}\tilde{\theta}_\alpha}\,d\alpha.
\end{aligned}
\end{equation}
Changing variables by $h^{-1}$ in the integrals in \eqref{2300} and \eqref{2301}, and then applying the Cauchy-Schwarz and H\"older inequalities, we obtain \eqref{dlemma1-inq}.
\end{proof}
We have the following basic identities and inequalities.
\begin{proposition} Let $\mathcal Q_l= U_{l}\mathbb H U_{l^{-1}}-\mathbb H$, where $l:\mathbb R\to \mathbb R$ is a diffeomorphism,\footnote{We say $l:\mathbb R\to\mathbb R$ is a diffeomorphism, if
$l:\mathbb R\to\mathbb R$ is one-to-one and onto, and $l, \ l^{-1}\in C^1(\mathbb R)$, with $\|l_{\alpha'}\|_{L^\infty}+\|(l^{-1})_{\alpha'}\|_{L^\infty}<\infty$.}
with $l_{\alpha'}-1\in L^2$. For any $f\in H^1(\mathbb R)$, we have
\begin{align}
\nm{\mathcal Q_l f}_{\dot H^{\frac12}}&\le C(\nm{(l^{-1})_{\alpha'}}_{L^\infty}, \nm{l_{\alpha'}}_{L^\infty})\|l_{\alpha'}-1\|_{L^2}\|\partial_{\alpha'} f\|_{L^2};\label{q1}\\
\nm{\mathcal Q_l f}_{L^\infty}&\le C(\nm{(l^{-1})_{\alpha'}}_{L^\infty}, \nm{l_{\alpha'}}_{L^\infty})\|l_{\alpha'}-1\|_{L^2}\|\partial_{\alpha'} f\|_{L^2};\label{q2}\\
\nm{\mathcal Q_l f}_{L^2}&\le C(\nm{(l^{-1})_{\alpha'}}_{L^\infty}, \nm{l_{\alpha'}}_{L^\infty})\|l_{\alpha'}-1\|_{L^2}\| f\|_{L^\infty};\label{q3}\\
\nm{\mathcal Q_l f}_{L^2}&\le C(\nm{(l^{-1})_{\alpha'}}_{L^\infty}, \nm{l_{\alpha'}}_{L^\infty})\|l_{\alpha'}-1\|_{L^2}\| f\|_{\dot H^{1/2} }.\label{q4}
\end{align}
\end{proposition}
\begin{proof}
We know
\begin{equation}\label{2305}
U_{l}\mathbb H U_{l^{-1}}f(\alpha')=\frac1{\pi i}\int\frac{f(\beta') l_{\beta'}(\beta')}{l(\alpha')-l(\beta')}\,d\beta',
\end{equation}
so
\begin{equation}\label{2306}
\begin{aligned}
\mathcal Q_l f&=\frac1{\pi i}\int\paren{\frac{ l_{\beta'}(\beta')-1}{l(\alpha')-l(\beta')}+\frac{ \alpha'-l(\alpha')-\beta'+l(\beta')}{(l(\alpha')-l(\beta'))(\alpha'-\beta')} } f(\beta')\,d\beta'\\&
=\frac1{\pi i}\int\paren{\frac{ l_{\beta'}(\beta')-1}{l(\alpha')-l(\beta')}+ \frac{ \alpha'-l(\alpha')-\beta'+l(\beta')}{(l(\alpha')-l(\beta'))(\alpha'-\beta')} } (f(\beta')-f(\alpha'))\,d\beta',
\end{aligned}
\end{equation}
here in the second step we inserted $-f(\alpha')$ because $\mathbb H1=0$. Applying the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77} to the second equality in \eqref{2306} we obtain \eqref{q2} and \eqref{q4}. Using \eqref{3.16} and \eqref{3.17} on the first equality in \eqref{2306} we get \eqref{q3}. We are left with \eqref{q1}.
Differentiating with respect to $\alpha'$ and integrating by parts gives
\begin{equation}\label{2307}
\partial_{\alpha'} \mathcal Q_lf(\alpha')=\frac1{\pi i}\int\paren{\frac{l_{\alpha'}(\alpha')}{l(\alpha')-l(\beta')}- \frac1{\alpha'-\beta'}} f_{\beta'}(\beta') \,d\beta'.
\end{equation}
Let $p\in C^\infty_0(\mathbb R)$. We have, by using the fact $\mathbb H1=0$ to insert $-p(\beta')$, that
\begin{equation}\label{2308}
\begin{aligned}
\int p(\alpha')\partial_{\alpha'} \mathcal Q_lf(\alpha')\,d\alpha'&= \frac1{\pi i}\iint\paren{\frac{l_{\alpha'}(\alpha')}{l(\alpha')-l(\beta')}- \frac1{\alpha'-\beta'}} f_{\beta'}(\beta') (p(\alpha')-p(\beta')) \,d\alpha' d\beta'\\&=
\frac1{\pi i}\iint\frac{p(\alpha')-p(\beta') }{l(\alpha')-l(\beta')} (l_{\alpha'}(\alpha')-1)f_{\beta'}(\beta') \,d\alpha' d\beta'\\&+
\frac1{\pi i}\iint\frac{ \alpha'-l(\alpha')-\beta'+l(\beta')}{(l(\alpha')-l(\beta'))(\alpha'-\beta')} f_{\beta'}(\beta') (p(\alpha')-p(\beta')) \,d\alpha' d\beta'.
\end{aligned}
\end{equation}
Applying the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77} to \eqref{2308}, we get, for some constant $c$ depending only on $\|l_{\alpha'}\|_{L^\infty}$ and $\|(l^{-1})_{\alpha'}\|_{L^\infty}$,
\begin{equation}\label{2309}
\abs{\int p(\alpha')\partial_{\alpha'} \mathcal Q_lf(\alpha')\,d\alpha'}\le c\|p\|_{\dot H^{1/2}}\|l_{\alpha'}-1\|_{L^2}\|\partial_{\alpha'} f\|_{L^2}.
\end{equation}
This proves inequality \eqref{q1}.
\end{proof}
\begin{lemma}\label{hhalf2}
Assume that $f, \ g,\ f_1,\ g_1 \in H^1(\mathbb R)$ are the boundary values of some holomorphic functions on $\mathscr P_-$. Then
\begin{equation}\label{halfholo}
\int \partial_{\alpha'} \mathbb P_A (\bar f g)(\alpha')f_1(\alpha') \bar g_1(\alpha')\,d\alpha'=-\frac1{2\pi i}\iint \frac{(\bar f(\alpha')-\bar f(\beta'))( f_1(\alpha')- f_1(\beta'))}{(\alpha'-\beta')^2}g(\beta')\bar g_1(\alpha')\,d\alpha' d\beta'.
\end{equation}
\end{lemma}
\begin{proof}
Let $f, \ g,\ f_1,\ g_1\in H^1(\mathbb R)$ be the boundary values of some holomorphic functions in $\mathscr P_-$. We have
\begin{equation}\label{2310}
2\mathbb P_A (\bar f g)=(I-\mathbb H)(\bar f g)= [\bar f,\mathbb H]g
\end{equation}
and
\begin{equation}\label{2311}
2\partial_{\alpha'}\mathbb P_A (\bar f g)= \partial_{\alpha'} \bar f\, \mathbb H g-\frac1{\pi i}\int\frac{\bar f(\alpha')-\bar f(\beta')}{(\alpha'-\beta')^2} g(\beta')\,d\beta'.
\end{equation}
Because $\bar g_1 \partial_{\alpha'}\mathbb P_A (\bar f f_1 g)\in L^1(\mathbb R)$ is the boundary value of an anti-holomorphic function in $\mathscr P_-$, by the Cauchy integral theorem,
\begin{equation}\label{2312}
0=2 \int \bar g_1 \partial_{\alpha'}\mathbb P_A (\bar f f_1 g)\,d\alpha'
=\int \partial_{\alpha'} \bar f\, f_1 g \bar g_1\,d\alpha'-\frac1{\pi i}\iint\frac{\bar f(\alpha')-\bar f(\beta')}{(\alpha'-\beta')^2} f_1(\beta')g(\beta')\bar g_1(\alpha')\,d\alpha' d\beta',
\end{equation}
here we applied formula \eqref{2311} to the pair of holomorphic functions $f$ and $f_1 g$, and used the fact that $\mathbb H(f_1 g)=f_1 g$.
Now we use \eqref{2311} to compute, because $\mathbb H g=g$,
\begin{equation}\label{2313}
2\int \partial_{\alpha'} \mathbb P_A (\bar f g) \, f_1 \bar g_1\,d\alpha'
=\int \partial_{\alpha'} \bar f\, g f_1 \bar g_1\,d\alpha'- \frac1{\pi i}\iint\frac{\bar f(\alpha')-\bar f(\beta')}{(\alpha'-\beta')^2} g(\beta')f_1(\alpha')\bar g_1(\alpha')\,d\alpha' d\beta'.
\end{equation}
Substituting \eqref{2312} in \eqref{2313}, we get \eqref{halfholo}.
\end{proof}
\begin{remark}\label{hhalf3}
By the Cauchy integral theorem, we know for $f, \ g,\ f_1,\ g_1 \in H^1(\mathbb R)$,
$$\int \partial_{\alpha'} \mathbb P_A (\bar f g)(\alpha')f_1(\alpha') \bar g_1(\alpha')\,d\alpha'=\int \partial_{\alpha'} \mathbb P_A (\bar f g)\mathbb P_H(f_1 \bar g_1)\,d\alpha'=\int \partial_{\alpha'} \mathbb P_A (\bar f g)\bar {\mathbb P_A(\bar f_1 g_1)}\,d\alpha' .$$
\end{remark}
As a corollary of Lemma~\ref{hhalf2} and Remark~\ref{hhalf3} we have
\begin{proposition}\label{hhalf4}
Assume that $f, \ g \in H^1(\mathbb R)$. We have
\begin{align}
\nm{\bracket{f, \mathbb H} g}_{\dot H^{1/2}}&\lesssim \|f\|_{\dot H^{1/2}}(\|g\|_{L^\infty} +\|\mathbb H g\|_{L^\infty});\label{hhalf41}\\
\nm{ \bracket{f, \mathbb H} g }_{\dot H^{1/2}}&\lesssim \|\partial_{\alpha'} f\|_{L^2}\|g\|_{L^2};\label{hhalf42}\\
\nm{\bracket{f, \mathbb H} \partial_{\alpha'} g}_{\dot H^{1/2}}&\lesssim \|g\|_{\dot H^{1/2}}(\|\partial_{\alpha'} f\|_{L^\infty}+\|\partial_{\alpha'} \mathbb H f\|_{L^\infty}).\label{hhalf43}
\end{align}
\end{proposition}
\begin{proof}
By Proposition~\ref{prop:comm-hilbe} and the decompositions $f=\mathbb P_A f+\mathbb P_H f$, $g=\mathbb P_A g+\mathbb P_H g$,
\begin{equation}\label{2318}
\bracket{f,\mathbb H}g=\bracket{\mathbb P_A f,\mathbb H}\mathbb P_H g+\bracket{\mathbb P_H f,\mathbb H}\mathbb P_A g.
\end{equation}
So without loss of generality, we assume $f$ is anti-holomorphic and $g$ is holomorphic, i.e. $f=-\mathbb H f$, $g=\mathbb H g$.
\eqref{hhalf41} is straightforward from \eqref{halfholo}, Remark~\ref{hhalf3} and the definition \eqref{def-hhalf}; and
\eqref{hhalf42} can be easily obtained by applying the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77} to \eqref{halfholo}. We are left with \eqref{hhalf43}.
By integration by parts, we know
\begin{equation}\label{2314}
[ f,\mathbb H] \partial_{\alpha'} g+[g,\mathbb H]\partial_{\alpha'} f=\frac1{\pi i}\int \frac{( f(\alpha')-f(\beta'))(g(\alpha')-g(\beta'))}{(\alpha'-\beta')^2}\,d\beta':={\bf r};
\end{equation}
and by \eqref{hhalf41},
$$\nm{[g,\mathbb H]\partial_{\alpha'} f}_{\dot H^{1/2}}\lesssim \|g\|_{\dot H^{1/2}}\|\partial_{\alpha'} f\|_{L^\infty}.$$ For the term ${\bf r}$ on the right hand side of \eqref{2314}, we have
\begin{equation}\label{2315}
\partial_{\alpha'} {\bf r}=\frac{-2}{\pi i}\int \frac{( f(\alpha')- f(\beta'))(g(\alpha')-g(\beta'))}{(\alpha'-\beta')^3}\,d\beta'+ f_{\alpha'} \mathbb H g_{\alpha'}+g_{\alpha'} \mathbb H f_{\alpha'};
\end{equation}
and using $f=-\mathbb Hf$, $g=\mathbb H g$, we find
$$ f_{\alpha'} \mathbb H g_{\alpha'}+g_{\alpha'} \mathbb H f_{\alpha'}= f_{\alpha'} g_{\alpha'}-g_{\alpha'} f_{\alpha'}=0.$$
Let $p\in C_0^\infty(\mathbb R)$. We have, using the symmetry of the integrand,
\begin{equation}\label{2316}
\int p\partial_{\alpha'} {\bf r}\,d\alpha'=\frac{-1}{\pi i}\iint \frac{( f(\alpha')- f(\beta'))(g(\alpha')-g(\beta'))(p(\alpha')-p(\beta'))}{(\alpha'-\beta')^3}\,d\alpha' d\beta';
\end{equation}
applying the Cauchy-Schwarz inequality and the definition \eqref{def-hhalf}, we get
\begin{equation}\label{2317}
\abs{\int p\partial_{\alpha'} {\bf r}\,d\alpha'}\lesssim \|\partial_{\alpha'} f\|_{L^\infty} \|g\|_{\dot H^{1/2}}\|p\|_{\dot H^{1/2}},
\end{equation}
so $ \|{\bf r}\|_{\dot H^{1/2}}\lesssim \|\partial_{\alpha'} f\|_{L^\infty} \|g\|_{\dot H^{1/2}}$. This finishes the proof for \eqref{hhalf43}.
\end{proof}
\begin{proposition}\label{dl21}
Assume $f,\ g, \ f_1, \ g_1\in H^1(\mathbb R)$, and $l:\mathbb R\to \mathbb R$ is a diffeomorphism, with $l_{\alpha'}-1\in L^2$. Then
\begin{equation}\label{dl21-inq}
\begin{aligned}
&\nm{\bracket{f,\mathbb H}\partial_{\alpha'} g-U_l\bracket{f_1,\mathbb H}\partial_{\alpha'} g_1}_{L^2}\lesssim \nm{f-f_1\circ l}_{\dot H^{1/2}}\|\partial_{\alpha'} g\|_{L^2}\\&\qquad\qquad+ \|\partial_{\alpha'} f_1\|_{L^2} \|l_{\alpha'}\|_{L^\infty}^{\frac12} \nm{g-g_1\circ l}_{\dot H^{1/2}}+\|\partial_{\alpha'} f_1\|_{L^2} \|\partial_{\alpha'} g_1\|_{L^2}\nm{l_{\alpha'}-1}_{L^2}.
\end{aligned}
\end{equation}
\end{proposition}
\begin{proof}
We know
\begin{equation}\label{2320}
\begin{aligned}
\bracket{f,\mathbb H}\partial_{\alpha'} g&-U_l\bracket{f_1,\mathbb H}\partial_{\alpha'} g_1=\bracket{f,\mathbb H}\partial_{\alpha'} g-\bracket{f_1\circ l,U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} (g_1\circ l)\\&=
\bracket{f,\mathbb H}\partial_{\alpha'} g-\bracket{f_1\circ l,\mathbb H}\partial_{\alpha'} (g_1\circ l)+\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} (g_1\circ l);
\end{aligned}
\end{equation}
applying Proposition~\ref{prop:half-dir} to the term
\begin{equation}\label{2321}
\bracket{f,\mathbb H}\partial_{\alpha'} g-\bracket{f_1\circ l,\mathbb H}\partial_{\alpha'} (g_1\circ l)=\bracket{f-f_1\circ l,\mathbb H}\partial_{\alpha'} g+\bracket{f_1\circ l,\mathbb H}\partial_{\alpha'} (g-g_1\circ l)
\end{equation}
gives
\begin{equation}\label{2322}
\nm{\bracket{f,\mathbb H}\partial_{\alpha'} g-\bracket{f_1\circ l,\mathbb H}\partial_{\alpha'} (g_1\circ l)}_{L^2}\lesssim \nm{f-f_1\circ l}_{\dot H^{1/2}}\|\partial_{\alpha'} g\|_{L^2}+ \|\partial_{\alpha'} (f_1\circ l)\|_{L^2} \nm{g-g_1\circ l}_{\dot H^{1/2}}.
\end{equation}
Now by \eqref{2305},
\begin{equation}\label{2323}
\begin{aligned}
&\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} (g_1\circ l)\\&\qquad\qquad=\frac1{\pi i} \int\frac{(f_1\circ l(\alpha')-f_1\circ l(\beta'))(l(\alpha')-\alpha'-l(\beta')+\beta')}{(l(\alpha')-l(\beta'))(\alpha'-\beta')}\, \partial_{\beta'} (g_1\circ l)(\beta') \,d\beta';
\end{aligned}
\end{equation}
applying the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77} we get
\begin{equation}\label{2324}
\nm{\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} (g_1\circ l)}_{L^2}\lesssim
\|\partial_{\alpha'} f_1\|_{L^2}\|l_{\alpha'}-1\|_{L^2}\|\partial_{\alpha'} g_1\|_{L^2}.\end{equation}
This finishes the proof for \eqref{dl21-inq}.
\end{proof}
\begin{proposition}\label{dl22}
Assume that $f,\ g,\ f_1, \ g_1$ are smooth and decay at infinity, and $l:\mathbb R\to \mathbb R$ is a diffeomorphism, with $l_{\alpha'}-1\in L^2$. Then there is a constant $c(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})$, depending on $\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty}$, such that
\begin{equation}\label{dl221}
\begin{aligned}
&\nm{\bracket{f,\mathbb H}\partial_{\alpha'} g-U_l\bracket{f_1,\mathbb H}\partial_{\alpha'} g_1}_{L^2}\le c(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})\nm{\partial_{\alpha'} f-\partial_{\alpha'}(f_1\circ l)}_{L^2}\|g\|_{L^\infty}\\&\qquad\qquad+ c(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})(\|\partial_{\alpha'} f_1\|_{L^\infty} \nm{g-g_1\circ l}_{L^2}+\|\partial_{\alpha'} f_1\|_{L^\infty} \| g_1\|_{L^\infty}\nm{l_{\alpha'}-1}_{L^2}),
\end{aligned}
\end{equation}
\begin{equation}\label{dl222}
\begin{aligned}
&\nm{\bracket{f,\mathbb H}\partial_{\alpha'} g-U_l\bracket{f_1,\mathbb H}\partial_{\alpha'} g_1}_{L^2}\lesssim \nm{\partial_{\alpha'} f-\partial_{\alpha'}(f_1\circ l)}_{L^2}\|g\|_{\dot H^{1/2}}\\&\qquad\qquad+ c(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})(\|\partial_{\alpha'} f_1\|_{L^\infty} \nm{g-g_1\circ l}_{L^2}+\|\partial_{\alpha'} f_1\|_{L^\infty} \| g_1\|_{\dot H^{1/2}}\nm{l_{\alpha'}-1}_{L^2}).
\end{aligned}
\end{equation}
\end{proposition}
\begin{proof}
We use the same computation as in the proof for Proposition~\ref{dl21}, and apply Proposition~\ref{B2} to the terms in \eqref{2321} and \eqref{2323} to get \eqref{dl221}. To obtain \eqref{dl222} we apply \eqref{eq:b11} and \eqref{3.20} to \eqref{2321}; and for the term in \eqref{2323}, we first integrate by parts, then apply the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77}.
\end{proof}
\begin{proposition}\label{dhhalf1}
Assume that $f,\ g,\ f_1, \ g_1$ are smooth and decay at infinity, and $l:\mathbb R\to \mathbb R$ is a diffeomorphism, with $l_{\alpha'}-1\in L^2$. Then there is a constant $c:=c(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})$, depending on $\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty}$, such that
\begin{equation}\label{dhhalf1-inq}
\begin{aligned}
&\nm{\bracket{f,\mathbb H}\partial_{\alpha'} g-U_l\bracket{f_1,\mathbb H}\partial_{\alpha'} g_1}_{\dot H^{1/2}}\lesssim \nm{\partial_{\alpha'} f-\partial_{\alpha'}(f_1\circ l)}_{L^2}\|\partial_{\alpha'} g_1\|_{L^2}\|l_{\alpha'}\|_{L^\infty}^{\frac12}\\&\qquad+ (\|\partial_{\alpha'} f\|_{L^\infty}+\|\partial_{\alpha'} \mathbb H f\|_{L^\infty}) \nm{g-g_1\circ l}_{\dot H^{1/2}}+c\|\partial_{\alpha'} f_1\|_{L^\infty} \| \partial_{\alpha'} g_1\|_{L^2}\nm{l_{\alpha'}-1}_{L^2}.
\end{aligned}
\end{equation}
\end{proposition}
\begin{proof}
We begin with \eqref{2320} and write the first two terms on the right hand side as
\begin{equation}\label{2325}
\bracket{f,\mathbb H}\partial_{\alpha'} g-\bracket{f_1\circ l,\mathbb H}\partial_{\alpha'} (g_1\circ l)=\bracket{f-f_1\circ l,\mathbb H}\partial_{\alpha'} (g_1\circ l)+\bracket{f,\mathbb H}\partial_{\alpha'} (g-g_1\circ l);
\end{equation}
applying \eqref{hhalf42} and \eqref{hhalf43} to \eqref{2325} we get
\begin{equation}\label{2326}
\begin{aligned}
\nm{\bracket{f,\mathbb H}\partial_{\alpha'} g-\bracket{f_1\circ l,\mathbb H}\partial_{\alpha'} (g_1\circ l)}_{\dot H^{1/2}}&\lesssim \nm{\partial_{\alpha'}(f-f_1\circ l)}_{L^2}\|\partial_{\alpha'} (g_1\circ l)\|_{L^2}\\&+ (\|\partial_{\alpha'} f\|_{L^\infty} + \|\partial_{\alpha'} \mathbb Hf\|_{L^\infty})\nm{g-g_1\circ l}_{\dot H^{1/2}}.
\end{aligned}
\end{equation}
Consider the last term on the right hand side of \eqref{2320}. For any $p\in C_0^\infty(\mathbb R)$,
\begin{equation}\label{2327}
\begin{aligned}
\int\partial_{\alpha'} p \bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}&\partial_{\alpha'} (g_1\circ l)\,d\alpha'\\&=\int\partial_{\alpha'} (g_1\circ l)\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} p \,d\alpha';
\end{aligned}
\end{equation}
the same argument as in the proof of \eqref{dl222}, that is, integrating by parts, then applying the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77}, gives
$$\|\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} p\|_{L^2}\le c\,\|\partial_{\alpha'} f_1\|_{L^\infty} \|l_{\alpha'}-1\|_{L^2}\|p\|_{\dot H^{1/2}}, $$
where $c:= c(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})$ is a constant depending on $\|l_{\alpha'}\|_{L^\infty}$ and $ \|(l^{-1})_{\alpha'}\|_{L^\infty}$; so
$$\abs{\int\partial_{\alpha'} p \bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} (g_1\circ l)\,d\alpha'}\le c\|\partial_{\alpha'} (g_1\circ l)\|_{L^2}\|\partial_{\alpha'} f_1\|_{L^\infty} \|l_{\alpha'}-1\|_{L^2}\|p\|_{\dot H^{1/2}}.$$
This finishes the proof for \eqref{dhhalf1-inq}.
\end{proof}
\betaegin{proposition}\label{dhhalf2}
Assume that $f,\ g,\ f_1, \ g_1$ are smooth and decay at infinity, and $l:\mathbb R\tauo \mathbb R$ is a diffeomorphism, with $l_\alphaa-1^{-1}n L^2$. Then there is a constant $c:=c(\|l_\alphaa\|_{L^^{-1}nfty}, \|(l^{-1})_\alphaa\|_{L^^{-1}nfty})$, depending on $\|l_\alphaa\|_{L^^{-1}nfty}, \|(l^{-1})_\alphaa\|_{L^^{-1}nfty}$, such that
\betaegin{equation}\label{dhhalf2-inq}
\betaegin{aligned}
&\nm{\betaracket{f,\mathbb H} g-U_l\betaracket{f_1,\mathbb H} g_1}_{\dot H^{1/2}}\lesssim \nm{ f-f_1\circ l}_{\dot H^{1/2}}(\| g\|_{L^^{-1}nfty}+\|\mathbb H g\|_{L^^{-1}nfty}) \\&\qquad+ \|\partial_\alphaa f_1\|_{L^2}\|l_\alphaa\|_{L^^{-1}nfty}^{\fracrac12} \nm{g-g_1\circ l}_{L^2}+c\|\partial_\alphaa f_1\|_{L^2} \| g_1\|_{L^^{-1}nfty}\nm{l_\alphaa-1}_{L^2}.
\end{aligned}
\end{equation}
\end{proposition}
\betaegin{proof}
Similar to the proof of Proposition~\ref{dl21}, we have
\betaegin{equation}\label{2328}
\betaracket{f,\mathbb H} g-U_l\betaracket{f_1,\mathbb H} g_1=
\betaracket{f,\mathbb H} g-\betaracket{f_1\circ l,\mathbb H}(g_1\circ l) +\betaracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}} (g_1\circ l);
\end{equation}
writing
\betaegin{equation}\label{2329}
\betaracket{f,\mathbb H} g-\betaracket{f_1\circ l,\mathbb H} (g_1\circ l)=\betaracket{f-f_1\circ l,\mathbb H} g+\betaracket{f_1\circ l,\mathbb H}(g-g_1\circ l)
\end{equation}
and applying \eqref{hhalf41} and \eqref{hhalf42} gives,
\betaegin{equation}\label{2330}
\betaegin{aligned}
\nm{\betaracket{f,\mathbb H} g-\betaracket{f_1\circ l,\mathbb H} (g_1\circ l)}_{\dot H^{1/2}}&\lesssim
\nm{ f-f_1\circ l}_{\dot H^{1/2}}(\| g\|_{L^^{-1}nfty}+\|\mathbb H g\|_{L^^{-1}nfty})\\&+ \|\partial_\alphaa (f_1\circ l)\|_{L^2} \nm{g-g_1\circ l}_{L^2}.
\end{aligned}
\end{equation}
Consider the second term on the right-hand side of \eqref{2328}. We write
\begin{equation}\label{2331}
\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}} (g_1\circ l)=\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}} \frac1{l_{\alpha'}}} (g_1\circ l)+\bracket{f_1\circ l, U_l\mathbb HU_{l^{-1}}}\paren{\frac1{l_{\alpha'}}-1} (g_1\circ l).
\end{equation}
Now
\begin{equation}\label{2332}
\bracket{f_1\circ l, U_l\mathbb HU_{l^{-1}}}((l_{\alpha'})^{-1}-1) (g_1\circ l)=U_l [f_1,\mathbb H]\paren{((l^{-1})_{\alpha'}-1)g_1}.
\end{equation}
Changing variables, and then using \eqref{hhalf42}, yields
\begin{equation}\label{2333}
\nm{ U_l [f_1,\mathbb H]\paren{((l^{-1})_{\alpha'}-1)g_1} }_{\dot H^{1/2}}\le
c\|\partial_{\alpha'} f_1\|_{L^2} \| g_1\|_{L^\infty}\nm{l_{\alpha'}-1}_{L^2}
\end{equation}
for some constant $c$ depending on $\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty}$.
For the first term on the right-hand side of \eqref{2331} we use the duality argument in \eqref{2327}. Let $p\in C^\infty_0(\mathbb R)$; then
\begin{equation}\label{2334}
\int \partial_{\alpha'} p \bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}} (l_{\alpha'})^{-1}} (g_1\circ l)\,d\alpha'=\int g_1\circ l\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}} (l_{\alpha'})^{-1}} \partial_{\alpha'} p \,d\alpha',
\end{equation}
and
\begin{equation}\label{2335}
\begin{aligned}
&\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} p\\&\qquad\qquad=\frac1{\pi i} \int\frac{(f_1\circ l(\alpha')-f_1\circ l({\beta'}))(l(\alpha')-\alpha'-l({\beta'})+{\beta'})}{(l(\alpha')-l({\beta'}))(\alpha'-{\beta'})}\, \partial_{\beta'} p({\beta'}) \,d{\beta'}.
\end{aligned}
\end{equation}
Integrating by parts, then applying the Cauchy-Schwarz inequality and Hardy's inequalities \eqref{eq:77} and \eqref{eq:771}, gives
\begin{equation}\label{2336}
\nm{\bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}}(l_{\alpha'})^{-1}}\partial_{\alpha'} p}_{L^1}\le c \|\partial_{\alpha'} f_1\|_{L^2} \nm{l_{\alpha'}-1}_{L^2}\|p\|_{\dot H^{1/2}},
\end{equation}
for some constant $c$ depending on $\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty}$, so
\begin{equation}\label{2337}
\abs{\int \partial_{\alpha'} p \bracket{f_1\circ l,\mathbb H-U_l\mathbb HU_{l^{-1}} (l_{\alpha'})^{-1}} (g_1\circ l)\,d\alpha'}\le c \|g_1\|_{L^\infty}\|\partial_{\alpha'} f_1\|_{L^2} \nm{l_{\alpha'}-1}_{L^2}\|p\|_{\dot H^{1/2}}.
\end{equation}
This finishes the proof of \eqref{dhhalf2-inq}.
\end{proof}
We define \[ [f, m; \partial_{\alpha'} g]_n:=\frac1{\pi i}\int\frac{(f(\alpha')-f({\beta'}))(m(\alpha')-m({\beta'}))^n}{(\alpha'-{\beta'})^{n+1}}\partial_{\beta'} g({\beta'})\,d{\beta'}.\] So $[f,m;\partial_{\alpha'} g]=[f, m; \partial_{\alpha'} g]_1$, and $[f,\mathbb H]\partial_{\alpha'} g=[f, m; \partial_{\alpha'} g]_0$.
\begin{proposition}\label{d32}
Assume that $f,\ m,\ g,\ f_1,\ m_1,\ g_1$ are smooth and $f,\ g,\ f_1,\ g_1$ decay at infinity, and $l:\mathbb R\to\mathbb R$ is a diffeomorphism, with $l_{\alpha'}-1\in L^2$. Then there is a constant $c$, depending on $\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty}$, such that
\begin{equation}\label{d32inq}
\begin{aligned}
&\nm{[f, m; \partial_{\alpha'} g]_n-U_l[f_1, m_1; \partial_{\alpha'} g_1]_n}_{L^2}\le c
\nm{f-f_1\circ l}_{\dot H^{1/2}}\nm{\partial_{\alpha'} m}^n_{L^\infty}\|\partial_{\alpha'} g\|_{L^2}\\&+c \|\partial_{\alpha'} f_1\|_{L^2} (\nm{\partial_{\alpha'} m}_{L^\infty}+\nm{\partial_{\alpha'} m_1}_{L^\infty})^{n-1}\nm{\partial_{\alpha'}(m-m_1\circ l)}_{L^2}\nm{\partial_{\alpha'} g}_{L^2}\\&+c\|\partial_{\alpha'} f_1\|_{L^2} \nm{\partial_{\alpha'} m_1}^n_{L^\infty} \nm{g-g_1\circ l}_{\dot H^{1/2}}+c\|\partial_{\alpha'} f_1\|_{L^2}\nm{\partial_{\alpha'} m_1}^n_{L^\infty} \|\partial_{\alpha'} g_1\|_{L^2}\nm{l_{\alpha'}-1}_{L^2}.
\end{aligned}
\end{equation}
\end{proposition}
Proposition~\ref{d32} can be proved in the same way as Proposition~\ref{dl21}; we omit the details.
\begin{proposition}\label{d33}
Assume that $f,\ m,\ g,\ f_1,\ m_1,\ g_1$ are smooth and decay at infinity, and $l:\mathbb R\to\mathbb R$ is a diffeomorphism, with $l_{\alpha'}-1\in L^2$. Then there is a constant $c$, depending on $\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty}$, such that
\begin{equation}\label{d33inq}
\begin{aligned}
&\nm{[f, g; m]-U_l[f_1, g_1; m_1]}_{L^2}\le c
\nm{f-f_1\circ l}_{\dot H^{1/2}}\|\partial_{\alpha'} g\|_{L^2}\nm{ m}_{L^\infty}\\&+c\|\partial_{\alpha'} f_1\|_{L^2} \nm{g-g_1\circ l}_{\dot H^{1/2}}\nm{ m}_{L^\infty}+
c\|\partial_{\alpha'} f_1\|_{L^2}\nm{\partial_{\alpha'} g_1}_{L^2}\nm{m-m_1\circ l}_{L^2}\\&
+c\|\partial_{\alpha'} f_1\|_{L^2}\nm{ m_1}_{L^\infty} \|\partial_{\alpha'} g_1\|_{L^2}\nm{l_{\alpha'}-1}_{L^2}.
\end{aligned}
\end{equation}
\end{proposition}
Proposition~\ref{d33} follows straightforwardly from the Cauchy-Schwarz inequality, Hardy's inequality, and the definition of the $\dot H^{1/2}$ norm.
\begin{proposition}\label{half-product}
Assume $f\in \dot H^{1/2}(\mathbb R)\cap L^\infty(\mathbb R)$, $g\in \dot H^{1/2}(\mathbb R)$, and $g$ can be decomposed as
\begin{equation}\label{decomp}
g=g_1+ pq
\end{equation}
with $g_1\in L^\infty(\mathbb R)$, $q\in L^2(\mathbb R)$, and $\partial_{\alpha'} p\in L^2(\mathbb R)$, satisfying $\partial_{\alpha'}(pf)\in L^2(\mathbb R)$. Then $fg\in \dot H^{1/2}(\mathbb R)$, and
\begin{equation}\label{product-inq}
\nm{fg}_{\dot H^{1/2}}\lesssim \nm{f}_{L^\infty}\nm{g}_{\dot H^{1/2}}+ \nm{g_1}_{L^\infty}\nm{f}_{\dot H^{1/2}}+\nm{q}_{L^2}\nm{ \partial_{\alpha'} (pf) }_{L^2}+\nm{q}_{L^2}\nm{ \partial_{\alpha'} p }_{L^2}\nm{f}_{L^\infty}.
\end{equation}
\end{proposition}
\begin{proof}
The proof is straightforward from the definition. We have
\begin{equation}
\begin{aligned}
&\nm{fg}_{\dot H^{1/2}}^2\lesssim \iint\frac{|f({\beta'})|^2|g(\alpha')-g({\beta'})|^2}{(\alpha'-{\beta'})^2}\,d\alpha' d{\beta'}+ \iint\frac{|g(\alpha')|^2|f(\alpha')-f({\beta'})|^2}{(\alpha'-{\beta'})^2}\,d\alpha' d{\beta'}\\&\lesssim
\nm{f}^2_{L^\infty}\nm{g}^2_{\dot H^{1/2}}+ \nm{g_1}^2_{L^\infty}\nm{f}^2_{\dot H^{1/2}}+\iint\frac{|q(\alpha')|^2|p(\alpha')f(\alpha')-p({\beta'})f({\beta'})|^2}{(\alpha'-{\beta'})^2}\,d\alpha' d{\beta'}\\&+\iint\frac{|q(\alpha')|^2|p(\alpha')-p({\beta'})|^2|f({\beta'})|^2}{(\alpha'-{\beta'})^2}\,d\alpha' d{\beta'}\\&\lesssim \nm{f}^2_{L^\infty}\nm{g}^2_{\dot H^{1/2}}+ \nm{g_1}^2_{L^\infty}\nm{f}^2_{\dot H^{1/2}}+\nm{q}^2_{L^2}\nm{ \partial_{\alpha'} (pf) }^2_{L^2}+\nm{q}^2_{L^2}\nm{ \partial_{\alpha'} p }^2_{L^2}\nm{f}^2_{L^\infty},
\end{aligned}
\end{equation}
where in the last step we used Fubini's Theorem and Hardy's inequality \eqref{eq:77}.
\end{proof}
\subsection{The proof of Theorem~\ref{unique}}\label{proof3.5} In addition to what has already been given, we use the following convention in this section, \S\ref{proof3.5}: we write $A\lesssim B$ if there is a constant $c$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E} (t)$, such that $A\le cB$. We assume the reader is familiar with the quantities that are controlled by the functional $\mathfrak E(t)$, see \S\ref{proof} and Appendix~\ref{quantities}. We do not always give precise references for these estimates.
Let $Z=Z(\alpha',t)$, $\tilde Z=\tilde Z(\alpha',t)$, $t\in [0, T]$ be solutions of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{b}-\eqref{a1}, satisfying the assumptions of Theorem~\ref{unique}.
Recall we defined in \eqref{def-l}
\begin{equation}\label{2338}
l=l(\alpha',t)=\tilde h\circ h^{-1}(\alpha', t)=\tilde h(h^{-1}(\alpha',t),t).
\end{equation}
We will apply Lemma~\ref{dlemma1} to $\Theta=\bar Z_t,\ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$ and Lemma~\ref{basic-e2} to $l_{\alpha'}-1$ to construct an energy functional $\mathcal F(t)$, and show that the time derivative $\mathcal F'(t)$ can be controlled by $\mathcal F(t)$ and the initial data.
We begin by computing the evolution equations for these quantities. We have
\begin{equation}\label{2339}
\partial_t ( l_{\alpha'}\circ h)=\partial_t\paren{\frac{\tilde h_\alpha}{h_\alpha}}=\frac{\tilde h_\alpha}{h_\alpha}\paren{\frac{\tilde h_{t\alpha}}{\tilde h_\alpha}-\frac{h_{t\alpha}}{h_\alpha}}=(l_{\alpha'}\circ h)(\tilde b_{\alpha'}\circ \tilde h-b_{\alpha'}\circ h);
\end{equation}
precomposing with $h^{-1}$ yields
\begin{equation}\label{2340}
(\partial_t +b\partial_{\alpha'}) l_{\alpha'}=l_{\alpha'}(\tilde b_{\alpha'}\circ l-b_{\alpha'}).
\end{equation}
The equation for $\bar Z_t$ is given by \eqref{quasi-r}-\eqref{aux}.
To find the equation for $\bar Z_{tt}$ we take a derivative with respect to $t$ of \eqref{quasi-l}:
\begin{equation}\label{2341}
\begin{aligned}
(\partial_t^2+i\mathfrak a\partial_\alpha)\bar z_{tt}&=-i\mathfrak a_t \bar z_{t\alpha}+\partial_t\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}}(\bar z_{tt}-i)+\frac{\mathfrak{a}_t}{\mathfrak{a}}\bar z_{ttt}\\&=\partial_t\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}}(\bar z_{tt}-i)+\frac{\mathfrak{a}_t}{\mathfrak{a}}(\bar z_{ttt}-i\mathfrak a \bar z_{t\alpha})\\&=(\bar z_{tt}-i)\paren{\partial_t\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}}+\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}}^2+2\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}}\bar{D_\alpha z_t}},
\end{aligned}
\end{equation}
here we used equation \eqref{quasi-l} and substituted by \eqref{interface-l}: $-i\mathfrak a \bar z_\alpha=\bar z_{tt}-i$ in the last step. Precomposing with $h^{-1}$, and then substituting for $\bar Z_{tt}-i$ by \eqref{aa1}, yields, for $\mathcal P=(\partial_t+b\partial_{\alpha'})^2+i\mathcal A\partial_{\alpha'}$,
\begin{equation}\label{eqztt}
\mathcal P\bar Z_{tt}= -i\,\frac{A_1}{Z_{,\alpha'}} \paren{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}\circ h^{-1}}+\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}\circ h^{-1}}^2+2\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}\circ h^{-1}}\bar{D_{\alpha'} Z_t}}:=G_3.
\end{equation}
To find the equation for $\frac1{Z_{,\alpha'}}$ we begin with \eqref{eq:dza}. Precomposing with $h$, then differentiating with respect to $t$, gives
\begin{equation}\label{2342}
\partial_t^2\paren{\frac {h_\alpha}{z_\alpha}}=\frac {h_\alpha}{z_\alpha}\paren{(b_{\alpha'}\circ h-D_\alpha z_t)^2+\partial_t(b_{\alpha'}\circ h-2\Re D_\alpha z_t)+\partial_t \bar{D_\alpha z_t}}
\end{equation}
here we subtracted $\partial_t\bar{D_\alpha z_t}$ and then added $\partial_t\bar{D_\alpha z_t}$ in the second factor on the right-hand side to take advantage of the formula \eqref{ba}. We compute
\begin{equation}\label{2343}
\partial_t\bar {D_\alpha z_t}=\bar{D_\alpha z_{tt}}-(\bar{D_\alpha z_t})^2,
\end{equation}
replacing $\bar Z_{tt}$ by \eqref{aa1} yields
\begin{equation}\label{2344}
\bar{D_{\alpha'} Z_{tt}}=\frac1{\bar Z_{,\alpha'}}\partial_{\alpha'}\paren{-i\frac{A_1}{Z_{,\alpha'}}}=-i\frac{A_1}{\bar Z_{,\alpha'}}\partial_{\alpha'}\paren{\frac{1}{Z_{,\alpha'}}}-i\frac1{|Z_{,\alpha'}|^2}\partial_{\alpha'} A_1;
\end{equation}
precomposing equation \eqref{2342} with $h^{-1}$ and substituting in by \eqref{2343}-\eqref{2344}, we get
\begin{equation}\label{eqza}
\mathcal P\frac1{Z_{,\alpha'}}=\frac {1}{Z_{,\alpha'}}\paren{(b_{\alpha'}-D_{\alpha'} Z_t)^2+(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-\paren{\bar {D_{\alpha'} Z_t}}^2-i\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}}:=G_2.
\end{equation}
We record here the equation for $\bar Z_t$, which is the first equation in \eqref{quasi-r}, in which we substituted in by \eqref{aa1},
\begin{equation}\label{eqzt}
\mathcal P\bar Z_t=-i\,\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} \frac{A_1}{Z_{,\alpha'}}:=G_1.
\end{equation}
\subsubsection{The energy functional $\mathcal F(t)$} The energy functional $\mathcal F(t)$ for the differences of the solutions will consist of $\|l_{\alpha'}(t)-1\|_{L^2(\mathbb R)}^2$ and the functionals $\mathfrak F(t)$ when applied to $\Theta=\bar Z_t(t)$, $\frac1{Z_{,\alpha'}}-1$
and $\bar Z_{tt}$, taking $\mathfrak c=-i, \ 0, \ 0$ respectively. Let
\begin{equation}\label{f0}
\mathfrak F_0(t)=\|l_{\alpha'}(t)-1\|_{L^2(\mathbb R)}^2;
\end{equation}
\begin{equation}\label{f1}
\mathfrak F_1(t):=\int \frac{\kappa}{A_1}\abs{ Z_{,\alpha'}\paren{\bar Z_{tt}-i}- \tilde Z_{,\alpha'}\circ l\paren{\bar {\tilde Z}_{tt}\circ l-i}}^2+i\partial_{\alpha'}(\bar Z_t-\bar {\tilde Z}_t\circ l)\bar{(\bar Z_t-\bar {\tilde Z}_t\circ l)}\,d\alpha';
\end{equation}
\begin{equation}\label{f2}
\begin{aligned}
\mathfrak F_2(t):&=\int \frac{\kappa}{A_1}\abs{ Z_{,\alpha'}(\partial_t+b\partial_{\alpha'})\paren{\frac1{Z_{,\alpha'}}}- \tilde Z_{,\alpha'}\circ l\,(\partial_t+\tilde b\partial_{\alpha'})\paren{\frac1{\tilde Z_{,\alpha'}}}\circ l}^2\,d\alpha'\\&\quad\qquad+i\int \partial_{\alpha'}\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}\bar{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l }}\,d\alpha';
\end{aligned}
\end{equation}
and
\begin{equation}\label{f3}
\mathfrak F_3(t):=\int \frac{\kappa}{A_1}\abs{ Z_{,\alpha'}\bar Z_{ttt}- \tilde Z_{,\alpha'}\circ l\,\bar {\tilde Z}_{ttt}\circ l}^2+i\partial_{\alpha'}(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)\bar{(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)}\,d\alpha'.
\end{equation}
Substituting the evolution equations \eqref{eq:dzt}, \eqref{eq:dza} and \eqref{eq:dztt} in the functionals $\mathfrak F_i$ we get
\begin{equation}\label{f11}
\mathfrak F_1(t)=\int \frac{\kappa}{A_1}\abs{ A_1- \tilde {A_1}\circ l}^2+i\partial_{\alpha'}(\bar Z_t-\bar {\tilde Z}_t\circ l)\bar{(\bar Z_t-\bar {\tilde Z}_t\circ l)}\,d\alpha';
\end{equation}
\begin{equation}\label{f22}
\mathfrak F_2(t)=\int \frac{\kappa}{A_1}\abs{ (b_{\alpha'}-D_{\alpha'} Z_t)-(\tilde b_{\alpha'}-\tilde D_{\alpha'} \tilde Z_t)\circ l }^2+ i\partial_{\alpha'}\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}\bar{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l }}\,d\alpha';
\end{equation}
and
\begin{equation}\label{f33}
\begin{aligned}
\mathfrak F_3(t)&=\int \frac{\kappa}{A_1}\abs{ A_1\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}+\bar{D_{\alpha'} Z_t}}- \paren{\tilde {A_1}\paren{\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ \tilde h^{-1}+\bar{\tilde D_{\alpha'} \tilde Z_t}}}\circ l }^2\,d\alpha'\\&\qquad\qquad+i\int\partial_{\alpha'}(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)\bar{(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)}\,d\alpha'.
\end{aligned}
\end{equation}
\begin{remark}\label{remark512}
Assume that the assumption of Theorem~\ref{unique} holds. Because $h_t=b(h,t)$, and $h(\alpha, 0)=\alpha$, where $b$ is given by \eqref{b}, we have $h_{t\alpha}=h_\alpha b_{\alpha'}\circ h$, and
\begin{equation}\label{2345}
h_\alpha(\cdot, t)=e^{\int_0^t b_{\alpha'}\circ h(\cdot, \tau)\, d\tau}.
\end{equation}
So there are constants $c_1>0$, $c_2>0$, depending only on $\sup_{[0, T]}\mathfrak E(t)$, such that
\begin{equation}\label{2346}
c_1\le h_\alpha(\alpha, t)\le c_2,\qquad\qquad \text{for all } \alpha\in \mathbb R,\ t\in [0, T].
\end{equation}
Consequently, because $l_{\alpha'}=\frac{\tilde h_\alpha}{h_\alpha}\circ h^{-1}$, there is a constant $0<c<\infty$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$, such that
\begin{equation}\label{2346-1}
c^{-1}\le l_{\alpha'}(\alpha', t) \le c, \qquad\qquad \text{for all } \alpha'\in \mathbb R,\ t\in [0, T].
\end{equation}
It is easy to check that for each $t\in [0, T]$, $b_{\alpha'}(t)\in L^2(\mathbb R)$, so $h_\alpha(t)-1\in L^2(\mathbb R)$, and hence $l_{\alpha'}(t)-1\in L^2(\mathbb R)$.
It is clear that under the assumption of Theorem~\ref{unique}, the functionals $\mathfrak F_i(t)$, $i=1,2,3$, are well-defined.
\end{remark}
Notice that the functionals $\mathfrak F_i(t)$, $i=1,2,3$, are not necessarily positive definite, see Lemma~\ref{hhalf1}. We prove the following
\begin{lemma}\label{dominate1}
There is a constant $M_0$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$, such that for all $M\ge M_0$, and $t\in [0, T]$,
\begin{align}
\|l_{\alpha'}(t)-1\|_{L^2}+\|(\bar Z_t-\bar {\tilde Z}_t\circ l)(t)\|_{\dot H^{1/2}}^2+\nm{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}(t)}_{\dot H^{1/2}}^2\le M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t);\label{dominate11}\\
\nm{(A_1-\tilde {A_1}\circ l)(t)}_{L^2}^2+\nm{(D_{\alpha'} Z_t-\tilde D_{\alpha'} \tilde Z_t\circ l) (t)}_{L^2}^2+ \nm{(b_{\alpha'}-\tilde b_{\alpha'}\circ l) (t)}_{L^2}^2\lesssim M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t).\label{dominate12}
\end{align}
\end{lemma}
\begin{proof}
By Lemma~\ref{hhalf1},
\begin{align}
\int i\partial_{\alpha'}(\bar Z_t-\bar {\tilde Z}_t\circ l)\bar{(\bar Z_t-\bar {\tilde Z}_t\circ l)}\,d\alpha'=\|\mathbb P_H (\bar Z_t-\bar {\tilde Z}_t\circ l)\|_{\dot H^{1/2}}^2-\|\mathbb P_A (\bar Z_t-\bar {\tilde Z}_t\circ l)\|_{\dot H^{1/2}}^2;\\
\text{and }\qquad \|\bar Z_t-\bar {\tilde Z}_t\circ l\|_{\dot H^{1/2}}^2=\int i\partial_{\alpha'}(\bar Z_t-\bar {\tilde Z}_t\circ l)\bar{(\bar Z_t-\bar {\tilde Z}_t\circ l)}\,d\alpha'+2\|\mathbb P_A (\bar Z_t-\bar {\tilde Z}_t\circ l)\|_{\dot H^{1/2}}^2.
\end{align}
Because $\bar Z_t=\mathbb H\bar Z_t$ and $\bar {\tilde Z}_t=\mathbb H\bar {\tilde Z}_t$,
\[2\mathbb P_A (\bar Z_t-\bar {\tilde Z}_t\circ l)=-2\mathbb P_A (\bar {\tilde Z}_t\circ l)=-\mathcal Q_l (\bar {\tilde Z}_t\circ l)\]
and by \eqref{q1},
\[\|\mathcal Q_l (\bar {\tilde Z}_t\circ l)\|_{\dot H^{1/2}}\le C(\|l_{\alpha'}\|_{L^\infty}, \|(l^{-1})_{\alpha'}\|_{L^\infty})\|\partial_{\alpha'} \bar {\tilde Z}_t\|_{L^2}\|l_{\alpha'}-1\|_{L^2}\lesssim \|l_{\alpha'}-1\|_{L^2}. \]
So there is a constant $M_0$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$, such that for all $t\in [0, T]$ and $M\ge M_0$,
\begin{equation}\label{2350}
\|(\bar Z_t-\bar {\tilde Z}_t\circ l)(t)\|_{\dot H^{1/2}}^2\le \int i\partial_{\alpha'}(\bar Z_t-\bar {\tilde Z}_t\circ l)\bar{(\bar Z_t-\bar {\tilde Z}_t\circ l)}\,d\alpha'+M\|l_{\alpha'}-1\|_{L^2}^2\le \mathfrak F_1(t)+M\mathfrak F_0(t).
\end{equation}
A similar argument holds for $\nm{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}(t)}_{\dot H^{1/2}}^2$. This proves \eqref{dominate11}.
Now by $\frac{\kappa}{A_1}= \sqrt{\frac{l_{\alpha'}}{A_1\tilde {A_1}\circ l}}$, Remark~\ref{remark512} and the estimate \eqref{2000}, there is a constant $0<c<\infty$, depending on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$, such that
\begin{equation}\label{2350-1}
\frac1c\le \frac{\kappa}{A_1} \le c,
\end{equation}
so
\begin{equation}\label{2351}
\nm{(A_1-\tilde {A_1}\circ l)(t)}_{L^2}^2+\nm{(b_{\alpha'}-D_{\alpha'} Z_t-(\tilde b_{\alpha'}-\tilde D_{\alpha'} \tilde Z_t\circ l)) (t)}_{L^2}^2\lesssim M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t),
\end{equation}
for large enough $M$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$. Using \eqref{ba} we have, from Proposition~\ref{dl21},
\begin{equation}\label{2352}
\nm{b_{\alpha'}-2\Re D_{\alpha'} Z_t-(\tilde b_{\alpha'}-2\Re \tilde D_{\alpha'} \tilde Z_t\circ l)}_{L^2}\lesssim \|\bar Z_t-\bar {\tilde Z}_t\circ l\|_{\dot H^{1/2}}+\nm{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}_{\dot H^{1/2}}+ \|l_{\alpha'}-1\|_{L^2};
\end{equation}
combining with \eqref{2351} yields
\[ \nm{(D_{\alpha'} Z_t-\tilde D_{\alpha'} \tilde Z_t\circ l) (t)}_{L^2}^2+ \nm{(b_{\alpha'}-\tilde b_{\alpha'}\circ l )(t)}_{L^2}^2\lesssim M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t),\]
for large enough $M$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$. This proves \eqref{dominate12}.
\end{proof}
\begin{lemma}\label{dominate2}
Let $M_0$ be the constant in Lemma~\ref{dominate1}. Then for all $M\ge M_0$, and $t\in [0, T]$,
\begin{equation}\label{dominate21}
\|\mathbb P_A(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)(t)\|_{\dot H^{1/2}}^2\lesssim M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t).
\end{equation}
\end{lemma}
\begin{proof}
We have
\begin{equation}\label{2353}
2\mathbb P_A(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)=2\mathbb P_A(\bar Z_{tt})-2U_l \mathbb P_A (\bar {\tilde Z}_{tt})-\mathcal Q_l (\bar {\tilde Z}_{tt}\circ l);
\end{equation}
and by \eqref{q1},
\[\nm{\mathcal Q_l (\bar {\tilde Z}_{tt}\circ l)}_{\dot H^{1/2}}\lesssim \|l_{\alpha'}-1\|_{L^2}.\]
Consider the first two terms on the right-hand side of \eqref{2353}. We use \eqref{eq:c21} and the fact that $\bar Z_t=\mathbb H\bar Z_t$ to rewrite
\begin{equation}\label{2354}
2\mathbb P_A(\bar Z_{tt})=[\partial_t+b\partial_{\alpha'}, \mathbb H]\bar Z_{t}=[b, \mathbb H]\partial_{\alpha'} \bar Z_t.
\end{equation}
We would like to use \eqref{dhhalf1-inq} to estimate $\nm{2\mathbb P_A(\bar Z_{tt})-2U_l \mathbb P_A (\bar {\tilde Z}_{tt})}_{\dot H^{1/2}}$;
observe that we have controlled all the quantities
on the right-hand side of \eqref{dhhalf1-inq},
except for $\|\mathbb H b_{\alpha'}\|_{L^\infty}$.
By \eqref{ba},
\begin{equation}\label{2355}
\begin{aligned}
b_{\alpha'}-2\Re D_{\alpha'} Z_t=\Re \paren{\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}+ \bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} },
\end{aligned}
\end{equation}
and by the fact that $Z_{t,\alpha'}=-\mathbb H Z_{t,\alpha'}$,
\begin{equation}\label{2356}
\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}=-(I+\mathbb H)D_{\alpha'} Z_t,
\end{equation}
so
\begin{equation}\label{2357}
\mathbb H \bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}=\bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}.
\end{equation}
This gives, by \eqref{eq:b13},
\begin{equation}\label{2358}
\nm{\mathbb H \bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}}_{L^\infty}\lesssim \nm{\partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^2}\nm{ Z_{t,\alpha'}}_{L^2}\lesssim C(\mathfrak E(t));
\end{equation}
similarly
$\nm{\mathbb H \bracket{Z_t, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} }_{L^\infty}\lesssim C(\mathfrak E(t))$, therefore $\nm{\mathbb H(b_{\alpha'}-2\Re D_{\alpha'} Z_t)}_{L^\infty}\lesssim C(\mathfrak E(t))$. Observe that the argument in \eqref{2356}-\eqref{2358} also shows that \[\nm{(I+\mathbb H)D_{\alpha'} Z_t}_{L^\infty}\lesssim C(\mathfrak E(t)),\] therefore
\begin{equation}\label{2358-1}
\nm{\mathbb H D_{\alpha'} Z_t}_{L^\infty}\le \nm{(I+\mathbb H)D_{\alpha'} Z_t}_{L^\infty}+\nm{D_{\alpha'} Z_t}_{L^\infty}
\lesssim C(\mathfrak E(t)),
\end{equation}
and hence $\nm{\mathbb H b_{\alpha'}}_{L^\infty}\lesssim C(\mathfrak E(t))$. Notice that
\[\nm{ \partial_{\alpha'}(b-\tilde b\circ l)}_{L^2}\lesssim \nm{ b_{\alpha'}-\tilde b_{\alpha'}\circ l}_{L^2}+\|l_{\alpha'}-1\|_{L^2}.\]
Applying \eqref{dhhalf1-inq} to \eqref{2354} we get
\[\nm{2\mathbb P_A(\bar Z_{tt})-2U_l \mathbb P_A (\bar {\tilde Z}_{tt})}_{\dot H^{1/2}}\lesssim \nm{ b_{\alpha'}-\tilde b_{\alpha'}\circ l}_{L^2}+\|l_{\alpha'}-1\|_{L^2}+\nm{\bar Z_t-\bar {\tilde Z}_t\circ l}_{\dot H^{1/2}}.\]
A further application of Lemma~\ref{dominate1} yields Lemma~\ref{dominate2}.
\end{proof}
As a consequence of Lemmas~\ref{dominate1} and \ref{dominate2} we have
\begin{proposition}\label{denergy}
There is a constant $M_0>0$, depending only on $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$, such that for all $M\ge M_0$, and $t\in [0, T]$,
\begin{equation}\label{2359}
\begin{aligned}
&\|(\bar Z_t-\bar {\tilde Z}_t\circ l)(t)\|_{\dot H^{1/2}}^2+\nm{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}(t)}_{\dot H^{1/2}}^2+\|(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)(t)\|_{\dot H^{1/2}}^2\\ &+
\nm{(A_1-\tilde {A_1}\circ l)(t)}_{L^2}^2+\nm{(D_{\alpha'} Z_t-\tilde D_{\alpha'} \tilde Z_t\circ l) (t)}_{L^2}^2+ \nm{(b_{\alpha'}-\tilde b_{\alpha'}\circ l) (t)}_{L^2}^2+\nm{l_{\alpha'}(t)-1}_{L^2}^2\\&
+\nm{\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ h^{-1}}(t)}_{L^2}^2\lesssim M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t)+M^{-1} \mathfrak F_3(t).
\end{aligned}
\end{equation}
\end{proposition}
\begin{remark}\label{remark517}
It is clear that the reverse of inequality \eqref{2359} also holds. Observe that by \eqref{a1} and Proposition~\ref{dl21}, we have for any $t\in [0, T]$,
\begin{equation}
\nm{(A_1-\tilde {A_1}\circ l)(t)}_{L^2}\lesssim \|(\bar Z_t-\bar {\tilde Z}_t\circ l)(t)\|_{\dot H^{1/2}}+ \|l_{\alpha'}(t)-1\|_{L^2},
\end{equation}
and by \eqref{at}-\eqref{ba}-\eqref{dta1} and Propositions~\ref{dl21}, \ref{d32},
\begin{equation}
\begin{aligned}
& \nm{ \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ h^{-1}}(t) }_{L^2}=\nm{ \paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ \tilde h^{-1}\circ l}(t) }_{L^2} \\& \lesssim \|(\bar Z_t-\bar {\tilde Z}_t\circ l)(t)\|_{\dot H^{1/2}}+ \nm{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}(t)}_{\dot H^{1/2}}+\|(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)(t)\|_{\dot H^{1/2}}\\&\qquad+\nm{(b_{\alpha'}-\tilde b_{\alpha'}\circ l) (t)}_{L^2}+\|l_{\alpha'}(t)-1\|_{L^2}.
\end{aligned}
\end{equation}
This, together with \eqref{2352}, shows that for all $t\in [0, T]$,
\begin{equation}\label{2360}
\begin{aligned}
M\mathfrak F_0(t)+\mathfrak F_1(t)+&\mathfrak F_2(t)+M^{-1} \mathfrak F_3(t)\lesssim \|(\bar Z_t-\bar {\tilde Z}_t\circ l)(t)\|_{\dot H^{1/2}}^2+\nm{\paren{\frac1{Z_{,\alpha'}}- \frac1{\tilde Z_{,\alpha'}}\circ l}(t)}_{\dot H^{1/2}}^2\\&+\|(\bar Z_{tt}-\bar {\tilde Z}_{tt}\circ l)(t)\|_{\dot H^{1/2}}^2+\nm{(D_{\alpha'} Z_t-\tilde D_{\alpha'} \tilde Z_t\circ l) (t)}_{L^2}^2
+\nm{l_{\alpha'}(t)-1}_{L^2}^2.
\end{aligned}
\end{equation}
\end{remark}
Now fix a constant $M$, with $M\ge M_0>0$, so that \eqref{2359} holds. We define
\begin{equation}\label{dfunctional}
\mathcal F(t):= M\mathfrak F_0(t)+\mathfrak F_1(t)+\mathfrak F_2(t)+M^{-1}\mathfrak F_3(t).
\end{equation}
We have
\begin{proposition}\label{denergy-est}
Assume that $Z=Z(\alpha', t)$, $\tilde Z=\tilde Z(\alpha',t)$ are solutions of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, satisfying the assumption of Theorem~\ref{unique}. Then there is a constant $C$, depending only on $T$, $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$, such that for $t\in [0, T]$,
\begin{equation}\label{denergy-inq}
\frac d{dt}\mathcal F(t)\le C\paren{ \mathcal F(t)+ \int_0^t \mathcal F(\tau)\,d\tau + \nm{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}}(0)}_{L^\infty}\mathcal F(t)^{\frac12}}.
\end{equation}
\end{proposition}
Assuming Proposition~\ref{denergy-est} holds, we have, by \eqref{denergy-inq},
\begin{equation}\label{2460}
\frac d{dt}\paren{\mathcal F(t)+\int_0^t \mathcal F(\tau)\,d\tau}\le C\paren{ \mathcal F(t)+ \int_0^t \mathcal F(\tau)\,d\tau + \nm{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}}(0)}_{L^\infty}^2};
\end{equation}
and by Gronwall's inequality,
\begin{equation}\label{2461}
\mathcal F(t)+\int_0^t \mathcal F(\tau)\,d\tau\le C\paren{\mathcal F(0)+ \nm{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}}(0)}_{L^\infty}^2},\qquad \text{for }t\in [0, T],
\end{equation}
for some constant $C$ depending on $T$, $\sup_{[0, T]}\mathfrak E(t)$ and $\sup_{[0, T]}\tilde{\mathfrak E}(t)$.
This together with \eqref{2359} and \eqref{2360} gives \eqref{stability}.
We now give the proof of Proposition~\ref{denergy-est}.
\begin{proof}
To prove Proposition~\ref{denergy-est} we apply Lemma~\ref{basic-e2} to $\Theta=l_{\alpha'}-1$, and Lemma~\ref{dlemma1} to $\Theta=\bar Z_t,\ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$. We have, by Lemma~\ref{basic-e2} and \eqref{2340},
\begin{equation}\label{2361}
\mathfrak F'_0(t)\le 2\nm{l_{\alpha'} (b_{\alpha'}-\tilde b_{\alpha'}\circ l)}_{L^2}\mathfrak F_0(t)^{1/2}+\|b_{\alpha'}\|_{L^\infty}\mathfrak F_0(t)\lesssim \mathcal F(t),
\end{equation}
here we used \eqref{2346-1}, \eqref{2359}, and \S\ref{basic-quantities}, \eqref{2020}.
Now we apply Lemma~\ref{dlemma1} to $\Theta=\bar Z_t,\ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$ to get the estimates for $\mathfrak F_1'(t)$, $\mathfrak F_2'(t)$ and $\mathfrak F_3'(t)$. Checking through the right-hand sides of the inequalities \eqref{dlemma1-inq} for $\Theta=\bar Z_t,\ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$, we find that we have controlled almost all of the quantities, respectively by $\mathcal F(t)$ or $\mathfrak E(t)$, $\tilde{\mathfrak E}(t)$, except for the following:
\begin{itemize}
\item
1. $\nm{\frac{(\partial_t+b\partial_{\alpha'})\kappa }{\kappa}}_{L^2}$;
\item
2. $\nm{1-\kappa}_{L^2}$;
\item
3. $2\Re i\int \bar{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l }} \paren{\bar{\tilde Z_{,\alpha'}\circ l((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c)}\Theta_{\alpha'}-\bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}(\tilde{\Theta}\circ l)_{\alpha'}}\,d\alpha'$, \newline
for $\Theta=\bar Z_t,\ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$; with $\mathfrak c=-i$ for $\Theta=\bar Z_t$, and $\mathfrak c=0$ for $\Theta=\frac1{Z_{,\alpha'}}-1$ and $\bar Z_{tt}$;
\item
4. $\nm{ Z_{,\alpha'}G_i- \tilde Z_{,\alpha'}\circ l\,\tilde G_i\circ l}_{L^2}$, for $i=1,2,3$.
\end{itemize}
We begin with items 1 and 2. By definition $\kappa=\sqrt{\frac{A_1}{\tilde {A_1}\circ l} l_{\alpha'}}$, so
\begin{equation}\label{2362}
2 \frac{(\partial_t+b\partial_{\alpha'})\kappa }{\kappa}= \frac{(\partial_t+b\partial_{\alpha'})A_1}{A_1}-\frac{(\partial_t+\tilde b \partial_{\alpha'})\tilde {A_1} }{\tilde {A_1}}\circ l+(\tilde b_{\alpha'}\circ l-b_{\alpha'});
\end{equation}
and by \eqref{at},
\begin{equation}\label{2363}
\frac{(\partial_t +b\partial_{\alpha'}) A_1}{A_1}=\dfrac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-(b_{\alpha'} -2\Re D_{\alpha'} Z_t);
\end{equation}
therefore
\begin{equation}\label{2364}
\nm{\frac{(\partial_t+b\partial_{\alpha'})\kappa }{\kappa}}_{L^2}\lesssim \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ h^{-1}}_{L^2}+ \nm{b_{\alpha'}-\tilde b_{\alpha'}\circ l}_{L^2}+\nm{D_{\alpha'} Z_t-\tilde D_{\alpha'}\tilde Z_t \circ l}_{L^2}\lesssim \mathcal F(t)^{\frac12}.
\end{equation}
And it is clear by the definition of $\kappa$ that
\begin{equation}\label{2365}
\nm{1-\kappa}_{L^2}\lesssim \nm{A_1-\tilde {A_1}\circ l}_{L^2}+\nm{l_{\alpha'}-1}_{L^2}\lesssim \mathcal F(t)^{\frac12}.
\end{equation}
What remains to be controlled are the quantities in items 3 and 4. We first consider item 4. We have, by \eqref{eqzt}, ${Z_{,\alpha'}}G_1=-i\,\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} A_1$, so
\begin{equation}\label{2366}
\nm{ Z_{,\alpha'}G_1- \tilde Z_{,\alpha'}\circ l\,\tilde G_1\circ l}_{L^2}\lesssim \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}-\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}}\circ h^{-1}}_{L^2}+\nm{A_1-\tilde {A_1}\circ l}_{L^2}\lesssim \mathcal F(t)^{\frac12};
\end{equation}
and by \eqref{eqza}, ${Z_{,\alpha'}}G_2=(b_{\alpha'}-D_{\alpha'} Z_t)^2+(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-\paren{\bar {D_{\alpha'} Z_t}}^2-i\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}$, so
\begin{equation}\label{2367}
\begin{aligned}
& \nm{ Z_{,\alpha'}G_2- \tilde Z_{,\alpha'}\circ l\,\tilde G_2\circ l}_{L^2}\lesssim \nm{b_{\alpha'}-\tilde b_{\alpha'}\circ l}_{L^2}+\nm{D_{\alpha'} Z_t-\tilde D_{\alpha'}\tilde Z_t \circ l}_{L^2}+\\&\nm{(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-(\partial_t+\tilde b\partial_{\alpha'})(\tilde b_{\alpha'}- 2\Re \tilde D_{\alpha'}\tilde Z_t )\circ l}_{L^2}+\nm{\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}-\frac{\partial_{\alpha'} \tilde {A_1}}{|\tilde Z_{,\alpha'}|^2}\circ l}_{L^2};
\end{aligned}
\end{equation}
observe that we have controlled all but the last two quantities on the right-hand side of \eqref{2367} by $\mathcal F(t)^{1/2}$. By \eqref{eqztt},
$Z_{,\alpha'}G_3= -i\,A_1 \paren{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}\circ h^{-1}}+\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}\circ h^{-1}}^2+2\paren{\frac{\mathfrak{a}_t}{\mathfrak{a}}\circ h^{-1}}\bar{D_{\alpha'} Z_t}}$, so
\betaegin{equation}\label{2368}
\betaegin{aligned}
&\nm{ Z_{,\alphaa}G_3- {\mathbb{Z}}f_{,\alphaa}\circ l\tauilde G_3\circ l}_{L^2}\lesssim \nm{\fracrac{\fracrak a_t}{\fracrak a}\circ h^{-1}-\fracrac{\tauilde{\mathfrak{a}}_t}{\tauilde{\mathfrak{a}}}\circ h^{-1}}_{L^2}+\nm{D_\alphaa Z_t-\tauilde D_\alphaa{\mathbb{Z}}f_t \circ l}_{L^2}\\&+\nm{A_1-\tauilde {A_1}\circ l}_{L^2}\paren{\nm{(\partial_t+b\partial_\alphaa)\paren{\fracrac{\fracrak a_t}{\fracrak a}\circ h^{-1} } }_{L^^{-1}nfty}+1}
\\&\qquad+
\nm{(\partial_t+b\partial_\alphaa)\paren{\fracrac{\fracrak a_t}{\fracrak a}\circ h^{-1}}-(\partial_t+\tauilde b\partial_\alphaa)\paren{\fracrac{\tauilde{\mathfrak{a}}_t}{\tauilde{\mathfrak{a}}} \circ \tauh^{-1} }\circ l}_{L^2}.
\end{aligned}
\end{equation}
We have controlled all but the factor in the third quantity and the very last quantity on the right hand side of \eqref{2368}.
In the remaining part of the proof for Proposition~\ref{denergy-est}, we will
show the following inequalities
\begin{itemize}
\item
$\nm{\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}-\frac{\partial_{\alpha'} \tilde {A_1}}{|\tilde Z_{,\alpha'}|^2}\circ l}_{L^2}\lesssim \mathcal F(t)^{\frac12}$;
\item
$\nm{(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-(\partial_t+\tilde b\partial_{\alpha'})(\tilde b_{\alpha'}- 2\Re \tilde D_{\alpha'}\tilde Z_t )\circ l}_{L^2}\lesssim \mathcal F(t)^{\frac12}$;
\item
$\nm{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} } }_{L^\infty}\le C(\mathfrak E(t));$
\item
$ \nm{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}-(\partial_t+\tilde b\partial_{\alpha'})\paren{\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}} \circ \tilde h^{-1} }\circ l}_{L^2}\lesssim \mathcal F(t)^{\frac12}$;
\item
and control the quantities in item 3.
\end{itemize}
Our main strategy is the same as always, that is, to rewrite the quantities in forms to which the results in \S\ref{prepare} can be applied.
\subsubsection{Some additional quantities controlled by $\mathfrak E(t)$ and by $\mathcal F(t)$}\label{additional}
We begin with deriving some additional estimates that will be used in the proof. First we record the conclusions from the computations of \eqref{2355}-\eqref{2358-1},
\begin{equation}\label{2380}
\nm{\mathbb H D_{\alpha'} Z_t}_{L^\infty}\le C(\mathfrak E(t)),\qquad \nm{\mathbb H b_{\alpha'} }_{L^\infty}\le C(\mathfrak E(t)).
\end{equation}
Because $\partial_{\alpha'} \frac1{Z_{,\alpha'}} =\mathbb H\paren{\partial_{\alpha'} \frac1{Z_{,\alpha'} }}$,
\begin{equation}\label{2381}
2\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}=\bracket{Z_{t},\mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'} };
\end{equation}
and we have, by \eqref{eq:b13} and \eqref{dl21-inq},
\begin{align}
\nm{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}_{L^\infty}
\lesssim \nm{Z_{t,\alpha'}}_{L^2}\nm{ \partial_{\alpha'} \frac1{Z_{,\alpha'} }}_{L^2}\le C(\mathfrak E(t));\label{2382}\\
\nm{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}-U_l \mathbb P_A\paren{\tilde Z_{t} \partial_{\alpha'} \frac1{\tilde Z_{,\alpha'} }}}_{L^2}\lesssim \mathcal F(t)^{1/2}.\label{2383}
\end{align}
Similarly we have
\begin{equation}\label{2384}
\nm{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \bar Z_t}-U_l \mathbb P_A\paren{\tilde Z_{t} \partial_{\alpha'} \bar{\tilde Z}_t}}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
By \eqref{a1}, $iA_1=i-\mathbb P_A(Z_t\bar Z_{t,\alpha'})+\mathbb P_H(\bar Z_t Z_{t,\alpha'})$,
and by \eqref{aa1},
\begin{equation}\label{2385}
\bar Z_{tt}-i=-\frac{i}{Z_{,\alpha'}}+\frac{\mathbb P_A(Z_t\bar Z_{t,\alpha'})}{Z_{,\alpha'}}-\frac{\mathbb P_H(\bar Z_t Z_{t,\alpha'})}{Z_{,\alpha'}};
\end{equation}
applying $\mathbb P_H$ to both sides of \eqref{2385} and rewriting the second term on the right hand side as a commutator gives
\begin{equation}\label{2386}
\frac{\mathbb P_H(\bar Z_t Z_{t,\alpha'})}{Z_{,\alpha'}}=-i\paren{\frac{1}{Z_{,\alpha'}}-1}-\frac12\bracket{\frac{1}{Z_{,\alpha'}},\mathbb H}\mathbb P_A(Z_t\bar Z_{t,\alpha'})-\mathbb P_H(\bar Z_{tt}).
\end{equation}
Now we apply \eqref{dhhalf2-inq}, \eqref{q1}, \eqref{2359} and \eqref{2384} to get
\begin{align}
\nm{\bracket{\frac{1}{Z_{,\alpha'}},\mathbb H}\mathbb P_A(Z_t\bar Z_{t,\alpha'})-U_l \bracket{\frac{1}{\tilde Z_{,\alpha'}},\mathbb H}\mathbb P_A(\tilde Z_t\bar{\tilde Z}_{t,\alpha'}) }_{\dot H^{1/2}}
\lesssim \mathcal F(t)^{1/2};\label{2387}\\
\nm{\frac{\mathbb P_H(\bar Z_t Z_{t,\alpha'})}{Z_{,\alpha'}}-U_l\frac{\mathbb P_H(\bar{\tilde Z}_t \tilde Z_{t,\alpha'})}{\tilde Z_{,\alpha'}} }_{\dot H^{1/2}}\lesssim \mathcal F(t)^{1/2};\label{2388}
\end{align}
consequently by \eqref{2385} and \eqref{2388}, \eqref{2359},
\begin{equation}\label{2389}
\nm{\frac{\mathbb P_A(\bar Z_t Z_{t,\alpha'})}{Z_{,\alpha'}}-U_l\frac{\mathbb P_A(\bar{\tilde Z}_t \tilde Z_{t,\alpha'})}{\tilde Z_{,\alpha'}} }_{\dot H^{1/2}}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
Similarly we have
\begin{align}
\nm{\bracket{\frac{1}{Z_{,\alpha'}},\mathbb H}\mathbb P_A\paren{Z_t \partial_{\alpha'}\frac1{Z_{,\alpha'}}}-U_l \bracket{\frac{1}{\tilde Z_{,\alpha'}},\mathbb H}\mathbb P_A\paren{\tilde Z_t \partial_{\alpha'}\frac1{\tilde Z_{,\alpha'}} } }_{\dot H^{1/2}}
\lesssim \mathcal F(t)^{1/2};\label{2390}\\
\nm{\bracket{\frac{1}{Z_{,\alpha'}}, \mathbb H}\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }-U_l\bracket{\frac{1}{\tilde Z_{,\alpha'}}, \mathbb H}\paren{ \tilde {A_1}\paren{\bar{\tilde D_{\alpha'} \tilde Z_t}+\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}} }\circ \tilde h^{-1}} }}_{\dot H^{1/2}}\lesssim \mathcal F(t)^{1/2};\label{2390-1}
\end{align}
provided we can show that \begin{equation}\label{2390-2}
\nm{ \mathbb H\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} }_{L^\infty}\lesssim C(\mathfrak E).\end{equation}
We now prove \eqref{2390-2}. It suffices to show $\nm{ \mathbb P_A\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}} }_{L^\infty}\lesssim C(\mathfrak E)$, since we have $ \nm{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }_{L^\infty}\lesssim C(\mathfrak E)$.
We know
\[2\mathbb P_A (A_1 \bar{D_{\alpha'} Z_t})=\bracket{\frac{A_1}{\bar Z_{,\alpha'}},\mathbb H}\bar Z_{t,\alpha'}=-i[Z_{tt},\mathbb H]\bar Z_{t,\alpha'},\]
hence by Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77},
\begin{equation}\label{2390-3}
\nm{2\mathbb P_A (A_1 \bar{D_{\alpha'} Z_t})}_{L^\infty}\lesssim \|Z_{tt,\alpha'}\|_{L^2}\|Z_{t,\alpha'}\|_{L^2}\lesssim C(\mathfrak E).
\end{equation}
For the second term we use the formula (2.23) of \cite{wu6},\footnote{This formula can be checked directly from \eqref{at}-\eqref{ba}-\eqref{dta1} via similar manipulations as in \eqref{2446-1}-\eqref{2448}.}
\begin{equation}\label{2390-4}
{A_1}\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}=-\Im( 2[Z_t,\mathbb H]{\bar Z}_{tt,\alpha'}+2[Z_{tt},\mathbb H]\partial_{\alpha'} \bar Z_t-
[Z_t, Z_t; D_{\alpha'} \bar Z_t]),
\end{equation}
observe that the quantities $[Z_t,\mathbb H]{\bar Z}_{tt,\alpha'}$, $[Z_{tt},\mathbb H]\partial_{\alpha'} \bar Z_t$ are anti-holomorphic by \eqref{comm-hilbe}, and $[Z_t, Z_t; D_{\alpha'} \bar Z_t]$ is anti-holomorphic by integration by parts and \eqref{comm-hilbe}, so
\begin{equation}\label{2390-5}
\mathbb P_A\paren{{A_1}\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}=i\paren{ [Z_t,\mathbb H]{\bar Z}_{tt,\alpha'}+[Z_{tt},\mathbb H]\partial_{\alpha'} \bar Z_t-\frac12
[Z_t, Z_t; D_{\alpha'} \bar Z_t]};
\end{equation}
therefore
\begin{equation}\label{2390-6}
\nm{\mathbb P_A\paren{{A_1}\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}_{L^\infty}\lesssim C(\mathfrak E)
\end{equation}
by Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77}. This proves \eqref{2390-2}.
In what follows we will need the bound for $\nm{Z_{ttt,\alpha'}}_{L^2}$. We begin with \eqref{eq:dztt} and calculate $\bar Z_{ttt,\alpha'}$. We have
\begin{equation}\label{2410}
\bar Z_{ttt,\alpha'}=\bar Z_{tt,\alpha'} (\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1})-iA_1D_{\alpha'}(\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1})
\end{equation}
where we substituted the factor $\bar Z_{tt}-i$ in the second term by $-\frac{iA_1}{Z_{,\alpha'}}$, see \eqref{eq:dzt}. We know from \S\ref{proof} that all the quantities in \eqref{2410} are controlled and we have
\begin{equation}\label{2411}
\nm{Z_{ttt,\alpha'}}_{L^2}\le C(\mathfrak E(t)).
\end{equation}
\subsubsection{Controlling the $\dot H^{1/2}$ norms of $Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)$ for $\Theta=\bar Z_t, \frac1{Z_{,\alpha'}}-1, \bar Z_{tt}$, with $\mathfrak c=-i, 0, 0$ respectively}\label{hhalf-norm}
We will use Proposition~\ref{half-product} to control the item 3 above. To do so we need to check that
the assumptions of the proposition hold. One of them is $Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)\in \dot H^{1/2}\cap L^\infty$, for $\Theta=\bar Z_t, \frac1{Z_{,\alpha'}}-1, \bar Z_{tt}$; $\mathfrak c=-i, 0, 0$ respectively; with the norms bounded by $C(\mathfrak E(t))$. By \eqref{eq:dzt}, \eqref{eq:dztt} and \eqref{eq:dza},
\begin{equation}\label{2391}
Z_{,\alpha'}(\bar Z_{tt}-i)=-iA_1,\quad Z_{,\alpha'}(\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}=b_{\alpha'}-D_{\alpha'} Z_t,\quad Z_{,\alpha'} \bar Z_{ttt}=-iA_1\paren{\bar {D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}.
\end{equation}
In \S\ref{proof}, we have shown that these quantities are in $L^\infty$, with their $L^\infty$ norms controlled by $C(\mathfrak E(t))$. So we only need to estimate their $\dot H^{1/2}$ norms.
Applying Proposition~\ref{hhalf4}, \eqref{hhalf42} to \eqref{a1} and \eqref{ba}, we get $A_1, b_{\alpha'}-2\Re D_{\alpha'} Z_t\in \dot H^{1/2}$, with
\begin{align}
& \|A_1\|_{\dot H^{1/2}}\lesssim \nm{\partial_{\alpha'} Z_t}_{L^2}^2\lesssim C(\mathfrak E(t));\label{2392}
\\
& \|b_{\alpha'}-2\Re D_{\alpha'} Z_t\|_{\dot H^{1/2}}\lesssim \nm{\partial_{\alpha'} Z_t}_{L^2}\nm{\partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^2}\lesssim C(\mathfrak E(t)).\label{2393}
\end{align}
We next compute $\|D_{\alpha'} Z_t(t)\|_{\dot H^{1/2}}$. By definition,
\begin{equation}\label{2394}
\begin{aligned}
\|D_{\alpha'} Z_t(t)\|_{\dot H^{1/2}}^2&= \int (i\partial_{\alpha'} \mathbb H D_{\alpha'} Z_t)\, \bar{D_{\alpha'} Z_t }\,d\alpha'\\&= \int i\partial_{\alpha'} \bracket{\mathbb H, \frac1{Z_{,\alpha'}}} Z_{t,\alpha'} \, \bar{D_{\alpha'} Z_t }\,d\alpha'+ \int i\partial_{\alpha'} \paren{\frac1{Z_{,\alpha'}} \mathbb H Z_{t,\alpha'}} \bar{D_{\alpha'} Z_t }\,d\alpha'\\&=
\int i\bar{D_{\alpha'}}\bracket{\mathbb H, \frac1{Z_{,\alpha'}}} Z_{t,\alpha'}\, \partial_{\alpha'}\bar{ Z_t }\,d\alpha'+ \int i Z_{t,\alpha'} (D_{\alpha'} \bar{D_{\alpha'} Z_t })\,d\alpha'
\end{aligned}
\end{equation}
where in the last step we used integration by parts and the fact $\mathbb H Z_{t,\alpha'}=-Z_{t,\alpha'}$.
Recall in \eqref{2042}, we have shown $\nm{D_{\alpha'}\bracket{\mathbb H, \frac1{Z_{,\alpha'}}} Z_{t,\alpha'} }_{L^2}\le C(\mathfrak E(t))$. So by Cauchy-Schwarz inequality, we have
\begin{equation}\label{2395}
\|D_{\alpha'} Z_t(t)\|_{\dot H^{1/2}}^2\lesssim \nm{D_{\alpha'}\bracket{\mathbb H, \frac1{Z_{,\alpha'}}} Z_{t,\alpha'} }_{L^2}\nm{Z_{t,\alpha'} }_{L^2}+\nm{Z_{t,\alpha'} }_{L^2}\nm{D_{\alpha'}^2Z_{t} }_{L^2}\le C(\mathfrak E(t)).
\end{equation}
Now we consider $\nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{\dot H^{1/2}}$. By \eqref{at}-\eqref{ba}-\eqref{dta1}, we know Proposition~\ref{hhalf4}, \eqref{hhalf42} can be used to handle all terms, except for $[Z_t, b; \bar Z_{t,\alpha'}]$.
Let $p\in C_0^\infty(\mathbb R)$, we have, by duality,
\begin{equation}\label{2396}
\abs{\int \partial_{\alpha'} p [Z_t, b; \bar Z_{t,\alpha'}]\,d\alpha'} =\abs{\int [Z_t, b; \partial_{\alpha'} p]\bar Z_{t,\alpha'}\,d\alpha'}\lesssim \|Z_{t,\alpha'}\|_{L^2}^2\|b_{\alpha'}\|_{L^\infty}\|p\|_{\dot H^{1/2}},
\end{equation}
where in the last step we used Cauchy-Schwarz inequality and \eqref{eq:b111}. Therefore
$\nm{[Z_t, b; \bar Z_{t,\alpha'}]}_{\dot H^{1/2}}\le C(\mathfrak E(t))$. Applying Proposition~\ref{hhalf4}, \eqref{hhalf42} to the remaining terms and using \eqref{hhalf-1} yields
\begin{equation}\label{2397}
\nm{ \frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1} }_{\dot H^{1/2}}\le C(\mathfrak E(t)).
\end{equation}
We can now conclude that for $\Theta=\bar Z_t, \ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$, with $\mathfrak c=i, 0, 0$ respectively,
\begin{equation}\label{2398}
\nm{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}_{L^\infty}+\nm{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}_{\dot H^{1/2}}\le C(\mathfrak E(t)).
\end{equation}
\subsubsection{Controlling $\int \bar{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l }} \paren{\bar{\tilde Z_{,\alpha'}\circ l((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c)}\Theta_{\alpha'}-\bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}(\tilde{\Theta}\circ l)_{\alpha'}}\,d\alpha'$}
We begin with studying $\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l$. By \eqref{2100},
$\frac1{Z_{,\alpha'}}(h(\alpha,t),t)=\frac1{Z_{,\alpha'}}(\alpha,0)e^{\int_0^t (b_{\alpha'}\circ h(\alpha,\tau)-D_\alpha z_t(\alpha,\tau))\,d\tau}$, so
\begin{equation}\label{2399}
\begin{aligned}
\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l}\circ h&=\paren{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)} e^{\int_0^t (\tilde b_{\alpha'}-\tilde D_{\alpha'} \tilde Z_t)\circ \tilde h (\tau)\,d\tau}\\&+\frac1{Z_{,\alpha'}}\circ h\paren{1- e^{\int_0^t ((\tilde b_{\alpha'}-\tilde D_{\alpha'} \tilde Z_t)\circ \tilde h-(b_{\alpha'}-D_{\alpha'} Z_t)\circ h) (\tau)\,d\tau}}.
\end{aligned}
\end{equation}
We know for $t\in [0, T]$,
\begin{equation}\label{2470}
\nm{\paren{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)} e^{\int_0^t (\tilde b_{\alpha'}-\tilde D_{\alpha'} \tilde Z_t)\circ \tilde h (\tau)\,d\tau}}_{L^\infty}\le C(\sup_{[0, T]}\tilde{\mathfrak E} (t))\nm{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)}_{L^\infty};
\end{equation}
and
\begin{equation}\label{2471}
\nm{ 1- e^{\int_0^t ((\tilde b_{\alpha'}-\tilde D_{\alpha'} \tilde Z_t)\circ \tilde h-(b_{\alpha'}-D_{\alpha'} Z_t)\circ h) (\tau)\,d\tau} }_{L^2}\lesssim \int_0^t \mathcal F(\tau)^{1/2}\,d\tau.
\end{equation}
Now we rewrite
\begin{equation}\label{2472}
\begin{aligned}
&\int \bar{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l }} \paren{\bar{\tilde Z_{,\alpha'}\circ l((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c)}\Theta_{\alpha'}-\bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}(\tilde{\Theta}\circ l)_{\alpha'}}\,d\alpha'\\&
=\int \bar{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l }}\Theta_{\alpha'} \paren{\bar{\tilde Z_{,\alpha'}\circ l((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c)}-\bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}}\,d\alpha'\\&+
\int \bar{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l }} \bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}\,(\Theta-\tilde{\Theta}\circ l)_{\alpha'}\,d\alpha'=I+II.
\end{aligned}
\end{equation}
We apply Proposition~\ref{half-product} to $II$, with $g=\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l$, and $f=Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)$, where $\Theta=\bar Z_t, \ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$, with $\mathfrak c=i, 0, 0$ respectively. We know
\[\partial_{\alpha'}\paren{\frac1{Z_{,\alpha'}} f}= \bar Z_{tt,\alpha'},\quad \partial_{\alpha'}(\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}},\quad \bar Z_{ttt,\alpha'},\qquad \text{for }\Theta=\bar Z_t, \ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt},\]
so $\nm{\partial_{\alpha'}\paren{\frac1{Z_{,\alpha'}} f}}_{L^2}\le C(\mathfrak E(t))$, by \S\ref{proof} and \eqref{2411}. Applying Proposition~\ref{half-product} to the $g$ and $f$ given above yields
\begin{equation}\label{2473}
\begin{aligned}
&\nm{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l}\, Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}_{\dot H^{1/2}}\\&\qquad \lesssim \nm{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l}_{\dot H^{1/2}}+ \nm{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)}_{L^\infty}+\int_0^t \mathcal F(\tau)^{1/2}\,d\tau;
\end{aligned}
\end{equation}
consequently
\begin{equation}\label{2474}
\abs{II}\lesssim \mathcal F(t)+T\int_0^t\mathcal F(\tau)\,d\tau+\nm{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)}_{L^\infty}\mathcal F(t)^{1/2}.
\end{equation}
We apply the decomposition \eqref{2399} and Cauchy-Schwarz inequality to $I$, notice that $\nm{\Theta_{\alpha'}}_{L^2}\le C(\mathfrak E(t))$, and $\nm{D_{\alpha'} \Theta}_{L^\infty}\le C(\mathfrak E(t))$, for $\Theta=\bar Z_t, \ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$. We have
\begin{equation}\label{2475}
\abs{I}\lesssim \nm{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)}_{L^\infty}\mathcal F(t)^{1/2}+\mathcal F(t)^{1/2}\int_0^t\mathcal F(\tau)^{1/2}\,d\tau.
\end{equation}
This shows that for $\Theta=\bar Z_t, \ \frac1{Z_{,\alpha'}}-1,\ \bar Z_{tt}$, with $\mathfrak c=i, 0, 0$ respectively,
\begin{equation}\label{2476}
\begin{aligned}
&\abs{\int \bar{\paren{\frac1{Z_{,\alpha'}}-\frac1{\tilde Z_{,\alpha'}}\circ l }} \paren{\bar{\tilde Z_{,\alpha'}\circ l((\partial_t+\tilde b\partial_{\alpha'})\tilde{\Theta}\circ l+\mathfrak c)}\Theta_{\alpha'}-\bar{Z_{,\alpha'}((\partial_t+b\partial_{\alpha'})\Theta+\mathfrak c)}(\tilde{\Theta}\circ l)_{\alpha'}}\,d\alpha'}\\&\qquad\qquad\lesssim \mathcal F(t)+T\int_0^t\mathcal F(\tau)\,d\tau+\nm{\frac1{Z_{,\alpha'}}(0)-\frac1{\tilde Z_{,\alpha'}}(0)}_{L^\infty}\mathcal F(t)^{1/2}.
\end{aligned}
\end{equation}
\subsubsection{Controlling $\nm{\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}-\frac{\partial_{\alpha'} \tilde {A_1}}{|\tilde Z_{,\alpha'}|^2}\circ l}_{L^2}$}\label{da1z2}
We will take advantage of the fact that $\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}$ is purely real to use $(I+\mathbb H)$ to convert it to some commutator forms to which the Propositions in \S\ref{prepare} can be applied.
Observe that
\begin{equation}\label{2369}
i\,\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}= \frac{1}{Z_{,\alpha'}}\partial_{\alpha'}\frac{ i A_1}{\bar Z_{,\alpha'}}-\frac{ i A_1}{Z_{,\alpha'}}\partial_{\alpha'}\frac{1}{\bar Z_{,\alpha'}}=\frac{1}{Z_{,\alpha'}}\partial_{\alpha'} Z_{tt}+(\bar Z_{tt}-i)\partial_{\alpha'}\frac{1}{\bar Z_{,\alpha'}};
\end{equation}
we apply $(I+\mathbb H)$ to \eqref{2369}, and use the fact $\partial_{\alpha'}\frac{1}{\bar Z_{,\alpha'}}=-\mathbb H\paren{\partial_{\alpha'}\frac{1}{\bar Z_{,\alpha'}}}$ to write the second term in a commutator form. We have
\begin{equation}\label{2370}
i\,(I+\mathbb H)\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}=(I+\mathbb H)\paren{\frac{1}{Z_{,\alpha'}}\partial_{\alpha'} Z_{tt}}-\bracket{\bar Z_{tt}, \mathbb H}\partial_{\alpha'}\frac{1}{\bar Z_{,\alpha'}}.
\end{equation}
For the first term on the right hand side, we commute out $\frac1{Z_{,\alpha'}}$, then use the fact $Z_t=-\mathbb H Z_t$ to write $(I+\mathbb H)Z_{tt}$ as a commutator (see \eqref{2354}),
\begin{equation}\label{2371}
(I+\mathbb H)\paren{\frac{1}{Z_{,\alpha'}}\partial_{\alpha'} Z_{tt}}=\bracket{\mathbb H, \frac{1}{Z_{,\alpha'}}}\partial_{\alpha'} Z_{tt}-\frac{1}{Z_{,\alpha'}}\partial_{\alpha'} [b,\mathbb H]Z_{t,\alpha'};
\end{equation}
we compute
\begin{equation}\label{2372}
\begin{aligned}
\frac{1}{Z_{,\alpha'}}\partial_{\alpha'} [b,\mathbb H]Z_{t,\alpha'}&=\frac{1}{Z_{,\alpha'}} b_{\alpha'} \mathbb H Z_{t,\alpha'}-\frac1{\pi i Z_{,\alpha'}} \int \frac{b(\alpha',t)-b({\beta'},t)}{(\alpha'-{\beta'})^2}Z_{t,{\beta'}}\,d{\beta'}\\&
=-b_{\alpha'} D_{\alpha'} Z_t-\bracket{\frac1{Z_{,\alpha'}}, b; Z_{t,\alpha'}}-\frac1{\pi i } \int \frac{b(\alpha',t)-b({\beta'},t)}{(\alpha'-{\beta'})^2}D_{\beta'} Z_{t}\,d{\beta'}\\&=
-b_{\alpha'} D_{\alpha'} Z_t-\bracket{\frac1{Z_{,\alpha'}}, b; Z_{t,\alpha'}}+[b,\mathbb H]\partial_{\alpha'} D_{\alpha'} Z_t-\mathbb H(b_{\alpha'} D_{\alpha'} Z_t),
\end{aligned}
\end{equation}
in the last step we performed integration by parts.
We have converted the right hand side of \eqref{2370} into the desired forms. Applying \eqref{dl21-inq}, \eqref{dl221}, \eqref{d32inq}, \eqref{q3} and \eqref{2359}, then taking the imaginary parts gives
\begin{equation}\label{2373}
\nm{\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2}-\frac{\partial_{\alpha'} \tilde {A_1}}{|\tilde Z_{,\alpha'}|^2}\circ l}_{L^2}\lesssim \mathcal F(t)^{\frac12}.
\end{equation}
In what follows we will use the following identities in the calculations: for $f,\ g,\ p$,
satisfying $g=\mathbb H g$ and $p=\mathbb H p$,
\begin{align}
[ f, \mathbb H] (gp)&=[ f g,\mathbb H]p=[\mathbb P_A( f g),\mathbb H] p;\label{H1}\\
[ f, \mathbb H]\partial_{\alpha'}(gp)&=[ f\partial_{\alpha'} g, \mathbb H] p+[ f g,\mathbb H]\partial_{\alpha'} p=[\mathbb P_A( f\partial_{\alpha'} g), \mathbb H] p+[\mathbb P_A( f g),\mathbb H]\partial_{\alpha'} p
\label{H2}
\end{align}
\eqref{H1} is obtained by using the fact that the product of holomorphic functions is holomorphic, and \eqref{comm-hilbe}; \eqref{H2} is a consequence of \eqref{H1} and the product rules.
\subsubsection{Controlling $\nm{(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-(\partial_t+\tilde b\partial_{\alpha'})(\tilde b_{\alpha'}- 2\Re \tilde D_{\alpha'}\tilde Z_t )\circ l}_{L^2}$ }\label{ddtba}
We begin with \eqref{dba-1},
\begin{equation}\label{2374}
\begin{aligned}
&(\partial_t+b\partial_{\alpha'})\paren{b_{\alpha'}-2\Re D_{\alpha'} Z_t}=\Re \paren{\bracket{ (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'} + \bracket{Z_{t}, \mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}}
\\&\qquad+\Re\paren{ \bracket{ \frac1{Z_{,\alpha'}}, \mathbb H} Z_{tt,\alpha'}+ \bracket{Z_{tt}, \mathbb H}\partial_{\alpha'} \frac1{Z_{,\alpha'}} -\bracket{ \frac1{Z_{,\alpha'}}, b; Z_{t,\alpha'} } -\bracket{ Z_{t}, b; \partial_{\alpha'} \frac1{Z_{,\alpha'}} } };
\end{aligned}
\end{equation}
observe that using Propositions~\ref{dl21}, \ref{d32} and \ref{denergy} we are able to get the desired estimates for the last four terms on the right hand side of \eqref{2374}. We need to rewrite the first two terms in order to apply the results in \S\ref{prepare}.
First, by \eqref{eq:dza} we have
\begin{equation}\label{2375}
(\partial_t+b\partial_{\alpha'})\paren{\frac1{Z_{,\alpha'}}}=\frac1{Z_{,\alpha'}} \paren{b_{\alpha'}-D_{\alpha'} Z_t};
\end{equation}
and by $\mathbb H Z_{t,\alpha'}=-Z_{t,\alpha'}$,
\begin{equation}\label{2376}
\bracket{ (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}=-(I+\mathbb H) \paren{(D_{\alpha'} Z_t )(b_{\alpha'}-D_{\alpha'} Z_t)};
\end{equation}
so we can conclude from \eqref{q3} and \eqref{2359} that
\begin{equation}\label{2377}
\nm{\bracket{ (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}, \mathbb H} Z_{t,\alpha'}-U_l \bracket{ (\partial_t+\tilde b\partial_{\alpha'})\frac1{\tilde Z_{,\alpha'}}, \mathbb H} \tilde Z_{t,\alpha'}}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
For the second term on the right hand side of \eqref{2374}, we use \eqref{b} to further rewrite \eqref{2375},
\begin{equation}\label{2378}
\begin{aligned}
&(\partial_t+b\partial_{\alpha'})\paren{\frac1{Z_{,\alpha'}}}=\frac1{Z_{,\alpha'}} \paren{\partial_{\alpha'}{\Re (I-\mathbb H)\frac {Z_t}{Z_{,\alpha'}} } -D_{\alpha'} Z_t}\\&=
\frac1{Z_{,\alpha'}} \paren{\mathbb P_A\frac {Z_{t,\alpha'}}{Z_{,\alpha'}}+\mathbb P_H\frac {\bar Z_{t,\alpha'}}{\bar Z_{,\alpha'}} +\Re (I-\mathbb H)\paren{Z_t\partial_{\alpha'}\frac1{Z_{,\alpha'}}} -D_{\alpha'} Z_t}\\&=
\frac1{Z_{,\alpha'}} \paren{\mathbb P_H\paren{\bar{D_{\alpha'} Z_t}-D_{\alpha'} Z_t}+\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}+\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}}.
\end{aligned}
\end{equation}
We substitute the right hand side of \eqref{2378} in the second term, $\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}$ of \eqref{2374}, term by term. For the first term we have, by \eqref{H2},
\begin{equation}\label{2379}
\begin{aligned}
& \bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\mathbb P_H\paren{\Im\bar{ D_{\alpha'} Z_t}} }=\bracket{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}, \mathbb H}\mathbb P_H\paren{\Im \bar{D_{\alpha'} Z_t}} \\&\qquad\qquad\qquad+\bracket{\mathbb P_A\paren{\frac{Z_{t} }{Z_{,\alpha'} }}, \mathbb H} \partial_{\alpha'}\mathbb P_H\paren{\Im\bar{D_{\alpha'} Z_t}}\\&
= (I-\mathbb H)\paren{ \mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}\mathbb P_H\paren{\Im\bar{D_{\alpha'} Z_t}}} +\bracket{b, \mathbb H} \partial_{\alpha'}\mathbb P_H\paren{\Im\bar{D_{\alpha'} Z_t}};
\end{aligned}
\end{equation}
in the last step we used \eqref{bb} and \eqref{comm-hilbe}. Therefore by \eqref{2382}-\eqref{2383}, \eqref{2359}, \eqref{q3} and \eqref{dl221},
\begin{equation}\label{2400}
\nm{\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\mathbb P_H\paren{\Im\bar{D_{\alpha'} Z_t}} }- U_l\bracket{\tilde Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{\tilde Z_{,\alpha'}}\mathbb P_H\paren{\Im \bar{\tilde D_{\alpha'} \tilde Z_t}} }}_{L^2}\lesssim \mathcal F(t)^{\frac12}.
\end{equation}
We substitute in the second term and rewrite further by \eqref{comm-hilbe},
\begin{equation}\label{2401}
\begin{aligned}
\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}&=\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \mathbb P_H\paren{ \frac1{Z_{,\alpha'}}\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}\\&=-\frac12\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \bracket{\frac1{Z_{,\alpha'}},\mathbb H}\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}
\end{aligned}
\end{equation}
This allows us to conclude, by \eqref{dl21-inq}, and \eqref{2390}, \eqref{2359},\footnote{For the estimate $\nm{\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}} }_{L^2}\le C(\mathfrak E(t))$, see \eqref{2043}-\eqref{2044}.}
\begin{equation}\label{2402}
\nm{\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}-U_l\bracket{\tilde Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{\tilde Z_{,\alpha'}}\mathbb P_A\paren{\tilde Z_{t} \partial_{\alpha'} \frac1{\tilde Z_{,\alpha'} }}}
}_{L^2}\lesssim \mathcal F(t)^{\frac12}.
\end{equation}
Now we substitute in the last term and rewrite further by \eqref{H2},
\begin{equation}\label{2403}
\begin{aligned}
&\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}}=\bracket{\mathbb P_A\paren{Z_{t}\partial_{\alpha'}\frac1{Z_{,\alpha'}}}, \mathbb H}\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}\\&\qquad\qquad\qquad+ \bracket{\mathbb P_A\paren{\frac{Z_{t}}{Z_{,\alpha'}}}, \mathbb H}\partial_{\alpha'}\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}\\&=(I-\mathbb H)\paren{ \mathbb P_A\paren{Z_{t}\partial_{\alpha'}\frac1{Z_{,\alpha'}}}\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}} } + \bracket{b, \mathbb H}\partial_{\alpha'}\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}.
\end{aligned}
\end{equation}
Again, this puts it in the right form to allow us to conclude, from \eqref{2382}-\eqref{2383}, \eqref{q3}, and \eqref{dl221}, that
\begin{equation}\label{2404}
\nm{\bracket{Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{Z_{,\alpha'}}\bar{\mathbb P_A\paren{Z_{t} \partial_{\alpha'} \frac1{Z_{,\alpha'} }}}}-U_l\bracket{\tilde Z_{t}, \mathbb H}\partial_{\alpha'} \paren{ \frac1{\tilde Z_{,\alpha'}}\bar{\mathbb P_A\paren{\tilde Z_{t} \partial_{\alpha'} \frac1{\tilde Z_{,\alpha'} }}}}
}_{L^2}\lesssim \mathcal F(t)^{\frac12}.
\end{equation}
This finishes the proof of
\begin{equation}\label{2405}
\nm{(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-(\partial_t+\tilde b\partial_{\alpha'})(\tilde b_{\alpha'}- 2\Re \tilde D_{\alpha'}\tilde Z_t )\circ l}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
\subsubsection{Controlling $ \nm{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}_{L^\infty}$} \label{dtati}
We begin with \eqref{at} and take a $\partial_t+b\partial_{\alpha'}$ derivative. We get
\begin{equation}\label{2406}
(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}=\frac{(\partial_t+b\partial_{\alpha'})^2A_1}{A_1}-\paren{\frac{(\partial_t+b\partial_{\alpha'})A_1}{A_1}}^2+(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t).
\end{equation}
We have controlled all the quantities on the right hand side of \eqref{2406} in \S\ref{proof}, except for $\|(\partial_t+b\partial_{\alpha'})^2A_1\|_{L^\infty}$.
We proceed from \eqref{dta1} and use \eqref{eq:c14} to compute,
\begin{equation}\label{2407}
\begin{aligned}
(\partial_t +b\partial_{\alpha'})^2 A_1&= -\Im \paren{\bracket{2\bracket{Z_{tt},\mathbb H}\bar Z_{tt,\alpha'}-[Z_{tt}, b; \bar Z_{t,\alpha'}]-[Z_{t}, b; \bar Z_{tt,\alpha'}]}}\\&
-\Im \paren{\bracket{Z_{ttt},\mathbb H}\bar Z_{t,\alpha'}+\bracket{Z_t,\mathbb H}\partial_{\alpha'} \bar Z_{ttt}- (\partial_t +b\partial_{\alpha'})[Z_t, b; \bar Z_{t,\alpha'}]},
\end{aligned}
\end{equation}
and we expand similarly
\begin{equation}\label{2408}
\begin{aligned}
(\partial_t +b\partial_{\alpha'})[Z_t, b; \bar Z_{t,\alpha'}]&=[Z_{tt}, b; \bar Z_{t,\alpha'}]+[Z_t, (\partial_t +b\partial_{\alpha'})b; \bar Z_{t,\alpha'}]+[Z_t, b; \bar Z_{tt,\alpha'}]\\&-\frac2{\pi i}\int \frac{(b(\alpha',t)-b(\beta',t))^2(Z_t(\alpha',t)-Z_t(\beta',t)) }{(\alpha'-\beta')^3} \bar Z_{t,\beta'}\,d\beta'.
\end{aligned}
\end{equation}
Applying the Cauchy-Schwarz inequality and Hardy's inequality, we get
\begin{equation}\label{2409}
\begin{aligned}
\nm{(\partial_t +b\partial_{\alpha'})^2 A_1}_{L^\infty}&\lesssim \nm{Z_{tt,\alpha'}}_{L^2}^2+ \nm{Z_{tt,\alpha'}}_{L^2}\nm{b_{\alpha'}}_{L^\infty}\nm{Z_{t,\alpha'}}_{L^2}+\nm{Z_{ttt,\alpha'}}_{L^2}\nm{Z_{t,\alpha'}}_{L^2}\\&+\nm{\partial_{\alpha'}(\partial_t +b\partial_{\alpha'})b}_{L^\infty}\nm{Z_{t,\alpha'}}_{L^2}^2+\nm{b_{\alpha'}}_{L^\infty}^2\nm{Z_{t,\alpha'}}_{L^2}^2.
\end{aligned}
\end{equation}
Observe that all quantities on the right hand side of \eqref{2409} are controlled in \S\ref{proof} and in
\eqref{2411}.
This shows that
\begin{equation}\label{2412}
\nm{(\partial_t +b\partial_{\alpha'})^2 A_1}_{L^\infty}\le C(\mathfrak E(t)),\qquad \nm{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}_{L^\infty}\le C(\mathfrak E(t)).
\end{equation}
\subsubsection{Controlling $ \nm{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}-(\partial_t+\tilde b\partial_{\alpha'})\paren{\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}} \circ \tilde h^{-1} }\circ l}_{L^2}$ } \label{ddtat}
By the expansions \eqref{dta1}, \eqref{2406}, and \eqref{2407}, \eqref{2408}, we see that by the results in \S\ref{prepare} and by \eqref{2405}, we can directly conclude the desired estimates for all but the following three:
\begin{itemize}
\item
$ \nm{\bracket{Z_{ttt},\mathbb H}\bar Z_{t,\alpha'}-(\bracket{\mathfrak{Z}_{ttt},\mathbb H}\bar {\mathfrak{Z}}_{t,\alpha'}) \circ l}_{L^2}$;
\item
$ \nm{\bracket{Z_t,\mathbb H}\partial_{\alpha'} \bar Z_{ttt}-(\bracket{\mathfrak{Z}_t,\mathbb H}\partial_{\alpha'} \bar {\mathfrak{Z}}_{ttt} ) \circ l}_{L^2}$;
\item
$ \nm{[Z_t, (\partial_t +b\partial_{\alpha'})b; \bar Z_{t,\alpha'}]-U_l [\mathfrak{Z}_t, (\partial_t +b\partial_{\alpha'})b; \bar {\mathfrak{Z}}_{t,\alpha'}]}_{L^2}$.
\end{itemize}
The first two items can be analyzed similarly as in \S\ref{ddtba}. We begin with $\bracket{Z_{ttt},\mathbb H}\bar Z_{t,\alpha'}$ and rewrite it using $\mathbb H\bar Z_{t,\alpha'}=\bar Z_{t,\alpha'}$, and substitute in by \eqref{eq:dztt}, \eqref{eq:dzt},
\begin{equation}\label{2413}
\bracket{Z_{ttt},\mathbb H}\bar Z_{t,\alpha'}=(I-\mathbb H)(Z_{ttt} \bar Z_{t,\alpha'})=(I-\mathbb H)\paren{iA_1 \bar {D_{\alpha'} Z_t}\paren{D_{\alpha'} Z_t+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}.
\end{equation}
From here we are ready to conclude from \eqref{q3}, \eqref{2359} that
\begin{equation}\label{2414} \nm{\bracket{Z_{ttt},\mathbb H}\bar Z_{t,\alpha'}-(\bracket{\mathfrak{Z}_{ttt},\mathbb H}\bar {\mathfrak{Z}}_{t,\alpha'}) \circ l}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
Now substitute in by \eqref{eq:dztt}, \eqref{eq:dzt}, and use the identity $\mathbb P_H+\mathbb P_A=I$, then use \eqref{H2} and \eqref{comm-hilbe},
\begin{equation}\label{2415}
\begin{aligned}
\bracket{Z_t,\mathbb H}\partial_{\alpha'} \bar Z_{ttt}& = -i\bracket{Z_t,\mathbb H}\partial_{\alpha'} \paren{\frac{1}{Z_{,\alpha'}}(\mathbb P_H+\mathbb P_A)\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} } }\\&=
-i\bracket{\mathbb P_A\paren{Z_t\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}},\mathbb H}\mathbb P_H\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }\\&-i\bracket{\mathbb P_A\paren{\frac{Z_t}{Z_{,\alpha'}}},\mathbb H}\partial_{\alpha'}\mathbb P_H\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }\\&
-i\bracket{Z_t,\mathbb H}\partial_{\alpha'} \mathbb P_H\paren{\frac{1}{Z_{,\alpha'}}\mathbb P_A\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} } }\\&
=-i(I-\mathbb H)\paren{\mathbb P_A\paren{Z_t\partial_{\alpha'}\frac{1}{Z_{,\alpha'}}}\mathbb P_H\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }}\\&-i\bracket{b,\mathbb H}\partial_{\alpha'}\mathbb P_H\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }\\&
-i\bracket{Z_t,\mathbb H}\partial_{\alpha'} \bracket{\mathbb P_H, \frac{1}{Z_{,\alpha'}}}\mathbb P_A\paren{ A_1\paren{\bar{D_{\alpha'} Z_t}+\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}} }.
\end{aligned}
\end{equation}
From here we can apply the Propositions in \S\ref{prepare} and \eqref{2382}-\eqref{2383}, \eqref{2390-1} to conclude
\begin{equation}\label{2416}
\nm{\bracket{Z_{t},\mathbb H}\partial_{\alpha'}\bar Z_{ttt}-(\bracket{\mathfrak{Z}_{t},\mathbb H}\partial_{\alpha'}\bar {\mathfrak{Z}}_{ttt}) \circ l}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
Now we consider the last term, $[Z_t, (\partial_t +b\partial_{\alpha'})b; \bar Z_{t,\alpha'}]$. The problem with this term is that we do not yet have the estimate $\nm{\partial_{\alpha'}(\partial_t +b\partial_{\alpha'})b-(\partial_{\alpha'}(\partial_t +\tilde b\partial_{\alpha'})\tilde b)\circ l}_{L^2}\lesssim \mathcal F(t)^{1/2}$ needed to apply Proposition~\ref{d32}. We will not prove this estimate. Instead, we will identify the trouble term in $\partial_{\alpha'}(\partial_t +b\partial_{\alpha'})b$, and handle it differently. We compute, by \eqref{eq:c7}, \eqref{eq:c1-1},
\begin{equation}\label{2417}
\begin{aligned}
\partial_{\alpha'}&(\partial_t +b\partial_{\alpha'})b=b_{\alpha'}^2+(\partial_t +b\partial_{\alpha'})b_{\alpha'}\\&=b_{\alpha'}^2+(\partial_t +b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-2\Re \{(D_{\alpha'} Z_t)^2\}+2\Re D_{\alpha'} Z_{tt};
\end{aligned}
\end{equation}
observe that we have the estimate for the first three terms. We expand the last term by substituting in \eqref{aa1},
\begin{equation}\label{2418}
2\Re D_{\alpha'} Z_{tt}= 2\Re\frac1{Z_{,\alpha'}}\partial_{\alpha'}{\frac{iA_1}{\bar Z_{,\alpha'}}}
=\partial_{\alpha'}\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}}-\frac{\partial_{\alpha'}\paren{iA_1}}{|Z_{,\alpha'}|^2}-2\frac{iA_1}{\bar Z_{,\alpha'}}\partial_{\alpha'}\frac1{Z_{,\alpha'}}.
\end{equation}
Substitute \eqref{2418} in \eqref{2417}, and then apply $\mathbb P_A$, writing the last term as a commutator; we get
\begin{equation}\label{2419}
\begin{aligned}
& \mathbb P_A \partial_{\alpha'} \paren{(\partial_t +b\partial_{\alpha'})b - \frac{iA_1}{|Z_{,\alpha'}|^2} }\\&=\mathbb P_A\paren{ b_{\alpha'}^2+(\partial_t +b\partial_{\alpha'})(b_{\alpha'}-2\Re D_{\alpha'} Z_t)-2\Re \{(D_{\alpha'} Z_t)^2\} -i\frac{\partial_{\alpha'} A_1}{|Z_{,\alpha'}|^2} } -\bracket{\frac{iA_1}{\bar Z_{,\alpha'}},\mathbb H}\partial_{\alpha'}\frac1{Z_{,\alpha'}};
\end{aligned}
\end{equation}
a direct application of the results in \S\ref{prepare}, \S\ref{ddtba} and \S\ref{da1z2} to the right hand side of \eqref{2419} yields
\begin{equation}\label{2420}
\nm{\mathbb P_A \partial_{\alpha'} \paren{(\partial_t +b\partial_{\alpha'})b - \frac{iA_1}{|Z_{,\alpha'}|^2} }-U_l \mathbb P_A \partial_{\alpha'} \paren{(\partial_t +\tilde b\partial_{\alpha'})\tilde b - \frac{i\tilde {A_1}}{|\mathfrak{Z}_{,\alpha'}|^2} }}_{L^2}\lesssim \mathcal F(t)^{1/2},
\end{equation}
which of course holds also for its real part. We know the real part
$$\Re\mathbb P_A \partial_{\alpha'} \paren{(\partial_t +b\partial_{\alpha'})b- \frac{iA_1}{|Z_{,\alpha'}|^2} }=\frac12\partial_{\alpha'} \paren{(\partial_t +b\partial_{\alpha'})b+\mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} }.
$$
We split $[Z_t, (\partial_t +b\partial_{\alpha'})b; \bar Z_{t,\alpha'}]$ in two:
\begin{equation}\label{2421}
[Z_t, (\partial_t +b\partial_{\alpha'})b; \bar Z_{t,\alpha'}]=[Z_t, (\partial_t +b\partial_{\alpha'})b+\mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}}; \bar Z_{t,\alpha'}]-[Z_t, \mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}]
\end{equation}
and we can conclude from Proposition~\ref{d32} for the first term that,\footnote{The fact that $\nm{\partial_{\alpha'}\paren{(\partial_t +b\partial_{\alpha'})b+\mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}}}}_{L^\infty}\le C(\mathfrak E(t))$ follows from \eqref{2039-1} and \eqref{2052}.}
\begin{equation}\label{2422}
\nm{ [Z_t, (\partial_t +b\partial_{\alpha'})b+\mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}}; \bar Z_{t,\alpha'}]-U_l[\mathfrak{Z}_t, (\partial_t +\tilde b\partial_{\alpha'})\tilde b+\mathbb H\paren{\frac{i\tilde {A_1}}{|\mathfrak{Z}_{,\alpha'}|^2}}; \bar {\mathfrak{Z}}_{t,\alpha'}] }_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
We are left with the term $[Z_t, \mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}] $. We will convert it to a form on which we can directly apply known results to conclude the desired estimate, $$\nm{ [Z_t, \mathbb H(\frac{iA_1}{|Z_{,\alpha'}|^2}); \bar Z_{t,\alpha'}]-U_l[\mathfrak{Z}_t, \mathbb H(\frac{i\tilde {A_1}}{|\mathfrak{Z}_{,\alpha'}|^2}); \bar {\mathfrak{Z}}_{t,\alpha'}] }_{L^2}\lesssim \mathcal F(t)^{1/2}.$$
We need the following basic identities: 1. for $f$, $g$ satisfying $f=\mathbb H f$, $g=\mathbb H g$,
\begin{equation}\label{2446-1}
[ f, g; 1]=0;
\end{equation}
2. for $f, p, g$, satisfying
$g=\mathbb Hg$ and $p=\mathbb Hp$,
\begin{equation}\label{2424}
[\bar p, \mathbb P_H f; g]=[\mathbb P_H f, \bar p g; 1]= [f, \mathbb P_A(\bar p g); 1].
\end{equation}
\eqref{2446-1} can be verified by \eqref{comm-hilbe} and integration by parts. \eqref{2424} can be verified by \eqref{2446-1}.
We split
\begin{equation}\label{2423}
\bracket{Z_t, \mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}}=\bracket{Z_t, 2\mathbb P_H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}}-\bracket{Z_t, \frac{iA_1}{|Z_{,\alpha'}|^2}; \bar Z_{t,\alpha'}}=2I-II.
\end{equation}
Applying \eqref{2424} to $I$ yields
\begin{equation}\label{2425}
I:=\bracket{Z_t, \mathbb P_H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}}
=\bracket{\frac{iA_1}{|Z_{,\alpha'}|^2}, \mathbb P_A(Z_t\bar Z_{t,\alpha'});1};
\end{equation}
substituting in \eqref{2425} the identity
\begin{equation}\label{2440}
\frac{iA_1(\alpha')}{|Z_{,\alpha'}|^2}-\frac{iA_1(\beta')}{|Z_{,\beta'}|^2}=\paren{\frac{iA_1(\alpha')}{Z_{,\alpha'}}-\frac{iA_1(\beta')}{Z_{,\beta'}}}\frac1{\bar Z_{,\beta'}}+\frac{iA_1(\alpha')}{Z_{,\alpha'}}\paren{\frac1{\bar Z_{,\alpha'}}-\frac1{\bar Z_{,\beta'}}}
\end{equation}
gives
\begin{equation}\label{2441}
I=\frac1{\pi i}\int\frac{\paren{\mathbb P_A(Z_t\bar Z_{t,\alpha'})(\alpha')-\mathbb P_A(Z_t\bar Z_{t,\beta'})(\beta')}\paren{\frac{iA_1(\alpha')}{Z_{,\alpha'}}-\frac{iA_1(\beta')}{Z_{,\beta'}} }\frac1{\bar Z_{,\beta'}}}{(\alpha'-\beta')^2}\,d\beta';
\end{equation}
here the second term disappears because of
the fact \eqref{2446-1}.
Using the identity
\begin{equation}\label{2442}
\frac{\mathbb P_A(Z_t\bar Z_{t,\alpha'})-\mathbb P_A(Z_t\bar Z_{t,\beta'})}{\bar Z_{,\beta'}}= \frac{\mathbb P_A(Z_t\bar Z_{t,\alpha'})}{\bar Z_{,\alpha'}}-\frac{\mathbb P_A(Z_t\bar Z_{t,\beta'})}{\bar Z_{,\beta'}}-\mathbb P_A(Z_t\bar Z_{t,\alpha'})\paren{\frac1 {\bar Z_{,\alpha'}}-\frac1{\bar Z_{,\beta'}}},
\end{equation}
we get
\begin{equation}\label{2443}
I=\bracket{\frac{\mathbb P_A(Z_t\bar Z_{t,\alpha'})}{\bar Z_{,\alpha'}}, \frac{iA_1}{Z_{,\alpha'}}; 1}-\mathbb P_A(Z_t\bar Z_{t,\alpha'})\bracket{\frac1 {\bar Z_{,\alpha'}}, \frac{iA_1}{Z_{,\alpha'}}; 1 };
\end{equation}
from here we are ready to conclude from Proposition~\ref{d33}. We now work on $II$. By \eqref{2440},
\begin{equation}\label{2444}
II:= \bracket{Z_t, \frac{iA_1}{|Z_{,\alpha'}|^2}; \bar Z_{t,\alpha'}}=\bracket{Z_t, \frac{iA_1}{Z_{,\alpha'}}; \bar {D_{\alpha'} Z_{t}}}+\frac{iA_1}{Z_{,\alpha'}} \bracket{Z_t, \frac{1}{\bar Z_{,\alpha'}}; \bar Z_{t,\alpha'}};
\end{equation}
the first term can be handled by Proposition~\ref{d33}. We focus on the second term. By a \eqref{2442} type identity, we have
\begin{equation}\label{2445}
\begin{aligned}
&\frac{1}{Z_{,\alpha'}} \bracket{Z_t, \frac{1}{\bar Z_{,\alpha'}}; \bar Z_{t,\alpha'}}=\bracket{\frac{Z_t}{Z_{,\alpha'}}, \frac{1}{\bar Z_{,\alpha'}}; \bar Z_{t,\alpha'}} -\bracket{\frac{1}{Z_{,\alpha'}}, \frac{1}{\bar Z_{,\alpha'}}; Z_t\bar Z_{t,\alpha'}}\\&\qquad=\bracket{\mathbb P_A\paren{\frac{Z_t}{Z_{,\alpha'}}}, \frac{1}{\bar Z_{,\alpha'}}; \bar Z_{t,\alpha'}} -\bracket{\frac{1}{Z_{,\alpha'}}, \frac{1}{\bar Z_{,\alpha'}}; \mathbb P_A\paren{Z_t\bar Z_{t,\alpha'}}}\\&\qquad\qquad+\bracket{\mathbb P_H\paren{\frac{Z_t}{Z_{,\alpha'}}}, \frac{1}{\bar Z_{,\alpha'}}; \bar Z_{t,\alpha'}} -\bracket{\frac{1}{Z_{,\alpha'}}, \frac{1}{\bar Z_{,\alpha'}}; \mathbb P_H\paren{Z_t\bar Z_{t,\alpha'}}}=I_1-I_2+I_3-I_4.
\end{aligned}
\end{equation}
The first two terms $I_1$, $I_2$ in \eqref{2445} can be handled by Propositions~\ref{d32} and \ref{d33}, because $\mathbb P_A \paren{\frac{Z_t}{Z_{,\alpha'}}}=\mathbb P_A b$. We need to manipulate further the last two terms. We begin with $I_4$, and use the first equality in \eqref{2424}, then use the identity $\mathbb P_H=-\mathbb P_A+I$,
\begin{equation}\label{2446}
\begin{aligned}
&I_4:= \bracket{ \frac{1}{\bar Z_{,\alpha'}}, \frac{1}{Z_{,\alpha'}}; \mathbb P_H\paren{Z_t\bar Z_{t,\alpha'}}}
=\bracket{\frac{1}{Z_{,\alpha'}}, \frac{ \mathbb P_H\paren{Z_t\bar Z_{t,\alpha'}}}{ \bar Z_{,\alpha'} } ;1 }\\&=
- \bracket{\frac{1}{Z_{,\alpha'}}, \frac{ \mathbb P_A\paren{Z_t\bar Z_{t,\alpha'}}}{ \bar Z_{,\alpha'} } ;1 }+\bracket{\frac{1}{Z_{,\alpha'}}, \mathbb P_A\paren{Z_t\mathbb P_H\bar {D_{\alpha'} Z_t}} ;1 }+\bracket{\frac{1}{Z_{,\alpha'}}, Z_t\mathbb P_A\bar {D_{\alpha'} Z_t} ;1 };
\end{aligned}
\end{equation}
because of the fact \eqref{2446-1}, the $\mathbb P_A$ can be inserted in the second term.
Now the first two terms on the right hand side of \eqref{2446} can be handled by Propositions~\ref{d33} and \ref{dhhalf2}; we need to work further on the last term, $$I_{43}:=\bracket{\frac{1}{Z_{,\alpha'}}, Z_t\mathbb P_A\bar {D_{\alpha'} Z_t} ;1 }.$$
We consider it together with $I_3$.
By \eqref{2424},
\begin{equation}\label{2447}
I_3: =\bracket{ \frac{1}{\bar Z_{,\alpha'}}, \mathbb P_H\paren{\frac{Z_t}{Z_{,\alpha'}}}; \bar Z_{t,\alpha'}}=\bracket{\frac{Z_t}{Z_{,\alpha'}}, \mathbb P_A(\bar {D_{\alpha'} Z_t}); 1 }.
\end{equation}
Summing up $I_3$ and $-I_{43}$ gives
\begin{equation}\label{2448}
I_3-I_{43}=\frac1{\pi i} \int\frac{ \paren{\frac{\mathbb P_A(\bar {D_{\alpha'} Z_t})}{Z_{,\beta'}}- \frac{\mathbb P_A(\bar {D_{\beta'} Z_t})}{Z_{,\alpha'}} }(Z_t(\alpha',t)-Z_t(\beta',t)) } {(\alpha'-\beta')^2}\,d\beta'=
-\mathbb P_A(\bar{D_{\alpha'} Z_t})\bracket{Z_t, \frac1{Z_{,\alpha'}};1};
\end{equation}
here we used \eqref{2446-1} in the second step.
Through the steps in \eqref{2423}--\eqref{2448}, we have converted $\bracket{Z_t, \mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}}$ into a sum of terms that can be handled with known results in \S\ref{prepare}-\S\ref{additional}. We can conclude now that
\begin{equation}\label{2449}
\nm{\bracket{Z_t, \mathbb H\paren{\frac{iA_1}{|Z_{,\alpha'}|^2}} ; \bar Z_{t,\alpha'}}-U_l\bracket{\mathfrak{Z}_t, \mathbb H\paren{\frac{i\tilde {A_1}}{|\mathfrak{Z}_{,\alpha'}|^2}} ; \bar {\mathfrak{Z}}_{t,\alpha'}}}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
Combining with \eqref{2422}, we obtain
\begin{equation}\label{2450}
\nm{\bracket{Z_t, (\partial_t+b\partial_{\alpha'})b ; \bar Z_{t,\alpha'}}-U_l\bracket{\mathfrak{Z}_t, (\partial_t+\tilde b\partial_{\alpha'})\tilde b ; \bar {\mathfrak{Z}}_{t,\alpha'}}}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
Now combining all the steps in \S\ref{ddtat}, we get
\begin{equation}\label{2451}
\nm{(\partial_t+b\partial_{\alpha'})\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}-(\partial_t+\tilde b\partial_{\alpha'})\paren{\frac{\tilde{\mathfrak{a}}_t}{\tilde{\mathfrak{a}}} \circ \tilde h^{-1} }\circ l}_{L^2}\lesssim \mathcal F(t)^{1/2}.
\end{equation}
Combining all the steps above we have \eqref{denergy-inq}. This finishes the proof of Proposition~\ref{denergy-est} and Theorem~\ref{unique}.
\end{proof}
\section{The proof of Theorem~\ref{th:local}}\label{proof2}
For the data given in \S\ref{id}, we construct the solution of the Cauchy problem in the class where $\mathcal E<\infty$ via a sequence of approximating solutions obtained by mollifying the initial data by the Poisson kernel, where we use Theorem~\ref{unique} and a compactness argument to prove the convergence of the sequence.
To prove the uniqueness of the solutions we use Theorem~\ref{unique}.
In what follows, we denote $z'=x'+iy'$, where $x', y'\in\mathbb R$; $K$ is the Poisson kernel as defined by \eqref{poisson}, and $f\ast g$ is the convolution in the spatial variable. For any function $\varphi$, $\varphi_\varepsilon(x)=\frac1{\varepsilon}\varphi(\frac x{\varepsilon})$ for $x\in \mathbb R$.
\subsection{Some basic preparations}\label{analysis-2}
Observe that in inequality \eqref{stability}, the stability is proved for the difference of the solutions in Lagrangian coordinates. We begin with some inequalities that will allow us to control the difference in Riemann mapping coordinates.
We have
\begin{lemma}\label{lemma4}
Let $l:\mathbb R\to \mathbb R$ be a diffeomorphism with $l-\alpha'\in H^1(\mathbb R)$. Then
1. for any $f\in \dot H^1(\mathbb R)$,
\begin{equation}\label{lemma4-inq}
\nm{f\circ l-f}_{\dot H^{1/2}}\lesssim \|\partial_{\alpha'} f\|_{L^2}\|l-\alpha'\|_{L^2}^{1/4}\|l_{\alpha'}-1\|_{L^2}^{1/4}
+
C(\nm{(l^{-1})_{\alpha'}}_{L^\infty}, \nm{l_{\alpha'}}_{L^\infty})\|l_{\alpha'}-1\|_{L^2}\|\partial_{\alpha'} f\|_{L^2}.
\end{equation}
2. for any function $b:\mathbb R\to\mathbb R$, with $b_{\alpha'}\in H^{1/2}(\mathbb R)\cap L^\infty(\mathbb R)$,
\begin{equation}\label{lemma4-inq2}
\|b_{\alpha'}\circ l-b_{\alpha'}\|_{L^2}^2\lesssim \|b_{\alpha'}\|_{L^2}\|b_{\alpha'}\|_{L^\infty}\|l_{\alpha'}\|_{L^\infty}^{1/2}\|l_{\alpha'}-1\|_{L^2}+\|b_{\alpha'}\|_{\dot H^{1/2}}\|b\circ l-b\|_{\dot H^{1/2}}+\|b_{\alpha'}\|_{L^\infty}^2\|l_{\alpha'}-1\|_{L^2}^2.
\end{equation}
\end{lemma}
\begin{proof}
We know
\begin{equation}\label{3000}
i \int\partial_{\alpha'}(f\circ l-f)\overline {(f\circ l-f)}\,d\alpha'=2\Re i\int\partial_{\alpha'} f\overline{(f-f\circ l)}\,d\alpha',
\end{equation}
so
\begin{equation}\label{3001}
\abs{i \int\partial_{\alpha'}(f\circ l-f)\overline {(f\circ l-f)}\,d\alpha'}\le 2\|\partial_{\alpha'} f\|_{L^2}\|f-f\circ l\|_{L^2}.
\end{equation}
Now
\begin{equation}\label{3002}
\int |f(\alpha')-f(l(\alpha'))|^2\,d\alpha'\le \|l-\alpha'\|^2_{L^\infty}\int |\mathcal M(\partial_{\alpha'} f)(\alpha')|^2\,d\alpha'\lesssim \|l-\alpha'\|^2_{L^\infty}\|\partial_{\alpha'} f\|^2_{L^2},
\end{equation}
where $\mathcal M$ is the Hardy-Littlewood maximal operator. Therefore by Sobolev embedding \eqref{eq:sobolev} and Lemma~\ref{hhalf1},
\begin{align}
\nm{f\circ l-f}_{\dot H^{1/2}}\lesssim \|\partial_{\alpha'} f\|_{L^2}\|l-\alpha'\|_{L^2}^{1/4}\|l_{\alpha'}-1\|_{L^2}^{1/4}
+\nm{\mathbb P_A(f\circ l-f)}_{\dot H^{1/2}},\label{3006}\\
\nm{f\circ l-f}_{\dot H^{1/2}}\lesssim \|\partial_{\alpha'} f\|_{L^2}\|l-\alpha'\|_{L^2}^{1/4}\|l_{\alpha'}-1\|_{L^2}^{1/4}
+\nm{\mathbb P_H(f\circ l-f)}_{\dot H^{1/2}}.\label{3007}
\end{align}
Now
$$2\mathbb P_A (f\circ l-f)=(2\mathbb P_A f)\circ l-2\mathbb P_A f+\mathcal Q_l(f\circ l).$$
Applying \eqref{3007} to $(\mathbb P_A f)\circ l-\mathbb P_A f$ and using \eqref{q1} gives
$$\|\mathbb P_A (f\circ l-f)\|_{\dot H^{1/2}}\lesssim \|\partial_{\alpha'} f\|_{L^2}\|l-\alpha'\|_{L^2}^{1/4}\|l_{\alpha'}-1\|_{L^2}^{1/4}
+
C(\nm{(l^{-1})_{\alpha'}}_{L^\infty}, \nm{l_{\alpha'}}_{L^\infty})\|l_{\alpha'}-1\|_{L^2}\|\partial_{\alpha'} f\|_{L^2}.
$$
This proves \eqref{lemma4-inq}.
To prove \eqref{lemma4-inq2}, we begin with
\begin{equation}\label{3019}
b_{\alpha'}\circ l-b_{\alpha'}=\partial_{\alpha'}(b\circ l-b)+b_{\alpha'}\circ l(1-l_{\alpha'});
\end{equation}
and by expanding the integral, we have
\begin{equation}\label{3018}
\|\partial_{\alpha'}(b\circ l-b)\|_{L^2}^2=\int (b_{\alpha'}\circ l)^2(l_{\alpha'}^2-l_{\alpha'})\,d\alpha'+2\int b_{\alpha'}\partial_{\alpha'}(b-b\circ l)\,d\alpha'.
\end{equation}
\eqref{lemma4-inq2} follows directly from the triangle, Cauchy-Schwarz and H\"older's inequalities.
\end{proof}
\begin{lemma}\label{lemma3}
For any $\varphi\in C^\infty(\mathbb R)$, with $\int\varphi(x)\,dx=1$ and $\int |x\varphi(x)|^2\,dx<\infty$, and for any $f\in \dot H^1(\mathbb R)$,
\begin{equation}\label{lemma3-inq}
\|\varphi_\varepsilon\ast f-f\|_{L^\infty}\lesssim \varepsilon^{1/2}\|\partial_x f\|_{L^2}\|x\varphi\|_{L^2}.
\end{equation}
\end{lemma}
The proof is straightforward by the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77}. We omit the details.
Let $Z, \mathfrak Z$ be solutions of the system \eqref{interface-r}-\eqref{interface-holo}-\eqref{a1}-\eqref{b}, satisfying the assumptions of Theorem~\ref{unique}, and let $l$ be given by \eqref{def-l}.
We know
$$(\partial_t+b\partial_{\alpha'})(l-\alpha')=U_{h^{-1}}(\tilde h_t-h_t)=\tilde b\circ l-b,$$ and $l(\alpha', 0)=\alpha'$ for $\alpha'\in\mathbb R$. By Lemma~\ref{basic-e2}, \begin{equation}\label{3003}
\frac d{dt}\|l(t)-\alpha'\|^2_{L^2}\le 2\|\tilde b\circ l(t)-b(t)\|_{L^2}\|l(t)-\alpha'\|_{L^2}+\|b_{\alpha'}(t)\|_{L^\infty}\|l(t)-\alpha'\|_{L^2}^2,
\end{equation}
and from \eqref{b} and Sobolev embedding,
\begin{equation}\label{3010}
\|b(t)\|_{H^1(\mathbb R)}\lesssim \|Z_{t}(t)\|_{H^1(\mathbb R)}\paren{\nm{\frac1{Z_{,\alpha'}}(t)-1}_{H^1(\mathbb R)}+1}.
\end{equation}
Therefore by Gronwall's inequality, we have
\begin{equation}\label{3004}
\sup_{[0, T]}\|l(t)-\alpha'\|_{L^2(\mathbb R)}\le C,
\end{equation}
where $C$ is a constant depending on $\sup_{[0, T]}\paren{\|Z_t(t)\|_{L^2}+\|\mathfrak Z_t(t)\|_{L^2}+\nm{\frac1{Z_{,\alpha'}}(t)-1}_{L^2} +\nm{\frac1{\mathfrak Z_{,\alpha'}}(t)-1}_{L^2}}$ and $\sup_{[0, T]} (\mathcal E(t)+\tilde{\mathcal E}(t) )$.
Let
\begin{equation}\label{3008}
\begin{aligned}
\nm{(Z-\mathfrak{Z})(0)}:=& \|\paren{\bar Z_t-\bar {\mathfrak{Z}}_t}(0)\|_{\dot{H}^{1/2}}+\|\paren{\bar Z_{tt}-\bar {\mathfrak{Z}}_{tt}}(0)\|_{\dot{H}^{1/2}}+\nm{\paren{\frac1{ Z_{,\alpha'}}-\frac 1{ \mathfrak{Z}_{,\alpha'}}}(0)}_{\dot{H}^{1/2}}\\&+\|\paren{D_{\alpha'} Z_t-(\tilde D_{\alpha'} \mathfrak{Z}_t)}(0)\|_{L^2}
+\nm{\paren{\frac1{ Z_{,\alpha'}}-\frac 1{ \mathfrak{Z}_{,\alpha'}}}(0)}_{L^\infty}.
\end{aligned}
\end{equation}
Applying \eqref{lemma4-inq} to $f=\bar {\mathfrak Z}_t$, $\frac1{\mathfrak Z_{,\alpha'}}-1$ and $\bar {\mathfrak Z}_{tt}$ and using \eqref{stability} gives
\begin{equation}\label{3005}
\begin{aligned}
\sup_{[0, T]}&\paren{\|\paren{\bar Z_t-\bar {\mathfrak{Z}}_t}(t)\|_{\dot{H}^{1/2}(\mathbb R)}+\nm{\paren{\frac1{ Z_{,\alpha'}}-\frac 1{ \mathfrak{Z}_{,\alpha'}}}(t)}_{\dot{H}^{1/2}(\mathbb R)}+\|\paren{\bar Z_{tt}-\bar {\mathfrak{Z}}_{tt}}(t)\|_{\dot{H}^{1/2}(\mathbb R)}}\\&\le C(\nm{(Z-\mathfrak{Z}) (0)}+ \nm{(Z-\mathfrak{Z}) (0)} ^{1/4});
\end{aligned}
\end{equation}
and applying \eqref{lemma4-inq2}, \eqref{lemma4-inq} to $\tilde b$ and using \eqref{3010}, \eqref{2393}, \eqref{2395}, Appendix~\ref{quantities} and \eqref{stability} yields
\begin{equation}\label{3020}
\sup_{[0, T]}\|(b_{\alpha'}-\tilde b_{\alpha'})(t)\|_{L^2(\mathbb R)} \le C(\nm{(Z-\mathfrak{Z}) (0)}+ \nm{(Z-\mathfrak{Z}) (0)} ^{1/8}),
\end{equation}
where $C$ is a constant depending on $\sup_{[0, T]}\paren{\|Z_t(t)\|_{L^2}+\|\mathfrak Z_t(t)\|_{L^2}+\nm{\frac1{Z_{,\alpha'}}(t)-1}_{L^2} +\nm{\frac1{\mathfrak Z_{,\alpha'}}(t)-1}_{L^2}}$ and $\sup_{[0, T]} (\mathcal E(t)+\tilde{\mathcal E}(t) )$. By Sobolev embedding \eqref{eq:sobolev},
\begin{equation}\label{3012}
\|l(\cdot,t)-\alpha'\|_{L^\infty(\mathbb R)}^2\lesssim \|l(\cdot,t)-\alpha'\|_{L^2(\mathbb R)}\|(l_{\alpha'}-1)(t)\|_{L^2(\mathbb R)},
\end{equation}
therefore by \eqref{3004}, \eqref{2345}-\eqref{2346}, and \eqref{stability},
\begin{equation}\label{3011}
\sup_{[0, T]}(\|h(t)-\tilde h(t)\|^2_{L^\infty(\mathbb R)}+\|h^{-1}(t)-\tilde h^{-1}(t)\|^2_{L^\infty(\mathbb R)})\le C \|(Z-\mathfrak{Z})(0)\|,
\end{equation}
where $C$ is a constant depending on $\sup_{[0, T]}\paren{\|Z_t(t)\|_{L^2}+\|\mathfrak Z_t(t)\|_{L^2}+\nm{\frac1{Z_{,\alpha'}}(t)-1}_{L^2} +\nm{\frac1{\mathfrak Z_{,\alpha'}}(t)-1}_{L^2}}$ and $\sup_{[0, T]} (\mathcal E(t)+\tilde{\mathcal E}(t) )$.
We also have, from Sobolev embedding \eqref{eq:sobolev}, \eqref{3010}, \eqref{3004}, \eqref{3012} and \eqref{stability}, that for $t\in [0, T]$,
\begin{equation}\label{3013}
\begin{aligned}
&\nm{(b-\tilde b)(t)}_{L^\infty(\mathbb R)}^2\lesssim \nm{(b-\tilde b\circ l)(t)}_{L^\infty(\mathbb R)}^2+\nm{(\tilde b\circ l-\tilde b)(t)}_{L^\infty(\mathbb R)}^2\\&\lesssim \nm{(b-\tilde b\circ l)(t)}_{L^2(\mathbb R)}\nm{\partial_{\alpha'}(b-\tilde b\circ l)(t)}_{L^2(\mathbb R)}+\nm{l(t)-\alpha'}_{L^\infty(\mathbb R)}^2\nm{\tilde b_{\alpha'}(t)}_{L^\infty(\mathbb R)}^2\\&\le C(\|(Z-\mathfrak{Z})(0)\|+\|(Z-\mathfrak{Z})(0)\|^2),
\end{aligned}
\end{equation}
where $C$ is a constant depending on $\sup_{[0, T]}\paren{\|Z_t(t)\|_{L^2}+\|\mathfrak Z_t(t)\|_{L^2}+\nm{\frac1{Z_{,\alpha'}}(t)-1}_{L^2} +\nm{\frac1{\mathfrak Z_{,\alpha'}}(t)-1}_{L^2}}$ and $\sup_{[0, T]} (\mathcal E(t)+\tilde{\mathcal E}(t) )$.
We have
\begin{lemma}\label{lemma5}
1. Assume that $f\in H^{1/2}(\mathbb R)$. Then
\begin{equation}\label{lemma5-inq1}
\|f\|_{L^4(\mathbb R)}^2\lesssim \|f\|_{L^2(\mathbb R)}\|f\|_{\dot H^{1/2}(\mathbb R)}.
\end{equation}
2. Let $\varphi\in C^\infty(\mathbb R)\cap L^q(\mathbb R)$, and $f\in L^p(\mathbb R)$, where $1\le p\le \infty$, $\frac1p+\frac1q=1$. For any $y'<0$, $x'\in\mathbb R$,
\begin{equation}\label{lemma5-inq2}
|\varphi_{y'}\ast f(x')| \le (-y')^{-1/p}\|\varphi\|_{L^q(\mathbb R)}\|f\|_{L^p(\mathbb R)}.
\end{equation}
\end{lemma}
\begin{proof}
By Theorem 1 on page 119 of \cite{s}, Plancherel's Theorem and the Cauchy-Schwarz inequality, we have, for any $f\in H^{1/2}(\mathbb R)$,
$$\|f\|_{L^4(\mathbb R)}^2\lesssim \|\partial_x^{1/4}f\|_{L^2(\mathbb R)}^2\lesssim \|f\|_{L^2(\mathbb R)}\|f\|_{\dot H^{1/2}(\mathbb R)}.$$
\eqref{lemma5-inq2} is a direct consequence of H\"older's inequality.
\end{proof}
We need in addition the following compactness results in the proof of the existence of solutions.
\begin{lemma}\label{lemma1} Let $\{f_n\}$ be a sequence of smooth functions on $\mathbb R\times [0, T]$. Let $1<p\le\infty$. Assume that there is a constant $C$, independent of $n$, such that
\begin{equation}
\sup_{[0, T]}\|f_n(t)\|_{L^\infty}+ \sup_{[0, T]}\|{\partial_x f_n}(t)\|_{L^p}+ \sup_{[0, T]}\|\partial_t f_n(t)\|_{L^\infty}\le C.
\end{equation}
Then there is a function $f$, continuous and bounded on $\mathbb R\times [0, T]$, and a subsequence $\{f_{n_j}\}$, such that $f_{n_j}\to f$ uniformly on compact subsets of $\mathbb R\times [0, T]$.
\end{lemma}
Lemma~\ref{lemma1} is an easy consequence of the Arzel\`a-Ascoli Theorem; we omit the proof.
\begin{lemma}\label{lemma2}
Assume that $f_n\to f$ uniformly on compact subsets of $\mathbb R\times [0, T]$, and assume there is a constant $C$, such that $\sup_{n}\|f_n\|_{L^\infty(\mathbb R\times [0, T])}\le C$. Then $K_{y'}\ast f_n$ converges uniformly to $K_{y'}\ast f$ on compact subsets of $\bar {\mathscr P}_-\times [0, T]$.
\end{lemma}
The proof follows easily by considering the convolution on the sets $|x'|<N$ and $|x'|\ge N$ separately. We omit the proof.
\begin{definition} We write
\begin{equation}\label{unif-notation}
f_n\Rightarrow f\qquad \text{on }E
\end{equation}
if $f_n$ converges uniformly to $f$ on compact subsets of $E$.
\end{definition}
\subsection{The proof of Theorem~\ref{th:local}} The uniqueness of the solution to the Cauchy problem is a direct consequence of \eqref{3005} and Definition~\ref{de}. In what follows we prove the existence of solutions to the Cauchy problem.
\subsubsection{The initial data}\label{ID}
Let $U(z', 0)$ be the initial fluid velocity in the Riemann mapping coordinate, and $\Psi(z',0):{\mathscr P}_-\to\Omega(0)$ be the Riemann mapping as given in \S\ref{id}, with $Z(\alpha', 0)=\Psi(\alpha', 0)$ the initial interface. We note that by the assumption
$$
\begin{aligned}&\sup_{y'<0}\nm{\partial_{z'}\paren{\frac1{\Psi_{z'}(z',0)}}}_{L^2(\mathbb R, dx')}\le \mathcal E_1(0)<\infty,\quad \sup_{y'<0}\nm{\frac1{\Psi_{z'}(z',0)}-1}_{L^2(\mathbb R, dx')}\le c_0<\infty,\\&
\sup_{y'<0}\|U_{z'}(z',0)\|_{L^2(\mathbb R, dx')}\le \mathcal E_1(0)<\infty\quad \text{and } \ \sup_{y'<0}\|U(z',0)\|_{L^2(\mathbb R, dx')}\le c_0<\infty,
\end{aligned}
$$
$ \frac1{\Psi_{z'}}(\cdot, 0)$, $U(\cdot, 0)$ can be extended continuously onto $\bar {\mathscr P}_-$.
So $Z(\cdot,0):= \Psi(\cdot+i0, 0)$ is continuously differentiable on the open set where $\frac1{\Psi_{z'}}(\alpha', 0)\ne 0$, and $\frac1{\Psi_{z'}}(\alpha', 0)=\frac1{Z_{,\alpha'}(\alpha', 0)}$ where
$\frac1{\Psi_{z'}}(\alpha', 0)\ne 0$.
By $\frac1{\Psi_{z'}}(\cdot, 0)-1\in H^1(\mathbb R)$ and Sobolev embedding, there is $N>0$ sufficiently large, such that for $|\alpha'|\ge N$, $|\frac1{\Psi_{z'}}(\alpha', 0)-1| \le 1/2$, so $Z=Z(\cdot, 0)$ is continuously differentiable on $(-\infty, -N)\cup (N, \infty)$, with $|Z_{,\alpha'}(\alpha', 0)|\le 2$, for all $ |\alpha'|\ge N$. Moreover, $Z_{,\alpha'}(\cdot, 0)-1\in H^1\{(-\infty, -N)\cup (N, \infty)\}$.
\subsubsection{The mollified data and the approximate solutions}\label{mo-ap}
Let $\epsilon>0$. We take
\begin{equation}\label{m-id}
\begin{aligned}
Z^\epsilon(\alpha', 0)&=\Psi(\alpha'-\epsilon i, 0),\quad \bar Z^\epsilon_t(\alpha', 0)=U(\alpha'-\epsilon i, 0),\quad
h^\epsilon(\alpha,0)=\alpha,\\& U^\epsilon(z',0)=U(z'-\epsilon i, 0),\quad \Psi^\epsilon(z',0)=\Psi(z'-\epsilon i,0).
\end{aligned}
\end{equation}
Notice that $U^\epsilon(\cdot, 0)$, $\Psi^\epsilon(\cdot, 0)$ are holomorphic on ${\mathscr P}_-$, $Z^\epsilon(0)$ satisfies \eqref{interface-holo} and $\bar Z^\epsilon_t(0)=\mathbb H \bar Z^\epsilon_t( 0)$. Let $Z_{tt}^\epsilon(0)$ be given by \eqref{interface-a1}.
It is clear that $Z^\epsilon(0)$, $ Z_t^\epsilon(0)$ and $Z_{tt}^\epsilon(0)$ satisfy the assumption of Theorem~\ref{blow-up}. Let
$Z^\epsilon(t):=Z^\epsilon(\cdot,t)$, $t\in [0, T_\epsilon^*)$, be the solution given by Theorem~\ref{blow-up}, with the maximal time of existence $T_\epsilon^*$, the diffeomorphism $h^\epsilon(t)=h^\epsilon(\cdot,t):\mathbb R\to\mathbb R$, the quantity $b^\epsilon:=h_t^\epsilon\circ (h^\epsilon)^{-1}$, and $z^\epsilon(\alpha,t)=Z^\epsilon(h^\epsilon(\alpha,t),t)$. We know $z_t^\epsilon(\alpha,t)=Z_t^\epsilon(h^\epsilon(\alpha,t),t)$.
Let $$U^\epsilon(x'+iy', t)=K_{y'}\ast \bar Z^\epsilon_t(x', t),\quad \Psi_{z'}^\epsilon(x'+iy', t) =K_{y'}\ast Z^\epsilon_{,\alpha'}(x',t),\quad \Psi^\epsilon(\cdot, t)$$
be the holomorphic functions on ${\mathscr P}_-$ with boundary values $\bar Z^\epsilon_t( t)$, $Z^\epsilon_{,\alpha'}(t)$ and $Z^\epsilon(t)$; we have
$$\frac1{\Psi_{z'}^\epsilon}(x'+iy', t) =K_{y'}\ast \frac1{Z^\epsilon_{,\alpha'}}(x',t)$$
by uniqueness.\footnote{By the maximum principle, $\big(K_{y'}\ast \frac1{Z^\epsilon_{,\alpha'}}\big)\big(K_{y'}\ast {Z^\epsilon_{,\alpha'}}\big)\equiv 1$ on ${\mathscr P}_-$. }
We denote the energy functional $\mathcal E$ for $\paren{Z^\epsilon(t), \bar Z_t^\epsilon(t)}$ by $\mathcal E^\epsilon(t)$ and the energy functional $\mathcal E_1$ for $(U^\epsilon(t),\Psi^\epsilon(t))$ by $\mathcal E^\epsilon_1(t)$. It is clear that $\mathcal E^\epsilon_1(0)\le \mathcal E_1(0)$, and $\|Z^\epsilon_t(0)\|_{L^2}+\nm{\frac1{Z^\epsilon_{,\alpha'}(0)}-1}_{L^2}\le c_0$ for all $\epsilon>0$; and by the continuity of $\frac1{\Psi_{z'}}(\cdot, 0)$ on $\bar{\mathscr P}_-$, there is an $\epsilon_0>0$, such that for all $0<\epsilon\le\epsilon_0$, $|\frac1{Z^\epsilon_{,\alpha'}}(0,0)|^2\le |\frac1{Z_{,\alpha'}}(0,0)|^2+1$.
By Theorem~\ref{blow-up},
Theorem~\ref{prop:a priori} and Proposition~\ref{prop:energy-eq}, there exists $T_0>0$, with $T_0$ depending only on $\mathcal E(0)=\mathcal E_1(0)+|\frac1{Z_{,\alpha'}}(0,0)|^2$,\footnote{By \eqref{domain-energy1}.} such that for all $0<\epsilon\le \epsilon_0$,
$T^*_\epsilon> T_0$
and
\begin{equation}\label{eq:400}
\sup_{[0, T_0]}\paren{\mathcal E_1^\epsilon(t)+\abs{\frac1{Z^\epsilon_{,\alpha'}}(0,t)}^2}=\sup_{[0, T_0]}\mathcal E^\epsilon(t)\le M\paren{\mathcal E(0) }<\infty;
\end{equation}
and by \eqref{interface-a1}, \eqref{2109} and \eqref{2110},
\begin{equation}\label{eq:401}
\sup_{[0, T_0]}\paren{\|Z^\epsilon_t(t)\|_{L^2}+\|Z^\epsilon_{tt}(t)\|_{L^2}+\nm{\frac1{Z^\epsilon_{,\alpha'}(t)}-1}_{L^2}}\le c\paren{c_0, \mathcal E(0)},
\end{equation}
so there is a constant $C_0:=C(c_0, \mathcal E(0))>0$, such that
\begin{equation}\label{eq:402}
\sup_{[0,T_0]}\{\sup_{y'<0}\|U^\epsilon(\cdot+iy', t)\|_{L^2(\mathbb R)}+\sup_{y'<0}\nm{\frac1{\Psi^\epsilon_{z'}(\cdot+iy',t)}-1}_{L^2(\mathbb R)}\}<C_0<\infty.
\end{equation}
\subsubsection{Uniformly bounded quantities}\label{ubound}
Besides \eqref{3005}, \eqref{3011} and \eqref{3013}, we would like to apply the compactness results Lemma~\ref{lemma1}, Lemma~\ref{lemma2} to pass to limits of some of the quantities. To this end we discuss the boundedness properties of these quantities. We begin with two inequalities.
We have, from \eqref{eq:dza},
\begin{equation}\label{eq:411}
\nm{(\partial_t+b^\epsilon\partial_{\alpha'}) \frac1{Z^\epsilon_{,\alpha'}}(t)}_{L^\infty}\le \nm{\frac1{Z^\epsilon_{,\alpha'}}(t)}_{L^\infty}(\|b^\epsilon_{\alpha'}(t)\|_{L^\infty}+\|D_{\alpha'}Z^\epsilon_{t}(t)\|_{L^\infty})
\end{equation}
and by \eqref{eq:dztt},
\begin{equation}\label{eq:4400}
\|(\partial_t+b^\epsilon\partial_{\alpha'})Z^\epsilon_{tt}(t)\|_{L^\infty}\le \|Z^\epsilon_{tt}(t)+i\|_{L^\infty}\paren{\|D_{\alpha'}Z^\epsilon_t(t)\|_{L^\infty}+\nm{\frac{\mathfrak a^\epsilon_t}{\mathfrak a^\epsilon}\circ (h^{\epsilon})^{-1}(t)}_{L^\infty}}.
\end{equation}
Let $0<\epsilon\le\epsilon_0$, and $M(\mathcal E(0))$, $c(c_0,\mathcal E(0))$, $C_0$ be the bounds in \eqref{eq:400}, \eqref{eq:401} and \eqref{eq:402}.
By Proposition~\ref{prop:energy-eq}, Sobolev embedding, Appendix~\ref{quantities} and \eqref{eq:411}, \eqref{3010}, the following quantities are uniformly bounded with bounds depending only on $M(\mathcal E(0))$, $c(c_0,\mathcal E(0))$, $C_0$:
\begin{equation}\label{eq:404}
\begin{aligned}
&\sup_{[0, T_0]}\|Z^\epsilon_t(t)\|_{L^\infty}, \quad\sup_{[0, T_0]}\|Z^\epsilon_{t,\alpha'}(t)\|_{L^2}, \quad\sup_{[0, T_0]}\|Z^\epsilon_{tt}(t)\|_{L^\infty}, \quad\sup_{[0, T_0]}\|Z^\epsilon_{tt,\alpha'}(t)\|_{L^2},\\&
\sup_{[0, T_0]}\nm{\frac1{Z^\epsilon_{,\alpha'}}(t)}_{L^\infty}, \quad\sup_{[0, T_0]}\nm{\partial_{\alpha'}\frac1{Z^\epsilon_{,\alpha'}}(t)}_{L^2}, \quad\sup_{[0, T_0]}\nm{(\partial_t+b^\epsilon\partial_{\alpha'})\frac1{Z^\epsilon_{,\alpha'}}(t)}_{L^\infty},\quad \sup_{[0, T_0]}\nm{b^\epsilon}_{L^\infty};
\end{aligned}
\end{equation}
and with a change of the variables and \eqref{2346}, \eqref{eq:4400} and Appendix~\ref{quantities},
\begin{equation}\label{eq:414}
\begin{aligned}
&\sup_{[0, T_0]}\|z^\epsilon_t(t)\|_{L^\infty}+ \sup_{[0, T_0]}\|z^\epsilon_{t\alpha}(t)\|_{L^2}+ \sup_{[0, T_0]}\|z^\epsilon_{tt}(t)\|_{L^\infty}\le C(c_0, \mathcal E(0)), \\&
\sup_{[0, T_0]}\nm{\frac{h^\epsilon_{\alpha}}{z^\epsilon_{\alpha}}(t)}_{L^\infty}+ \sup_{[0, T_0]}\nm{\partial_{\alpha}(\frac {h^\epsilon_{\alpha}}{z^\epsilon_{\alpha}})(t)}_{L^2}+ \sup_{[0, T_0]}\nm{\partial_t \frac{h^\epsilon_\alpha}{z^\epsilon_{\alpha}}(t)}_{L^\infty}\le C(c_0, \mathcal E(0)),\\&
\sup_{[0, T_0]}\|z^\epsilon_{tt}(t)\|_{L^\infty}+ \sup_{[0, T_0]}\|z^\epsilon_{tt\alpha}(t)\|_{L^2}+ \sup_{[0, T_0]}\|z^\epsilon_{ttt}(t)\|_{L^\infty}\le C(c_0, \mathcal E(0)).
\end{aligned}
\end{equation}
Observe that
$h^\epsilon(\alpha, t)-\alpha=\int_0^t h^\epsilon_t(\alpha, s)\,ds$, so
\begin{equation}\label{eq:425}
\sup_{\mathbb R\times [0, T_0]} |h^\epsilon(\alpha, t)-\alpha|\le T_0\sup_{[0, T_0]}\|h^\epsilon_t(t)\|_{L^\infty}\le T_0C(c_0,\mathcal E(0))<\infty.
\end{equation}
Furthermore by \eqref{2346} and Appendix~\ref{quantities}, there are $c_1, c_2>0$, depending only on $\mathcal E(0)$, such that
\begin{equation}\label{eq:416}
0<c_1\le\frac{h^\epsilon(\alpha,t)-h^\epsilon(\beta,t)}{\alpha-\beta}\le c_2<\infty,\qquad \forall \alpha,\beta\in \mathbb R, \ t\in [0, T_0].
\end{equation}
\subsubsection{Passing to the limit}
It is easy to check by Lemma~\ref{lemma3} and \eqref{hhalf-1}, \eqref{hhalf42} that the sequence $(Z^\epsilon(0), \bar Z_t^\epsilon(0))$ converges in the norm $\|\cdot\|$ defined by \eqref{3008}, so
by \eqref{3020}, \eqref{3011} and \eqref{3013}, there are functions $b$ and $h-\alpha$, continuous and bounded on $\mathbb R\times [0, T_0]$, with $h(\cdot, t):\mathbb R\to \mathbb R$ a homeomorphism for $t\in [0, T_0]$, $b_{\alpha'}\in L^\infty([0, T_0], L^2(\mathbb R))$, such that
\begin{equation}\label{3015}
\lim_{\epsilon\to 0} \paren{b^\epsilon,\, h^\epsilon,\, (h^\epsilon)^{-1}}=\paren{b,\, h, \, h^{-1}},\qquad \text{uniformly on } \mathbb R\times [0, T_0];
\end{equation}
\begin{equation}\label{3015-1}
\lim_{\epsilon\to 0} b^\epsilon_{\alpha'}=b_{\alpha'} \qquad \text{in } \ \ L^\infty([0, T_0], L^2(\mathbb R));
\end{equation}
and \eqref{eq:416} yields
\begin{equation}\label{eq:420}
0<c_1\le\frac{h(\alpha,t)-h(\beta,t)}{\alpha-\beta}\le c_2<\infty,\qquad \forall \alpha,\beta\in \mathbb R, \ t\in [0, T_0].
\end{equation}
By Lemma~\ref{lemma1}, \eqref{eq:414} and \eqref{3005}, there are functions $w$, $u$, $q:=w_t$, continuous and bounded on $\mathbb R\times [0, T_0]$, such that
\begin{equation}\label{eq:417}
z^\epsilon_t\rightarrow w,\quad \frac{h^\epsilon_{\alpha}}{z^\epsilon_\alpha}\rightarrow u,\quad z^\epsilon_{tt}\rightarrow q,\qquad \text{on } \mathbb R\times [0, T_0],
\end{equation}
as $ \epsilon\to 0$;
this gives
\begin{equation}\label{eq:419}
\bar Z_t^\epsilon\rightarrow w\circ h^{-1},\qquad \frac1{Z^\epsilon_{,\alpha'}}\rightarrow u\circ h^{-1}, \quad\bar Z_{tt}^\epsilon\rightarrow w_t\circ h^{-1},\qquad\text{on }\mathbb R\times [0, T_0]
\end{equation}
as $ \epsilon\to 0$. \eqref{3005} also gives that
\begin{equation}\label{3016}
\lim_{\epsilon\to 0} \paren{\bar Z_t^\epsilon,\, \frac1{Z^\epsilon_{,\alpha'}},\, \bar Z_{tt}^\epsilon}= \paren{w\circ h^{-1}, \, u\circ h^{-1},\, w_t\circ h^{-1}},\qquad\text{in } L^\infty([0, T_0], \dot H^{1/2}(\mathbb R)).
\end{equation}
Now
\begin{equation}\label{eq:421}
U^\epsilon(z',t)=K_{y'}\ast \bar Z_t^\epsilon,\qquad \frac1{\Psi^\epsilon_{z'}}(z',t)=K_{y'}\ast \frac1{Z^\epsilon_{,\alpha'}}.
\end{equation}
Let $U(z',t)= K_{y'}\ast (w\circ h^{-1})(x', t)$, $\Lambda(z',t)=K_{y'}\ast (u\circ h^{-1})(x',t)$.
By Lemma~\ref{lemma2},
\begin{equation}\label{eq:422}
U^\epsilon(z',t)\rightarrow U(z',t),\qquad \frac1{\Psi^\epsilon_{z'}}(z',t)\rightarrow \Lambda(z',t)\qquad\text{on }\bar {\mathscr P}_-\times [0, T_0];
\end{equation}
as $ \epsilon\to 0$. Moreover $U(\cdot,t)$, $\Lambda(\cdot,t)$ are holomorphic on ${\mathscr P}_-$ for each $t\in [0, T_0]$, and continuous on $\bar {\mathscr P}_-\times [0, T]$. Applying the Cauchy integral formula to the first limit in \eqref{eq:422} yields, as $ \epsilon\to 0$,
\begin{equation}\label{eq:430}
U^\epsilon_{z'}(z',t)\rightarrow U_{z'}(z',t) \qquad\text{on } {\mathscr P}_-\times [0, T_0].
\end{equation}
\subsubsection*{Step 1. The limit of $\Psi^\epsilon$}\label{step4.1}
We consider the limit of $\Psi^{\epsilon}$, as $\epsilon\to 0$. Let $0<\epsilon\le\epsilon_0$. We know
\begin{equation}\label{eq:423}
\begin{aligned}
z^\epsilon(\alpha,t)&=z^\epsilon(\alpha,0)+\int_0^t z_t^\epsilon(\alpha, s)\,ds\\&
=\Psi(\alpha-\epsilon i, 0)+\int_0^t z_t^\epsilon(\alpha, s)\,ds,
\end{aligned}
\end{equation}
therefore
\begin{equation}\label{eq:424}
\begin{aligned}
Z^\epsilon(\alpha',t)-Z^\epsilon(\alpha',0)&
=\Psi((h^\epsilon)^{-1}(\alpha',t)-\epsilon i, 0)-\Psi(\alpha'-\epsilon i, 0)\\&+\int_0^t z_t^\epsilon((h^\epsilon)^{-1}(\alpha',t) , s)\,ds.
\end{aligned}
\end{equation}
Let
\begin{equation}\label{eq:431}
W^\epsilon(\alpha',t):=\Psi((h^\epsilon)^{-1}(\alpha',t)-\epsilon i, 0)-\Psi(\alpha'-\epsilon i, 0)+\int_0^t z_t^\epsilon((h^\epsilon)^{-1}(\alpha',t) , s)\,ds.
\end{equation}
Observe $Z^\epsilon(\alpha',t)-Z^\epsilon(\alpha',0)$ is the boundary value of the holomorphic function
$\Psi^\epsilon(z', t)-\Psi^\epsilon(z', 0)$. By \eqref{eq:417} and \eqref{3015}, $\int_0^t z_t^\epsilon((h^\epsilon)^{-1}(\alpha',t), s)\,ds\to \int_0^t w(h^{-1}(\alpha',t), s)\,ds$ uniformly on compact subsets of $\mathbb R\times [0, T_0]$, and by \eqref{eq:414}, $\int_0^t z_t^\epsilon((h^\epsilon)^{-1}(\alpha',t), s)\,ds$ is continuous and uniformly bounded in $L^\infty(\mathbb R\times [0, T_0])$.
By the assumptions $\lim_{z'\to 0}\Psi_{z'}(z',0)=1$ and $\Psi(\cdot, 0)$ is continuous on $\bar {\mathscr P}_-$, and by \eqref{eq:425}, \eqref{3015}, $$\Psi((h^\epsilon)^{-1}(\alpha',t)-\epsilon i, 0)-\Psi(\alpha'-\epsilon i, 0)$$ is continuous and uniformly bounded in $L^\infty(\mathbb R\times [0, T_0])$ for $0<\epsilon<1$, and converges uniformly on compact subsets of $\mathbb R\times [0, T_0]$ as $\epsilon\to 0$. This gives\footnote{Because $W^\epsilon(\cdot,t)$ and $\partial_{\alpha'}W^\epsilon(\cdot,t):=Z^\epsilon_{,\alpha'}(\alpha',t)-Z^\epsilon_{,\alpha'}(\alpha',0)$ are continuous and bounded on $\mathbb R$, $\Psi^\epsilon_{z'}(z',t)-\Psi^\epsilon_{z'}(z',0)=K_{y'}\ast (\partial_{\alpha'}W^\epsilon)(x',t)=\partial_{z'}K_{y'}\ast W^\epsilon(x',t)$. \eqref{eq:426} holds because both sides of \eqref{eq:426} have the same value on $\partial{\mathscr P}_-$.}
\begin{equation}\label{eq:426}
\Psi^\epsilon(z', t)-\Psi^\epsilon(z', 0)=K_{y'}\ast W^\epsilon(x',t)
\end{equation}
and by Lemma~\ref{lemma2}, $\Psi^\epsilon(z', t)-\Psi^\epsilon(z', 0)$ converges uniformly on compact subsets of $\bar {\mathscr P}_-\times [0, T_0]$ to a function that is holomorphic on ${\mathscr P}_-$ for every $t\in [0, T_0]$ and continuous on $\bar {\mathscr P}_-\times [0, T_0]$. Therefore there is a function $\Psi(\cdot,t)$, holomorphic on ${\mathscr P}_-$ for every $t\in [0, T_0]$ and continuous on $\bar {\mathscr P}_-\times [0, T_0]$, such that
\begin{equation}\label{eq:427}
\Psi^\epsilon(z',t)\rightarrow \Psi(z',t)\qquad\text{on }\bar {\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$; as a consequence of the Cauchy integral formula,
\begin{equation}\label{eq:428}
\Psi^\epsilon_{z'}(z',t)\rightarrow \Psi_{z'}(z',t)\qquad\text{on } {\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$. Combining with \eqref{eq:422}, we have $\Lambda(z',t)=\frac1{\Psi_{z'}(z',t)}$, so $\Psi_{z'}(z',t)\ne 0$ for all $(z',t)\in \bar{\mathscr P}_-\times [0, T_0]$ and
\begin{equation}\label{eq:429}
\frac1{\Psi^\epsilon_{z'}(z',t)}\rightarrow \frac1{\Psi_{z'}(z',t)}\qquad\text{on }\bar {\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$.
Denote $Z(\alpha', t):=\Psi(\alpha', t)$, $\alpha'\in \mathbb R$, and $z(\alpha,t)=Z(h(\alpha,t),t)$. \eqref{eq:427} yields $Z^\epsilon(\alpha',t)\rightarrow Z(\alpha',t)$; together with \eqref{3015} it implies
$z^\epsilon(\alpha,t)\rightarrow z(\alpha,t)$
on $\mathbb R\times [0, T_0]$, as $\epsilon\to 0$.
Furthermore by \eqref{eq:423},
$$z(\alpha, t)=z(\alpha,0)+\int_0^t w(\alpha,s)\,ds,$$ so $w=z_t$. We denote $Z_t=z_t\circ h^{-1}$.
\subsubsection*{Step 2. The limits of $\Psi^\epsilon_t$ and $U_t^\epsilon$} Observe that by \eqref{eq:431}, for fixed $\epsilon>0$, $\partial_t W^\epsilon(\cdot, t)$ is a bounded function on $\mathbb R\times [0, T_0]$, so by \eqref{eq:426} and the dominated convergence theorem, $\Psi^\epsilon_t=K_{y'}\ast \partial_t W^\epsilon$, hence $\Psi_t^\epsilon$ is bounded on ${\mathscr P}_-\times [0, T_0]$.
Since for given $t\in [0, T_0]$ and $\epsilon>0$, $\frac{\Psi^\epsilon_t}{\Psi^\epsilon_{z'}}$ is bounded and holomorphic on ${\mathscr P}_-$, by \eqref{eq:271},
\begin{equation}\label{eq:432}
\frac{\Psi^\epsilon_t}{\Psi^\epsilon_{z'}}=K_{y'}\ast(\frac{Z^\epsilon_t}{Z^\epsilon_{,\alpha'}}-b^\epsilon).
\end{equation}
Therefore by \eqref{3015}, \eqref{eq:419} and Lemma~\ref{lemma2}, as $\epsilon\to 0$, $\frac{\Psi^\epsilon_t}{\Psi^\epsilon_{z'}}$ converges uniformly on compact subsets of $\bar {\mathscr P}_-\times [0, T_0]$ to a function that is holomorphic on ${\mathscr P}_-$ for each $t\in [0, T_0]$ and continuous on $\bar {\mathscr P}_-\times [0, T_0]$. Hence we can conclude from \eqref{eq:427} and \eqref{eq:428} that $\Psi$ is continuously differentiable and
\begin{equation}\label{eq:433}
\Psi^\epsilon_t\rightarrow \Psi_t\qquad \text{on }{\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$.
Now we consider the limit of $U^\epsilon_t$ as $\epsilon\to 0$. Since for fixed $\epsilon>0$,
$\partial_t Z_t^\epsilon=Z_{tt}^\epsilon-b^\epsilon Z_{t,\alpha'}^\epsilon$ is in $L^\infty(\mathbb R\times [0, T_0])$, by \eqref{eq:421} and the dominated convergence theorem,
\begin{equation}\label{eq:434}
U^\epsilon_t(z',t)=K_{y'}\ast \partial_t \bar Z_t^\epsilon=K_{y'}\ast (\bar Z_{tt}^\epsilon-b^\epsilon \bar Z_{t,\alpha'}^\epsilon).
\end{equation}
We rewrite
\begin{equation}\label{3017}
K_{y'}\ast (\bar Z_{tt}^\epsilon-b^\epsilon \bar Z_{t,\alpha'}^\epsilon)=K_{y'}\ast \bar Z_{tt}^\epsilon-(\partial_{x'} K_{y'})\ast (b^\epsilon \bar Z_{t}^\epsilon)+ K_{y'}\ast (b^\epsilon_{\alpha'} \bar Z_{t}^\epsilon).
\end{equation}
Now we apply \eqref{3015}, \eqref{3015-1}, \eqref{3016} and Lemma~\ref{lemma5} to each term on the right hand side of \eqref{3017}. We can conclude that
$U$ is continuously differentiable with respect to $t$,
and
\begin{equation}\label{eq:435}
U^\epsilon_t \rightarrow U_t\qquad \text{on }{\mathscr P}_-\times [0, T_0]
\end{equation}
as $ \epsilon\to 0$.
\subsubsection*{Step 3. The limit of $\mathfrak P^\epsilon$} By the calculation in \S\ref{general-soln}, we know
there is a real valued function $\mathfrak P^\epsilon$, such that
\begin{equation}\label{eq:437}
\Psi^\epsilon_{z'}
U^\epsilon_t- {\Psi^\epsilon_t}U^\epsilon_{z'}+{\bar U^\epsilon}U^\epsilon_{z'} -i\Psi^\epsilon_{z'}=-(\partial_{x'}-i\partial_{y'})\mathfrak P^\epsilon,\qquad\text{in }{\mathscr P}_-;
\end{equation}
and
\begin{equation}\label{eq:438}
\mathfrak P^\epsilon=\text{constant},\qquad \text{on }\partial {\mathscr P}_-.
\end{equation}
Without loss of generality we take the constant $=0$. We now explore a few other properties of $\mathfrak P^\epsilon$. Moving ${\bar U^\epsilon}U^\epsilon_{z'}=\partial_{z'}({\bar U^\epsilon}U^\epsilon)$ to the right of \eqref{eq:437} gives
\begin{equation}\label{eq:440}
\Psi^\epsilon_{z'}
U^\epsilon_t- {\Psi^\epsilon_t}U^\epsilon_{z'} -i\Psi^\epsilon_{z'}=-(\partial_{x'}-i\partial_{y'})(\mathfrak P^\epsilon+ \frac12 |U^\epsilon|^2),\qquad\text{in }{\mathscr P}_-.
\end{equation}
Applying $(\partial_{x'}+i\partial_{y'})=2\bar \partial_{z'}$ to \eqref{eq:440} yields
\begin{equation}\label{eq:439}
-\Delta (\mathfrak P^\epsilon+ \frac12 |U^\epsilon|^2) =0,\qquad\text{in } {\mathscr P}_-.
\end{equation}
So $\mathfrak P^\epsilon+ \frac12 |U^\epsilon|^2$ is a harmonic function on ${\mathscr P}_-$ with boundary value $\frac12 |\bar Z_t^\epsilon|^2$. On the other hand, it is easy to check that $\lim_{y'\to-\infty} (\Psi^\epsilon_{z'}
U^\epsilon_t- {\Psi^\epsilon_t}U^\epsilon_{z'} -i\Psi^\epsilon_{z'})=-i$. Therefore
\begin{equation}\label{eq:441}
\mathfrak P^\epsilon(z',t)=- \frac12 |U^\epsilon(z',t)|^2-y' + \frac12 K_{y'}\ast (|\bar Z_t^\epsilon|^2)(x',t).
\end{equation}
By \eqref{eq:422}, \eqref{eq:419} and Lemma~\ref{lemma2},
\begin{equation}\label{eq:442}
\mathfrak P^\epsilon(z',t)\rightarrow - \frac12 |U(z',t)|^2-y' + \frac12 K_{y'}\ast (|\bar Z_t|^2)(x',t),\qquad \text{on }\bar {\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$. We write $$\mathfrak P:=- \frac12 |U(z',t)|^2-y' + \frac12 K_{y'}\ast (|\bar Z_t|^2)(x',t).$$ We have $\mathfrak P$ is continuous on $\bar {\mathscr P}_-\times [0, T_0]$ with $\mathfrak P \in C([0, T_0], C^\infty({\mathscr P}_-))$, and
\begin{equation}\label{eq:443}
\mathfrak P=0,\qquad \text{on }\partial {\mathscr P}_-.
\end{equation}
Moreover, since $K_{y'}\ast (|\bar Z_t^\epsilon|^2)(x',t)$ is harmonic on ${\mathscr P}_-$, by the interior derivative estimate for harmonic functions and by \eqref{eq:422},
\begin{equation}\label{eq:4450}
(\partial_{x'}-i\partial_{y'})\mathfrak P^\epsilon\rightarrow (\partial_{x'}-i\partial_{y'})\mathfrak P\qquad\text{on }{\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$. And by \eqref{eq:441} and a similar argument as that in \eqref{eq:434}-\eqref{eq:435}, we have that $\mathfrak P$ is continuously differentiable with respect to $t$ and
\begin{equation}\label{eq:4451}
\partial_t \mathfrak P^\epsilon\rightarrow \partial_t\mathfrak P\qquad\text{on }{\mathscr P}_-\times [0, T_0]
\end{equation}
as $\epsilon\to 0$.
\subsubsection*{Step 4. Conclusion} We now sum up Steps 1--3. We have shown that there are
functions $\Psi(\cdot, t)$ and $U(\cdot, t)$, holomorphic on ${\mathscr P}_-$ for each fixed $t\in [0, T_0]$, continuous on $\bar {\mathscr P}_-\times [0, T_0]$, and continuously differentiable on ${\mathscr P}_-\times [0, T_0]$, with $ \frac1{\Psi_{z'}}$ continuous on $\bar {\mathscr P}_-\times [0, T_0]$,
such that
$\Psi^\epsilon \to \Psi$, $\frac1{\Psi^\epsilon_{z'}} \to \frac1{\Psi_{z'}}$, $ U^\epsilon\to U$ uniformly on compact subsets of $\bar {\mathscr P}_-\times [0, T_0]$, and $\Psi^\epsilon_t \to \Psi_t$, $\Psi^\epsilon_{z'}\to \Psi_{z'}$, $U^\epsilon_{z'}\to U_{z'}$ and $U^\epsilon_t\to U_t$ uniformly on compact subsets of ${\mathscr P}_-\times [0, T_0]$, as $\epsilon\to 0$. We have also shown that there is $\mathfrak P$,
continuous on $\bar {\mathscr P}_-\times [0, T_0]$ with $\mathfrak P=0$ on $\partial {\mathscr P}_-$, and continuously differentiable on ${\mathscr P}_-\times [0, T_0]$, such that $\mathfrak P^\epsilon\to \mathfrak P$ uniformly on compact subsets of $\bar{\mathscr P}_-\times [0, T_0]$, and $(\partial_{x'}-i\partial_{y'})\mathfrak P^\epsilon\to (\partial_{x'}-i\partial_{y'})\mathfrak P$ and $\partial_t\mathfrak P^\epsilon\to \partial_t \mathfrak P$ uniformly on compact subsets of $ {\mathscr P}_-\times [0, T_0]$, as $\epsilon\to 0$. Letting $\epsilon\to 0$ in equation \eqref{eq:437}, we have
\begin{equation}\label{eq:444}
\Psi_{z'}
U_t- {\Psi_t}U_{z'}+{\bar U}U_{z'} -i\Psi_{z'}=-(\partial_{x'}-i\partial_{y'})\mathfrak P,\qquad\text{on }{\mathscr P}_-\times [0, T_0].
\end{equation}
This shows that $(U, \Psi, \mathfrak P)$ is a solution of the Cauchy problem for the system \eqref{eq:273}-\eqref{eq:274}-\eqref{eq:275} in the sense of Definition~\ref{de}. Furthermore because of \eqref{eq:400}, \eqref{eq:402}, letting $\epsilon\to 0$ gives
\begin{equation}
\sup_{[0, T_0]}\mathcal E(t)\le M(\mathcal E(0))<\infty
\end{equation}
and
\begin{equation}
\sup_{[0,T_0]}\{\sup_{y'<0}\|U(x'+iy', t)\|_{L^2(\mathbb R, dx')}+\sup_{y'<0}\|\frac1{\Psi_{z'}(x'+iy',t)}-1\|_{L^2(\mathbb R,dx')}\}<C_0<\infty.
\end{equation}
By the argument at the end of \S\ref{general-soln}, if $\Sigma(t):=\{Z=\Psi(\alpha',t)\,|\, \alpha'\in\mathbb R\}$ is a Jordan curve, then $\Psi(\cdot, t):{\mathscr P}_-\to \Omega(t)$, where $\Omega(t)$ is the domain bounded from above by $\Sigma(t)$, is invertible; and the solution $(U,\Psi,\mathfrak P)$ gives rise to a solution $(\bar{\mathbf v}, P):= (U\circ \Psi^{-1}, \mathfrak P\circ \Psi^{-1})$ of the water wave equation \eqref{euler}. This finishes the proof for part 1 of Theorem~\ref{th:local}.
\subsection{The chord-arc interfaces} Now assume at time $t=0$, the interface $Z=\Psi(\alpha',0):=Z(\alpha',0)$, $\alpha'\in\mathbb R$ is chord-arc, that is, there is $0<\delta<1$, such that
$$\delta \int_{\alpha'}^{\beta'} |Z_{,\alpha'}(\gamma,0)|\,d\gamma\le |Z(\alpha', 0)-Z(\beta', 0)|\le \int_{\alpha'}^{\beta'} |Z_{,\alpha'}(\gamma,0)|\,d\gamma,\quad \forall -\infty<\alpha'< \beta'<\infty.$$
We want to show there is $T_1>0$, depending only on $\mathcal E(0)$, such that for $t\in [0, \min\{T_0, \frac\delta{T_1}\}]$, the interface $Z=Z(\alpha',t):=\Psi(\alpha',t)$ remains chord-arc. We begin with
\begin{equation}\label{eq:446}
- z^\epsilon(\alpha,t)+z^\epsilon(\beta,t)+z^\epsilon(\alpha,0)-z^\epsilon(\beta,0)=\int_0^t\int_{\alpha}^\beta z^\epsilon_{t\alpha}(\gamma,s)\,d\gamma\,ds
\end{equation}
for $\alpha<\beta$. Because
\begin{equation}\label{eq:447}
\frac d{dt} |z^\epsilon_{\alpha}|^2=2|z^\epsilon_{\alpha}|^2 \Re D_{\alpha}z^\epsilon_t,
\end{equation}
by Gronwall's inequality, for $t\in [0, T_0]$,
\begin{equation}\label{eq:448}
|z^\epsilon_{\alpha}(\alpha,t)|^2\le |z^\epsilon_{\alpha}(\alpha,0)|^2 e^{2\int_0^t |D_{\alpha}z^\epsilon_t(\alpha,\tau)|\,d\tau };
\end{equation}
so
\begin{equation}\label{eq:449}
|z^\epsilon_{t\alpha}(\alpha,t)|\le |z^\epsilon_{\alpha}(\alpha,0)| |D_{\alpha}z^\epsilon_t(\alpha,t)|e^{\int_0^t |D_{\alpha}z^\epsilon_t(\alpha,\tau)|\,d\tau };
\end{equation}
by Appendix~\ref{quantities}, \eqref{eq:400} and Proposition~\ref{prop:energy-eq},
\begin{equation}\label{eq:450}
\sup_{[0, T_0]} |z^\epsilon_{t\alpha}(\alpha,t)|\le |z^\epsilon_{\alpha}(\alpha,0)| C(\mathcal E(0));
\end{equation}
therefore for $t\in [0, T_0]$,
\begin{equation}\label{eq:451}
\int_0^{t}\int_{\alpha}^\beta |z^\epsilon_{t\alpha}(\gamma,s)|\,d\gamma\,ds\le t C(\mathcal E(0))\int_{\alpha}^\beta |z^\epsilon_{\alpha}(\gamma,0)| \,d\gamma.
\end{equation}
Now $z^\epsilon(\alpha,0)=Z^\epsilon(\alpha,0)=\Psi(\alpha-\epsilon i, 0)$. Because $Z_{,\alpha'}(\cdot,0)\in L^1_{loc}(\mathbb R)$, and $Z_{,\alpha'}(\cdot,0)-1\in H^1(\mathbb R\setminus [-N, N])$ for some large $N$,
\begin{equation}\label{eq:452}
\overline{\lim_{\epsilon\to 0}}\int_{\alpha}^\beta |\Psi_{z'}(\gamma-\epsilon i,0)|\,d\gamma\le \int_{\alpha}^\beta |Z_{,\alpha'}(\gamma, 0)|\,d\gamma.
\end{equation}
Let $\epsilon\to 0$ in \eqref{eq:446}. We get, for $t\in [0, T_0]$,
\begin{equation}\label{eq:453}
| |z(\alpha,t)-z(\beta,t)| -|Z(\alpha,0)-Z(\beta,0)||\le
tC(\mathcal E_1(0))\int_{\alpha}^\beta |Z_{,\alpha'}(\gamma, 0)|\,d\gamma,
\end{equation}
hence for all $\alpha<\beta$ and $0\le t\le \min\{T_0, \frac{\delta}{2C(\mathcal E(0))}\}$,
\begin{equation}\label{eq:454}
\frac12\delta \int_\alpha^\beta |Z_{,\alpha'}(\gamma,0)|\,d\gamma\le |z(\alpha,t)-z(\beta,t)|\le 2 \int_\alpha^\beta |Z_{,\alpha'}(\gamma,0)|\,d\gamma.
\end{equation}
This shows that for $0\le t\le \min\{T_0, \frac{\delta}{2C(\mathcal E(0))}\}$,
$z=z(\cdot,t)$ is absolutely continuous on compact intervals of $\mathbb R$, with $z_{\alpha}(\cdot,t)\in L_{loc}^1(\mathbb R)$, and is chord-arc. So $\Sigma(t)=\{z(\alpha,t) \ | \ \alpha\in \mathbb R\}$ is Jordan. This finishes the proof of Theorem~\ref{th:local}.
\betaegin{appendix}
\section{Basic analysis preparations}\label{ineq}
We present in this section some basic analysis results that will be used in this paper.
First we have, as a consequence of the fact that product of holomorphic functions is holomorphic, the following identity.
\betaegin{proposition}\label{prop:comm-hilbe}
Assume that $f,\ g ^{-1}n L^2(\mathbb R)$. Assume either both $f$, $g$ are holomorphic: $f=\mathbb H f$, $g=\mathbb H g$, or both are anti-holomorphic: $f=-\mathbb H f$, $g=-\mathbb H g$. Then
\betaegin{equation}\label{comm-hilbe}
[f, \mathbb H]g=0.
\end{equation}
\end{proposition}
Let $f :\mathbb R\tauo\mathbb C$ be a function in $\dot H^{1/2}(\mathbb R)$, we define
\betaegin{equation}\label{def-hhalf}
\|f\|_{\dot H^{1/2}}^2=\|f\|_{\dot H^{1/2}(\mathbb R)}^2:= ^{-1}nt i\mathbb H \partial_x f(x) \betaar f(x)\,dx=\fracrac1{2\pi}^{-1}int\fracrac{|f(x)-f(y)|^2}{(x-y)^2}\,dx\,dy.
\end{equation}
We have the following results on $\dot H^{1/2}$ norms and $\dot H^{1/2}$ functions.
\betaegin{lemma}\label{hhalf1}
For any function $f^{-1}n \dot H^{1/2}(\mathbb R)$,
\betaegin{align}
\nm{f}_{\dot H^{1/2}}^2&=\nm{\mathbb P_H f}_{\dot H^{1/2}}^2+\nm{\mathbb P_A f}_{\dot H^{1/2}}^2;\label{hhalfp}\\
^{-1}nt i\partial_\alphaa f \, \betaar f\,d\alphaa&=\nm{\mathbb P_H f}_{\dot H^{1/2}}^2-\nm{\mathbb P_A f}_{\dot H^{1/2}}^2.\label{hhalfn}
\end{align}
\end{lemma}
\begin{proof}
Lemma~\ref{hhalf1} is an easy consequence of the decomposition $f=\mathbb P_H f+\mathbb P_A f$, the
definition \eqref{def-hhalf} and the Cauchy integral theorem. We omit the details.
\end{proof}
\begin{proposition}\label{prop:Hhalf}
Let $f,\ g\in C^1(\mathbb R)$. Then
\begin{align}
\nm{fg}_{\dot H^{1/2}}\lesssim \|f\|_{L^\infty}\|g\|_{\dot H^{1/2}}+\|g\|_{L^\infty}\|f\|_{\dot H^{1/2}};\label{hhalf-1}\\
\|g\|_{\dot H^{1/2}}\lesssim \|f^{-1}\|_{L^\infty}(\|fg\|_{\dot H^{1/2}}+\|f'\|_{L^2}\|g\|_{L^2}).\label{Hhalf}
\end{align}
\end{proposition}
The proof is straightforward from the definition of $\dot H^{1/2}$ and Hardy's inequality. We omit the
details.
We next present the basic estimates we will rely on for this paper. We start with the Sobolev inequality.
\begin{proposition}[Sobolev inequality]\label{sobolev}
Let $f\in C^1_0(\mathbb R)$. Then
\begin{equation}\label{eq:sobolev}
\|f\|_{L^\infty}^2\le 2\|f\|_{L^2}\|f'\|_{L^2}.
\end{equation}
\end{proposition}
\begin{proposition}[Hardy's inequalities]
\label{hardy-inequality}
Let $f \in C^1(\mathbb R)$,
with $f' \in L^2(\mathbb R)$. Then there exists $C > 0$ independent of $f$ such that for any $x \in \mathbb R$,
\begin{equation}
\label{eq:77}
\abs{\int \frac{(f(x) - f(y))^2}{(x-y)^2} dy} \le C \nm{f'}_{L^2}^2;
\end{equation}
and
\begin{equation}
\label{eq:771}
\iint \frac{|f(x) - f(y)|^4}{|x-y|^4} \,dx dy \le C \nm{f'}_{L^2}^4.
\end{equation}
\end{proposition}
Let $ H\in C^1(\mathbb R; \mathbb R^d)$, $A_i\in C^1(\mathbb R)$, $i=1,\dots m$, and $F\in C^\infty(\mathbb R)$. Define
\begin{equation}\label{3.15}
C_1(A_1,\dots, A_m, f)(x)=\text{pv.}\int F\paren{\frac{H(x)-H(y)}{x-y}} \frac{\prod_{i=1}^m(A_i(x)-A_i(y))}{(x-y)^{m+1}}f(y)\,dy.
\end{equation}
\begin{proposition}\label{B1} There exist constants $c_1=c_1(F, \|H'\|_{L^\infty})$, $c_2=c_2(F, \|H'\|_{L^\infty})$, such that
1. For any $f\in L^2,\ A_i'\in L^\infty, \ 1\le i\le m, $
\begin{equation}\label{3.16}
\|C_1(A_1,\dots, A_m, f)\|_{L^2}\le c_1\|A_1'\|_{L^\infty}\dots\|A_m'\|_{L^\infty}\|f\|_{L^2}.
\end{equation}
2. For any $ f\in L^\infty, \ A_i'\in L^\infty, \ 2\le i\le m,\ A_1'\in L^2$,
\begin{equation}\label{3.17}
\|C_1(A_1,\dots, A_m, f)\|_{L^2}\le c_2\|A_1'\|_{L^2}\|A'_2\|_{L^\infty}\dots\|A_m'\|_{L^\infty}\|f\|_{L^\infty}.
\end{equation}
\end{proposition}
\eqref{3.16} is a result of Coifman, McIntosh and Meyer \cite{cmm}. \eqref{3.17} is a consequence of the Tb Theorem; a proof is given in \cite{wu3}.
Let $H$, $A_i$, $F$ satisfy the same assumptions as in \eqref{3.15}. Define
\begin{equation}\label{3.19}
C_2(A, f)(x)=\int F\paren{\frac{H(x)-H(y)}{x-y}}\frac{\prod_{i=1}^m(A_i(x)-A_i(y))}{(x-y)^m}\partial_y f(y)\,dy.
\end{equation}
We have the following inequalities.
\begin{proposition}\label{B2} There exist constants $c_3$, $c_4$ and $c_5$, depending on $F$ and $\|H'\|_{L^\infty}$, such that
1. For any $f\in L^2,\ A_i'\in L^\infty, \ 1\le i\le m, $
\begin{equation}\label{3.20}
\|C_2(A, f)\|_{L^2}\le c_3\|A_1'\|_{L^\infty}\dots\|A_m'\|_{L^\infty}\|f\|_{L^2}.
\end{equation}
2. For any $ f\in L^\infty, \ A_i'\in L^\infty, \ 2\le i\le m,\ A_1'\in L^2$,
\begin{equation}\label{3.21}
\|C_2(A, f)\|_{L^2}\le c_4\|A_1'\|_{L^2}\|A'_2\|_{L^\infty}\dots\|A_m'\|_{L^\infty}\|f\|_{L^\infty}.\end{equation}
3. For any $f'\in L^2, \ A_1\in L^\infty,\ \ A_i'\in L^\infty, \ 2\le i\le m, $
\begin{equation}\label{3.22}
\|C_2(A, f)\|_{L^2}\le c_5\|A_1\|_{L^\infty}\|A'_2\|_{L^\infty}\dots\|A_m'\|_{L^\infty}\|f'\|_{L^2}.\end{equation}
\end{proposition}
Using integration by parts, the operator $C_2(A, f)$ can be easily converted into a sum of operators of the form $C_1(A,f)$. \eqref{3.20} and \eqref{3.21} follow from \eqref{3.16} and \eqref{3.17}. To get \eqref{3.22}, we rewrite $C_2(A,f)$ as the difference of the two terms $A_1C_1(A_2,\dots, A_m, f')$ and $C_1(A_2,\dots, A_m, A_1f')$ and apply \eqref{3.16} to each term.
\begin{proposition}\label{prop:half-dir}
There exists a constant $C > 0$ such that for any $f, g, m$ smooth and decaying fast at infinity,
\begin{align}
&\nm{[f,\mathbb{H}] g}_{L^2} \le C \nm{f}_{\dot{H}^{1/2}}\nm{g}_{L^2};\label{eq:b10}\\&
\nm{[f,\mathbb{H}] g}_{L^\infty} \le C \nm{f'}_{L^2} \nm{g}_{L^2}; \label{eq:b13}\\&
\nm{[f,\mathbb{H}] \partial_{\alpha'} g}_{L^2} \le C \nm{f'}_{L^2} \nm{g}_{\dot{H}^{1/2}};\label{eq:b11}\\&
\nm{[f, m; \partial_{\alpha'} g]}_{L^2}\le C \nm{f'}_{L^2} \nm{m'}_{L^\infty}\nm{g}_{\dot{H}^{1/2}}.\label{eq:b111}
\end{align}
\end{proposition}
Here $[f,g;h]$ is as given in \eqref{eq:comm}. \eqref{eq:b10} is straightforward by the Cauchy-Schwarz inequality and the definition of $\dot H^{1/2}$. \eqref{eq:b13} is straightforward from the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77}. \eqref{eq:b11} and \eqref{eq:b111} follow from integration by parts,
then the Cauchy-Schwarz inequality, Hardy's inequality \eqref{eq:77}, and the definition of $\dot H^{1/2}$.
\begin{proposition}
There exists a constant $C > 0$ such that for any $f, \ g, \ h$, smooth and decaying fast at spatial infinity,
\begin{align}
\label{eq:b12}
\nm{[f,g;h]}_{L^2} &
\le C \nm{f'}_{L^2} \nm{g'}_{L^2} \nm{h}_{L^2};\\ \label{eq:b15}
\nm{[f,g;h]}_{L^\infty}&\le C \nm{f'}_{L^2} \nm{g'}_{L^\infty} \nm{h}_{L^2};\\
\label{eq:b16}
\nm{[f,g;h]}_{L^\infty}&\le C \nm{f'}_{L^2} \nm{g'}_{L^2} \nm{h}_{L^\infty}.
\end{align}
\end{proposition}
\eqref{eq:b12} follows directly from the Cauchy-Schwarz inequality, Hardy's inequality \eqref{eq:77} and the Fubini theorem; \eqref{eq:b15} follows from the Cauchy-Schwarz inequality, Hardy's inequality \eqref{eq:77} and the mean value theorem; \eqref{eq:b16} follows from the Cauchy-Schwarz inequality and Hardy's inequality \eqref{eq:77}.
\section{Identities}\label{iden}
\subsection{Commutator identities}\label{comm-iden}
We include here various commutator identities that are necessary for the proofs. The first set, \eqref{eq:c1}-\eqref{eq:c4}, has already appeared in \cite{kw}.
\begin{align}
\label{eq:c1}
[\partial_t,D_\alpha] &= - (D_\alpha z_t) D_\alpha;\\
\label{eq:c2}
\bracket{\partial_t,D_\alpha^2} &
= -2(D_\alpha z_t) D_\alpha^2 - (D_\alpha^2 z_t) D_\alpha;\\
\label{eq:c3}
\bracket{\partial_t^2,D_\alpha} &=(-D_\alpha z_{tt}) D_\alpha + 2(D_\alpha z_t)^2 D_\alpha - 2(D_\alpha z_t) D_\alpha \partial_t;\\
\label{eq:c5}
\bracket{\partial_t^2 + i \mathfrak{a} \partial_\alpha, D_\alpha} &= (-2D_\alpha z_{tt}) D_\alpha -2(D_\alpha z_t)\partial_t D_\alpha;
\end{align}
and
\begin{equation} \label{eq:c4}
\begin{aligned}
\bracket{(\partial_t^2 + i \mathfrak{a} \partial_\alpha),D_\alpha^2} & =(-4D_\alpha z_{tt}) D_\alpha^2 + 6(D_\alpha z_t)^2 D_\alpha^2 - (2D_\alpha^2 z_{tt}) D_\alpha \\&+ 6(D_\alpha z_t) (D_\alpha^2 z_t) D_\alpha - 2(D_\alpha^2 z_t) D_\alpha \partial_t - 4(D_\alpha z_t) D_\alpha^2 \partial_t.
\end{aligned}
\end{equation}
Let $$\mathcal P:=(\partial_t+b\partial_{\alpha'})^2+i\mathcal A\partial_{\alpha'}.$$ Notice that $U_h^{-1}\partial_t U_h=\partial_t+b\partial_{\alpha'}$, $U_h^{-1}D_\alpha U_h=D_{\alpha'}$ and
$\mathcal P=U_h^{-1}(\partial_t^2+i\mathfrak a\partial_\alpha)U_h$; we precompose equations \eqref{eq:c1}-\eqref{eq:c4} with $h^{-1}$, and get
\begin{align}
\label{eq:c1-1}
[\partial_t+b\partial_{\alpha'},D_{\alpha'}] &= - (D_{\alpha'} Z_t) D_{\alpha'};\\
\label{eq:c2-1}
\bracket{\partial_t+b\partial_{\alpha'},D_{\alpha'}^2} &
= -2(D_{\alpha'} Z_t) D_{\alpha'}^2 - (D_{\alpha'}^2 Z_t) D_{\alpha'};\\
\label{eq:c3-1}
\bracket{(\partial_t+b\partial_{\alpha'})^2,D_{\alpha'}} &=(-D_{\alpha'} Z_{tt}) D_{\alpha'} + 2(D_{\alpha'} Z_t)^2 D_{\alpha'} - 2(D_{\alpha'} Z_t) D_{\alpha'} (\partial_t+b\partial_{\alpha'});\\
\label{eq:c5-1}
\bracket{\mathcal P, D_{\alpha'}} &= (-2D_{\alpha'} Z_{tt}) D_{\alpha'} -2(D_{\alpha'} Z_t)(\partial_t+b\partial_{\alpha'}) D_{\alpha'};
\end{align}
and
\begin{equation} \label{eq:c4-1}
\begin{aligned}
\bracket{\mathcal P,D_{\alpha'}^2} & =(-4D_{\alpha'} Z_{tt}) D_{\alpha'}^2 + 6(D_{\alpha'} Z_t)^2 D_{\alpha'}^2 - (2D_{\alpha'}^2 Z_{tt}) D_{\alpha'} \\&+ 6(D_{\alpha'} Z_t) (D_{\alpha'}^2 Z_t) D_{\alpha'} - 2(D_{\alpha'}^2 Z_t) D_{\alpha'} (\partial_t+b\partial_{\alpha'}) - 4(D_{\alpha'} Z_t) D_{\alpha'}^2 (\partial_t+b\partial_{\alpha'}).
\end{aligned}
\end{equation}
We need some additional commutator identities. In general, for operators $A, B$ and $C$,
\begin{equation}\label{eq:c12}
[A, BC^k]=[A, B]C^k+ B[A, C^k]=[A, B]C^k+ \sum_{i=1}^k BC^{i-1}[A, C]C^{k-i}.
\end{equation}
We have
\begin{align}
\label{eq:c7}
[\partial_t+b\partial_{\alpha'}, \partial_{\alpha'}]f&=-b_{\alpha'}\partial_{\alpha'} f;\\
\label{eq:c8}[(\partial_t+b\partial_{\alpha'})^2, \partial_{\alpha'}]f&=-(\partial_t+b\partial_{\alpha'})(b_{\alpha'}\partial_{\alpha'} f)-b_{\alpha'}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})f;\\
\label{eq:c9}[i\mathcal A\partial_{\alpha'},\partial_{\alpha'}]f&=-i\mathcal A_{\alpha'} \partial_{\alpha'} f;\\
\label{eq:c10}
[\mathcal P, \partial_{\alpha'}]f&=-(\partial_t+b\partial_{\alpha'})(b_{\alpha'}\partial_{\alpha'} f)-b_{\alpha'}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})f-i\mathcal A_{\alpha'} \partial_{\alpha'} f;\\
\label{eq:c11}[\partial_t+b\partial_{\alpha'}, \partial_{\alpha'}^2]f&=-\partial_{\alpha'}(b_{\alpha'}\partial_{\alpha'} f)-b_{\alpha'}\partial_{\alpha'}^2 f.
\end{align}
Here \eqref{eq:c8}, \eqref{eq:c11} are obtained by \eqref{eq:c12} and \eqref{eq:c7}.
We also have
\begin{equation}\label{eq:c21}
[\partial_t+b\partial_{\alpha'}, \mathbb H]=[b,\mathbb H]\partial_{\alpha'}.
\end{equation}
We compute
$$
\begin{aligned}
(\partial_t+b\partial_{\alpha'}) [f,\mathbb H]g&=[(\partial_t+b\partial_{\alpha'}) f,\mathbb H]g+\bracket{f, \bracket{\partial_t+b\partial_{\alpha'}, \mathbb H}}g+ [f,\mathbb H](\partial_t+b\partial_{\alpha'}) g\\&
=[(\partial_t+b\partial_{\alpha'}) f,\mathbb H]g+\bracket{f, \bracket{b, \mathbb H}\partial_{\alpha'}}g+ [f,\mathbb H](\partial_t+b\partial_{\alpha'}) g
\\&
=[(\partial_t+b\partial_{\alpha'}) f,\mathbb H]g+ [f,\mathbb H]\paren{(\partial_t+b\partial_{\alpha'}) g+b_{\alpha'} g}\\&
\qquad+\bracket{f, \bracket{b, \mathbb H}}\partial_{\alpha'} g -[b,\mathbb H](f_{\alpha'} g)-[f,\mathbb H](b_{\alpha'} g).
\end{aligned}
$$
It can be checked easily, by integration by parts, that
$$\bracket{f, \bracket{b, \mathbb H}}\partial_{\alpha'} g -[b,\mathbb H](f_{\alpha'} g)-[f,\mathbb H](b_{\alpha'} g)=-[f,b;g].$$
So
\begin{equation}\label{eq:c14'}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})& [f,\mathbb H]g=[(\partial_t+b\partial_{\alpha'}) f,\mathbb H]g\\&+ [f,\mathbb H]((\partial_t+b\partial_{\alpha'}) g+b_{\alpha'} g)-[f, b; g];
\end{aligned}
\end{equation}
with an application of \eqref{eq:c7} this yields
\begin{equation}\label{eq:c14}
\begin{aligned}
(\partial_t+b\partial_{\alpha'})& [f,\mathbb H]\partial_{\alpha'}g=
[(\partial_t+b\partial_{\alpha'}) f,\mathbb H]\partial_{\alpha'}g\\&+ [f,\mathbb H]\partial_{\alpha'}(\partial_t+b\partial_{\alpha'}) g-[f, b; \partial_{\alpha'}g].
\end{aligned}
\end{equation}
We compute, by \eqref{eq:c21}, \eqref{eq:c12} and \eqref{eq:c14}, that
\begin{equation}\label{eq:c23}
\begin{aligned}
\bracket{(\partial_t+b\partial_{\alpha'})^2, \mathbb H}f&=(\partial_t+b\partial_{\alpha'})\bracket{b,\mathbb H}\partial_{\alpha'} f+\bracket{b,\mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})f
\\&=\bracket{(\partial_t+b\partial_{\alpha'})b,\mathbb H}\partial_{\alpha'} f+2\bracket{b,\mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})f-[b,b; \partial_{\alpha'} f].
\end{aligned}
\end{equation}
We also have
\begin{equation}\label{eq:c24}
\bracket{i\mathcal A\partial_{\alpha'}, \mathbb H}f=\bracket{i\mathcal A,\mathbb H}\partial_{\alpha'} f.
\end{equation}
Summing up \eqref{eq:c23} and \eqref{eq:c24} yields
\begin{equation}\label{eq:c25}
\bracket{\mathcal P, \mathbb H}f=\bracket{(\partial_t+b\partial_{\alpha'})b,\mathbb H}\partial_{\alpha'} f+2\bracket{b,\mathbb H}\partial_{\alpha'} (\partial_t+b\partial_{\alpha'})f-[b,b; \partial_{\alpha'} f]+\bracket{i\mathcal A,\mathbb H}\partial_{\alpha'} f.
\end{equation}
We have, by product rules, that
\begin{equation}\label{eq:c15}
[(\partial_t+b\partial_{\alpha'})^2, \frac{1}{Z_{,\alpha'}}]f=(\partial_t+b\partial_{\alpha'})^2\paren{\frac{1}{Z_{,\alpha'}}}f+2(\partial_t+b\partial_{\alpha'})\paren{\frac{1}{Z_{,\alpha'}}}(\partial_t+b\partial_{\alpha'})f;
\end{equation}
and
\begin{equation}\label{eq:c17}
[i\mathcal A\partial_{\alpha'}, \frac{1}{Z_{,\alpha'}}]f=i\mathcal A\partial_{\alpha'} \paren{\frac{1}{Z_{,\alpha'}}} f;
\end{equation}
so
\begin{equation}\label{eq:c16}
[\mathcal P, \frac{1}{Z_{,\alpha'}}]f=(\partial_t+b\partial_{\alpha'})^2\paren{\frac{1}{Z_{,\alpha'}}}f+2(\partial_t+b\partial_{\alpha'})\paren{\frac{1}{Z_{,\alpha'}}}(\partial_t+b\partial_{\alpha'})f+i\mathcal A\partial_{\alpha'} \paren{\frac{1}{Z_{,\alpha'}}} f.
\end{equation}
And we compute, by \eqref{eq:dza},
\begin{align}\label{eq:c26}
(\partial_t+b\partial_{\alpha'})\paren{\frac{1}{Z_{,\alpha'}}}&=\frac{1}{Z_{,\alpha'}}(b_{\alpha'}-D_{\alpha'} Z_t);\\
\label{eq:c27}
(\partial_t+b\partial_{\alpha'})^2\paren{\frac{1}{Z_{,\alpha'}}}&=\frac{1}{Z_{,\alpha'}}(b_{\alpha'}-D_{\alpha'} Z_t)^2+\frac{1}{Z_{,\alpha'}}(\partial_t+b\partial_{\alpha'})(b_{\alpha'}-D_{\alpha'} Z_t).
\end{align}
\section{Main quantities controlled by $\mathfrak E$} \label{quantities}
We have shown in \S\ref{basic-quantities} that the following quantities are controlled by a polynomial of $\mathfrak E$ (or equivalently by $\mathcal E$):
\begin{equation}\label{2020-1}
\begin{aligned}
&\nm{ D_{\alpha'}\bar Z_t}_{\dot H^{1/2}}, \quad\nm{\frac{1}{ Z_{,\alpha'}} D_{\alpha'}^2\bar Z_t}_{\dot H^{1/2}}, \quad \|\bar Z_{t,\alpha'}\|_{L^2},\quad \|D_{\alpha'}^2\bar Z_t\|_{L^2},\quad \nm{\partial_{\alpha'} \frac{1}{ Z_{,\alpha'}}}_{L^2},\quad \abs{\frac{1}{ Z_{,\alpha'}}(0,t)},\\&
\|A_1\|_{L^\infty}, \quad \|b_{\alpha'}\|_{L^\infty}, \quad \|D_{\alpha'} Z_t\|_{L^\infty},\quad \|D_{\alpha'} Z_{tt}\|_{L^\infty},
\quad \|(\partial_t+b\partial_{\alpha'})\bar Z_{t,\alpha'}\|_{L^2} \\&
\|Z_{tt,\alpha'}\|_{L^2}, \quad \|D_{\alpha'}^2 \bar Z_{tt}\|_{L^2},\quad \|D_{\alpha'}^2 Z_{tt}\|_{L^2},\quad \|(\partial_t+b\partial_{\alpha'})D_{\alpha'}^2\bar Z_t\|_{L^2},\quad \|D_{\alpha'}^2 Z_t\|_{L^2}.
\end{aligned}
\end{equation}
In the remainder of \S\ref{proof0} we have controlled the following quantities by a polynomial of $\mathfrak E$ (or equivalently by $\mathcal E$):
\begin{equation}\label{2020-2}
\begin{aligned}
& \nm{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}_{L^\infty},\quad \nm{(\partial_t+b\partial_{\alpha'})A_1}_{L^\infty},\quad \nm{\mathcal A_{\alpha'}}_{L^\infty},\quad \nm{\frac1{Z_{,\alpha'}}\partial_{\alpha'} \frac1{Z_{,\alpha'}}}_{L^\infty},\\& \nm{\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})\frac1{Z_{,\alpha'}}}_{L^2},\quad \nm{(\partial_t+b\partial_{\alpha'})\partial_{\alpha'}\frac1{Z_{,\alpha'}}}_{L^2},\quad \nm{\partial_{\alpha'}(\partial_t+b\partial_{\alpha'})b}_{L^\infty},\quad \nm{(\partial_t+b\partial_{\alpha'})b_{\alpha'}}_{L^\infty},\\& \nm{(\partial_t+b\partial_{\alpha'})D_{\alpha'} Z_t}_{L^\infty}, \quad
\nm{D_{\alpha'}\paren{\frac{\mathfrak a_t}{\mathfrak a}\circ h^{-1}}}_{L^2}.
\end{aligned}
\end{equation}
As a consequence of \eqref{2045} and \eqref{2020-1} we have
$$\nm{D_{\alpha'}b_{\alpha'}}_{L^2}\lesssim C(\mathfrak E).$$
\end{appendix}
\begin{thebibliography}{10}
\newcommand{\msn}[1]{\href{http://www.ams.org/mathscinet-getitem?mr=#1}{\sc MR#1}}
\bibitem{abz}
T. Alazard, N. Burq \& C. Zuily {\it On the Cauchy problem for gravity water
waves.} Invent. Math. Vol.198 (2014) pp.71-163
\bibitem{abz14}
T. Alazard, N. Burq \& C. Zuily {\it Strichartz estimates and the Cauchy problem for the gravity water waves equations.} Preprint 2014, arXiv:1404.4276
\bibitem{ad}
T. Alazard \& J-M. Delort {\it Global solutions and asymptotic behavior for two dimensional
gravity water waves,} Preprint 2013, arXiv:1305.4090 [math.AP].
\bibitem{am}
D. Ambrose, N. Masmoudi
\emph{The zero surface tension limit of two-dimensional water waves}. Comm. Pure Appl. Math. 58 (2005), no. 10, 1287-1315
\bibitem{bhl}
T. Beale, T. Hou \& J. Lowengrub {\it Growth rates for the linearized
motion of fluid interfaces away from equilibrium}
Comm. Pure Appl. Math. 46 (1993), no.9, 1269-1301.
\bibitem{bmsw}
L. Bieri, S. Miao, S. Shahshahani, \& S. Wu
{\it On the Motion of a Self-Gravitating Incompressible Fluid with Free Boundary }
Comm. Math. Phys. to appear. DOI
10.1007/s00220-017-2884-z.
\bibitem{bi}
G. Birkhoff {\it Helmholtz and Taylor instability} Proc. Symp. in
Appl. Math. Vol. XIII, pp.55-76.
\bibitem{cf}
A. Castro, D. C\'ordoba, C. Fefferman, F. Gancedo \& J. G\'omez-Serrano {\it Finite time singularities for
the free boundary incompressible Euler equations} Ann. of Math. (2) 178 (2013), no.3. 1061-1134
\bibitem{cl}
D. Christodoulou, H. Lindblad {\it On the motion of the free surface of a liquid} Comm. Pure Appl. Math. 53 (2000) no. 12, 1536-1602
\bibitem{cs}
D. Coutand, S. Shkoller {\it Wellposedness of the free-surface incompressible Euler equations with or without surface tension}
J. AMS. 20 (2007), no. 3, 829-930.
\bibitem{cs1}
D. Coutand, S. Shkoller {\it On the finite-time splash and splat singularities for the 3-D free-surface Euler equations}
arXiv:1201.4919
\bibitem{cmm}
R. Coifman, A. McIntosh and Y. Meyer {\it L'integrale de Cauchy definit un operateur borne sur $L^2$ pour les courbes lipschitziennes} Annals of Math, 116 (1982), 361-387.
\bibitem{cr}
W. Craig {\it An existence theory for water waves and the Boussinesq
and Korteweg-de Vries scaling limits} Comm. in P. D. E. 10(8), 1985
pp.787-1003
\bibitem{dipp}
Y. Deng, A. D. Ionescu, B. Pausader, \& F. Pusateri {\it Global solutions of the gravity-capillary water wave system in 3
dimensions} ArXiv e-prints, January 2016.
\bibitem{ebi}
D. G. Ebin. {\it The equations of motion of a perfect fluid with free boundary are not well posed} Comm. P. D. E. 12(10), 1987, pp.1175--1201.
\bibitem{gms}
P. Germain, N. Masmoudi, \& J. Shatah {\it Global solutions of the gravity water wave equation in dimension 3} Ann. of Math (2) 175 (2012), no.2, 691-754.
\bibitem{hit}
J. Hunter, M. Ifrim \& D. Tataru {\it
Two dimensional water waves in holomorphic coordinates} Preprint 2014, arXiv:1401.1252
\bibitem{it}
M. Ifrim \& D. Tataru
{\it Two dimensional water waves in holomorphic coordinates II: global solutions} Preprint 2014, arXiv:1404.7583
\bibitem{ig1}
T. Iguchi {\it Well-posedness of the initial value problem for capillary-gravity waves} Funkcial. Ekvac. 44 (2001) no. 2, 219-241.
\bibitem{ip}
A. Ionescu \& F. Pusateri. {\it Global solutions for the gravity water waves system in
2d,} Invent. Math. to appear
\bibitem{jour}
J-L. Journ\'e. {\it Calderon-Zygmund Operators, Pseudo-Differential Operators and the Cauchy
Integral of Calderon}, vol. 994, Lecture Notes in Math. Springer, 1983.
\bibitem{kw}
R. Kinsey \& S. Wu {\it A priori estimates for two-dimensional water waves with angled crests} Preprint 2014, arXiv:1406.7573
\bibitem{la}
D. Lannes {\it Well-posedness of the water-wave equations} J. Amer. Math. Soc. 18 (2005), 605-654
\bibitem{le}
T. Levi-Civita. {\it D\'etermination rigoureuse des ondes permanentes d'ampleur finie.} Math. Ann.,
93(1), 1925. pp.264-314
\bibitem{li}
H. Lindblad {\it Well-posedness for the motion of an incompressible liquid with free surface boundary} Ann. of Math. 162 (2005), no. 1, 109-194.
\bibitem{na}
V. I. Nalimov {\it The Cauchy-Poisson Problem} (in Russian),
Dynamika Splosh. Sredy 18, 1974, pp. 104-210.
\bibitem{newton}
Sir Isaac Newton {\it
Philosophi\ae{} Naturalis Principia Mathematica} 1726
\bibitem{ot}
M. Ogawa, A. Tani {\it Free boundary problem for an incompressible ideal fluid with surface tension} Math. Models Methods Appl. Sci. 12, (2002), no.12, 1725-1740.
\bibitem{ovs}
L. V. Ovsjannikov, {\it To the shallow water foundation} Arch. Mech. Stos., 26, 1974, 407-422.
\bibitem{s}
E. M. Stein {\it Singular integrals and differentiability properties of functions} Princeton University Press, 1970.
\bibitem{sz}
J. Shatah, C. Zeng {\it Geometry and a priori estimates for free boundary problems of the Euler's equation} Comm. Pure Appl. Math. V. 61. no.5 (2008) pp.698-744
\bibitem{st}
G. G. Stokes. {\it On the theory of oscillatory waves.} Trans. Cambridge Philos. Soc., 8: 1847, pp.441- 455.
\bibitem{ta}
G. I. Taylor {\it The instability of liquid surfaces when accelerated in
a direction perpendicular to their planes I.}
Proc. Roy. Soc. London A 201, 1950, 192-196
\bibitem{wang1}
X. Wang {\it Global infinite energy solutions for the 2D gravity water waves system} arXiv:1502.00687
\bibitem{wang2}
X. Wang {\it Global solution for the 3D gravity water waves system above a flat bottom} arXiv:1508.06227
\bibitem{wu1}
S. Wu {\it Well-posedness in Sobolev spaces of the full water wave problem
in 2-D} Inventiones Mathematicae 130, 1997, pp. 39-72
\bibitem{wu2}
S. Wu {\it Well-posedness in Sobolev spaces of the full water wave problem
in 3-D} Journal of the AMS. 12. no.2 (1999), pp. 445-495.
\bibitem{wu3}
S. Wu {\it Almost global wellposedness of the 2-D full water wave problem} Invent. Math, 177, (2009), no.1, pp. 45-135.
\bibitem{wu4}
S. Wu {\it Global wellposedness of the 3-D full water wave problem} Invent. Math. 184 (2011), no.1, pp.125-220.
\bibitem{wu5}
S. Wu {\it On a class of self-similar 2d surface water waves} Preprint 2012, arXiv:1206.2208
\bibitem{wu6}
S. Wu {\it Wellposedness and singularities of the water wave equations} Notes of the lectures given at the Newton Institute, Cambridge, UK, Aug. 2014.
\bibitem{wu7}
S. Wu
{\it A blow-up criteria and the existence of 2d gravity water waves with angled crests} arXiv: 1502.05342
\bibitem{yo}
H. Yosihara {\it Gravity waves on the free surface of an
incompressible perfect fluid of finite depth,} RIMS Kyoto 18, 1982,
pp. 49-96
\bibitem{zz}
P. Zhang, Z. Zhang {\it On the free boundary problem of 3-D incompressible Euler equations}. Comm. Pure. Appl. Math. V. 61. no.7 (2008), pp. 877-940
\end{thebibliography}
\end{document}
\begin{document}
\title{Shortcuts to adiabaticity: fast-forward approach}
\author{E. Torrontegui}
\affiliation{Departamento de Qu\'{\i}mica F\'{\i}sica, Universidad del Pa\'{\i}s Vasco - Euskal Herriko Unibertsitatea,
Apdo. 644, Bilbao, Spain}
\author{S. Mart\'{\i}nez-Garaot}
\affiliation{Departamento de Qu\'{\i}mica F\'{\i}sica, Universidad del Pa\'{\i}s Vasco - Euskal Herriko Unibertsitatea,
Apdo. 644, Bilbao, Spain}
\author{A. Ruschhaupt}
\affiliation{Institut f\"ur Theoretische Physik, Leibniz
Universit\"{a}t Hannover, Appelstra\ss e 2, 30167 Hannover,
Germany}
\author{J. G. Muga}
\affiliation{Departamento de Qu\'{\i}mica F\'{\i}sica, Universidad del Pa\'{\i}s Vasco - Euskal Herriko Unibertsitatea,
Apdo. 644, Bilbao, Spain}
\begin{abstract}
The ``fast-forward'' approach by Masuda and Nakamura
generates driving potentials to accelerate slow quantum adiabatic dynamics.
First we present a streamlined version of the formalism that produces the main results in a few steps. Then we show the connection between this approach and inverse engineering based on Lewis-Riesenfeld invariants. We identify in this manner applications in which the engineered potential does not depend on the initial state. Finally we discuss more general applications exemplified by
wave splitting processes.
\end{abstract}
\pacs{42.50.Dv, 03.75.Kk, 37.10.Gh}
\maketitle
\section{Introduction}
Motivated by the practical need to accelerate quantum adiabatic processes in different contexts (transport \cite{David,Calarco,transport, BECtransport,OCTtrans}, expansions \cite{PRL1,OCTexpan}, population inversion and control \cite{Rice03,Rice05,Rice08,Berry2009,PRL2,Morsch}, cooling cycles \cite{Salamon09,PRL1,EPL11}, wavefunction splitting \cite{S07,S09a,S09b,MNProc}),
and by related fundamental questions
(about the quantum limits to the speed of processes,
the viability of adiabatic computing
\cite{Zurek}, or the third principle of thermodynamics \cite{Salamon09,energy}),
a flurry of theoretical and experimental activity
has been triggered by the proposal of several approaches to design ``shortcuts to adiabaticity''.
Among other approaches let us mention (i) a transitionless tracking algorithm or ``counterdiabatic'' approach that adds to the original Hamiltonian
extra terms to cancel transitions in the adiabatic or superadiabatic bases \cite{Rice03,Rice05,Rice08,Berry2009,PRL2,Morsch}; (ii) inverse engineering of the external driving \cite{GonBEC,PRL1, energy,transport, BECtransport, NHSara, 3d, LabeyriePRA, BECexp} based on Lewis-Riesenfeldt invariants \cite{LR}, which has been applied in several expansion experiments \cite{LabeyriePRA,BECexp}; (iii) optimal control (OC) methods \cite{S07,Salamon09,OCTexpan,OCTtrans}, sometimes combined with other methods to enhance their performance \cite{OCTexpan, BECtransport, OCTtrans}; (iv) the fast-forward (FF) approach advocated by Masuda and Nakamura \cite{MNProc,MN11}; (v) parallel adiabatic passage \cite{PLAP1,PLAP2,PLAP3,PLAP4}.
The multiplicity of approaches is quite useful because they
may complement each other: either in the same application,
as demonstrated e.g. with OC and invariant-based methods, or because of their different
domains.
Clarifying the features, overlaps, and relations among these approaches is important to apply the ones which are best suited for specific systems and objectives \cite{CTM},
or to develop new ones.
In this paper we shall establish in particular the connection between the fast-forward and the invariant-based methods.
Based on some earlier results \cite{MNPra}, the fast-forward formalism for adiabatic dynamics and several application examples were worked out in \cite{MNProc, MN11} by Masuda and Nakamura
for the Gross-Pitaevskii (GP) or the corresponding Schr\"odinger equations.
The objective of the method is to accelerate a ``standard'' system subjected to a slow variation of external parameters. The time is then rescaled by a ``magnification factor'',
and an ansatz wave function is defined by the standard function multiplied by a
phase factor that, in general, depends on position and time. Inserting the ansatz into the dynamical equation provides in principle the form of the necessary fast-forward driving potential and the equation to be satisfied by the phase. This procedure leads however to divergent terms when the reference standard process is infinitely slow.
The solution found in \cite{MNProc} to this problem
was to ``regularize'' the Hamiltonian and standard state using a new ansatz.
As a consequence of the different steps and functions introduced the resulting procedure is somewhat involved, which hinders a broader application.
In Sec. \ref{secmethod} we provide a streamlined construction of
local and real fast-forward potentials, and Sec. \ref{secinverse}
delves into a more detailed connection between this streamlined version and the original formulation of the FF formalism. Section \ref{secconexion} relates the fast-forward approach to the inverse method for dynamical invariants which are quadratic in momentum. Section \ref{beyond} discusses applications beyond this domain, in particular wavefunction splitting, which is an important operation for
matter wave interferometry \cite{S07,S09a,S09b,Augusto}.
Finally Sec. \ref{secoutlook}
discusses the results and open questions.
\section{A simple inverse method\label{secmethod}}
Our starting point is the 3D time-dependent GP equation
\begin{equation}
\label{start}
i\hbar\frac{\partial|\psi(t)\rangle}{\partial t}=H(t)|\psi(t)\rangle,
\end{equation}
where the Hamiltonian $H$ is the sum of the kinetic energy $T$, the external potential $V(t)$,
and the mean field potential $G(t)$. We are assuming an external local
potential, where ``local'' means here $\langle \bold{x}|V(t)|\bold{x'}\rangle=V(\bold{x},t)\delta(\bold{x}-\bold{x'})$.
Then, by solving Eq. (\ref{start}) in coordinate space, $V(\bold{x},t)$
may be written as
\begin{equation}
\label{pot1}
V(\bold{x},t)=\frac{i\hbar\langle \bold{x}| \partial_t\psi(t)\rangle-\langle\bold{x}|T|\psi(t)\rangle-\langle\bold{x}|G(t)|\psi(t)\rangle}{\langle\bold{x}|\psi(t)\rangle},
\end{equation}
with $\langle\bold{x}|\psi(t)\rangle=\psi(\bold{x},t)$.
The kinetic and mean field terms in the coordinate representation have the usual forms
\begin{eqnarray}
\langle\bold{x}|T|\psi(t)\rangle&=&\frac{-\hbar^2}{2m} \nabla^2\psi(\bold{x},t),
\\
\langle\bold{x}|G(t)|\psi(t)\rangle&=&g|\psi(\bold{x},t)|^2\psi(\bold{x},t),
\end{eqnarray}
$g$ being the coupling constant of the Bose-Einstein condensate.
By introducing into Eq. (\ref{pot1}) the ansatz
\begin{equation}
\label{wave}
\langle\bold{x}|\psi(t)\rangle=r(\bold{x},t)e^{i\phi(\bold{x},t)}, \quad r(\bold{x},t), \phi(\bold{x},t) \in \mathbb{R},
\end{equation}
we get
\begin{eqnarray}
V(\bold{x},t)&=&i\hbar\frac{\dot{r}}{r}-\hbar{\dot \phi}+\frac{\hbar^2}{2m}\bigg(\frac{2i\nabla \phi\cdot\nabla r}{r}+i\nabla^2\phi
\nonumber \\
&-&(\nabla \phi)^2+\frac{\nabla^2r}{r}\bigg)-gr^2,
\label{pot2}
\end{eqnarray}
where the dot means time derivative.
The real and imaginary parts are
\begin{eqnarray}
{\rm{Re}}[V(\bold{x},t)]&=&-\hbar{\dot \phi}+\frac{\hbar^2}{2m}\bigg(\frac{\nabla^2 r}{r}-(\nabla \phi)^2\bigg)-gr^2, \label{real}
\\
{\rm{Im}}[V(\bold{x},t)]&=&\hbar\frac{\dot r}{r}+\frac{\hbar^2}{2m}\bigg(\frac{2\nabla \phi\cdot \nabla r}{r}+\nabla^2 \phi\bigg).
\label{imag}
\end{eqnarray}
Our purpose now is to design a local and real potential
such that an initial eigenstate of the initial Hamiltonian, typically the ground state but it could be otherwise, evolves in a time $t_f$ into the corresponding eigenstate of the final
Hamiltonian (a different goal will be discussed in the final Section).
We assume that
the full Hamiltonian and the corresponding eigenstates are known at the boundary times.
By construction the potential of Eq. (\ref{pot2}) is local. If we impose ${\rm{Im}}[V(\bold{x},t)]=0$, i.e.
\begin{equation}
\frac{\dot r}{r}+\frac{\hbar}{2m}\bigg(\frac{2\nabla \phi\cdot \nabla r}{r}+\nabla^2 \phi\bigg)=0,
\label{imag0}
\end{equation}
then we get from Eq. (\ref{real}) a local and real potential.
In the inversion protocol we design $r(\bold{x},t)$
first,
then solve for $\phi$ in Eq. (\ref{imag0}), and finally get the potential $V$ from Eq. (\ref{real}).
If, at the boundary times, $\dot{r}=0$ is imposed, Eq. (\ref{imag0}) has
solutions $\phi(\bold{x},t)$ fulfilling that
$\phi(\bold{x},t)$ is independent of $\bold{x}$ at $t=0$ and $t=t_f$.
Using this in Eq. (\ref{real}) at $t=0$, and multiplying by $e^{i\phi(0)}$, we get
\begin{equation}
\bigg[-\frac{\hbar^2}{2m}\nabla^2+V(\bold x,0)+g|\psi(\bold x,0)|^2\bigg]\psi(\bold x,0)=-\hbar\dot \phi(0)\psi(\bold x,0).
\end{equation}
The initial state is thus an eigenstate of the stationary GP equation at $t=0$,
and $-\hbar\dot\phi(0)=E(0)$ is the energy of
the eigenstate $\psi(\bold x,0)$. Note that
the above solution of $\phi$ (with $\dot{r}=0$ at boundary times)
admits the addition of an arbitrary function that depends only on time and modifies the zero of energy.
A similar result is found at $t_f$.
\section{Connection with the fast-forward approach\label{secinverse}}
We shall now reformulate the above results to connect them with the FF approach in \cite{MNPra, MNProc}. The notation is made close but not necessarily in full agreement with \cite{MNPra, MNProc}.
Let us define an external parameter that depends on time, or on some scaled time function, according to
\begin{equation}
R(\Lambda(t))=\epsilon\Lambda(t) =: {\cal{R}}(t).
\end{equation}
Here ${\cal{R}}(t)$ and $R(\Lambda)$ are in general different functions of their arguments,
$\epsilon$ is a small positive constant and
the scaling function $\Lambda(t)$ is given in terms of a magnification factor $\alpha$,
\begin{equation}
\Lambda(t)=\int_{0}^{t}\ dt'\alpha(t').
\label{rt}
\end{equation}
$\alpha(t)$ is positive
for $0\le t \le t_f$ and zero at the boundaries $t=0$ and $t=t_f$.
Note that $\dot{\cal{R}}=\epsilon\alpha$ and $\ddot{\cal{R}}=\epsilon\dot\alpha$.
We rewrite the modulus and the phase in Eq. (\ref{wave}) as
\begin{eqnarray}
r(\bold x,t)&=& \tilde{r}(\bold x,{\cal{R}}(t)),
\label{eqr}\\
\phi(\bold x,t)&=&-\frac{1}{\hbar} \int_0^t
dt'{\cal{E}}({\cal{R}}(t'))+\epsilon\alpha(t)\theta(\bold x,{\cal{R}}(t)),\label{eqphi}
\end{eqnarray}
where again we have distinguished the functions according to their different arguments,
in particular $E(t)={\cal{E}}({\cal{R}}(t))$.
If we also demand $\dot\alpha=0$ at the boundaries to fulfill $\dot r=0$,
then
$\dot{\phi}(\bold x,0)=-{\cal{E}}({\cal{R}}(0))/\hbar$ and $\dot{\phi}(\bold x, t_f)=-{\cal{E}}({\cal{R}}(t_f))/\hbar$.
Substituting Eq. (\ref{eqphi}) in Eq. (\ref{imag0}), $\theta$ has to satisfy
\begin{eqnarray}
0&=&\tilde{r}(\bold{x},{\cal{R}}(t)) \nabla^2\theta(\bold{x},{\cal{R}}(t))+2\nabla\tilde{r}(\bold{x},{\cal{R}}(t)) \cdot \nabla\theta(\bold{x},{\cal{R}}(t))\nonumber\\
&+&\frac{2m}{\hbar}\frac{\partial \tilde{r}}{\partial {\cal{R}}}(\bold{x},{\cal{R}}(t)),
\label{Masuda1}
\end{eqnarray}
and from Eq. (\ref{real}), the ``fast-forward'' potential is given by
\begin{eqnarray}
V(\bold x,t)&=& V_0(\bold{x},{\cal{R}}(t))-\hbar\epsilon\dot\alpha(t)\theta(\bold{x},{\cal{R}}(t))\nonumber\\
& &-\hbar\epsilon^2\alpha^2(t)\frac{d\theta}{d{\cal{R}}}(\bold{x},{\cal{R}}(t))\nonumber\\
& &-\frac{\hbar^2}{2m}\epsilon^2\alpha^2(t)[\nabla\theta(\bold{x},{\cal{R}}(t))]^2,
\label{Masuda2}
\end{eqnarray}
where the ``standard potential'' $V_0=V_0({\bold{x}},{\cal{R}})$
is defined by the stationary GP equation
\begin{equation}
\bigg[-\frac{\hbar^2}{2m}\nabla^2+V_0({\bold{x}},{\cal{R}})+g\tilde{r}^2({\bold{x}},{\cal{R}})\bigg]\tilde{r}({\bold{x}},{\cal{R}})={\cal{E}}({\cal{R}})
\tilde{r}(\bold x,{\cal{R}}).
\label{v0}
\end{equation}
At the boundary times, but in general only there, $V(\bold x,t)=V_0({\bold{x}},{\cal{R}}(t))$.
Equation (\ref{Masuda2}) for the driving potential coincides with
Eq. (2.28) in \cite{MNProc} for real eigenfunctions,
whereas Eq. (\ref{Masuda1}) for the phase function $\theta$
corresponds to Eq. (2.18) in \cite{MNProc}.
The present formal framework may be used in the following way:
(i) Starting from a given standard potential $V_0({\bf{x}},{\cal{R}})$,
$\tilde{r}(\bold{x},{\cal{R}})$ and ${\cal{E}}({\cal{R}})$ would follow from Eq. (\ref{v0}).
Alternatively, it is also possible to impose $\tilde{r}({\bf{x}},{\cal{R}})$ first
and then calculate $V_0$.
(ii) An auxiliary function ${\cal{R}}(t)$ is imposed.
(iii) $\theta$ has to be determined from Eq. (\ref{Masuda1}).
(iv) The fast-forward potential can be calculated from Eq. (\ref{Masuda2}).
To arrive at this recipe in \cite{MNPra,MNProc}
preliminary steps are the definition of a standard state, a virtually
fast-forwarded state, and a regularized state with their corresponding equations.
The route followed in Sec. \ref{secmethod} to the driving potential is
in comparison quite direct. This is so because we made no explicit use of a slow
reference adiabatic process,
although it might be deduced from the fast designed dynamics if required.
The key simplification is to start with the ansatz in Eq. (\ref{wave})
and derive the two basic equations for phase and potential from it
by imposing locality and reality of the driving potential.
Since the phase $\phi$ that solves Eq. (\ref{imag0})
depends in general on the particular $r({\bf{x}},t)$, the potential
calculated through Eqs. (\ref{real}) resp. (\ref{Masuda2}) gives in principle a
state-dependent potential. However, in some special circumstances,
as we shall see below, the fast-forward potential becomes state independent.
\section{Connection with invariant's based inverse engineering approach\label{secconexion}}
In this section we shall relate the previous results for the linear ($g=0$) Schr\"odinger equation to the engineering approach
based on quadratic-in-momentum invariants.
The non-linear GP equation could also be treated, as in
\cite{GonBEC,BECtransport}, but it
does not allow in general for
the state-independent potential forms that we shall
describe for $g=0$.
\subsection{Lewis-Leach potentials}
In a direct (rather than inverse) approach, the potential $V(\bold{x},t)$ is
considered to be known, and the wave function at any time $t$ can be deduced
from the Lewis-Riesenfeld theory of invariants \cite{LR}.
Suppose that the potential $V(\bold{x},t)$ has the structure of the most general
``Lewis-Leach'' potential that admits a quadratic-in-momentum invariant \cite{LL},
\begin{equation}
\label{LLpot}
V(\bold x,t)=-\bold F(t)\cdot \bold{x}+\frac{1}{2}m\omega^2(t)|\bold{x}|^2+\frac{1}{\rho^2}U({\boldsymbol{\sigma}})+h(t),
\end{equation}
where $\omega(t)$, $\bold F(t)$ and $h(t)$ are arbitrary functions of time and $U({\boldsymbol{\sigma}})$ is an arbitrary function
of its argument ${\boldsymbol{\sigma}}={\boldsymbol{\sigma}}(t)=(\bold{x}-{\boldsymbol{\alpha}})/\rho$. The time dependent functions $\rho=\rho(t)$ and ${\boldsymbol{\alpha}}={\boldsymbol{\alpha}}(t)$ must satisfy the auxiliary equations
\begin{eqnarray}
\frac{\omega_{0}^{2}}{\rho^{3}}&=&\ddot\rho+\omega^2(t)\rho,
\label{ermakov}\\
\frac{\bold{F}(t)}{m}&=&\ddot{\boldsymbol{\alpha}}+\omega^2(t){\boldsymbol{\alpha}}, \label{osci}
\end{eqnarray}
with $\omega_0$ an arbitrary constant.
The associated dynamical invariant, up to a constant factor, is given by
\begin{eqnarray}
I&=&\frac{1}{2m}|\rho(\bold{p}-m\dot{\boldsymbol{\alpha}})-m\dot\rho(\bold{x}-\boldsymbol{\alpha})|^2 \nonumber\\
&+&\frac{1}{2}m\omega_0^2|\boldsymbol{\sigma}|^2 +U(\boldsymbol{\sigma}), \label{inva0}
\end{eqnarray}
with $\bold{p}=-i\hbar\nabla$.
It satisfies $dI/dt=\partial{I}(t)/\partial{t}
-\frac{i}{\hbar}
[I (t ),H(t )] = 0$, so its expectation values are constant for any wave function $\psi(t)$ that evolves with $H$.
For the potential in Eq. (\ref{LLpot}),
the general solution of the time-dependent Schr\"odinger equation, Eq. (\ref{start}),
can be expanded as a linear combination with constant coefficients $c_n$ and orthonormal eigenvectors $\psi_n$ of $I$ \cite{LR},
\begin{eqnarray}
\psi(\bold{x},t)&=&\sum_nc_ne^{i\alpha_n}\psi_n(\bold{x},t), \label{mode} \\
I\psi_n(\bold{x},t)&=&\lambda_n\psi_n(\bold{x},t),
\end{eqnarray}
where $\lambda_n$ are the time independent eigenvalues of $I$. The phases $\alpha_n$ satisfy
$\hbar\frac{d\alpha_n}{dt}=\langle\psi_n|i\hbar\frac{\partial}{\partial t}-H|\psi_n\rangle$, \cite{LR, transport, BECtransport}
\begin{equation}
\label{faseLR}
\alpha_n=-\frac{1}{\hbar}\int_{0}^{t}\!\! dt'\bigg(\frac{\lambda_n}{\rho^2}+\frac{m[|\dot{\boldsymbol{\alpha}}\rho-{\boldsymbol{\alpha}}\dot\rho|^2-\omega_{0}^{2}|{\boldsymbol{\alpha}}|^2/\rho^2]}{2\rho^2}+h\bigg).
\end{equation}
Performing now the unitary transformation \cite{transport, BECtransport}
\begin{equation}
\label{trans}
\psi_n(\bold{x},t)=e^{\frac{im}{\hbar}[\dot\rho |\bold{x}|^2/2\rho+(\dot{\boldsymbol{\alpha}}\rho-{\boldsymbol{\alpha}}\dot\rho)\cdot\bold{x}/\rho]}\frac{1}{\rho^{3/2}}\chi_n({\boldsymbol{\sigma}}),
\end{equation}
the state $\psi_n$ is easily obtained from the solution $\chi_n(\boldsymbol{\sigma})$ (normalized in $\boldsymbol{\sigma}$-space) of the auxiliary
stationary Schr\"odinger equation
\begin{equation}
\bigg[-\frac{\hbar^2}{2m}\nabla^{2}_{{\boldsymbol{\sigma}}}+\frac{1}{2}m\omega_{0}^{2}|{\boldsymbol{\sigma}}|^2+U({\boldsymbol{\sigma}})\bigg]\chi_n({\boldsymbol{\sigma}})=\lambda_n\chi_n({\boldsymbol{\sigma}}).
\label{inva}
\end{equation}
In the direct approach we assume that
$U({\boldsymbol{\sigma}})$, $\omega(t)$ and $\bold{F}(t)$ are
known. Solving Eqs. (\ref{ermakov}) and (\ref{osci})
we get $\rho(t)$ and
${\boldsymbol{\alpha}}(t)$ from them. Thus
Eq. (\ref{inva}) can be solved to get $\lambda_n$ and $\chi_n({\boldsymbol{\sigma}})$.
Finally combining Eqs. (\ref{trans}) and (\ref{inva}), the mode $e^{i\alpha_n}\psi_n$ can be calculated at any time.
\subsection{Inverse engineering approach}
In the inverse approach based on quadratic-in-momentum invariants, the Hamiltonian is assumed to have the form given in Eq. (\ref{LLpot}), at all times and in particular at
initial and final instants. As $U$ is given the
stationary Eq. (\ref{inva}) may be solved. Then the
functions $\rho$ and $\boldsymbol{\alpha}$ are designed so that $[H(t),I(t)]=0$ for $t=0$ and $t=t_f$. Thus
the Hamiltonian and the invariant have common eigenvectors at these boundary times
\cite{PRL1, transport, BECtransport, 3d, NHSara, OCTexpan, OCTtrans}.
Typically the initial state $\psi(0)$ is the ground state of $H(0)$ which
is also an eigenstate of $I(0)$, and this state evolves according to Eq. (\ref{mode}), as an eigenvector of the invariant.
To relate the above to the simple inverse method of Sec. \ref{secmethod} we consider the
single mode wave function $\psi_n$ of Eq. (\ref{trans}) and identify
\begin{equation}
\label{rn}
r_n(\bold x,t)=\chi_n({\boldsymbol{\sigma}})/\rho^{3/2}(t).
\end{equation}
The subscript $n$ underlines the dependence on the $n$th mode considered.
Note that $\rho$ and $\boldsymbol{\alpha}$ are chosen at this point.
We may get the phase $\phi_n$ from Eq. (\ref{imag0}).
It can be checked by direct substitution that
\begin{eqnarray}
\phi_n&=&\frac{m}{\hbar}[\dot\rho |\bold{x}|^2/2\rho+(\dot{\boldsymbol{\alpha}}\rho-{\boldsymbol{\alpha}}\dot\rho)\cdot\bold{x}/\rho] \nonumber \\
&-&\frac{1}{\hbar}\int_{0}^{t}\!\! dt'\frac{\lambda_n}{\rho^2}+\mathcal{F}(t), \label{fn}
\end{eqnarray}
where
\begin{equation}
\mathcal{F}(t)=-\frac{1}{\hbar}\int_{0}^{t}\!\! dt'\bigg(\frac{m[|\dot{\boldsymbol{\alpha}}\rho-{\boldsymbol{\alpha}}\dot\rho|^2-\omega_{0}^{2}|{\boldsymbol{\alpha}}|^2/\rho^2]}{2\rho^2}+h\bigg),
\end{equation}
is a solution of this equation. Once $r_n$ and the phase $\phi_n$ are known,
Eq. (\ref{real}) gives the potential $V_n(\bold{x},t)$.
A different arbitrary function of time $\mathcal{F}(t)$ in $\phi_n(\bold{x},t)$
would produce a shift of the zero of energy in the resulting potential
$V_n(\bold{x},t)$.
We get, using Eq. (\ref{inva}),
\begin{eqnarray*}
V(\bold x,t)&=&-m \left(\ddot{\boldsymbol{\alpha}}+{\boldsymbol{\alpha}}
\frac{\omega_0^2 + \rho^3 \ddot\rho}{\rho^4}\right)\cdot {\boldsymbol{x}}
\\
&+& \frac{m}{2}\left(\frac{\omega_0^2 + \rho^3 \ddot\rho}{\rho^4}\right) |{\boldsymbol{x}}|^2
+ \frac{1}{\rho^2}U({\boldsymbol{\sigma}})+h.
\end{eqnarray*}
Taking Eqs. (\ref{ermakov}) and (\ref{osci}) into account, this potential
agrees with the potential in Eq. (\ref{LLpot}).
It is by construction local and real. Moreover it is independent of
the $n$th state considered so that linear combinations
of the modes at $t=0$ end up at $t_f$ unexcited, preserving the initial
populations.
Summarizing, the inverse engineering approach and the fast-forward approach
are connected via the simple inversion method.
Clearly, the external parameter ${\cal{R}}$ introduced in Sec. III must be related to ${\boldsymbol{\alpha}}$ and $\rho$, as illustrated in the
following section.
\subsection{Example: harmonic expansion}
Now we discuss an example of a $3D$ harmonic expansion produced with the
inverse engineering approach based on invariants and with the fast forward
technique to illustrate the links between the two methods.
{\it Invariants based approach:} Suppose that the expansion is governed by the Hamiltonian
\begin{equation}
\langlebel{Hejem}
H(t)=\frac{\bold{p}^2}{2m}+\frac{1}{2}m\omega^2(t)|\bold{x}|^2,
\end{equation}
where $\omega(t)$ is unknown, but at the boundary times $\omega(0)=\omega_0$ and $\omega(t_f)=\omega_f$.
This potential is a particular case of Eq. (\ref{LLpot}) with $\bold{F}(t)=U(\boldsymbol{\sigma})=h(t)=0$. Equation (\ref{osci}) is trivially fulfilled if
$\boldsymbol{\alpha}(t)=\boldsymbol{\ddot\alpha}(t)=0$
and consequently $\boldsymbol{\dot\alpha}(t)=0$.
$\rho(t)$ has to satisfy the Ermakov equation, Eq. (\ref{ermakov}).
The inverse engineering consists of imposing conditions on $\rho$ and its derivatives,
\begin{eqnarray}
\label{cond}
\rho(0)&=&1,\;\,\quad\dot\rho(0)=0,\;\,\quad\ddot\rho(0)=0,\nonumber\\
\rho(t_f)&=&\gamma,\quad\dot\rho(t_f)=0,\quad\ddot\rho(t_f)=0,
\end{eqnarray}
where $\gamma=(\omega_0/\omega_f)^{1/2}$,
to guarantee the commutation
between $H(t)$ and $I(t)$ at $t=0$ and $t_f$, and then getting $\omega(t)$ from Eq. (\ref{ermakov}).
{\it Fast forward approach:}
The starting point for the fast forward approach could be the $n$th eigenstate of
a harmonic trap (see also \cite{MNProc})
with angular frequency ${\cal R}=\omega$,
\begin{eqnarray}
\chi_n(\bold{x},{\cal{R}} )&=&\frac{\beta^{3/2}e^{-\frac{\beta^2|\bold{x}|^2}{2}}}{\pi^{3/4}\sqrt{2^{n_x+n_y+n_z}n_x!n_y!n_z!}}H_{n_x}(\beta x)
H_{n_y}(\beta y)\nonumber\\
&\times&H_{n_z}(\beta z),
\label{chi}
\end{eqnarray}
where $\beta=\sqrt{m{\cal{R}}/\hbar}$.
This state plays the role of $\tilde{r}$.
The corresponding potential $V_0$ is clearly
\begin{eqnarray}
V_0 (x, {\cal{R}}) &=& \frac{m}{2} {\cal{R}}^2 |{\bold{x}}|^2,
\end{eqnarray}
and
\begin{eqnarray}
{\cal{E}}_n&=&\hbar\omega\bigg(n_x+n_y+n_z+\frac{3}{2}\bigg) \label{osciladorE}.
\end{eqnarray}
The first step is to solve Eq. (\ref{Masuda1}) and we get as a solution
\begin{eqnarray}
\theta(\bold{x},{\cal{R}})=-\frac{m |\bold{x}|^2}{4\hbar{\cal{R}}}.
\end{eqnarray}
{\it Connection:}
The connection between the auxiliary variable ${\cal{R}}$ in the fast forward
approach and the auxiliary variable $\rho(t)$ in the inverse engineering
approach is in this example explicitly given by
\begin{equation}
{\cal{R}} (t) = \frac{\omega_0}{\rho(t)^2},
\end{equation}
see Eqs. (\ref{rn}) and (\ref{chi}).
The boundary conditions for $\rho(t)$ in Eq. (\ref{cond}) become
\begin{eqnarray}
{\cal{R}}(0)&=&\omega_0,\;\,\quad\dot{\cal{R}}(0)=0,\;\,\quad\ddot{\cal{R}}(0)=0,\nonumber\\
{\cal{R}}(t_f)&=&\omega_f,\quad\dot{\cal{R}}(t_f)=0,\quad\ddot{\cal{R}}(t_f)=0.
\end{eqnarray}
It also follows that $\epsilon\alpha(t)=\dot{\cal{R}}(t) = -2\omega_0\dot\rho(t)/\rho(t)^3$.
The auxiliary functions $\rho(t)$ resp. ${\cal{R}}(t)$ can be chosen in some way
fulfilling the boundary conditions.
The corresponding potential in the inverse engineering formalism
is constructed by first solving the Ermakov equation to get $\omega^2(t)$.
Then one has
\begin{equation}
V=\frac{1}{2}m\omega^2(t)|\bold{x}|^2=\frac{1}{2}m
\left(\frac{\omega_0^2}{\rho(t)^4} - \frac{\ddot\rho}{\rho}\right)|\bold{x}|^2,
\label{potinv}
\end{equation}
whereas the fast-forward potential is given, according to Eq. (\ref{Masuda2}), by
\begin{eqnarray*}
\begin{array}{rcccl}
V&=&\frac{m |\bold{x}|^2}{2} &\Big(& {\cal{R}}^2 + \hbar\epsilon\dot\alpha
\frac{m}{4\hbar{\cal{R}}} - \hbar\epsilon^2\alpha^2 \frac{m}{4\hbar{\cal{R}}^2}\\
&&&& - \frac{\hbar^2}{2m}\epsilon^2\alpha^2 \frac{m^2}{4\hbar^2{\cal{R}}^2}\Big)\\
&=& \frac{m |\bold{x}|^2}{2} &\Big(& \frac{\omega_0^2}{\rho(t)^4} -
\frac{\ddot \rho(t)}{\rho(t)}\Big),
\end{array}
\end{eqnarray*}
which agrees with Eq. (\ref{potinv}).
\section{Beyond Lewis-Leach potentials: wavefunction splitting processes\label{beyond}}
The transitionless condition for the inverse engineering method based on invariants relies on the commutativity $[H(t),I(t)]=0$ at times $t=0$ and $t_f$, which
guarantees common eigenvectors for $H$ and $I$ at these boundary times.
According to Eq. (\ref{trans})
the structure of the density of the $n$th mode of the invariant at initial and final times for quadratic-in-$\bold{p}$ invariants is
\begin{equation}
\label{denI}
\frac{1}{\rho^3 (0)}\bigg|\chi_n\bigg(\frac{\bold x-{\boldsymbol\alpha}(0)}{\rho(0)}\bigg)\bigg|^2\rightarrow\frac{1}{\rho^3 (t_f)}\bigg|\chi_n\bigg(\frac{\bold x-{\boldsymbol\alpha}(t_f)}{\rho(t_f)}\bigg)\bigg|^2.
\end{equation}
The final density is a translation and/or scaling of the initial one.
This means that for processes in which the initial and final eigenstates
of the Hamiltonian do not behave according to Eq. (\ref{denI}), the commutativity
of $H$ and $I$ at the boundary times cannot be achieved.
This restriction is due to the use of quadratic-in-$\bold p$ invariants, not to the
invariants-based method. Studying and applying more general invariants is
still an open question.
As an example in which Eq. (\ref{denI}) does not hold for the final densities,
let us consider the splitting of an initial state from a single
to a double well potential. For simplicity we take the $1D$ linear Schr\"odinger equation governed by the Hamiltonian
\begin{equation}
H(t)=\frac{p^2}{2m}+\frac{1}{2}m\omega^2(t)x^2+\eta(t)x^4.
\end{equation}
For the initial single trap we consider $\omega^2(0)=\omega_0^2$ and $\eta(0)=\eta_0>0$. The final double well is characterized by a
repulsive harmonic part with $\omega^2(t_f)=-\omega_f^2$ and $\eta(t_f)=\eta_f>0$.
Comparing terms with Eq. (\ref{LLpot}), $F=h=\alpha=0$ and consequently $\dot\alpha=\ddot\alpha=0$, $U(\sigma)=\eta(t)\rho^2x^4$, and
the particular structure of $U(\sigma)$ sets $\eta(t)=\kappa/\rho^6$, where
$\rho$ satisfies
the Ermakov equation, Eq. (\ref{ermakov}), and $\kappa$ is an arbitrary constant. The associated invariant is
\begin{equation}
I(t)=\frac{1}{2m}(\rho p-m\dot\rho x)^2+\frac{1}{2}m\omega_0^2\frac{x^2}{\rho^2}+\kappa\frac{x^4}{\rho^4}.
\end{equation}
Imposing $\rho(0)=1$, $\dot\rho(0)=0$, $\ddot\rho(0)=0$,
and identifying $\kappa=\eta_0$, then $H$ and
$I$ commute at $t=0$. At $t_f$, $[H(t_f),I(t_f)]=0$ for $\rho(t_f)=(i\omega_0/\omega_f)^{1/2}$, $\dot\rho(t_f)=\ddot\rho(t_f)=0$, and
$\eta_f=-i\eta_0\omega_f^3/\omega_0^3$.
However, $\rho$ must be a positive real function if initially so, and moreover the final potential that we get is complex.
A way out is to use
the simple inverse fast-forward method for specific initial and final states
without restricting the potential form, see also \cite{MNProc}.
Consider
for example the
1D splitting
of
the initial state $r(x,0)=e^{-\beta^2 x^2/2}$ $(\beta=\sqrt{m\omega/\hbar})$
into the final form $r(x,t_f)=e^{-\beta^2 (x-a)^2/2}+e^{-\beta^2(x+a)^2/2}$.
In between we apply the interpolation
\begin{equation}
r(x,t)=z(t)\bigg\{[1-{\cal{R}}(t)]r(x,0)+
{\cal{R}}(t)r(x,t_f)\bigg\},
\label{ansatzr}
\end{equation}
where ${\cal{R}}(t)$ is some smooth, monotonically increasing function
from 0 to 1 and $z(t)$ is a normalization function.
We also impose that $\dot {\cal R}=0$ to ensure $\dot r=0$ at the boundary times $t=0$ and $t_f$.
In the numerical examples the function ${\cal R}(t)$ is chosen as a polynomial of degree 7 to make zero the second and third derivatives at the boundaries.
Once we have established the form of $r(x,t)$, we solve Eq. (\ref{imag}), ${\rm Im}[V(x,t)]=0$, to get the phase $\phi$ with the
initial conditions $\phi(0,t)=\phi '(0,t)=0$ that fix the zero-energy point (the prime means spatial derivative). Then the resulting phase is introduced into Eq. (\ref{real}) to get the potential $V(x,t)$.
In Fig. \ref{fase} the phase $\phi(x,t)$ is plotted for a non adiabatic process
with $t_f=80$ ms.
The corresponding fast-forward potential is plotted in Fig. \ref{pot80}.
\begin{figure}
\caption{(Color online) Phase $\phi(x,t)$ calculated from Eq. (\ref{imag}).}
\end{figure}
\begin{figure}
\caption{(Color online) Fast-forward potential $V(x,t)$ in units of $\hbar\omega$ for a final time $t_f=80$ ms. The rest of parameters are the same as in Fig. \ref{fase}.}
\end{figure}
$V(x,t)$ in Fig. \ref{pot80} could be realized with high resolution time-varying optical potentials ``painted'' by a tightly focused rapidly moving laser
beam \cite{HRM09}, or by means of spatial light modulators \cite{Foot}.
A simpler approximate approach would involve the
combination of three Gaussian beams.
In principle the time $t_f$ can be reduced to produce the splitting in a shorter time. For example, in Fig. \ref{pot10} $t_f=10$ ms and a more complicated potential is needed.
\begin{figure}
\caption{(Color online) Fast-forward potential $V(x,t)$ in units of $\hbar\omega$ for a final time $t_f=10$ ms. The rest of parameters are the same as in Fig. \ref{fase}.}
\end{figure}
\section{Discussion\label{secoutlook}}
We have first distilled from the somewhat imposing set of equations
of the fast-forward (FF) formalism as originally presented a streamlined version that
may help in applying it more easily.
Our second aim has been to relate it to other inverse engineering methods.
In a previous publication, the inverse-engineering method based on invariants, was related to the transitionless tracking algorithm, and their potential equivalence was demonstrated \cite{CTM}.
Similarly we have established in this paper the connection between the fast-forward method and the invariant-based method for quadratic-in-momentum invariants.
These relations do not imply the full identity of the methods but their overlap and equivalence in a common domain. They are still useful heuristically as separate approaches since they are formulated in rather different terms \cite{CTM,MN11}. Moreover they facilitate extensions beyond their common domain, as exemplified
by the wave-splitting processes discussed in the previous section.
Further extensions are left for separate analysis: for example the possibility to transfer an excited state into the ground state or vice versa, or combining the fast-forward approach with optimal control theory (OCT) without including the final fidelity in the cost function as in \cite{S07, S09a, S09b}. (This would be possible because the fidelity is guaranteed to be one by construction.)
It will also be interesting for future work to consider complex potentials, either as solutions to the shortcut dynamics, as in the quantum brachistochrone \cite{qb}, or as
an effective description of the system dynamics to be accelerated \cite{NHSara}.
We are grateful to S. Masuda and K. Nakamura for discussing their method.
We acknowledge funding by Projects No. GIU07/40 and No. FIS2009-12773-C02-01,
and the UPV/EHU
under program UFI 11/55.
E. T. acknowledges financial support from the Basque Government
(Grants No. BFI08.151).
\end{document} |
\begin{document}
\title{New Free Divisors from Old}
\author[R.-O. Buchweitz]{Ragnar-Olaf Buchweitz}
\address{Dept.\ of Computer and Mathematical Sciences,
University of Tor\-onto at Scarborough, Tor\-onto, Ont.\ M1A 1C4, Canada}
\email{[email protected]}
\author[A.Conca]{Aldo Conca}
\address{Dipartimento di Matematica, Universit\'a di Genova,
Via Dodecaneso 35, I-16146 Genova, Italia}
\email{[email protected]}
\thanks{The first author was partly supported by NSERC grant
3-642-114-80.}
\date{\today}
\subjclass[2010]{Primary:
32S25,
14J17,
14J70;
Secondary:
14H51,
14B05.
}
\keywords{Free divisor, discriminant, Saito matrix, binomial, Euler vector field}
\begin{abstract}
We present several methods to construct or identify families of free divisors such as
those annihilated by many Euler vector fields, including binomial free divisors, or
divisors with triangular discriminant matrix.
We show how to create families of quasihomogeneous free divisors through the chain
rule or by extending them into the tangent bundle. We also discuss whether general
divisors can be extended to free ones by adding components and show that adding a
normal crossing divisor to a smooth one will not succeed.
\end{abstract}
\maketitle
{\footnotesize\tableofcontents}
\section{Introduction}
The goal of this note is to describe some basic operations that allow one to construct new free
divisors from given ones, and to classify toric free surfaces and binomial free divisors. We mainly
deal with weighted homogeneous polynomials over a field of characteristic $0$, though several
statements and constructions generalize to power series.
A (formal) {\em free divisor\/} is a reduced polynomial (or power series) $f$ in variables
$x_1,\dots,x_n$ over a field $K$ such that its Jacobian ideal
$J(f) = (\tfrac{\partial f}{\partial x_{1}},...,\tfrac{\partial f}{\partial x_{n}})+(f)$ is perfect of
codimension $2$ in the polynomial or power series ring.
For generalities about free divisors and their importance in singularity theory
we refer to, say, \cite{BEvB} and the references therein.
A determinantal characterization of free divisors is due to K.~Saito \cite{Sai}: a reduced polynomial
$f$ is a free divisor if and only if there exists a matrix $A$ of size $n\times n$ with entries in the
relevant polynomial or power series ring such that $\det(A)=f$ and $(\nabla f) A\equiv 0 \bmod{(f)}$,
where $\nabla f = (\tfrac{\partial f}{\partial x_{1}},...,\tfrac{\partial f}{\partial x_{n}})$ is the usual
gradient of $f$. In that case $A$ is called a {\em discriminant\/} (or {\em Saito\/}) {\em matrix\/}
of the free divisor.
The normal crossing divisor $f = x_1\cdots x_k$, for some $1\leqslant k\leqslant n$, provides a
simple example of a free divisor. Indeed, it is an example of a {\em free arrangement}, that is, a
hyperplane arrangement given by linear equations $\ell_i=0$ such that the product
$f=\prod_{i} \ell_i$ is a free divisor, see \cite{OrT} for more on free arrangements.
Section 2 contains generalities and notation. In Section 3 we study homogeneous polynomials that
are annihilated by $n-2$ linearly independent {\em Euler vector fields}, that is, polynomials $f$ such
that the vector space generated by the linear derivatives $\{x_{i}\partial f/\partial x_{i}\}_{i=1,...,n}$
is of dimension at most $2$. We show that such a polynomial is a free divisor provided the gradient
$\nabla f$ vanishes as an element of the first homology module of the associated
{\em Buchsbaum-Rim complex}. As an application, we classify in Theorem \ref{poly3} those
{\em free surfaces\/} $\{f(x,y,z)=0\}$ that are weighted homogeneous and annihilated by some
Euler vector field.
In Section 4 we present a {\em composition formula\/} or {\em chain rule\/} for free divisors. Such a
formula implies, for instance, that if $f$ and $g$ are free divisors in distinct variables then $fg(f+g)$
is also a free divisor.
In Section 5 we exhibit some {\em triangular\/} free divisors, that is, free divisors whose discriminant
matrix has a triangular form. It follows, for instance, that for natural numbers
$t\geqslant 1, n\geqslant 2$, the polynomial $\prod_{j=2}^n (x_1^t+\dots+x_j^t)$ is a free
divisor.
In Section 6 we characterize {\em binomial free divisors\/} by showing that a binomial in $n+2$
variables $x_1,\dots,x_n,y,z$ is a free divisor if and only if it is, up to permutation and scaling of the
variables, of the form
\[
x_1\cdots x_n y^uz^t\left(y^\alpha\prod x_{i}^{a_i} + z^\beta \prod x_{i}^{b_i}\right)
\]
with $\min(a_i,b_i)=0$, $\alpha,\beta>0$, and $0\leqslant u,t\leqslant 1$. In particular, any
reduced binomial is a {\em factor of a free divisor}.
This observation leads us to ask whether any reduced polynomial is a factor
of a free divisor. We discuss this question in Section 7, where we show that the simplest approach
will not work: If $f$ is a smooth form of degree greater than $2$ in more than $2$ variables then
$x_1\cdots x_nf$ is not a free divisor.
In the final Section 8, we point out that homogeneous free divisors {\em extend into the tangent
bundle\/}: along with $f$, the polynomial
\[
f(\tfrac{\partial f}{\partial x_{1}}y_{1}+\cdots +\tfrac{\partial f}{\partial x_{n}}y_{n})
\]
in twice as many variables $x_{1},..., x_{n}; y_{1},..., y_{n}$ is again a free divisor. Moreover, it will
again be {\em linear}, if this holds for $f$.
We want to point out that similar ``extension problems'' for free divisors have been considered by others as well,
especially in \cite{DPi, MS, STo}.
\section{Notation and Generalities}
Let $R$ be the polynomial ring $K[x_1,\dots,x_n]$ or formal power series ring $K[\![x_1,\dots,x_n]\!]$
over a field $K$ of characteristic $0$.
Let $\theta := \theta_{R/K} \cong \oplus_{i=1}^{n}R\partial_{x_{i}}$ denote
the module of vector fields (or $K$-linear derivations) of $R$, with
$\partial_{x_{i}}$ being shorthand for the corresponding partial derivative,
$\partial_{x_{i}}:= \frac{\partial}{\partial x_{i}}$.
For $f\in R$, we further abbreviate $f_{i}:= f_{x_{i}}:=\partial_{x_{i}}f$,
so that the {\em gradient\/} of $f$ with respect to the chosen
variables is given by the vector $\nabla f = (f_{1},\dots,f_{n})$ .
\begin{definition}
For $a = (a_{1},\dots,a_{n})\in K^{n}$, we call the linear vector field $E_a=\sum_{i}a_{i}x_{i}
\partial_{x_{i}}$ the {\em Euler vector field\/} associated to $a$.
It is an {\em Euler vector field for $f$}, if $E_a(f)=\delta f$, for some $\delta\in{\mathbb Z}$.
A vector $w\in {\mathbb Z}^n$ induces naturally a ${\mathbb Z}$-grading on $K[x_1,\dots,x_n]$
by setting $\deg_w x_{i}=w_i$. Accordingly, one can assign to any non-zero polynomial $f$
a degree $\deg_w(f)$, and that polynomial is {\em $w$--homogeneous}, that is, homogeneous with
respect to this grading, if all the nonzero monomials in $f$ are of degree $\deg_w(f)$.
If $f\in R$ is $w$-homogeneous, then $E_w(f)=\deg_w(f)f$.
\end{definition}
The Jacobian
\footnote{Some authors (see e.g.~\cite[p.110]{GLS}) call this the {\em Tjurina ideal\/} to distinguish it
clearly from the ideal generated by just the partial derivatives that describes the {\em critical locus\/}
of the map defined by $f$.}
ideal $J(f)$ of $f$ is, by definition, $(f_1,\dots,f_n)+(f)\subseteq R$.
Note that $J(f)=(f_1,\dots,f_n)$ precisely when there exists a derivation $D\in \theta$ such that
$D(f)=f$. This happens, for example, if $f$ is homogeneous of non-zero degree with respect to
some weight $w\in {\mathbb Z}^n$. It is well known that, in general, $J(f)$ defines the {\em singular locus\/}
of the hypersurface ring $R/(f)$, equivalently, the hypersurface $\{f=0\}$ in affine $n$--space
$\mathbb A^{n}_{K}$.
\begin{definition} A (formal) {\em free divisor\/} is a polynomial (or power series) $f$,
whose Jacobian ideal $J(f)$ is {\em perfect\/}
\footnote{We allow the ideal to be improper, thus, the empty set is perfect of any codimension.
However, the zero ideal is, by convention, not perfect of any codimension, and we always assume $f\neq 0$.}
of codimension $2$ in $R$.
In particular, $f$ is then {\em squarefree}, equivalently, the hypersurface ring $R/(f)$ is
{\em reduced\/}, --- and we then simply also call $f$ {\em reduced} --- and the singular locus
of that hypersurface is a Cohen-Macaulay subscheme of codimension two in $\Spec R$.
\end{definition}
\begin{example}
As simplest examples, any separable polynomial in $K[x]$ defines a free divisor, and so does
any reduced $f\in K[x,y]$.
\end{example}
K.~Saito, who introduced the notion, gave the following important criterion for $f$ to be a free divisor:
\begin{theorem}{\sc (Saito \cite{Sai})}
\label{saito}
Let $f\in R$ be reduced. Then $f$ is a free divisor if and only if there exists a $n\times n$ matrix $A$
with entries in $R$ such that $\det A=f$ and $(\nabla f)A\equiv 0 \bmod (f)$.
\end{theorem}
The matrix $A$ appearing in this criterion is called a {\em discriminant\/} (or {\em Saito\/})
{\em matrix\/} of $f$. If the entries of $A$ can be chosen to be linear polynomials, then $f$ is called a
{\em linear\/} free divisor. Note that $f$ is then necessarily a homogeneous polynomial of degree
$n$. The {\em normal crossing divisor\/} $f = x_{1}\cdots x_{n}$ is a simple example of a linear free
divisor.
\begin{remark}
It follows immediately from this criterion that a free divisor $f\in R$ remains a free divisor in any
polynomial or power series ring over $R$. When viewed as an element of such larger ring, $f$ is
called the {\em suspension\/} of the original free divisor from $R$.
\end{remark}
A different way to state the criterion, and to link it with the definition we chose, is to denote by
$\Der(-\log f)\subseteq \theta$ those vector fields $D$ such that $D(f)\in (f)$, equivalently,
$D(\log f)=D(f)/f$ is a well defined element of $R$. With this notation, one has a short exact sequence
of $R$--modules
\begin{align*}
\xymatrix{
0\ar[r]&\Der(-\log f)\ar[r]&\theta\ar[r]^-{df}&J(f)/(f)\ar[r]&0
}
\end{align*}
and a reduced $f$ is a free divisor if, and only if, $\Der(-\log f)$ is a free $R$--module, necessarily
of rank $n$. A discriminant matrix is then simply the matrix of the inclusion
$\Der(-\log f)\subseteq \theta$, when bases of these free modules are chosen.
Now we turn to our results.
\section{Polynomials Annihilated by Many Euler Vector Fields}
In this section we assume that
\begin{enumerate}[\rm (a)]
\item $f\in R$ is a nonzero squarefree polynomial that belongs to the ideal of its
derivatives, $f\in (f_{1},\dots,f_{n})\subseteq R$.
\item The $K$-vector space of Euler vector fields annihilating $f$ has dimension at least $n-2$.
In other words, there exist $n-2$ linearly independent Euler vector fields
$E_{j}= \sum_{i}a_{ij}x_{i}\partial_{x_{i}}$, for $j=1,\dots,n-2$, such that $E_{j}(f)=0$.
Denote by $A$ the $n\times(n-2)$ scalar matrix $(a_{ij})$ and by $B$ the matrix $(a_{ij}x_{i})$ of the
same size.
\end{enumerate}
Under these assumptions the Jacobian ideal of $f$ is equal to the ideal of its partial derivatives
and has codimension at least two. To show that it defines a Cohen-Macaulay subscheme of
codimension two, it suffices thus to find a {\em Hilbert--Burch matrix\/},
necessarily of size $n\times(n-1)$, for the partial derivatives. By assumption, we have a matrix
equation in $R$ of the form
\[
(\nabla f) B= (0,0,\dots,0)\,.
\]
We need one more syzygy! More precisely (see, for example, \cite[20.4]{Eis}), to get a Hilbert--Burch matrix for
$(f_{1},\dots,f_{n})$, we want a column vector $w:=(w_{1},\dots,w_{n})^{T}$ with entries from $R$,
such that we have an equality of sequences of elements from $R$ of the form
\[
(f_{1},\dots,f_{n}) = I_{n-1}(C)\,,
\]
where $C$ is obtained from $B$ by appending the column vector $w$, and $I_{n-1}$ denotes
the sequence of appropriately signed maximal minors of the indicated $n\times(n-1)$ matrix.
Define an $R$--linear map from $R^{n}$ to $R^{n}$ through
\[
\epsilon(w_{1},\dots,w_{n}) := I_{n-1}( B \mid w)\,,
\]
where $(B\mid w)$ denotes the $n\times(n-1)$--matrix obtained from $B$ by adding the column $w$.
Clearly, $B \circ \epsilon=0$, and the sequence of free (graded) $R$--modules
\begin{align*}
\mathbf{BR}(B) \quad\equiv\quad \left(F_{2} = R^{n}(1-n)
\xrightarrow{\ \partial_{2}=\epsilon\ } F_{1}=R^{n}(-1)
\xrightarrow{\ \partial_{1}=B\ } F_{0}=R^{n-2}\to 0\right)
\end{align*}
is the beginning of the {\em Buchsbaum--Rim complex\/} for the matrix $B$; see, for example,
\cite[Appendix A.2]{Eis}.
By the given setup, the vector $\nabla f \in F_{1}$ is a cycle in this complex, and the required
vector $w$ exists if, and only if, the class of $\nabla f$ is zero in the first homology group
$H_{1}(\mathbf{BR}(B))$ of this Buchsbaum--Rim complex.
Now, if the ideal of the maximal minors of $B$ has the maximal possible codimension,
equal to $n-(n-2)+1=3$, then the entire Buchsbaum--Rim complex is exact and so, in particular,
$H_{1}(\mathbf{BR}(B))=0$.
The minor of $B$ obtained by deleting rows $i$ and $j$ is the monomial
$u_{ij} x_1\dots x_n/x_{i}x_j$, where $u_{ij} $ is the minor of $A$ obtained by deleting the rows
corresponding to $i$ and $j$. The ideal generated by these minors will have maximal codimension
if, and only if, all the maximal minors of $A$ are non-zero.
Summing up, we have the following result.
\begin{proposition} Under the assumptions {\em (a)\/} and {\em (b)}, and with the notation as above,
\label{BuRim}
\begin{enumerate}[\rm (1)]
\item The polynomial $f$ is a free divisor if, and only if, the class of $\nabla f$ in the first homology
$H_{1}(\mathbf{BR}(B))$ of the Buchsbaum--Rim complex associated to $B$ vanishes.
\item If all the maximal minors of $A$ are non-zero, then $f$ is a free divisor.
\end{enumerate}
\end{proposition}
\begin{example}
Consider
\begin{align*}
f = u x^{a} - vx^{b}
\end{align*}
with $u,v\in K$ nonzero and $a,b\in {\mathbb N}^n$ different exponents with $\min(a_{i},b_{i}) \leqslant 1$,
for each $i$, to ensure that $f$ is reduced. The Euler vector field
$\sum_{i=1}^n c_i x_{i}\partial/\partial x_{i}$ then annihilates $f$ if, and only if,
$\sum a_ic_i=0$ and $\sum b_ic_i=0$. Assuming $a_{i}b_{j}-a_{j}b_{i}\neq 0$ for some pair of indices
$i<j$, the space of Euler vector fields annihilating $f$ has dimension $n-2$. The corresponding
$n\times (n-2)$ coefficient matrix $A$ then satisfies $\binom{a}{b}A=0$, where $\binom{a}{b}$ is the
obvious $2\times n$ matrix of scalars. Linear algebra tells us that the maximal minors of $A$ are then,
up to sign and a common non-zero constant, equal to the maximal minors of
$\binom{a}{b}$. By virtue of Proposition \ref{BuRim}(2) we can conclude that if $a_{i}b_{j}-a_{j}b_{i}\neq 0$ for all
pairs $i<j$, then the binomial $f$ is a free divisor.
\end{example}
In Section \ref{binocla} below we will give a complete characterization of homogeneous binomial free
divisors.
In three variables the considerations above lead to a complete characterization of free divisors that are
weighted homogeneous and annihilated by an Euler vector field. To write down the corresponding
Hilbert--Burch matrices in a compact form, the following tool will be useful.
\begin{definition}
Let $d > 0$ be a natural number, $R=K[x_{1},\dots,x_{n}]$ a polynomial ring over a field $K$ of
characteristic zero, and $ y = \{y_{1},\dots,y_{m}\}$ a subset of the variables $x$.
Define a $K$--linear endomorphism $(\deg+d)_{ y}^{-1}$ on $R$ through the following action on
monomials:
\begin{align*}
(\deg+d)_{ y}^{-1}( x^{ e}) := \frac{1}{|e|_{ y}+d} x^{ e}\,,
\end{align*}
where $|e|_{ y} := \sum_{i, x_{i}\in y}e_{i}$ denotes the usual total degree of $ x^{ e}$
with respect to the variables $ y$.
In words, $(\deg+d)_y^{-1}$ has the polynomials that are homogeneous of total degree $a$ in the
variables $ y$ as eigenvectors of eigenvalue $1/(a+d)$. If $y$ is the set of all variables then the
corresponding $K$--linear endomorphism will be simply denoted by $(\deg+d)^{-1}$. \end{definition}
As is well known, the endomorphism just defined can be used to split in characteristic zero
the tautological Koszul complex on the variables. Here we will use the following form.
\begin{lemma}
Let $V=\oplus_{i}Kx_{i}$ be the indicated vector space over $K$ and
$V\cong \oplus_{i}K\xi_{i}, x_{i}\mapsto \xi_{i}$ an isomorphic copy of it.
Let ${\mathbb K}^{\bullet} = {\mathbb S}_{K}V\otimes_{K}\Lambda_{K}V\cong R\otimes_{K} \Lambda^{\bullet}_{K}
(\xi_{1},\dots,\xi_{n})$ be the exterior algebra over $R$ on variables $\xi_{i}$, the graded
$R$--module underlying the usual Koszul complex.
The $R$--linear derivation $\partial := \sum_{i}a_{i}x_{i}\frac{\partial}{\partial \xi_{i}}$ defines
a differential on ${\mathbb K}$ for any choice of $a_{i}\in K$. Let $W\subseteq V$ denote
the subspace generated by those variables $ y$ among the $ x$, for which $a_{i}\neq 0$,
and denote by $\eta_{j}$ the corresponding variables among the $\xi_{i}$ in the isomorphic copy of
$W$.
If $\omega \in {\mathbb K}^{m}$ is a {\em cycle\/} for $\partial$, then the class
of $\omega$ in $H_{m}({\mathbb K}^{\bullet},\partial)$ is zero if, and only if, $\omega = 0$ in
$R/(y)\otimes \Lambda^{m}(V/W)$.
In that case, $\omega':=(\sum_{j}\frac{1}{a_{j}}d\eta_{j}\partial_{y_{j}}) \circ (\deg+d)_{y}^{-1}(\omega)$
provides a boundary, $\partial(\omega') =\omega$. \qed
\end{lemma}
\begin{theorem}
\label{poly3}
Let $K$ be a field of characteristic zero and $f\in K[x,y,z]$ a reduced
polynomial in three variables such that $f$ is contained in the ideal of
its partial derivatives, $f\in (f_{x},f_{y},f_{z})$.
Assume further that there is a triple $(a,b,c)$ of elements of $K$ that are not all zero such that the Euler vector field
$E=ax\frac{\partial }{\partial x}+by\frac{\partial}{\partial y}+cz\frac{\partial}{\partial z}$ satisfies
$E(f)=0$.
We then have the following possibilities, up to renaming the variables:
\begin{enumerate}[\rm(1)]
\item If $abc\neq 0$, then $f$ is a free divisor with Hilbert--Burch matrix
\begin{align*}
(f_{x},f_{y},f_{z}) =
I_{2}\left(\begin{array}{ccc}
\vphantom{\dfrac{1}{2}}ax & & \big(\tfrac{1}{c}- \tfrac{1}{b}\big)(\deg+2)^{-1}(f_{yz})\\
\vphantom{\dfrac{1}{2}}by & &\big(\tfrac{1}{a}- \tfrac{1}{c}\big)(\deg+2)^{-1}(f_{xz}) \\
\vphantom{\dfrac{1}{2}}cz & & \big(\tfrac{1}{b}- \tfrac{1}{a}\big)(\deg+2)^{-1}(f_{xy})
\end{array}\right)
\end{align*}
where $f_{**}$ denotes the corresponding second order derivative of $f$.
\item If $a=0$, but $bc\neq 0$, then $f$ is a free divisor if, and only if, $f_{x}\in (y,z)$.
If that condition is verified and $f_{x}= yg+zh$, then
$f_{y}/cz = -f_{z}/by$ is an element of $R$ and a Hilbert--Burch matrix is given by
\begin{align*}
(f_{x},f_{y},f_{z}) =
I_{2 }\left(\begin{array}{cc}
0 & f_{y}/cz = -f_{z}/by\\
by & -h/c \\
cz & g/b
\end{array}\right)
\end{align*}
\item If $a=b=0$, then $f$ is independent of $z$ and so, as the suspension of a reduced plane curve, is
a free divisor.
\end{enumerate}
\end{theorem}
\begin{proof}
We simply need to verify that the Hilbert--Burch matrix is correct.
One may either use now the preceding lemma, or calculate directly, as we will do.
We just verify that, in case (1), the minor obtained when deleting the first row is correct,
leaving the remaining calculations to the interested reader. It suffices to check the case when
$f=x^{e_{1}}y^{e_{2}}z^{e_{3}}$ is a monomial with $ae_{1}+be_{2}+ce_{3} = 0$ and
$e_{i}\geqslant 0, |e| > 0$. Then,
\begin{align*}
&by(1/b- 1/a)(\deg+2)^{-1}(f_{xy})-cz (1/a- 1/c)(\deg+2)^{-1}(f_{xz}) \\
=\ & by(1/b- 1/a)(\deg+2)^{-1}(e_{1}e_{2}x^{e_{1}-1}y^{e_{2}-1}z^{e_{3}})\\
&\ -
cz (1/a- 1/c)(\deg+2)^{-1}(e_{1}e_{3}x^{e_{1}-1}y^{e_{2}}z^{e_{3}-1})\\
=\ &\frac{ e_{1}e_{2}}{|e|} (1-b/a)x^{e_{1}-1}y^{e_{2}}z^{e_{3}} -
\frac{ e_{1}e_{3}}{|e|} (c/a- 1)x^{e_{1}-1}y^{e_{2}}z^{e_{3}}\\
=\ &f_{x}\left(e_{2}(a-b)-e_{3}(c-a)\right)/a|e|\\
=\ &f_{x}\left((e_{2}+e_{3})a -e_{2}b-e_{3}c\right)/a|e|\\
=\ &f_{x}
\end{align*}
as required.
\end{proof}
To apply this result, we need to detect Euler vector fields annihilating given polynomials, and
the following remark is useful for this purpose.
\begin{remark}
Assume $f$ is a polynomial that is homogeneous with respect to two weights $w,v\in {\mathbb Z}^n$.
For every $a,b\in {\mathbb Z}$, the polynomial $f$ is then homogeneous with respect to $aw+bv$,
of degree $a\deg_w(f)+b\deg_v(f)$. Taking $a=\deg_v(f)$ and $b=-\deg_w(f)$, we conclude
that $f$ is homogeneous of degree $0$ with respect to $\deg_v(f)w-\deg_w(f)v$, and so the
corresponding Euler vector field annihilates $f$. If further some degree $a\deg_w(f)+b\deg_v(f)$
is not zero, then $f$ satisfies the assumption {\rm (a)} from the beginning.
\end{remark}
This remark can be applied as follows.
\begin{example} Set
\[
f(x,y,z)=x^{\gamma_1}y^{\gamma_2}z^{\gamma_3} \prod_{i=1}^k( x^a-\alpha_i y^bz^c)
\]
with $a,b,c,k\in {\mathbb N}\setminus\{0\}$, $\gamma_j\in \{0,1\}$ and $\alpha_i\in K$.
Assume that the $\alpha_i$ are non-zero and distinct so that $f$ is reduced.
Then $f$ is a free divisor if, and only if, not both $\gamma_2$ and $\gamma_3$ equal $0$,
equivalently, $\gamma_{2}+\gamma_{3}>0$.
To prove the statement, take $v=(0,c,-b)$ and $w=(b,a,0)$, so that $f$ becomes homogeneous
with respect to both $v$ and $w$, satisfying
\[
\deg_v(f)=c\gamma_2-b\gamma_3\quad \text{and}\quad
\deg_w(f)=b\gamma_1+a\gamma_2+kab\neq 0\,.
\]
Hence, by the remark above, $f\in (f_{x}, f_{y}, f_{z})$, and the Euler vector
field associated to
\begin{align*}
\deg_v(f)w-\deg_w(f)v &= (c\gamma_2-b\gamma_3)(b,a,0) - (b\gamma_1+a\gamma_2+kab)(0,c,-b)\\
&=-b(-c\gamma_2+b\gamma_3,a\gamma_3+c\gamma_1+kac, -b\gamma_1-a\gamma_2-kab)
\end{align*}
annihilates $f$. Clearly,
the second and the third coordinates of this vector are non-zero, while the first one equals
$b(c\gamma_2-b\gamma_3)$. Now, if $\gamma_2$ or $\gamma_3$ is non-zero, then $f_x\in (y,z)$
and we conclude by Theorem \ref{poly3}, either part (1) or (2), that $f$ is a free divisor.
On the other hand, if $ \gamma_2=\gamma_3=0$ then $f$ contains a pure power of $x$ and so
$f_x\not\in (y,z)$. We may then conclude by Theorem \ref{poly3}(2) that $f$ is not a free divisor.
\end{example}
\begin{remark}
Some isolated members of this family of examples have been identified as free divisors before:
\begin{align*}
f = y(x^{2}-yz)\quad\text{or}\quad f = xy(x^{2}-yz)\,,
\end{align*}
the quadratic cone with, respectively, one or two planes, of which one is tangent, or
\begin{align*}
f = y(x^{2}-y^{2}z)\,,
\end{align*}
the Whitney umbrella with an adjoint plane; see \cite{MS}.
A remarkable feature of this example is that it exhibits free surfaces with arbitrarily many
irreducible components that are not suspended, in that we can, for example, extend the family of
examples involving quadratic cones to
\[
f=x^{\gamma_{1}}y^{\gamma_{2}}z^{\gamma_{3}}\prod_{i=1}^{k}(x^{2}-\alpha_{i}yz)
\]
for $k\geqslant 1, \gamma_{j}\in \{0,1\}$ with $\gamma_{2}+\gamma_{3}\neq 0$ and
scalars $\alpha_{i}\in K$ satisfying $\prod_{i=1}^{k}\alpha_{i}\prod_{i<j}(\alpha_{i}-\alpha_{j})\neq 0$.
Such $f$ will clearly have $\gamma_{1}+\gamma_{2}+\gamma_{3}+k$ many irreducible components,
$1\leqslant \gamma_{1}+\gamma_{2}+\gamma_{3}\leqslant 3$ among them planes.
\begin{figure}
\centering
% NOTE(review): caption text and graphics inclusion appear to have been lost in extraction;
% restore them from the original manuscript.
\caption{A member of the family of free divisors considered above.}
\label{fig:binomial}
\end{figure}
\end{remark}
\section{A Chain Rule for Quasihomogeneous Free Divisors}
We start with a simple observation: if $f\in K[x]=K[x_1,\dots,x_n]$ and $g\in K[y]=K[y_1,\dots,y_m]$
are free divisors then $fg\in K[x,y]$ is a free divisor. To see this, one just takes the
discriminant matrices $A,B$ associated to $f$ and $g$, and notes that the block matrix
\[
\left(\begin{array}{cc}
A & 0 \\
0 & B
\end{array}\right)
\]
is a discriminant matrix for $fg$ that one can think of as the pullback of the planar normal crossing
divisor along the map with components $(f,g)$.
Such free divisors have been called ``product-unions'' by J.~Damon
\cite{Dam} or ``splayed'' divisors by Aluffi and E.~Faber \cite{AFa}.
If $f=f_{1}\cdots f_{k}$ is square free, then a vector field $D$ is logarithmic for $f$ if, and only if,
$D$ is logarithmic for each $f_{i}$, as
\begin{align*}
D(\log f) =\sum_{i}D(\log f_{i}) =\sum_{i}\frac{D(f_{i})}{f_{i}}
\end{align*}
can only be an element of $R$ if that holds for the summands.
We now use these observations to establish a {\em chain rule\/} for free divisors.
In this form, the result and its proof are due to Mond and Schulze \cite[Thm.4.1]{MS}, while
we originally had obtained a weaker result. We include an algebraic version of the
proof, and strengthen their result by removing the hypothesis that no $f_{i}$ be a smooth divisor.
\begin{theorem}
\label{chainrule}
Let $k\geqslant 1$ be an integer, $K$ a field of characteristic zero.
Assume given a free divisor $f=f_{1}\cdots f_{k}\in R = K[x_{1},\dots,x_{n}]$ that admits
vector fields $E_{j}$, for $j=1,\dots,k$, satisfying $E_{j}(f_{i})=\delta_{ij}f_{i}$, where $\delta_{ij}$
is the Kronecker delta.
If $H=y_{1}\cdots y_{k}H_{1}\in Q:=K[y_{1},\dots,y_{k}]$ is a free divisor
such that $f$ and $H_{1}(f_{1},..., f_{k})$ are without common factor, then the polynomial
$\tilde H:= H(f_{1},\dots,f_{k})\in R$ is a free divisor.
\end{theorem}
\begin{proof} Because $f$ is a free divisor, its $R$--module of logarithmic vector fields
$\Der(-\log f)$ is free. It contains the vector fields $E_{i}$, because $E_{i}(f)= f$ by the
product rule.
Further, the $E_{i}$ are linearly independent over $R$, as $0 = \sum_{i=1}^{k}g_{i}E_{i} \in\theta$
implies $0=\sum_{i=1}^{k}g_{i}E_{i}(f_{j}) = g_{j}f_{j}$, and so $g_{j}=0$ for each $j$.
In this way, $\oplus_{i=1}^{k}RE_{i}$ becomes a free submodule of $\Der(-\log f)$.
Now any $D\in \Der(-\log f)$ is logarithmic for each
$f_{i}$ as those elements of $R$ are relatively prime, $f$ being squarefree.
Therefore, $D\mapsto \sum_{i=1}^{k}D(\log f_{i})E_{i}$ provides
an $R$--linear map $\Der(-\log f)\to \oplus_{i=1}^{k}R E_{i}$ that splits the inclusion,
and whose kernel consists of those derivations $D$ that satisfy $D(f_{i})=0$ for each $i$.
Therefore, we can extend the $E_{i}$ to a basis $(E_{1}, ..., E_{k}, D_{1}, ..., D_{n-k})$ of $\Der(-\log f)$
as $R$--module, with $D_{j}(f_{i})=0$ for $i=1,...,k$ and $j=1,...,n-k$.
Let $C$ be the $n\times n$ matrix over $R$ that expresses the just chosen basis of $\Der(-\log f)$
in terms of the partial derivatives $\tfrac{\partial}{\partial x_{j}}$, for $j=1,...,n$, so that
\begin{align*}
(E_{1},..., E_{k}, D_{1}, ...,D_{n-k})= (\tfrac{\partial}{\partial x_{1}},...,\tfrac{\partial}{\partial x_{n}})C\,.
\end{align*}
The matrix $C$ is then a discriminant matrix for $f$, and, in particular, $\det C =f$.
Now we turn to $H\in Q$ and observe that any $D\in\Der_{Q}(-\log H)$, a logarithmic derivation for
$H$ over $Q$, is necessarily of the form $D=\sum_{r=1}^{k}y_{r}b_{r}\frac{\partial}{\partial y_{r}}$
for suitable elements $b_{r}\in Q$, as $H$ contains by assumption $y_{1}\cdots y_{k}$ as a factor,
whence $D(\log y_{r})= b_{r}$ must be in $Q$. In matrix form, a discriminant matrix for $H$ can be
factored as
\begin{align*}
A:= \diag(y_{1},...,y_{k})B\,,
\end{align*}
where the first factor is the diagonal matrix with entries $y_{r}$ and $B=(b_{rs})$ is a
$k\times k$ matrix over $Q$ so that the vector fields $\sum_{r}y_{r}b_{rs} \frac{\partial}{\partial y_{r}}$
form a $Q$--basis of $\Der_{Q}(-\log H)$. Because $\det A=H$ by Saito's criterion in Theorem
\ref{saito}, it follows that $\det B = H_{1}\in Q$.
Next note that the given $f_{i}$ define a substitution homomorphism $Q\to R$ that
sends $y_{i}\mapsto f_{i}$. For any $b\in Q$, we denote $\tilde b=b(f_{1},..., f_{k})$ its image in
$R$. We claim that a derivation $\tilde D := \sum_{r}\tilde b_{r}E_{r}$ is logarithmic for
$\tilde H\in R$, if $D:= \sum_{r}y_{r}b_{r} \frac{\partial}{\partial y_{r}}$ is logarithmic for $H\in Q$. In
fact, the usual chain rule for derivations yields first
\begin{align*}
\tilde D(\tilde H) &= \sum_{r=1}^{k}\tilde b_{r}E_{r}(\tilde H)\\
&= \sum_{r=1}^{k}\tilde b_{r}\sum_{s=1}^{k}\widetilde{\frac{\partial H}{\partial y_{s}}} E_{r}(f_{s})\\
&= \sum_{r=1}^{k}f_{r}\tilde b_{r}\widetilde{\frac{\partial H}{\partial y_{r}}}
\end{align*}
as $E_{r}(f_{s})=\delta_{rs}f_{r}$ by assumption. Now the last term equals $\widetilde{D(H)}$,
the image of $D(H)$ under substitution. Thus, if $D(H)$ is in $(H)\subseteq Q$, its image is in
$(\tilde H)\subseteq R$, and so $\tilde D$ is indeed logarithmic for $\tilde H$.
On the other hand, if $D$ is a derivation on $R$ that vanishes on each $f_{i}$, then applying the
chain rule yet again shows
\begin{align*}
D(\tilde H) &=\sum_{r=1}^{k}\widetilde{\left(\frac{\partial H}{\partial y_{r}}\right)} D(f_{r}) =0\,,
\end{align*}
whence such $D$ is in particular logarithmic for $\tilde H$. Putting everything together,
\begin{align*}
\left(\tfrac{\partial}{\partial x_{1}},...,\tfrac{\partial}{\partial x_{n}}\right)C
\left(\begin{matrix}
\tilde B&0\\
0&I_{n-k}
\end{matrix}
\right)\,,
\end{align*}
with $I_{n-k}$ the identity matrix of indicated size, represents $n$ logarithmic vector fields for
$\tilde H$. Taking determinants, we get
\begin{align*}
\det\left(C
\left(\begin{matrix}
\tilde B&0\\
0&I_{n-k}
\end{matrix}
\right)\right)
= \det C\det \tilde B = \det C\widetilde{\det B} =
f_{1}\cdots f_{k}\widetilde{H_{1}}=\tilde H\,.
\end{align*}
Thus, the proof will be completed by Saito's criterion in Theorem \ref{saito}, once we show that
$\widetilde{H_{1}}$ is squarefree, as by assumption $f$ is already squarefree and relatively
prime to $\widetilde{H_{1}}$. To this end, we use the Jacobi criterion; see, e.g., \cite[30.3]{Mat}.
The rank of the Jacobi matrix
\[
\left( \frac{\partial f_{i}}{\partial x_{j}}\right)_{j=1,...,n}^{i=1,...,k}
\]
is $k$ outside of $\{f=0\}$, as $E_{1}(f_{1})\cdots E_{k}(f_{k})=f$ is in the ideal of maximal minors
of that matrix. Therefore, $R$ is smooth over $Q$ outside of $\{f=0\}$, and the inverse image
$\{\tilde H_{1}=0\}$ of $\{H_{1}=0\}$ remains thus reduced.
\end{proof}
We mention the following special case of Theorem \ref{chainrule} as an example.
\begin{corollary}
\label{corofg} If $f\in K[x]=K[x_1,\dots,x_n]$ and $g\in K[y]=K[y_1,\dots,y_m]$ are
free divisors that are weighted homogeneous, then $fg(f+g)\in K[x,y]$ is a free divisor.
\qed
\end{corollary}
\begin{remark}
\label{faber}
In the original treatment of Theorem \ref{chainrule} in \cite{MS}, the hypothesis that $f$ and
$H_{1}(f_{1},..., f_{k})$ are without common factor is missing.
That hypothesis is, however, necessary, as is shown by the following example that Eleonore Faber
kindly provided.
Take $f_1= (1+u)(x^2-y^3), f_2=(1+v)(y^2-x^3)$, and $f_3=(1+w)(f_1^3+f_2^2)$ in $R=K[x,y,u,v,w]$.
A calculation in {\sc Singular\/} shows readily that $f=f_1 f_2 f_3$ is a free
divisor. The vector fields $E_{1}=(1+u)\partial/\partial u,
E_{2}=(1+v)\partial/\partial v$, and $E_{3}=(1+w)\partial/\partial w$ certainly satisfy $E_{i}(f_{j})=\delta_{ij}f_{i}$.
Now take $H(y_1,y_2,y_3)=y_1y_2y_3(y_1^3+y_2^2)$, a binomial free divisor according to Theorem
\ref{binofree} below, and observe that
\[H(f_1,f_2,f_3)=f_1f_2f_3(f_1^3+f_2^2)=f_1f_2(1+w)(f_1^3+f_2^2)^2
\]
is not reduced, thus, is not a free divisor, as $f$ and $H_{1}(f_{1},f_{2},f_{3})$ have the factor $f_1^3+f_2^2$
in common.
\end{remark}
\section{Triangular Free Divisors}
Let $K$ be a field of characteristic zero. Assume given a ``seed''
$F_{0}\in R:= K[y_{1},\dots,y_{n}]$ and define inductively for $i > 0$ polynomials
\begin{align*}
F_{i} := \alpha_{i}x_{i}^{a_{i}} + \beta_{i}F_{i-1}^{b_{i}}\in Q:= R[x_{1},\dots,x_{i}]
\end{align*}
for natural numbers $a_{i},b_{i} > 0$ and $\alpha_{i},\beta_{i}\in K$ with $\alpha_{i}\neq 0$.
\begin{proposition}
\label{trianfree}
Assume $F_{0}$ is a free divisor in $R$ with discriminant $(n\times n)$--matrix $A$
over $R$. If $F:= F_{i}F_{i-1}\cdots F_{0}$ is reduced, then it is a free divisor over $Q$ with
``triangular'' discriminant matrix of the form
\begin{align*}
B =
\left(\begin{matrix}
A & 0&0&\cdots&0 \\
* & F_{1} & 0 &\cdots&0\\
\vdots & \vdots & \ddots &\ddots&\vdots \\
* & * & * & F_{i-1}&0\\
* & * & * & * & F_i
\end{matrix}\right)
\end{align*}
where the entries marked ``$*$'' represent elements of $Q$ that can be calculated explicitly.
\end{proposition}
\begin{proof}
First observe that the determinant of the displayed matrix certainly equals $F$.
It thus remains to prove that we can choose the columns to represent logarithmic vector fields for $F$.
The proof proceeds by induction on $i\geqslant 0$, the case $i=0$ being true by assumption.
For $i\geqslant 1$, set $G = F/F_{i}$ and assume that the result is correct for $G$.
The last column in $B$ represents the vector field $D=F_{i}{\partial}/{\partial x_{i}}$ and
we show now that it is a logarithmic vector field for $F$, that is, $F$ divides $D(F)$:
\begin{align*}
D(F) &= D(F_{i})G= F_{i}\frac{\partial F_{i}}{\partial x_{i}} G = \left(\frac{\partial F_{i}}{\partial x_{i}}\right) F\,,
\end{align*}
the first equality due to the fact that $G$ is independent of $x_{i}$.
To finish the proof, it suffices now to establish the following:
\begin{lemma}\label{trilemma}
Let $D$ be a logarithmic vector field for $G$ as an element of $R[x_{1},\dots,x_{i-1}]$.
\begin{enumerate}[\rm (1)]
\item $D$ is a logarithmic vector field for each factor $F_{0},\dots,F_{i-1}$ of $G$,
so that $c_{F_{j}}:= D(F_{j})/F_{j}\in R[x_{1},\dots,x_{i-1}]$ for each $j=0,\dots,i-1$.
\item The vector field
\begin{align*}
\tilde D = \frac{b_{i}c_{F_{i-1}}}{a_{i}} x_{i}\frac{\partial}{\partial x_{i}} + D
\end{align*}
is the unique extension of $D$ to a logarithmic vector field for $F$ in $Q$. It satisfies
\begin{align*}
\tilde D(F) = \big((b_{i}+1)c_{F_{i-1}}+\sum_{j=0}^{i-2}c_{F_{j}}\big)F\,.
\end{align*}
\end{enumerate}
\end{lemma}
\begin{proof}
The first part was already pointed out above:
if $D$ is any logarithmic vector field for a product $fg$ of coprime
factors, then it is necessarily a logarithmic vector field for each factor.
Now we turn to the derivation $D$ given in the statement.
Assume there is an extension $\tilde D = u\frac{\partial}{\partial x_{i}} + D$
of $D$ to a logarithmic vector field for $F$. We then get first from the product rule
\begin{align*}
\tilde D(F) &= \tilde D(F_{i})G + F_{i}\tilde D(G)\,,\\
\intertext{and by definition of $\tilde D$ and $F_{i}$ this evaluates to}
&= \left(u\alpha_{i}a_{i}x_{i}^{a_{i}-1} + \beta_{i}b_{i}F_{i-1}^{b_{i}-1}D(F_{i-1})\right)G + F_{i}D(G)\\
\intertext{as $\tilde D(H)=D(H)$ for $H$ equal to either $F_{i-1}$ or $G$,}
&= \left(u\alpha_{i}a_{i}x_{i}^{a_{i}-1} + \beta_{i}b_{i}c_{F_{i-1}}F^{b_{i}}_{i-1}\right)G + c_{G}F_{i}G
\end{align*}
as $D$ is respectively logarithmic for $F_{i-1}$ and for $G$ with the indicated multipliers.
Due to $F = F_{i}G$, we see that $\tilde D(F)$ will be a multiple of $F$ if, and only if,
$F_{i}= \alpha_{i}x_{i}^{a_{i}} + \beta_{i}F_{i-1}^{b_{i}}$ divides $u\alpha_{i}a_{i}x_{i}^{a_{i}-1} + \beta_{i}b_{i}c_{F_{i-1}}F^{b_{i}}_{i-1}$, if, and only if,
\begin{align*}
u = b_{i}c_{F_{i-1}}x_{i}/a_{i}\,,
\end{align*}
and in that case
\begin{align*}
\tilde D(F) = (b_{i}c_{F_{i-1}}+c_{G})F\,.
\end{align*}
It follows that
\begin{align*}
\tilde D := \frac{b_{i}c_{F_{i-1}}}{a_{i}} x_{i}\frac{\partial}{\partial x_{i}} + D
\end{align*}
is the unique extension of $D$ to a logarithmic vector field for $F$ as claimed.
Finally, observe that the multiplier in question is
\begin{align*}
c &:= \frac{\tilde D(F)}{F} = b_{i}c_{F_{i-1}} + c_{G} \\
&= b_{i}c_{F_{i-1}} + \sum_{j=0}^{i-1}c_{F_{j}}\\
&= (b_{i}+1)c_{F_{i-1}} + \sum_{j=0}^{i-2}c_{F_{j}}
\end{align*}
and that finishes the proof.
\end{proof}
To end the proof of Proposition \ref{trianfree}, if the result holds for $i-1$, we extend the column
that represents the logarithmic vector field $D$ for $G=F_{i-1}\cdots F_{0}$ in the
displayed discriminant matrix by adding the corresponding
coefficient $\frac{b_{i}c_{F_{i-1}}}{a_{i}} x_{i}$ of $\partial/\partial x_{i}$ in $\tilde D$ as the entry
in the last row of the discriminant matrix for $F$.
\end{proof}
Note that in Proposition \ref{trianfree} we may take as seed $F_{0}$ any reduced polynomial in two
variables.
\begin{figure}
\centering
% NOTE(review): caption text and graphics inclusion appear to have been lost in extraction;
% restore them from the original manuscript.
\caption{A triangular free divisor as in Proposition \ref{trianfree}.}
\label{fig:triang}
\end{figure}
\begin{example}
\label{sumpow}
Given positive integers $t_{1},..., t_{i}$, for $j=2,\dots,i$, set $G_j=x_{1}^{t_{1}}+\cdots +x_{j}^{t_{j}}$.
Take $F_{0}=G_2$ as a seed and set $a_{j}=t_{j+2}, b_{j}=\alpha_{j}=\beta_{j}=1$ to obtain
$F_{j}=G_{j+2}$ for $j=0,...,i-2$. The resulting product $G =G_2\cdots G_{i}$ of Brieskorn--Pham
polynomials is a free divisor by Proposition \ref{trianfree}.
One can easily calculate the entries of the discriminant matrix.
To illustrate, we treat the case where each exponent is $t=2$, so that
$G_j=x_{1}^{2}+\cdots +x_{j}^{2}$.
The first column can be taken as representing the usual Euler vector field that is the unique
extension of the Euler vector field for $G_{2}$. The second column can be taken to correspond to
the vector field $D = -x_{2}\partial/\partial x_{1} + x_{1}\partial/\partial x_{2}$ that in turn corresponds
to the automorphism interchanging $x_{1}$ and $x_{2}$. As for this $D$ one has $D(G_{2}) = 0$,
Lemma \ref{trilemma} shows that the corresponding matrix entries below the second row will be
zero as well.
Now we indicate how to obtain the entries of columns $3$ through $i$.
Counting from the top, start with $D = G_{j}\partial/\partial x_{j}$, thus, putting $G_{j}$ as the entry
in the $j^{th}$ row as first nonzero entry in column $j\geqslant 3$, and note that
$D(G_{j}) = 2 x_{j}G_{j}$, so that $c_{G_{j}}= 2x_{j}$. By Lemma \ref{trilemma},
the entry below it will be
\begin{align*}
a_{j+1,j} = \frac{b_{j+1}c_{G_{j}}}{a_{j+1}} x_{j+1} =
\frac{c_{G_{j}}}{2} x_{j+1} = x_{j}x_{j+1}
\end{align*}
Now $c_{G_{j+1}}= 2 x_{j}$ again, and induction shows that a relevant discriminant matrix can be taken in the form
\begin{align*}
B =
\left(\begin{array}{cccccc}
x_{1} & -x_{2} & 0 & 0 &\cdots & 0 \\
x_{2} & x_{1} & 0 & 0 &\cdots & 0 \\
x_{3} & 0 & G_{3} & 0 &\cdots &0 \\
x_{4} & 0 & x_{3}x_{4} & G_{4} & \ddots & \vdots\\
\vdots & \vdots & \vdots & \vdots & \ddots & 0 \\
x_{i} & 0 & x_{3}x_{i} & x_{4}x_{i} &\cdots & G_{i}
\end{array}\right)
\end{align*}
\end{example}
\section{Binomial Free Divisors}\label{binocla}
The goal of this section is to investigate binomials $(ux^{a}+ vx^{b})x^{c}$, with $u,v\in K, uv\neq 0$,
and exponent vectors $a,b,c$ with $|a|,|b|\geqslant 1$, $\min(a_{i},b_{i})=0$, that are free divisors.
This forces each entry of $c$ to be in $\{0,1\}$ and we can absorb the constants $u,v$ into the
variables to reduce to the form $F=L(M+N)$, where $L$ is a product of distinct variables and $M,N$
are coprime monomials.
We further assume $R=K[x_1,\dots,x_{n+2}]$, with $K$ as usual a field of characteristic $0$,
and we may suppose that $F$ involves all the variables, as otherwise it is just a suspension of a
divisor that satisfies this requirement.
With these preparations we show the following result.
\begin{theorem}
\label{binofree}
The binomial $F=L(M+N)$ as above is a free divisor if
\begin{enumerate}[\rm (a)]
\item at most one of the variables appearing in $M$ does not appear in $L$, and
\item at most one of the variables appearing in $N$ does not appear in $L$.
\end{enumerate}
Note that if $F$ is required to involve all variables, then these conditions imply $\deg L\geqslant n$.
If $F$ is a {\em homogeneous\/} binomial, that is, $\deg M=\deg N$, then the preceding sufficient conditions are also necessary.
\end{theorem}
\begin{proof} For the first claim, we can write, up to a permutation of the variables and
setting $y=x_{n+1}$ and $z=x_{n+2}$,
\[
F=x_1\cdots x_n y^uz^t G
\]
where
\[
G=x^ay^\alpha+x^b z^\beta
\]
and $a,b\in {\mathbb N}^n$ with $\min(a_i,b_i)=0$, $\alpha,\beta>0$ and $u,t\in \{0,1\}$. Let $V$ be the
$K$-vector space generated by the monomials $x_1\cdots x_n x^{a}y^{u+\alpha}z^t$ and
$x_1\cdots x_n x^{b}y^{u}z^{t+\beta}$ involved in $F$. Obviously, $V$ is $2$-dimensional,
the elements $F, zF_z$ form a basis, and $V$ contains $x_{i}F_{x_{i}}$ for each $i=1,\dots,n+2$.
So we get relations
\begin{align}
&x_{i}F_{x_{i}} +v_i zF_{z}\equiv 0 \bmod (F)\,, \label{eq:diagonal}\\
\intertext{
with some $v_i\in K$, for $i=1,\dots,n$.
Now note that}
&F_y=x_1\cdots x_nz^t(uG+\alpha x^ay^{\alpha-1+u})\\
\intertext{and}
&F_z =x_1\cdots x_ny^u(tG+\beta x^bz^{\beta-1+t})
\end{align}
whence we also get relations
\begin{align}
\beta yF_{y}&+\alpha zF_{z}\equiv 0 \bmod (F) \label{eq:weighted}\\
\intertext{and}
-y^u(tG+\beta x^bz^{\beta-1+t}) F_{y} &+ z^t(uG+\alpha x^ay^{\alpha-1+u}) F_{z}=0\,. \label{eq:exact}
\end{align}
Collecting this information in the $(n+2)\times (n+2)$ matrix
\[
A=\left(\begin{array}{cccccc}
x_1 & 0 & & \dots & 0 & 0 \\
0 & x_2 & 0 & \dots & 0 & 0 \\
\vdots & & & & & \vdots \\
0 & 0 & \dots & x_{n} & 0 & 0 \\
0 & 0 & \dots & 0 & \beta y &\ \ \ -y^u(tG+\beta x^bz^{\beta-1+t}) \\
v_1z& v_2z& \dots & v_nz & \alpha z& \ \ \ z^t(uG+\alpha x^ay^{\alpha-1+u}) \end{array}\right)
\]
it follows from \eqref{eq:diagonal} and \eqref{eq:weighted} that the first $n+1$ entries of $(\nabla F) A$ are congruent to $0$ modulo
$F$, while \eqref{eq:exact} implies that the last entry of $(\nabla F) A$ equals $0$ already in $R$.
Finally, it is straightforward that
\[
\det A=(\beta\alpha+u\beta+t\alpha)F\quad\text{and}\quad \beta\alpha+u\beta+t\alpha \neq 0\,,
\]
whence we conclude from Saito's criterion in Theorem \ref{saito} that $F$ is a free divisor.
Next we show that if $F$ is a {\em homogeneous\/} free divisor then conditions (a), (b) are satisfied.
We argue by contradiction. Suppose that $F$ is a free divisor that involves all variables, but
fails one of the conditions (a) or (b). By symmetry, and after permuting the variables, we may
assume that $F$ is of the form:
\[
F=x^ay^\alpha z^\beta+x^b\,,
\]
where we set $y=x_{n+1}, z=x_{n+2}$ as before, and $a,b\in {\mathbb N}^n, \alpha>0, \beta>0$.
With $J$ again the Jacobian ideal of $F$, note that $(y,z) \subseteq (J:x^a y^{\alpha-1} z^{\beta-1})$.
Since $J$ is perfect of codimension $2$, either $(y,z)$ is a minimal prime of $J$ or
$ x^a y^{\alpha-1} z^{\beta-1}\in J$. In the former case, $F\in J\subset (y,z)$ implies $x^b\in (y,z)$, and
that is impossible. In the latter case,
\[
x^a y^{\alpha-1} z^{\beta-1}\in J \subseteq
(y^{\alpha-1} z^{\beta}, y^{\alpha} z^{\beta-1})+( \partial x^b/\partial x_{i}\ ; i=1,\dots, n)\,,
\]
and so $x^a y^{\alpha-1} z^{\beta-1}$ must be divisible by $\partial x^b/\partial x_{i}$ for some $i$.
This contradicts the homogeneity of $F$.
\end{proof}
\begin{example}
A particular case of Theorem \ref{binofree} has recently been presented independently by Simis and
Tohaneanu \cite[Prop. 2.11]{STo}:
In our notation from the proof above, they take a homogeneous binomial of the form
$G=x^{a}y^{\alpha}+z^{\beta}$, with $\alpha > 0, |a|+\alpha=\beta$, and $a_{i}\neq 0$ for $i=2,...,n$
in $x^{a}=x_{1}^{a_{1}}\cdots x_{n}^{a_{n}}$, so that $G$ is homogeneous of degree $\beta$ and
the only potentially missing variable in the first summand is $x_{1}$. The authors then affirm
that
\begin{align*}
F&= x_{1}\cdots x_{n}(x^{a}y^{\alpha}+z^{\beta})&& \text{and}\\
F &= \frac{x_{1}\cdots x_{n}}{x_{i}}y(x^{a}y^{\alpha}+z^{\beta})&&\text{for some $i=1,...,n$,}
\end{align*}
are homogeneous free divisors. Theorem \ref{binofree} shows that in each case, $zF$ is a
homogeneous free divisor as well.
\end{example}
\section{``Divisors" of Free Divisors}
The results of the previous sections show that:
\begin{enumerate}[\rm (1)]
\item Any reduced homogeneous binomial has a multiple that is a free divisor by Theorem
\ref{binofree}.
\item If $K$ is algebraically closed, then any quadric $Q$ can be put in standard form
$x_1^2+\dots+x_{i}^2$. Hence it has a multiple that is a free divisor by Example \ref{sumpow}.
\item If $f,g$ are free divisors in distinct sets of variables, then $f+g$ divides the free divisor $fg(f+g)$
by Corollary \ref{corofg}.
\end{enumerate}
So we are led to ask:
\begin{question} Let $f$ be a (homogeneous) reduced polynomial. Does there exist a free divisor $g$
such that $f$ divides $g$?
\end{question}
This question is also raised and addressed in \cite{DPi, MS, STo}.
In light of the discussion above, the first case to look at is that of cubics in $3$ variables.
Again, by Example \ref{sumpow}, we know that the Fermat cubic $x^3+y^3+z^3$ divides the free divisor
$(x^3+y^3)(x^3+y^3+z^3)$. So, what about other smooth cubics or smooth hypersurfaces in general?
What we can prove is a negative result: it asserts that a smooth form, in $n>2$ variables of degree
larger than 2, times a product of $n$ linearly independent linear forms is never a free divisor.
\begin{theorem}
\label{divdiv1} Let $f$ be a smooth form of degree $k=\deg f>2$ in $n>2$ variables and
$\ell_1,\ell_2, \dots, \ell_n$ linearly independent linear forms. Set $g=\ell_1\cdots \ell_nf$ and
denote $J(g)\subseteq R=K[x_1,\dots,x_n]$ the Jacobian ideal of $g$. Then one has:
\begin{enumerate}[\quad\rm (1)]
\item $g$ is not a free divisor, instead
\item $\depth R/J(g)\leq \min(\max(0, n-k), n/2) < n-2$.
\end{enumerate}
In particular, if $k\geqslant n$ then $\depth R/J(g)=0$.
\end{theorem}
Since $k>2$ and $n>2$ implies $\max(0, n-k) < n-2$, assertion (1) follows indeed from (2)
as claimed. To prove (2) in Theorem \ref{divdiv1}, we need to set up some notation.
To avoid confusion, $\langle a_1,\dots,a_n \rangle$ will denote the vector with coordinates
$a_i$, while $(a_1,\dots,a_n)$ denotes the ideal or module generated by the $a_i$.
For a form $f$, we set $\hat f_{i}=x_{i}f_{i}+f$, with $f_{i}=\partial f/\partial x_{i} $ as before.
\begin{lemma}
\label{divdiv2}
Let $f$ be a form in $K[x_1,\dots,x_n]$. If $g=x_1\cdots x_nf$ is reduced, then the ideals
$J(g)$ and $(x_{i}f_{i} \ ; i=1,\dots,n)$ of $R$ have the same projective dimension.
In particular, $g$ is a free divisor if, and only if, $(x_{i}f_{i}\ ; i=1,\dots,n)$ is perfect of codimension $2$.
\end{lemma}
\begin{proof} Set $y_i=x_1\cdots x_n/x_{i}$ and note that $g_i=y_i\hat f_{i}$.
If $\langle \alpha_{1},\dots, \alpha_{n} \rangle$ is a syzygy of $\nabla g$, then
$\langle\alpha_1 \hat f_1, \dots, \alpha_{n} \hat f_n\rangle$ is thus a syzygy of
$\langle y_1, \dots, y_n\rangle$.
By the Hilbert--Burch Theorem, the syzygy module of $\langle y_1, \dots, y_n\rangle$ is generated
by $x_1e_1-x_{i}e_i$ with $i=2,\dots,n$, whence there exist polynomials $a_2,\dots,a_n$ such that
\begin{align*}
\alpha_1 \hat f_1&=(a_2+\dots+a_n)x_1&&\text{and}\\
\alpha_i \hat f_{i}&=-a_ix_{i} &&\text{for $i=2,\dots, n$.}
\end{align*}
Since $g$ is squarefree, $x_{i}$ does not divide $f$, whence that variable must divide
$\alpha_i$ for each $i$. In other words, $\alpha_i=x_{i}\beta_i$ for suitable $\beta_{i}\in R$, and
then $\langle \beta_1, \dots, \beta_n\rangle$ is a syzygy of $\langle\hat f_1, \dots, \hat f_n\rangle$.
Therefore, the $R$-linear map $\psi: R^n\to R^n$ sending $e_i$ to $x_{i}e_i$ induces an isomorphism
between the syzygy module of $\langle\hat f_1, \dots, \hat f_n\rangle$ and the syzygy module of
$\nabla g$.
Because $f$ is homogeneous, one has the Euler relation
$f=\tfrac{1}{k}\sum_{i}x_{i}f_{i}$, whence
\begin{align*}
(\hat f_{i}; i=1,...,n) \subseteq (x_{i} f_{i}; i=1,...,n)\,.
\end{align*}
Using the Euler relation once more, one obtains as well
$\sum_{i=1}^{n}\hat f_{i}= (\deg f +n)f$, thus, $f\in (\hat f_{i}; i=1,...,n)$, and then also
\begin{align*}
(x_{i} f_{i}; i=1,...,n)\subseteq (\hat f_{i}; i=1,...,n)\,.
\end{align*}
Accordingly, these ideals agree.
It follows that the first syzygy module of the ideal $J(g)$ and that of the ideal $(x_1f_1, \dots, x_nf_n)$
differ only by a free summand --- whose rank is in fact the $K$-dimension of the vector space of
Euler vector fields annihilating $f$. So the statement follows.
\end{proof}
\begin{example}
\label{exdiv}
Let us illustrate the preceding result.
\begin{enumerate}[\rm(a)]
\item Consider $f=\sum_{i=1}^k u_iM_i$ with $0\neq u_i\in K$, with $M_i$ pairwise coprime
monomials of same degree, and set $g=x_1\dots x_n f$. Then $\depth R/J(g)=n-k$,
because here the ideal $(x_{i}f_{i})_{i=1,\dots, n}$ is the complete intersection ideal $(M_1,\dots,M_k)$.
\item Let $f$ be the {\em Cayley form\/} in $n$ variables, the elementary symmetric polynomial of
degree $n-1$, that can be written
\[
f = x_1\cdots x_n (x_1^{-1}+\dots+x_n^{-1})\,,
\]
and consider $g=x_1\cdots x_nf$.
Denoting $J_k$ the ideal generated by all square-free monomials of degree $k$, it is well known
that $J_k$ is perfect of codimension $n-k+1$. The radical of the Jacobian ideal of $f$ is easily seen
to be $J_{n-2}$. So $f$ is irreducible and, for $n\geqslant 3$, singular with singular locus of
codimension $3$.
On the other hand, one checks that $( x_{i}f_{i} ; i=1,\dots,n)=J_{n-1}$ and Lemma \ref{divdiv2} therefore
verifies that $g$ is a free divisor, as was also observed in \cite{MS}, where further a discriminant
matrix is given.
\item For a given form $f$, smooth and in generic coordinates, the elements $(x_{i}f_{i})_{i}$ tend
to form a regular sequence. In that case, the resolution of the first syzygy module of $J(g)$ is
thus given by the corresponding tail of the Koszul complex on $(x_{i}f_{i})_{i}$, shifted in degree, and
therefore $R/(x_{i}f_{i})_{i}$ embeds as the nonzero Artinian submodule
$H^{0}_{(x_{i}; i=1,...,n)}(R/J(g))$ into $R/J(g)$, forcing
$\depth R/J(g)=0$. As a concrete example, take a Fermat hypersurface $f=\sum_{i=1}^{n}x_{i}^{k}$,
with $k\geqslant 1, n\geqslant 3$.
\item
\label{divdiv4}
For a subset $A$ of $\{1,\dots,n\}$, set $x_A = \Pi _{i\in A} x_{i}$. With notation as
in Lemma \ref{divdiv2}, one obviously has
\[
(f_{i}\ ; i\in A) \subseteq (x_{i}f_{i}\ ; i=1,\dots,n):(x_A)\,.
\]
Accordingly, either $x_A \in (x_{i}f_{i} )_{i}$ or the projective
dimension of $R/(x_{i}f_{i})_{i}$ is at least the codimension of $R/(f_{i}\ ; i\in A)$.
In particular, if $\deg f>n$, then no such monomial is in
$(x_{i}f_{i})_{i}$, and we see again that $\depth R/J(g)=0$.
\end{enumerate}
\end{example}
The last example leads to the following result.
\begin{proposition}
\label{divdiv7} Assume $f\in R=K[x_1,\dots,x_n]$ with $n>2$ is smooth of degree $k>2$, and let
$\ell_1,\dots,\ell_n$ be linearly independent linear forms. With $g=\ell_1\cdots \ell_n f$ one then has
\[
\depth R/J(g)\leqslant \max( 0,n-k)\,.
\]
\end{proposition}
\begin{proof} Changing coordinates we may assume $\ell_i=x_{i}$. Set $v=\min(k,n)$. In view of
Example \ref{exdiv}(\ref{divdiv4}) to Lemma \ref{divdiv2}, it is enough to show that
$x_1\cdots x_v \not\in (x_1f_1,\dots,x_nf_n)$. If $k>n$ this is obvious. If $k\leq n$, then $v=k$, and
we argue as follows. Suppose by contradiction that
\begin{align}
\label{dis*}
\tag{$*$}
x_1\cdots x_k=\sum_i \lambda_i x_{i} f_{i}
\end{align}
with $\lambda_i\in K$. Let $x_1^{\alpha_1}\cdots x_n^{\alpha_n}$ be a monomial in the support of
$f$ that is different from $x_1\cdots x_k$. From (\ref{dis*}) it follows that
$\sum_{i=1}^n \lambda_i \alpha_i=0$. If we show that the support of $f$ contains at least $n$
monomials different from $x_1\cdots x_k$ whose exponents are linearly independent, we can
conclude that $\lambda_i=0$ for all $i$, thus, contradicting (\ref{dis*}).
Since $f$ is smooth, for each $i$ there exists some $j=j(i)$, such that the
monomial $x_{i}^{k-1}x_{j}$ is in the support of $f$.
We claim that the exponents of $x_{i}^{k-1}x_{j(i)}$, for $i=1,\dots,n$, are indeed linearly independent.
To prove this, consider the linear map $h\colon {\mathbb C}^n \to {\mathbb C}^n$ defined as $h(e_i)=e_{j(i)}$. Any
such map is easily seen to satisfy $(h^{n!}-1)h^n=0$, whence the eigenvalues of $h$ are either $0$
or roots of unity. In particular, no integer $m$ with $|m|>1$ is a root of the characteristic polynomial
$\det(-tI+h)$ of $h$. Therefore we have that $\det(-tI+h)\neq 0$ at $t=-k+1$, and this proves the claim.
\end{proof}
As for a last ingredient, note the following.
\begin{lemma}
\label{divdiv8} If $f\in R=K[x_1,\dots,x_n]$ is smooth, then the codimension of $(x_{i}f_{i})_{i=1,...,n}$
is at least $n/2$.
\end{lemma}
\begin{proof} Let $P$ be a minimal prime of $I=(x_{i}f_{i})_{i=1,\dots, n}$ in $R$. If $c$ is the
number of variables $x_{i}$ contained in $P$, then that prime ideal contains at least $n-c$ of the
$f_{i}$. Hence $P$ contains two regular sequences: one of length $c$ and the other of length $n-c$.
So the codimension of $I$ is at least $n/2$.
\end{proof}
The
{\em Proof of Theorem\/} \ref{divdiv1} is now obtained by combining Lemma \ref{divdiv2},
Proposition \ref{divdiv7}, and Lemma \ref{divdiv8}.
\qed
\begin{remark}
As far as we know, in Example \ref{exdiv}(\ref{divdiv4}), it might be even true that for {\em any\/}
smooth $f$ in {\em any\/} system of coordinates, $x_1\cdots x_n \not\in (x_1f_1,\dots,x_nf_n)$,
so that then, in particular, always $\depth R/J(g)=0$.
However, for a smooth $f$, the ideal $(x_{i}f_{i})_{i=1,\dots, n}$ can be of codimension
$n/2$, but, of course, only for $n$ even. For example,
\[
f=(x_1^{k-1}+x_2^{k-1})x_2+(x_3^{k-1}+x_4^{k-1})x_4
\]
is smooth and the codimension of $(x_{i}f_{i})_{i=1,\dots, 4}$ is $2$. Nevertheless,
in this case $R/(x_{i}f_{i})_{i=1,\dots, n}$ still has depth $0$ since
$x_1x_2x_3x_4\not\in (x_{i}f_{i})_{i=1,\dots, n}$.
\end{remark}
\section{Extending Free Divisors into the Tangent Bundle}
Let $R=K[x_1,\dots,x_n]$ as before, and set $R'=R[y_1,\dots,y_n]$. Define a map $^*:R\to R'$ by
$$f^*=\sum_{i=1}^n y_i \partial f/\partial x_i$$
for every $f\in R$. Clearly $^*$ is a $K$-linear derivation. For a matrix $C=(c_{ij})$ with entries in
$R$ we set $C^*=(c_{ij}^*)$.
\begin{theorem}
\label{jets}
Let $f\in R$ be a homogeneous free divisor of degree $k>0$. Then $ff^*$ is a free divisor in $R'$, in
$2n$ variables
and of total degree $2k$, that is linear if $f$ is so.
\end{theorem}
\begin{proof} First note that $ff^*$ is reduced because $f^*$ is irreducible. By contradiction, if $f^*$
were reducible then, since $f^*$ is homogeneous of degree $1$ in the $y$'s, the partial derivatives
of $f$ had a non-trivial common factor contradicting the fact that $f$ is reduced.
Secondly we identify a discriminant matrix for $ff^*$. Since $f$ is homogeneous, a discriminant
matrix for $f$ can be constructed as follows. Because $J(f)$ is a perfect ideal of codimension $2$,
we can find a Hilbert-Burch matrix $B=(b_{ij})$ for $J(f)$, of size $n\times (n-1)$, such that the
$(n-1)$-minor of $B$ obtained by removing the $i$-th row is
$(-1)^{i+1}\partial f/\partial x_i$.
Adjoining $x^T=(x_1,\dots,x_n)^T$ as a column to the matrix $B$, we obtain the matrix
$$
A =(B\mid x^T)
$$
that is by construction a discriminant matrix for $f$. We now claim that the following $2n\times 2n$
block matrix
$$
A'= \left(\begin{array}{ccccc}
B & x^T &\vline & 0 & 0 \\
\hline
\vphantom{y^{T^{1}}}B^*& 0 &\vline & B & y^T
\end{array}\right)
$$
is a discriminant matrix for $ff^*$.
Its determinant is clearly $ff^*$ by definition of $A, B$ and $f^{*}$.
The product rule yields
\begin{align*}
\nabla(ff^*)&=f^*(\nabla_x(f),0)+f(\nabla_x(f^*), \nabla_x(f))\,,
\intertext{and hence}
\nabla(ff^*)A'&=f^*(\nabla_x(f),0)A'+f(\nabla_x(f^*),\nabla_x(f))A'\,.
\end{align*}
Now $(\nabla_x(f),0)A'=(\nabla_x(f)A,0)\equiv 0 \bmod (f)$, and so it remains to show that
\begin{align}
\label{disff*}
\tag{$\dagger$}
(\nabla_x(f^*),\nabla_x(f))A'\equiv 0 \bmod (f^*)\,.
\end{align}
Expanding returns the vector
\begin{align*}
(\nabla_x(f^*),\nabla_x(f))A'=(\nabla_x(f^*)B+\nabla_x(f)B^*, \nabla_x(f^*)x^T, \nabla_x(f)B,
\nabla_x(f)y^T)\,.
\end{align*}
Concerning its first part, note that $\nabla_x(f^*)=\nabla_x(f)^*$, whence
\begin{align*}
\nabla_x(f^*)B+\nabla_x(f)B^* &= (\nabla_x(f)B)^*&&\text{because $^*$ is a derivation,}\\
&=0^{*}=0&& \text{as $\nabla_x(f)B=0$ by construction.}
\intertext{Regarding the second component,}
\nabla_x(f^*)x^T &=(k-1)f^{*}\equiv 0 \bmod(f^*)\,,
\intertext{because $f^*$ is homogeneous of degree $k-1$ with respect to the variables $x$.
Finally,}
\nabla_x(f)B&=0&&
\text{by choice of $B$, and}\\
\nabla_x(f)y^T&=f^*&&\text{by definition.}
\end{align*}
Therefore, (\ref{disff*}) holds and $ff^{*}$ is confirmed as a free divisor. The assertions on degree
and number of variables are obvious from the construction.
A free divisor is linear if all entries in a discriminant matrix are linear, and this property
is clearly inherited by $A'$ from $A$.
\end{proof}
\begin{remark}
The geometric interpretation of the hypersurface defined by $ff^{*}$ is as follows.
Viewing $f\in R$ as the function $f\colon \Spec R=\mathbb A_{K}^{n}\to \mathbb A_{K}^{1}=\Spec K[t]$,
its differential fits into the exact Zariski--Jacobi sequence of K\"ahler differential forms
\[
\xymatrix{
0&\ar[l] \Omega^{1}_{K[t]/R}&\ar[l]\Omega^{1}_{R/K}\cong \oplus_{i}Rdx_{i}&&
\ar[ll]_-{df\partial/\partial t}\Omega^{1}_{K[t]/K}\otimes_{K[t]}R\cong Rdt}
\]
and one may interpret $R'\cong \Sym_{R}\Omega^{1}_{R}$ as the ring of
regular functions on the tangent bundle $T_{X} \cong \Spec R'\cong \mathbb A^{2n}_{K}$ over
$X=\Spec R\cong \mathbb A^{n}_{K}$.
This identifies $R'/(f^{*})$ with the regular functions on the total space of the affine relative tangent
``subbundle'' $T_{X/S}\subseteq T_{X}$, the kernel of the Jacobian map $df:T_{X}\to T_{S}$ that
consists of the vector fields vertical with respect to (the fibres of) $f$ over the affine line $S=\Spec K[t]$.
Accordingly, the hypersurface $H$ defined by $ff^{*}$ is the union of that affine ``bundle'' with
$\Spec R'/(f)$, the restriction of the total tangent bundle $T_{X}$ to
$\Spec R/(f)$, in turn the fibre over $0$ of the function $f$. Equivalently, $\Spec R'/(f)$ is the
suspended free divisor obtained as the inverse image of $\Spec R/(f)$ along the structure morphism
$p: T_{X}\to X$. Thus, $H=T_{X/S}\cup \Spec R'/(f)= df^{-1}(0)\cup(fp)^{-1}(0)\subseteq T_{X}$.
\begin{align*}
\xymatrix{
&\ar@{^{(}->}[dl]T_{X/S}\ar@{_{(}->}[rd]\ar[rr]&&\{0\}\ar@{_{(}->}[rd]\\
\ \hphantom{H}H\ar@{^{(}->}[rr]&&T_{X}\ar[rr]^-{df}\ar[rd]&&T_{S}\ar[rd]\\
&\ar@{_{(}->}[ul](fp)^{-1}(0)\ar[rd] \ar@{^{(}->}[ru]&&X\ar[rr]^-{f}&&S\\%\Spec R'/(f){=}
&&f^{-1}(0)\ar@{^{(}->}[ru]\ar[rr]&&\{0\}\ar@{^{(}->}[ru]
}
\end{align*}
Interesting examples are hard to visualize as they will live in four or more dimensions. However, the
intersection of the two (unions of) components, $T_{X/S}\cap \Spec R'/(f)\subseteq \Sing H$ is easy to
understand: Geometrically, over $X$ it fibres into the union of the hyperplanes perpendicular to
$\nabla f(x)$ for some $x\in X$ on $\{f=0\}$, that is,
\begin{align*}
T_{X/S}\cap \Spec R'/(f) =
\bigcup_{x, f(x)=0}\left\{(x,y)\in \mathbb A^{n}\times\mathbb A^{n}\mid \nabla f(x)y=0\right\}\,.
\end{align*}
\end{remark}
\begin{example}
Applying Theorem \ref{jets} to the normal crossing divisor $x_1\cdots x_n$ we find that
$$(x_1\cdots x_n)^2\sum_{i=1}^n \frac{y_i}{x_i}$$
is a linear free divisor.
\end{example}
\begin{remarks}
\label{remff*}
Various generalizations are possible:
\begin{enumerate}[\rm (1)]
\item
\label{remff*1}
Given a homogeneous free divisor $f$ in a polynomial ring of dimension $n$,
one can iterate the use of Theorem \ref{jets} to get an infinite family
$\{ F_i\}_{i\in {\mathbb N}}$ of homogeneous free divisors, defined by $F_0=f$ and $F_{i+1}=F_iF_i^*$,
where $^{*}$ is, of course, to be understood relative to the polynomial ring containing $F_{i}$.
By construction, $F_i$ belongs to a polynomial ring of dimension $2^in$, its degree equals
$(i+1)\deg f$, and it is a linear free divisor if, and only if, $f$ is linear.
Taking $F_{0}=x$ as a seed, we obtain the sequence of linear free divisors
\begin{gather*}
x\,, xy\,, xy(xz_{1}+yz_{2})\,, \\
xy(xz_{1}+yz_{2})(2xyz_{1}u_{1}+y^{2}z_{2}u_{1}+ x^{2}z_{1}u_{2}+2xyz_{2}u_{2}
+x^{2}yu_{3}+xy^{2}u_{4})\,,...
\end{gather*}
in $K[x,y,z_{1}, z_{2}, u_{1},..., u_{4},...]$.
\item
\label{remff*2}
Theorem \ref{jets} holds also for free divisors that are weighted homogeneous of degree $d\neq 0$
with respect to some weight vector $w=(w_1,\dots,w_n)\in {\mathbb Z}^n$. In the proof one simply replaces
the column vector $x^T$ in the discriminant matrix with $(w_1x_1,\dots,w_nx_n)^T$.
Again, linearity is preserved.
\end{enumerate}
\end{remarks}
One can further generalize Theorem \ref{jets}, as well as Remark \ref{remff*}(\ref{remff*1}), also
as follows, incorporating right away the weighted homogeneous version as in
Remark \ref{remff*}(\ref{remff*2}).
\begin{theorem}
With notation as before, assume $f$ weighted homogeneous of degree $d\neq 0$
with respect to some weight vector $w=(w_1,\dots,w_n)\in {\mathbb Z}^n$.
With $m\geqslant 1$, let $R'=R[y_{ij} : 1\leq i\leq n, 1\leq j\leq m]$, assign weights $|y_{ij}|=w_{i}$,
and set $f^{\{*_{j}\} }=\sum_i y_{ij}\partial f/\partial x_i$. Then $f\prod_{j=1}^m f^{\{*_{j}\}}$ is a
free divisor in $(m+1)n$ variables of weighted homogeneous degree $(m+1)d$ that will be
linear along with $f$.
\end{theorem}
\begin{proof}
The proof is a simple variation of the one given for $m=1$. For instance, if $m=2$, the discriminant
matrix can be taken as
$$
\left(\begin{array}{ccccccccc}
\vphantom{B^{T^{\{1\}}}}B & wx^T &\vline & 0 & 0 &\vline & 0 & 0 \\
\hline
\vphantom{B^{T^{\{1\}}}}B^{\{*_{1}\}}& 0 &\vline & B & wy_1^T &\vline & 0 & 0 \\
\hline
\vphantom{B^{T^{\{1\}}}}B^{\{*_{2}\}}& 0 &\vline & 0 & 0&\vline & B & wy_2^T
\end{array}\right)
$$
where $wx=(w_1x_1,\dots,w_nx_n)$, with $wy_{1}, wy_{2}$ analogous abbreviations.
\end{proof}
In this way, one may obtain any normal crossing divisor $x_{0}\cdots x_{m}$, starting from
$f=x_{0}$ and using $f^{\{*_{j}\} }= x_{j}\partial f/\partial x_{0}=x_{j}$ for $j=1,...,m$.
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
We produce the family of Calabi-Yau hypersurfaces $X_{n}$ of $(\mathbb P^{1})^{n+1}$ in higher dimension whose inertia group contains non-commutative free groups.
This is completely different from Takahashi's result \cite{ta98} for Calabi-Yau hypersurfaces $M_{n}$ of $\mathbb P^{n+1}$.
\end{abstract}
\section{Introduction}
Throughout this paper, we work over $\mathbb C$.
Given an algebraic variety $X$, it is natural to consider its birational automorphisms $\varphi \colon X \dashrightarrow X$.
The set of these birational automorphisms forms a group $\operatorname{Bir}(X)$ with respect to the composition.
When $X$ is a projective space $\mathbb P^{n}$ or equivalently an $n$-dimensional rational variety, this group is called the Cremona group.
In the higher-dimensional case ($n \geq 3$), though many elements of the Cremona group have been described, its whole structure is little known.
Let $V$ be an $(n+1)$-dimensional smooth projective rational manifold.
In this paper, we treat subgroups called the ``inertia group" (defined below \eqref{inertia}) of some hypersurface $X \subset V$ originated in \cite{gi94}.
It consists of those elements of the Cremona group that act on $X$ as identity.
In Section \ref{cyn}, we mention the result (Theorem \ref{tak}) of Takahashi \cite{ta98} about the smooth Calabi-Yau hypersurfaces $M_{n}$ of $\mathbb P^{n+1}$ of degree $n+2$ (that is, $M_{n}$ is a hypersurface such that it is simply connected, there is no holomorphic $k$-form on $M_{n}$ for $0<k<n$, and there is a nowhere vanishing holomorphic $n$-form $\omega_{M_{n}}$).
It turns out that the inertia group of $M_{n}$ is trivial (Theorem \ref{intro2}).
Takahashi's result (Theorem \ref{tak}) is proved by using the ``Noether-Fano inequality".
It is the useful result that tells us when two Mori fiber spaces are isomorphic.
Theorem \ref{intro2} is a direct consequence of Takahashi's result.
In Section \ref{cy1n}, we consider Calabi-Yau hypersurfaces
\[
X_{n} = (2, 2, \ldots , 2) \subset (\mathbb P^{1})^{n+1}.
\]
Let
\[
\operatorname{UC}(N) \coloneqq \overbrace{\mathbb Z/2\mathbb Z * \mathbb Z/2\mathbb Z * \cdots * \mathbb Z/2\mathbb Z}^{N} = \bigast_{i=1}^{N}\langle t_{i}\rangle
\]
be the \textit{universal Coxeter group} of rank $N$ where $\mathbb Z/2\mathbb Z$ is the cyclic group of order 2.
There is no non-trivial relation between its $N$ natural generators $t_{i}$.
Let
\[
p_{i} \colon X_{n} \to (\mathbb P^{1})^{n}\ \ \ (i=1, \ldots , n+1)
\]
be the natural projections which are obtained by forgetting the $i$-th factor of $(\mathbb P^{1})^{n+1}$.
Then, the $n+1$ projections $p_{i}$ are generically finite morphisms of degree 2.
Thus, for each index $i$, there is a birational transformation
\[
\iota_{i} \colon X_{n} \dashrightarrow X_{n}
\]
that permutes the two points of general fibers of $p_{i}$ and this provides a group homomorphism
\[
\Phi \colon \operatorname{UC}(n+1) \to \operatorname{Bir}(X_{n}).
\]
From now, we set $P(n+1) \coloneqq (\mathbb P^{1})^{n+1}$.
Cantat-Oguiso proved the following theorem in \cite{co11}.
\begin{thm}$($\cite[Theorem 1.3 (2)]{co11}$)$\label{iota}
Let $X_{n}$ be a generic hypersurface of multidegree $(2,2,\ldots,2)$ in $P(n+1)$ with $n \geq 3$.
Then the morphism $\Phi$ that maps each generator $t_{j}$ of $\operatorname{UC}(n+1)$ to the involution $\iota_{j}$ of $X_{n}$ is an isomorphism from $\operatorname{UC}(n+1)$ to $\operatorname{Bir}(X_{n})$.
\end{thm}
Here ``generic'' means $X_{n}$ belongs to the complement of some countable union of proper closed subvarieties of the complete linear system $\big| (2, 2, \ldots , 2)\big|$.
Let $X \subset V$ be a projective variety.
The \textit{decomposition group} of $X$ is the group
\begin{align*}
\operatorname{Dec}(V, X) \coloneqq \{f \in \operatorname{Bir}(V)\ |\ f(X) =X \text{ and } f|_{X} \in \operatorname{Bir}(X) \}.
\end{align*}
The \textit{inertia group} of $X$ is the group
\begin{align}\label{inertia}
\operatorname{Ine}(V, X) \coloneqq \{f \in \operatorname{Dec}(V, X)\ |\ f|_{X} = \operatorname{id}_{X}\}.
\end{align}
Then it is natural to consider the following question:
\begin{que}\label{qu}
Is the sequence
\begin{align}\label{se}
1 \longrightarrow \operatorname{Ine}(V, X) \longrightarrow \operatorname{Dec}(V, X) \overset{\gamma}{\longrightarrow} \operatorname{Bir}(X) \longrightarrow 1
\end{align}
exact, i.e., is $\gamma$ surjective?
\end{que}
Note that, in general, this sequence is not exact, i.e., $\gamma$ is not surjective (see Remark \ref{k3}).
When the sequence \eqref{se} is exact, the group $\operatorname{Ine}(V, X)$ measures how many ways one can extend $\operatorname{Bir}(X)$ to the birational automorphisms of the ambient space $V$.
Our main result is the following theorem, answering a question asked by Ludmil Katzarkov:
\begin{thm}\label{intro}
Let $X_{n} \subset P(n+1)$ be a smooth hypersurface of multidegree $(2, 2, \ldots, 2)$ and $n \geq 3$. Then:
\begin{itemize}
\item[(1)] $\gamma \colon \operatorname{Dec}(P(n+1), X_{n}) \to \operatorname{Bir}(X_{n})$ is surjective, in particular Question $\ref{qu}$ is affirmative for $X_{n}$.
\item[(2)] If, in addition, $X_{n}$ is generic, there are $n+1$ elements $\rho_{i}$ $(1 \leq i \leq n+1)$ of $\operatorname{Ine}(P(n+1), X_{n})$ such that
\[
\langle \rho_{1}, \rho_{2}, \ldots , \rho_{n+1} \rangle \simeq \underbrace{\mathbb Z * \mathbb Z * \cdots * \mathbb Z}_{n+1} \subset \operatorname{Ine}(P(n+1), X_{n}).
\]
In particular, $\operatorname{Ine}(P(n+1), X_{n})$ is an infinite non-commutative group.
\end{itemize}
\end{thm}
Our proof of Theorem \ref{intro} is based on an explicit computation of elementary flavour.
We also consider another type of Calabi-Yau manifolds, namely smooth hypersurfaces of degree $n+2$ in $\mathbb P^{n+1}$ and obtain the following result:
\begin{thm}\label{intro2}
Suppose $n \geq 3$. Let $M_{n} = (n+2) \subset \mathbb P^{n+1}$ be a smooth hypersurface of degree $n+2$.
Then Question $\ref{qu}$ is also affirmative for $M_{n}$.
More precisely:
\begin{itemize}
\item[(1)] $\operatorname{Dec}(\mathbb P^{n+1}, M_{n}) = \{ f \in \operatorname{PGL}(n+2, \mathbb C) = \operatorname{Aut}(\mathbb P^{n+1})\ |\ f(M_{n}) = M_{n}\}$.
\item[(2)] $\operatorname{Ine}(\mathbb P^{n+1}, M_{n}) = \{\operatorname{id}_{\mathbb P^{n+1}}\}$, and $\gamma \colon \operatorname{Dec}(\mathbb P^{n+1}, M_{n}) \overset{\simeq}{\longrightarrow} \operatorname{Bir}(M_{n}) = \operatorname{Aut}(M_{n})$.
\end{itemize}
\end{thm}
It is interesting that the inertia groups of $X_{n} \subset P(n+1) = (\mathbb P^{1})^{n+1}$ and $M_{n} \subset \mathbb P^{n+1}$ have completely different structures though both $X_{n}$ and $M_{n}$ are Calabi-Yau hypersurfaces in rational Fano manifolds.
\begin{rem}\label{k3}
There is a smooth quartic $K3$ surface $M_{2} \subset \mathbb P^{3}$ such that $\gamma$ is not surjective (see \cite[Theorem 1.2 (2)]{og13}).
In particular, Theorem \ref{intro2} is not true for $n = 2$.
\end{rem}
\section{Preliminaries}
In this section, we prepare some definitions and properties of birational geometry and introduce the Cremona group.
\subsection{Divisors and singularities}
Let $X$ be a projective variety. A \textit{prime divisor} on $X$ is an irreducible subvariety of codimension one, and a \textit{divisor} (resp. \textit{$\mathbb Q$-divisor} or \textit{$\mathbb R$-divisor}) on $X$ is a formal linear combination $D = \sum d_{i}D_{i}$ of prime divisors where $d_{i} \in \mathbb Z$ (resp. $\mathbb Q$ or $\mathbb R$).
A divisor $D$ is called \textit{effective} if $d_{i} \geq 0$ for every $i$; in this case we write $D \geq 0$.
The closed set $\bigcup_{i}D_{i}$ of the union of prime divisors is called the \textit{support} of $D$ and is denoted $\operatorname{Supp}(D)$. A $\mathbb Q$-divisor $D$ is called \textit{$\mathbb Q$-Cartier} if, for some $0 \neq m \in \mathbb Z$, $mD$ is a Cartier divisor (i.e. a divisor whose divisorial sheaf $\mathcal O_{X}(mD)$ is an invertible sheaf), and $X$ is called $\mathbb Q$-\textit{factorial} if every divisor is $\mathbb Q$-Cartier.
Note that, since a regular local ring is a unique factorization domain, every divisor on a smooth variety is automatically a Cartier divisor.
Let $f \colon X \dashrightarrow Y$ be a birational map between normal projective varieties, $D$ a prime divisor, and $U$ the domain of definition of $f$; that is, the maximal subset of $X$ such that there exists a morphism $f \colon U \to Y$.
Then $\operatorname{codim}(X\setminus U) \geq 2$ and $D \cap U \neq \emptyset$, the image $(f|_{U})(D \cap U)$ is a locally closed subvariety of $Y$.
If the closure of that image is a prime divisor of $Y$, we call it the \textit{strict transform} of $D$ (also called the \textit{proper transform} or \textit{birational transform}) and denote it by $f_{*}D$.
We define $f_{*}D = 0$ if the codimension of the image $(f|_{U})(D \cap U)$ is $\geq$ 2 in $Y$.
We can also define the strict transform $f_{*}Z$ for subvariety $Z$ of large codimension; if $Z \cap U \neq \emptyset$ and dimension of the image $(f|_{U})(Z \cap U)$ is equal to $\dim Z$, then we define $f_{*}Z$ as the closure of that image, otherwise $f_{*}Z$ = 0.
Let $(X, D)$ be a \textit{log pair}, that is, a pair of a normal projective variety $X$ and an $\mathbb R$-divisor $D \geq 0$. For a log pair $(X, D)$, it is more natural to consider a \textit{log canonical divisor} $K_{X} + D$ instead of a canonical divisor $K_{X}$.
A projective birational morphism $g \colon Y \to X$ is a \textit{log resolution} of the pair $(X, D)$ if $Y$ is smooth, $\operatorname{Exc}(g)$ is a divisor, and $g_{*}^{-1}(D) \cup \operatorname{Exc}(g)$ has simple normal crossing support (i.e. each components is a smooth divisor and all components meet transversely) where $\operatorname{Exc}(g)$ is an exceptional set of $g$, and a divisor $over$ $X$ is a divisor $E$ on some smooth variety $Y$ endowed with a proper birational morphism $g \colon Y \to X$.
If we write
\[
K_{Y} + \Gamma + \sum E_{i} = g^{*}(K_{X}+D) + \sum a_{E_{i}}(X, D)E_{i},
\]
where $\Gamma$ is the strict transform of $D$ and $E_{i}$ runs through all prime exceptional divisors, then the numbers $a_{E_{i}}(X, D)$ are called the \textit{discrepancies of $(X, D)$ along $E_{i}$}.
The \textit{discrepancy of} $(X, D)$ is given by
\[
\operatorname{discrep}(X, D) \coloneqq \inf\{ a_{E_{i}}(X, D)\ |\ E_{i} \text{ is a prime exceptional divisor over } X\}.
\]
The discrepancy $a_{E_{i}}(X, D)$ along $E_{i}$ is independent of the choice of birational maps $g$ and only depends on $E_{i}$.
Let us denote $\operatorname{discrep}(X, D) = a_{E}$.
A pair $(X, D)$ is \textit{log canonical} (resp. \textit{Kawamata log terminal} ($klt$)) if $a_{E} \geq 0$ (resp. $a_{E} > 0$).
A pair $(X, D)$ is \textit{canonical} (resp. \textit{terminal}) if $a_{E} \geq 1$ (resp. $a_{E} > 1$).
\subsection{Cremona groups}
Let $n$ be a positive integer.
The \textit{Cremona group} $\operatorname{Cr}(n)$ is the group of automorphisms of $\mathbb C(X_{1}, \ldots, X_{n})$, the $\mathbb C$-algebra of rational functions in $n$ independent variables.
Given $n$ rational functions $F_{i} \in \mathbb C(X_{1}, \ldots, X_{n})$, $1 \leq i \leq n$, there is a unique endomorphism of this algebra that maps $X_{i}$ onto $F_{i}$, and this is an automorphism if and only if the rational transformation $f$ defined by $f(X_{1}, \ldots, X_{n}) = (F_{1}, \ldots, F_{n})$ is a birational transformation of the affine space $\mathbb A^{n}$.
Compactifying $\mathbb A^{n}$, we get
\[
\operatorname{Cr}(n) = \operatorname{Bir}(\mathbb A^{n}) = \operatorname{Bir}(\mathbb P^{n})
\]
where Bir$(X)$ denotes the group of all birational transformations of $X$.
In the end of this section, we define two subgroups in $\operatorname{Cr}(n)$ introduced by Gizatullin \cite{gi94}.
\begin{dfn}
Let $V$ be an $(n+1)$-dimensional smooth projective rational manifold and $X \subset V$ a projective variety.
The \textit{decomposition group} of $X$ is the group
\[
\operatorname{Dec}(V, X) \coloneqq \{f \in \operatorname{Bir}(V)\ |\ f(X) =X \text{ and } f|_{X} \in \operatorname{Bir}(X) \}.
\]
The \textit{inertia group} of $X$ is the group
\[
\operatorname{Ine}(V, X) \coloneqq \{f \in \operatorname{Dec}(V, X)\ |\ f|_{X} = \operatorname{id}_{X}\}.
\]
\end{dfn}
The decomposition group is also denoted by Bir$(V, X)$.
By the definition, the correspondence
\[
\gamma \colon \operatorname{Dec}(V, X) \ni f \mapsto f|_{X} \in \operatorname{Bir}(X)
\]
defines the exact sequence:
\begin{align}\label{seq}
1 \longrightarrow \operatorname{Ine}(V, X) = \ker \gamma \longrightarrow \operatorname{Dec}(V, X) \overset{\gamma}{\longrightarrow} \operatorname{Bir}(X).
\end{align}
So, it is natural to consider the following question (which is the same as Question \ref{qu}) asked by Ludmil Katzarkov:
\begin{que}\label{qexact}
Is the sequence
\begin{align}\label{exact}
1 \longrightarrow \operatorname{Ine}(V, X) \longrightarrow \operatorname{Dec}(V, X) \overset{\gamma}{\longrightarrow} \operatorname{Bir}(X) \longrightarrow 1
\end{align}
exact, i.e., is $\gamma$ surjective?
\end{que}
\begin{rem}
In general, the above sequence \eqref{exact} is not exact, i.e., $\gamma$ is not surjective.
In fact, there is a smooth quartic $K3$ surface $M_{2} \subset \mathbb P^{3}$ such that $\gamma$ is not surjective (\cite[Theorem 1.2 (2)]{og13}).
\end{rem}
\section{Calabi-Yau hypersurface in $\mathbb P^{n+1}$}\label{cyn}
Our goal, in this section, is to prove Theorem \ref{intro2} (i.e. Theorem \ref{ta}).
Before that, we introduce the result of Takahashi \cite{ta98}.
\begin{dfn}
Let $X$ be a normal $\mathbb Q$-factorial projective variety.
A \textit{1-cycle} is a formal linear combination $C = \sum a_{i}C_{i}$ of proper curves $C_{i} \subset X$ which are irreducible and reduced.
By the theorem of the base of N\'eron--Severi (see \cite{kl66}), the set of numerical equivalence classes of 1-cycles with real coefficients forms a finite-dimensional $\mathbb R$-vector space, denoted $N_{1}(X)$.
The dimension of $N_{1}(X)$, or of its dual $N^{1}(X)$ with respect to the intersection form, is called the \textit{Picard number} and is denoted $\rho(X)$.
\end{dfn}
\begin{thm}$($\cite[Theorem 2.3]{ta98}$)$\label{tak}
Let $X$ be a Fano manifold $($i.e. a manifold whose anti-canonical divisor $-K_{X}$ is ample,$)$ with $\dim X \geq 3$ and $\rho(X) = 1$, $S \in |-K_{X}|$ a smooth hypersurface with $\operatorname{Pic}(X) \to \operatorname{Pic}(S)$ surjective.
Let $\Phi \colon X \dashrightarrow X'$ be a birational map to a $\mathbb Q$-factorial terminal variety $X'$ with $\rho(X') = 1$ which is not an isomorphism, and $S' = \Phi_{*}S$.
Then $K_{X'} + S'$ is ample.
\end{thm}
This theorem is proved by using the \textit{Noether-Fano inequality} which is one of the most important tools in birational geometry, which gives a precise bound on the singularities of indeterminacies of a birational map and some conditions when it becomes isomorphism.
This inequality is essentially due to \cite{im71}, and Corti proved the general case of an arbitrary Mori fiber space of dimension three \cite{co95}.
It was extended in all dimensions in \cite{ta95}, \cite{bm97}, \cite{is01}, and \cite{df02}, (see also \cite{ma02}).
In particular, a log generalized version obtained independently in \cite{bm97}, \cite{ta95} is used for the proof of Theorem \ref{tak}.
From now on, we consider an $n$-dimensional \textit{Calabi-Yau manifold} $X$ in this paper.
It is a projective manifold which is simply connected,
\[
H^{0}(X, \Omega_{X}^{i}) = 0\ \ \ (0<i<\dim X = n),\ \ \textrm{and \ } H^{0}(X, \Omega_{X}^{n}) = \mathbb C \omega_{X},
\]
where $\omega_{X}$ is a nowhere vanishing holomorphic $n$-form.
The following theorem is a consequence of Theorem \ref{tak}, and is the same as Theorem \ref{intro2}.
This provides an example of the Calabi-Yau hypersurface $M_{n}$ whose inertia group consists of only identity transformation.
\begin{thm}\label{ta}
Suppose $n \geq 3$. Let $M_{n} = (n+2) \subset \mathbb P^{n+1}$ be a smooth hypersurface of degree $n+2$.
Then $M_{n}$ is a Calabi-Yau manifold of dimension $n$ and Question $\ref{qexact}$ is affirmative for $M_{n}$.
More precisely:
\begin{itemize}
\item[(1)] $\operatorname{Dec}(\mathbb P^{n+1}, M_{n}) = \{ f \in \operatorname{PGL}(n+2, \mathbb C) = \operatorname{Aut}(\mathbb P^{n+1})\ |\ f(M_{n}) = M_{n}\}$.
\item[(2)] $\operatorname{Ine}(\mathbb P^{n+1}, M_{n}) = \{\operatorname{id}_{\mathbb P^{n+1}}\}$, and $\gamma \colon \operatorname{Dec}(\mathbb P^{n+1}, M_{n}) \overset{\simeq}{\longrightarrow} \operatorname{Bir}(M_{n}) = \operatorname{Aut}(M_{n})$.
\end{itemize}
\end{thm}
\begin{proof}
By Lefschetz hyperplane section theorem for $n \geq 3$, $\pi_{1}(M_{n}) \simeq \pi_{1}(\mathbb P^{n+1}) = \{\operatorname{id}\}$, $\operatorname{Pic}(M_{n}) = \mathbb Z h$ where $h$ is the hyperplane class.
By the adjunction formula,
\[
K_{M_{n}} = (K_{\mathbb P^{n+1}} + M_{n})|_{M_{n}} = -(n+2)h + (n+2)h = 0
\]
in Pic$(M_{n})$.
By the exact sequence
\[
0 \longrightarrow \mathcal O_{\mathbb P^{n+1}}(-(n+2)) \longrightarrow \mathcal O_{\mathbb P^{n+1}} \longrightarrow \mathcal O_{M_{n}} \longrightarrow 0
\]
and
\[
h^{k}(\mathcal O_{\mathbb P^{n+1}}(-(n+2))) = 0\ \ \text{for}\ \ 1 \leq k \leq n,
\]
\[
H^{k}(\mathcal O_{M_{n}}) \simeq H^{k}(\mathcal O_{\mathbb P^{n+1}}) = 0\ \ \text{for}\ \ 1 \leq k \leq n-1.
\]
Hence $H^{0}(\Omega^{k}_{M_{n}}) = 0$ for $1 \leq k \leq n-1$ by the Hodge symmetry.
Hence $M_{n}$ is a Calabi-Yau manifold of dimension $n$.
By $\operatorname{Pic}(M_{n}) = \mathbb Z h$, there is no small projective contraction of $M_{n}$, in particular, $M_{n}$ has no flop.
Thus by Kawamata \cite{ka08}, we get $\operatorname{Bir}(M_{n}) = \operatorname{Aut}(M_{n})$, and $g^{*}h = h$ for $g \in \operatorname{Aut}(M_{n}) = \operatorname{Bir}(M_{n})$.
So we have $g = \tilde{g}|_{M_{n}}$ for some $\tilde{g} \in \operatorname{PGL}(n+2, \mathbb C)$.
Assume that $f \in \operatorname{Dec}(\mathbb P^{n+1}, M_{n})$.
Then $f_{*}(M_{n}) = M_{n}$ and $K_{\mathbb P^{n+1}} + M_{n} = 0$.
Thus by Theorem \ref{tak}, $f \in \operatorname{Aut}(\mathbb P^{n+1}) = \operatorname{PGL}(n+2, \mathbb C)$.
This proves (1) and the surjectivity of $\gamma$.
Let $f|_{M_{n}} = \operatorname{id}_{M_{n}}$ for $f \in \operatorname{Dec}(\mathbb P^{n+1}, M_{n})$.
Since $f \in \operatorname{PGL}(n+2, \mathbb C)$ by (1) and $M_{n}$ generates $\mathbb P^{n+1}$, i.e., the projective hull of $M_{n}$ is $\mathbb P^{n+1}$, it follows that $f = \operatorname{id}_{\mathbb P^{n+1}}$ if $f|_{M_{n}} = \operatorname{id}_{M_{n}}$.
Hence $\operatorname{Ine}(\mathbb P^{n+1}, M_{n}) = \{\operatorname{id}_{\mathbb P^{n+1}}\}$, i.e., $\gamma$ is injective.
So, $\gamma \colon \operatorname{Dec}(\mathbb P^{n+1}, M_{n}) \overset{\simeq}{\longrightarrow} \operatorname{Bir}(M_{n}) = \operatorname{Aut}(M_{n})$.
\end{proof}
\section{Calabi-Yau hypersurface in $(\mathbb P^{1})^{n+1}$}\label{cy1n}
As in above section, the Calabi-Yau hypersurface $M_{n}$ of $\mathbb P^{n+1}$ with $n \geq 3$ has only identical transformation as the element of its inertia group.
However, there exist some Calabi-Yau hypersurfaces in the product of copies of $\mathbb P^{1}$ which do not satisfy this property, as our result (Theorem \ref{main}) shows.
To simplify, we denote
\begin{align*}
P(n+1) &\coloneqq (\mathbb P^{1})^{n+1} = \mathbb P^{1}_{1} \times \mathbb P^{1}_{2} \times \cdots \times \mathbb P^{1}_{n+1},\\
P(n+1)_{i} &\coloneqq \mathbb P^{1}_{1} \times \cdots \times \mathbb P^{1}_{i-1} \times \mathbb P^{1}_{i+1} \times \cdots \times \mathbb P^{1}_{n+1} \simeq P(n),
\end{align*}
and
\begin{align*}
p^{i} \colon P(n+1) &\to \mathbb P^{1}_{i} \simeq \mathbb P^{1},\\
p_{i} \colon P(n+1) &\to P(n+1)_{i}
\end{align*}
as the natural projection.
Let $H_{i}$ be the divisor class of $(p^{i})^{*}(\mathcal O_{\mathbb P^{1}}(1))$, then $P(n+1)$ is a Fano manifold of dimension $n+1$ and its canonical divisor has the form $\displaystyle{-K_{P(n+1)} = \sum^{n+1}_{i=1}2H_{i}}$.
Therefore, by the adjunction formula, the generic hypersurface $X_{n} \subset P(n+1)$ has trivial canonical divisor if and only if it has multidegree $(2, 2, \ldots, 2)$.
More strongly, for $n \geq 3$, $X_{n} = (2, 2, \ldots, 2)$ becomes a Calabi-Yau manifold of dimension $n$ and, for $n=2$, a $K3$ surface (i.e. 2-dimensional Calabi-Yau manifold).
This is shown by the same method as in the proof of Theorem \ref{ta}.
From now, $X_{n}$ is a generic hypersurface of $P(n+1)$ of multidegree $(2, 2, \ldots , 2)$ with $n \geq 3$.
Let us write $P(n+1) = \mathbb P^{1}_{i} \times P(n+1)_{i}$.
Let $[x_{i1} : x_{i2}]$ be the homogeneous coordinates of $\mathbb P^{1}_{i}$.
Hereafter, we consider the affine locus and denote by $\displaystyle x_{i} = \frac{x_{i2}}{x_{i1}}$ the affine coordinates of $\mathbb P^{1}_{i}$ and by ${\bf z}_{i}$ that of $P(n+1)_{i}$.
When we pay attention to $x_{i}$, $X_{n}$ can be written by the following equation
\begin{align}\label{xn}
X_{n} = \{ F_{i,0}({\bf z}_{i})x_{i}^{2} + F_{i,1}({\bf z}_{i})x_{i} + F_{i,2}({\bf z}_{i}) = 0 \}
\end{align}
where each $F_{i,j}({\bf z}_{i})$ $(j = 0, 1, 2)$ is a quadratic polynomial of ${\bf z}_{i}$.
Now, we consider the two involutions of $P(n+1)$:
\begin{align}
\tau_{i} \colon (x_{i}, {\bf z}_{i}) &\to \left(-x_{i}- \frac{F_{i,1}({\bf z}_{i})}{F_{i,0}({\bf z}_{i})}, {\bf z}_{i} \right)\label{tau}\\
\sigma_{i} \colon (x_{i}, {\bf z}_{i}) &\to \left(\frac{F_{i,2}({\bf z}_{i})}{x_{i} \cdot F_{i,0}({\bf z}_{i})}, {\bf z}_{i} \right).\label{sigma}
\end{align}
Then $\tau_{i}|_{X_{n}} = \sigma_{i}|_{X_{n}} = \iota_{i}$ by definition of $\iota_{i}$ (cf. Theorem \ref{iota}).
We get two birational automorphisms of $X_{n}$
\begin{align*}
\rho_{i} = \sigma_{i} \circ \tau_{i} \colon (x_{i}, {\bf z}_{i}) &\to \left( \frac{F_{i,2}({\bf z}_{i})}{-x_{i} \cdot F_{i,0}({\bf z}_{i}) - F_{i,1}({\bf z}_{i})}, \ {\bf z}_{i} \right)\\
\rho'_{i} = \tau_{i} \circ \sigma_{i} \colon (x_{i}, {\bf z}_{i}) &\to \left( -\frac{x_{i} \cdot F_{i,1}({\bf z}_{i}) + F_{i,2}({\bf z}_{i})}{x_{i}\cdot F_{i,0}({\bf z}_{i})}, \ {\bf z}_{i} \right).
\end{align*}
Obviously, both $\rho_{i}$ and $\rho'_{i}$ are in Ine$(P(n+1), X_{n})$, map points not in $X_{n}$ to other points also not in $X_{n}$, and $\rho_{i}^{-1} = \rho'_{i}$ by $\tau_{i}^{2} = \sigma_{i}^{2} = \operatorname{id}_{P(n+1)}$.
\begin{prp}\label{order}
Each $\rho_{i}$ has infinite order.
\end{prp}
\begin{proof}
By the definition of $\rho_{i}$ and $\rho'_{i} = \rho_{i}^{-1}$, it suffices to show
\begin{align*}
{\begin{pmatrix}
0 & F_{i,2}\\
-F_{i,0} & -F_{i,1}
\end{pmatrix}}^{k}
\neq \alpha I
\end{align*}
for any $k \in \mathbb Z \setminus \{0\}$ where $I$ is an identity matrix and $\alpha \in \mathbb C^{\times}$.
The eigenvalues of this matrix are
\[
\frac{-F_{i,1} \pm \sqrt{F_{i,1}^{2} - 4F_{i,0}F_{i,2}}}{2}.
\]
Here $F_{i,1}^{2} - 4F_{i,0}F_{i,2} \neq 0$ as $X_{n}$ is general (for all $i$).
If $\begin{pmatrix}
0 & F_{i,2}\\
-F_{i,0} & -F_{i,1}
\end{pmatrix}^{k}
= \alpha I$
for some $k \in \mathbb Z \setminus \{0\}$ and $\alpha \in \mathbb C^{\times}$, then
\[
\left(\frac{-F_{i,1} + \sqrt{F_{i,1}^{2} - 4F_{i,0}F_{i,2}}}{-F_{i,1} - \sqrt{F_{i,1}^{2} - 4F_{i,0}F_{i,2}}}\right)^{k} = 1,
\]
a contradiction to the assumption that $X_{n}$ is generic.
\end{proof}
We remark that Proposition \ref{order} is also implicitly proved in the proof of Theorem \ref{main}.
Our main result is the following (which is same as Theorem \ref{intro}):
\begin{thm}\label{main}
Let $X_{n} \subset P(n+1)$ be a smooth hypersurface of multidegree $(2, 2, \ldots, 2)$ and $n \geq 3$. Then:
\begin{itemize}
\item[(1)] $\gamma \colon \operatorname{Dec}(P(n+1), X_{n}) \to \operatorname{Bir}(X_{n})$ is surjective, in particular Question $\ref{qexact}$ is affirmative for $X_{n}$.
\item[(2)] If, in addition, $X_{n}$ is generic, $n+1$ elements $\rho_{i} \in \operatorname{Ine}(P(n+1), X_{n})$ $(1 \leq i \leq n+1)$ satisfy
\[
\langle \rho_{1}, \rho_{2}, \ldots , \rho_{n+1} \rangle \simeq \underbrace{\mathbb Z * \mathbb Z * \cdots * \mathbb Z}_{n+1} \subset \operatorname{Ine}(P(n+1), X_{n}).
\]
In particular, $\operatorname{Ine}(P(n+1), X_{n})$ is an infinite non-commutative group.
\end{itemize}
\end{thm}
Let $\operatorname{Ind}(\rho)$ be the union of the indeterminacy loci of each $\rho_{i}$ and $\rho^{-1}_{i}$; that is, $\displaystyle \operatorname{Ind}(\rho) = \bigcup_{i=1}^{n+1}\big(\operatorname{Ind}(\rho_{i}) \cup \operatorname{Ind}(\rho^{-1}_{i})\big)$ where $\operatorname{Ind}(\rho_{i})$ is the indeterminacy locus of $\rho_{i}$.
Clearly, $\operatorname{Ind}(\rho)$ has codimension $\geq 2$ in $P(n+1)$.
\begin{proof}
Let us show Theorem \ref{main} (1).
Suppose $X_{n}$ is generic.
For a general point $x \in P(n+1)_{i}$, the set $p_{i}^{-1}(x)$ consists of two points.
Writing these two points as $y$ and $y'$, the correspondence $y \leftrightarrow y'$ defines a natural birational involution of $X_{n}$, and this is the involution $\iota_{i}$.
Then, by Cantat-Oguiso's result \cite[Theorem 3.3 (4)]{co11}, $\operatorname{Bir}(X_{n})$ $(n\geq 3)$ coincides with the group $\langle \iota_{1}, \iota_{2}, \ldots , \iota_{n+1} \rangle \simeq \underbrace{\mathbb Z/2\mathbb Z * \mathbb Z/2\mathbb Z * \cdots * \mathbb Z/2\mathbb Z}_{n+1}$.
Two involutions $\tau_{j}$ and $\sigma_{j}$ of $X_{n}$ which we construct in \eqref{tau} and \eqref{sigma} are the extensions of the covering involutions $\iota_{j}$.
Hence, $\tau_{j}|_{X_{n}} = \sigma_{j}|_{X_{n}} = \iota_{j}$.
Thus $\gamma$ is surjective.
Since automorphisms of $X_{n}$ come from those of the total space $P(n+1)$, the claim also holds in the case where $X_{n}$ is not generic.
This completes the proof of Theorem \ref{main} (1).
Then, we show Theorem \ref{main} (2).
By Proposition \ref{order}, the order of each $\rho_{i}$ is infinite.
Thus it is sufficient to show that there is no non-trivial relation between its $n + 1$ elements $\rho_{i}$.
We argue by contradiction.
Suppose to the contrary that there is a non-trivial relation between the $n+1$ elements $\rho_{i}$, that is, a relation of the form
\begin{align}\label{rho}
\rho_{i_{1}}^{n_{1}} \circ \rho_{i_{2}}^{n_{2}} \circ \cdots \circ \rho_{i_{l}}^{n_{l}} = \operatorname{id}_{P(n+1)}
\end{align}
where $l$ is a positive integer, $n_{k} \in \mathbb Z\setminus\{0\}$ $(1\leq k \leq l)$, and each $\rho_{i_{k}}$ denotes one of the $n + 1$ elements $\rho_{i}$ $(1 \leq i \leq n+1)$ and satisfies $\rho_{i_{k}} \neq \rho_{i_{k+1}}$ $(0 \leq k \leq l-1)$.
Put $N = |n_{1}| + \cdots + |n_{l}|$.
In the affine coordinates $(x_{i_{1}}, {\bf z}_{i_{1}})$, where $x_{i_{1}}$ is the affine coordinate of the $i_{1}$-th factor $\mathbb P^{1}_{i_{1}}$, we can choose two distinct points $(\alpha_{1}, {\bf z}_{i_{1}})$ and $(\alpha_{2}, {\bf z}_{i_{1}})$, $\alpha_{1} \neq \alpha_{2}$, which are contained in neither $X_{n}$ nor $\operatorname{Ind}(\rho)$.
By a suitable projective linear coordinate change of $\mathbb P^{1}_{i_{1}}$, we can set $\alpha_{1} = 0$ and $\alpha_{2} = \infty$.
When we pay attention to the $i_{1}$-th element $x_{i_{1}}$ of the new coordinates, we put same letters $F_{i_{1},j}({\bf z}_{i_{1}})$ for the definitional equation of $X_{n}$, that is, $X_{n}$ can be written by
\[
X_{n} = \{ F_{i_{1},0}({\bf z}_{i_{1}})x_{i_{1}}^{2} + F_{i_{1},1}({\bf z}_{i_{1}})x_{i_{1}} + F_{i_{1},2}({\bf z}_{i_{1}}) = 0 \}.
\]
Here the two points $(0, {\bf z}_{i_{1}})$ and $(\infty, {\bf z}_{i_{1}})$ are not contained in $X_{n} \cup \operatorname{Ind}(\rho)$.
From the assumption, both two equalities hold:
\begin{numcases}
{}
\rho_{i_{1}}^{n_{1}} \circ \cdots \circ \rho_{i_{l}}^{n_{l}}(0, {\bf z}_{i_{1}}) = (0, {\bf z}_{i_{1}}) & \\
\rho_{i_{1}}^{n_{1}} \circ \cdots \circ \rho_{i_{l}}^{n_{l}}(\infty, {\bf z}_{i_{1}}) = (\infty, {\bf z}_{i_{1}}).\label{infty}
\end{numcases}
We proceed by dividing into the following two cases.
{\flushleft
(i). The case where $n_{1} > 0$.
Write $\rho_{i_{1}} \circ \rho_{i_{1}}^{n_{1}-1} \circ \rho_{i_{2}}^{n_{2}} \circ \cdots \circ \rho_{i_{l}}^{n_{l}} = \operatorname{id}_{P(n+1)}$.
}
Let us denote $\rho_{i_{1}}^{n_{1}-1} \circ \cdots \circ \rho_{i_{l}}^{n_{l}}(0, {\bf z}_{i_{1}}) = (p, {\bf z}_{i_{1}}')$, then, by the definition of $\rho_{i_{1}}$, it maps $p$ to $0$.
That is, the equation $F_{i_{1},2}({\bf z}'_{i_{1}}) = 0$ is satisfied.
On the other hand, the intersection of $X_{n}$ and the hyperplane $(x_{i_{1}}=0)$ is written by
\[
X_{n} \cap (x_{i_{1}}=0) = \{F_{i_{1},2}({\bf z}_{i_{1}}) = 0\}.
\]
This implies $(0, {\bf z}'_{i_{1}}) = \rho_{i_{1}}(p, {\bf z}'_{i_{1}}) = (0, {\bf z}_{i_{1}})$ is a point on $X_{n}$, a contradiction to the fact that $(0, {\bf z}_{i_{1}}) \notin X_{n}$.
{\flushleft
(ii). The case where $n_{1} < 0$.
Write $\rho^{-1}_{i_{1}} \circ \rho_{i_{1}}^{n_{1}+1} \circ \rho_{i_{2}}^{n_{2}} \circ \cdots \circ \rho_{i_{l}}^{n_{l}} = \operatorname{id}_{P(n+1)}$.
}
By using the assumption \eqref{infty}, we lead the contradiction by the same way as in (i).
Precisely, we argue as follows.
Let us write $\displaystyle x_{i_{1}} = \frac{1}{y_{i_{1}}}$, then $(x_{i_{1}} = \infty, {\bf z}_{i_{1}}) = (y_{i_{1}} = 0, {\bf z}_{i_{1}})$ and $X_{n}$ and $\rho^{-1}_{i_{1}}$ can be written by
\[
X_{n} \coloneqq \{F_{i_{1},0}({\bf z}_{i_{1}}) + F_{i_{1},1}({\bf z}_{i_{1}})y_{i_{1}} + F_{i_{1},2}({\bf z}_{i_{1}})y_{i_{1}}^{2} = 0\},
\]
\[
\rho^{-1}_{i_{1}} \colon (y_{i_{1}}, {\bf z}_{i_{1}}) \to \left(\ -\frac{F_{i_{1},0}({\bf z}_{i_{1}})}{F_{i_{1},1}({\bf z}_{i_{1}}) + y_{i_{1}}\cdot F_{i_{1},2}({\bf z}_{i_{1}})},\ {\bf z}_{i_{1}} \right).
\]
Let us denote $ \rho_{i_{1}}^{n_{1}+1} \circ \rho_{i_{2}}^{n_{2}} \circ \cdots \circ \rho_{i_{l}}^{n_{l}} (y_{i_{1}} =0, {\bf z}_{i_{1}}) = (y_{i_{1}} = q, {\bf z}_{i_{1}}'')$, then $\rho^{-1}_{i_{1}}$ maps $q$ to $0$.
That is, the equation $F_{i_{1},0}({\bf z}''_{i_{1}}) = 0$ is satisfied, but the intersection of $X_{n}$ and the hyperplane $(y_{i_{1}} = 0)$ is written by
\[
X_{n}\cap (y_{i_{1}} = 0) = \{F_{i_{1},0}({\bf z}_{i_{1}}) = 0\}.
\]
This implies $(y_{i_{1}}=0, {\bf z}''_{i_{1}}) = \rho^{-1}_{i_{1}}(y_{i_{1}} = q, {\bf z}_{i_{1}}'') = (x_{i_{1}}=\infty, {\bf z}_{i_{1}})$ is a point on $X_{n}$; that is, $(x_{i_{1}}=\infty, {\bf z}_{i_{1}}) \in X_{n} \cap (x_{i_{1}}=\infty)$.
This is a contradiction.
\par
From (i) and (ii), we conclude that no such non-trivial relation exists.
This completes the proof of Theorem \ref{main} (2).
\end{proof}
Note that, for the cases $n = 2$ and $1$, Theorem \ref{main} (2) also holds though (1) does not hold.
{\bf Acknowledgements: } The authors would like to express their sincere gratitude to their supervisor Professor Keiji Oguiso who suggested this subject and has given much encouragement and invaluable and helpful advice.
\end{document} |
\begin{document}
\begin{abstract}
Let $G$ be a connected reductive algebraic group over a field of positive characteristic $p$ and denote by $\mathcal T$ the category of tilting modules for $G$. The higher Jones algebras are the endomorphism algebras of objects in the fusion quotient category of $\mathcal T$. We determine the simple modules and their dimensions for these semisimple algebras as well as their quantized analogues. This provides a general approach for determining various classes of simple modules for many well-studied algebras such as group algebras for symmetric groups, Brauer algebras, Temperley--Lieb algebras, Hecke algebras and $BMW$-algebras. We treat each of these cases in some detail and give several examples.
\end{abstract}
\title{Higher Jones algebras and their simple modules}
\section{Introduction}
In \cite{ILZ} the authors define Jones algebras as certain quotients of Temperley--Lieb algebras. They also show that these algebras may be identified with the endomorphism algebras over the quantum group for $\mathfrak{sl}_2$ of fusion tensor powers of its natural vector representation. In this paper we study more generally such endomorphism algebras for arbitrary reductive groups in positive characteristics and their quantized root of unity analogues. We call these semisimple algebras {\it higher Jones algebras}. In the quantum $\mathfrak{sl}_2$-case they coincide with the Jones algebras from \cite{ILZ}. Our main result is an algorithm which determines the dimensions of the simple modules for these higher Jones algebras.
As the first important example and as a role model for our study consider the general linear group $GL_n$. Together with the corresponding root of unity quantum case we show how this gives interesting semisimple quotients of the group algebras for symmetric groups as well as of the corresponding Hecke algebras, and it allows us to determine the dimensions of certain classes of simple modules for these algebras. The results in these cases were obtained a long time ago, see \cite{Ma1} and \cite{Kl} for the modular case (using similar, respectively different techniques), and \cite{GW} for the Hecke algebra case. Nevertheless we treat this case in some detail as it will serve as role model for the general case. In fact, one of our key points is to give a unified treatment of a number of other cases, showing that they can be handled in similar ways.
In order to explain our general strategy we pass now to an arbitrary reductive algebraic group G defined over a field k of prime
characteristic.
The category of tilting modules for $G$ has a quotient category called the fusion category, see \cite{A92}, \cite{AS}. Objects in this category may be identified with certain semisimple modules for $G$ and the higher Jones algebras are then defined as the endomorphism algebras of such objects and of their fusion tensor powers. The main result in \cite{AST1} says that any endomorphism algebra of a tilting module for $G$ has a natural cellular algebra structure. We show that the higher Jones algebras inherit a cellular structure, and exploiting this we are able to compute the dimensions of their simple modules. This applies to the corresponding quantum case as well.
If $G$ is one of the classical groups $GL(V), SP(V)$ or $O(V)$ the module $V$ is (except for $O(V)$ in characteristic $2$ when $\dim V$ is odd) a tilting module for $G$ and via Schur--Weyl duality the endomorphism algebras of the fusion tensor powers of $V$ lead to semisimple quotient algebras of the group algebras of symmetric groups and Brauer algebras. This should be compared with the results in \cite{Ma1} and \cite{We2}, respectively. In the corresponding quantum cases we obtain semisimple quotients of Hecke algebras and $BMW$-algebras (compare \cite{Kl}, \cite{We1}, and \cite{We3}). In all cases we obtain effective algorithms for computing the dimensions
of the corresponding classes of simple modules. We have illustrated this by giving a number of concrete examples.
We want to point out that our approach, based on the theory of tilting modules and the cellular structure on their endomorphism rings as developed in \cite{AST1}, gives a general method for handling many more cases than those mentioned above. To mention just one big family of algebras---which fits into our framework and which is similar in principle to the examples given so far---take again an arbitrary reductive algebraic group $G$ and let $V$ be a simple Weyl module for $G$. This could e.g. be a Weyl module with minuscule highest weight or with highest weight belonging to the bottom alcove of the dominant chamber. Then our approach applies to the fusion tensor powers of $V$ or more generally to fusion tensor products of finite families of simple Weyl modules. As a result we get algorithms for the dimensions of certain simple modules for the corresponding endomorphism algebras. However, in only a few cases are these algebras related to ``known'' algebras and we have chosen to limit ourselves to the above examples.
The paper is organized as follows. In Section 2 we first set up notation etc. for a general reductive group $G$ and then make this explicit in the case where $G$ is a group a classical type. We shall rely heavily on tilting modules for $G$ and in Section 3 we start out by recalling the basic facts that we will need from this theory. In addition, this section establishes results on fusion tensor products which we then apply in Section 4 to symmetric groups and in Section 5 to Brauer algebras. In Section 6 we turn to quantum groups at roots of unity. Here we prove results analogous to the ones we obtained in the modular case and in Sections 7 and 8 we apply these to Hecke algebras for symmetric groups and to $BMW$-algebras, respectively.
\vskip .5 cm
{\bf Acknowledgments. }
Thanks to the referee for a quick and careful reading as well as for her/his many useful comments and corrections.
\section{Reductive algebraic groups}
This section introduces notation and contains a few basic facts about reductive algebraic groups and their representations over a field of prime characteristic. We shall be rather brief and refer the reader to \cite{RAG} for further details. We also deduce some specific facts needed later on for each of the groups of classical type.
\subsection {General notation} \label{general}
Suppose $G$ is a connected reductive algebraic group over a field $k$. We assume $k$ has characteristic $p > 0$. In this paper all modules will be finite-dimensional.
Let $T$ be a maximal torus in $G$, and denote by $X =X(T)$ its character group. In the root system $R \subset X$ for $(G,T)$ we choose a set of positive roots $R^+$, and denote by $X^+ \subset X$ the corresponding cone of dominant characters. Then $R^+$ defines an ordering $\leq$ on $X$. It also determines uniquely a Borel subgroup $B$ whose roots are the set of negative roots $-R^+$.
Denote by $S$ the set of simple roots in $R^+$. The reflection $s_\alpha$ corresponding to $\alpha \in S$ is called a simple reflection. The set of simple reflections generates the Weyl group $W$ for $R$. We can identify $W$ with
$N_G(T)/T$. Then we see that $W$ acts naturally on $X$: $\lambda \mapsto w(\lambda), \lambda \in X, w \in W$.
In addition to this action of $W$ on $X$ we shall also consider the so-called dot-action given by: $w \cdot \lambda = w(\lambda + \rho) - \rho, w \in W, \lambda \in X$. As usual, $\rho$ is half the sum of the positive roots.
In the category of $G$-modules we have the family of standard modules $\Delta(\lambda)$, and likewise the family of costandard modules $\nabla(\lambda)$. Here $\lambda$ runs through the set of dominant weights $X^+$ and $\Delta(\lambda)$ is also known as the Weyl module with highest weight $\lambda$. The dual Weyl module $\nabla(\lambda)$ is then $\Delta(-w_0 \lambda)^*$ where $w_0$ denotes the longest element in $W$.
The simple module with highest weight $\lambda$ may be realized as the head of $\Delta(\lambda)$ as well as the socle of $\nabla(\lambda)$. Recall that there is up to scalars a unique non-zero homomorphism
\begin{equation} \label{can} c_\lambda: \Delta(\lambda) \rightarrow \nabla(\lambda),
\end{equation}
namely the one factoring through $L(\lambda)$.
A $G$-module $M$ is said to have a $\Delta$-filtration if it has submodules $M^{i}$ with
$$ 0=M^0 \subset M^1 \subset \dots \subset M^r = M, \text { where } M^{i+1}/M^{i} \simeq \Delta(\lambda_i) \text { for some } \lambda_i \in X^+.$$
One defines $\nabla$-filtrations similarly.
If $M$ has a $\Delta$-filtration we set $(M:\Delta(\mu))$ equal to the number of occurrences of $\Delta(\mu)$ in such a filtration (note that these numbers are uniquely determined and independent of which $\Delta$-filtration we choose). When $M$ has a $\nabla$-filtration the numbers $(M:\nabla(\mu))$ are defined analogously.
A crucial result concerning modules with a $\Delta$-filtration says that, if $M$ and $M'$ both have a $\Delta$-filtration, then so does $M\otimes M'$. This is the Wang--Donkin--Mathieu theorem, see \cite{Wa}, \cite{Do-book}, and \cite{Ma}.
For $n \in {\mathbb Z}$ and $\alpha \in S$ we denote by $s_{\alpha, n}$ the affine reflection determined by
$$s_{\alpha, n}(\lambda) = s_\alpha (\lambda) - np\alpha.$$
The affine Weyl group $W_p$ is the group generated by all $s_{\alpha, n}$ where $ \alpha \in S$ and $ n \in {\mathbb Z}$ (note that in the Bourbaki convention this is the affine Weyl group corresponding to the dual root system $R^\vee$).
The linkage principle \cite{A80a} says that, whenever $L(\lambda)$ and $L(\mu)$ are two composition factors of an indecomposable $G$-module, then $\mu \in W_p \cdot \lambda$. It follows that any $G$-module $M$ splits into a direct sum of submodules according to the orbits of $W_p$ in $X$. More precisely, if we set
$$A(p)= \{\lambda \in X | 0 < \langle \lambda + \rho, \alpha^\vee \rangle < p \text { for all } \alpha \in R^+\},$$
called the bottom dominant alcove, then the closure
$$\overline A(p) = \{\lambda \in X | 0 \leq \langle \lambda + \rho, \alpha^\vee \rangle \leq p \text { for all } \alpha \in R^+\}$$
is a fundamental domain for the dot-action of $W_p$ on $X$. We have
$$ M = \bigoplus_{\lambda \in \overline A(p)} M[\lambda], $$
with $M[\lambda]$ equal to the largest submodule in $M$ whose composition factors $L(\mu)$ all have $\mu \in W_p \cdot \lambda$.
\begin{remarkcounter} \label{alcove A}
\begin{enumerate}
\item [a)]
As an immediate consequence of the strong linkage principle \cite{A80a} we have
$$ \Delta(\lambda) = L(\lambda) = \nabla(\lambda) \text { for all } \lambda \in \overline A(p) \cap X^+.$$
\item [b)] We have $A(p) \neq \emptyset$ if and only if $p > \langle \rho, \alpha^\vee \rangle$ for all roots $\alpha$, i.e. if and only if $p \geq h$, where $h$ is the Coxeter number for $R$.
\end{enumerate}
\end{remarkcounter}
\subsection{The general linear groups} \label{GL}
Let $V$ be a vector space over $k$. The reductive group $GL(V)$ plays a particularly important role in this paper. In this section we make the above notations and remarks explicit for the group $GL(V)$.
We set $n = \dim V$ and choose a basis $\{v_1, v_2, \cdots , v_n\}$ for $V$. Then $G_n = GL(V)$ identifies with $GL_n(k)$ and the set $T_n$ of diagonal matrices in $G_n$ is a maximal torus. The character group $X_n = X(T_n)$ is the free abelian group with basis $\epsilon_i$, $i=1, 2, \cdots ,n$ where $\epsilon_i: T_n \rightarrow k^{\times}$ is the homomorphism mapping a diagonal matrix into its $i$'th entry. If $\lambda \in X_n$, we shall write
$$\lambda = (\lambda_1, \lambda_2, \cdots , \lambda_n),$$
when $\lambda = \sum_1^n \lambda_i \epsilon_i.$
The root system for $(G_n,T_n)$ is
$$R = \{\epsilon_i -\epsilon_j | i \neq j\}.$$
It is of type $A_{n-1}$. Our choice of simple roots $S$ will be
$$S = \{\alpha_i = \epsilon_i -\epsilon_{i+1} | i = 1, 2, \cdots , n-1\}$$
inside the set of positive roots $R^+$ consisting of all $\epsilon_i - \epsilon_j$ with $i<j$.
We set
$$\omega_i = \epsilon_1 + \epsilon_2 + \cdots + \epsilon_i, \; i = 1, \cdots , n.$$
Then $\{\omega_1, \cdots , \omega_n\}$ is another basis of $X_n$. Note that $\omega_n$ is the determinant and is thus trivial on the intersection of $T_n$ with $SL_n(k)$. Consider
$$\rho' = \omega_1 + \cdots + \omega_n = (n, n-1, \cdots , 1).$$
Then $\rho' = \rho +\frac{1}{2} (n+1) \omega_n$ and we shall prefer to work with $\rho'$ instead of $\rho$ (note that, if $n$ is even, $\rho \notin X_n$ whereas $\rho' \in X_n$ for all $n$). As $\omega_n$ is fixed by $W$, the dot-action of $W$ on $X$ is unchanged when we replace $\rho$ by $\rho'$.
We have an inner product on $X_n$ given by $(\epsilon_i, \epsilon_j) = \delta_{i,j}$. It satisfies $(\omega_i, \alpha_j) = \delta_{i,j}$, $i,j = 1, 2, \cdots n-1$, i.e. $\omega_1, \cdots , \omega_{n-1}$ are the fundamental weights in $X_n$. On the other hand, $(\omega_n, \alpha_j) = 0$ for all $j$. Hence, $(\rho', \alpha_j) = 1$ for all $j = 1, 2, \cdots , n-1$.
The set of dominant weights is
$$X_n^+ = \{\lambda \in X_n | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n\} = \{\sum_1^n m_i \omega_i | m_i \in {\mathbb Z}_{\geq 0}, i= 1, 2, \cdots , n-1, \;\ m_n \in {\mathbb Z}\}.$$
If $\lambda \in X_n^+$ has $\lambda_n \geq 0$,
then $\lambda$ may be identified with a partition of $|\lambda| = \lambda_1 + \lambda_2 + \cdots + \lambda_n$.
The bottom alcove will be denoted $A_n(p)$. When $n > 1$ it is given by
$$ A_n(p) = \{\lambda \in X_n^+ | \lambda_1 - \lambda_n \leq p - n\}.$$
We have $A_n(p) \neq \emptyset$ if and only if $p \geq n$. In particular, $A_2(p)$ is always non-empty.
In the special case $n = 1$ the group $G_1$ is the $1$-dimensional torus. In that case $X_1 = {\mathbb Z} \epsilon_1$ and $A_1(p) = {\mathbb Z} \epsilon_1$. Note that for any $r \in {\mathbb Z}$ the Weyl module $\Delta_1(r \epsilon_1)$ is the $1$-dimensional $G_1$ module $k_{r\epsilon_1}$.
\begin{remarkcounter} \label{A for type A}
The natural module $V$ for $G_n$ has weights $\epsilon_1, \cdots , \epsilon_n$. It is simple because the highest weight $\epsilon_1$ is minuscule. We have $\epsilon_1 \in A_n(p)$ if and only if $p > n$.
\end{remarkcounter}
\subsection{The symplectic groups} \label{Sp}
Let now $V$ be a $2n$-dimensional symplectic vector space over $k$ with a fixed symplectic form, and consider the semisimple algebraic group $G_n = SP(V)$ consisting of those elements in $GL(V)$ which respect this form. This is naturally a subgroup of $GL(V)$. Note that $G_1 = SL_2(k)$. We let $T_n$ be the maximal torus in $G_n$ obtained as the intersection of the maximal torus in $GL(V)$ with $G_n$. In the notation from Section \ref{GL} the restrictions to $T_n$ of $\epsilon_1, \cdots , \epsilon_n$ form a basis of $X_n = X(T_n)$. The root system for $(G_n, T_n)$ consists of the elements
$$\{\pm \epsilon_i \pm \epsilon_j, \pm2 \epsilon_i | 1 \leq i \neq j \leq n \},$$
and is of type $C_n$. With respect to the usual choice of positive roots the set of dominant weights is
$$X_n^+ = \{\lambda = \sum_i \lambda_i \epsilon_i | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n \geq 0 \}.$$
The bottom dominant alcove is also in this case denoted $A_n(p)$. It is given by
$$ A_n(p) = \begin{cases} \{\lambda \epsilon_1 | 0 \leq \lambda \leq p-2\} \text { if } n = 1, \\\{\lambda \in X_n^+ | \lambda_1 + \lambda_2 \leq p - 2n \} \text { if } n > 1. \end{cases}$$
When $n > 1$ we have $A_n(p) \neq \emptyset$ if and only if $p \geq 2n$, whereas $A_1(p) \neq \emptyset$ for all $p$.
\begin{remarkcounter} \label{A for type C}
The natural module $V$ for $G_n$ is simple for all $p$ as its highest weight $\epsilon_1$ is minuscule. It has weights $\pm \epsilon_1, \cdots , \pm \epsilon_n$. Note that, for $n > 1$, we have $\epsilon_1 \in A_n(p)$ if and only if $p > 2n$, whereas for $n=1$ the condition is $p > 2$.
\end{remarkcounter}
\subsection{The orthogonal groups} \label{O}
Consider next a vector space $V$ over $k$ equipped with a non-degenerate, symmetric bilinear form. Then the orthogonal group $O(V)$ is the subgroup of $GL(V)$ consisting of those elements which preserve the bilinear form on $V$. We shall separate our discussion into the case where $\dim V$ is odd and the case where $\dim V$ is even.
\subsubsection{Type $B_n$}
Assume that $\dim V$ is odd, say $\dim V = 2n + 1$. Then we set $G_n = O(V)$. Again in this case we have $G_1 \simeq SL_2(k)$. However, the module $V$ for $G_1$ is the $3$-dimensional standard module for $SL_2(k)$. The root system $R$ for $G_n$ has type $B_n$ and may be taken to consist of the elements
$$ R = \{\pm \epsilon_i \pm \epsilon_j, \pm \epsilon_i | 1 \leq i \neq j \leq n \}$$
in $X_n = \oplus _{i=1}^n {\mathbb Z}\epsilon_i$. The set of dominant weights is
$$X_n^+ = \{ \sum_i \lambda_i \epsilon_i \in X_n | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n \geq 0 \}.$$
In this case the bottom dominant alcove $A_n(p)$ is given by
$$ A_n(p) = \begin{cases} \{\lambda \epsilon_1 | 0 \leq \lambda \leq p-2\} \text { if } n = 1, \\\{\lambda \in X_n^+ | 2 \lambda_1 \leq p - 2n \} \text { if } n > 1. \end{cases}$$
We have $A_n(p) \neq \emptyset$ if and only if $p > 2n$ (except for $n = 1$).
\begin{remarkcounter} \label{A for type B}
Unlike in the previous cases the highest weight of $V$ is no longer minuscule. However, we still have that $V = \Delta(\epsilon_1)$ is simple for all $p > 2$, \cite[Section II.8.21]{RAG}. It has weights $\pm \epsilon_1, \cdots , \pm \epsilon_n$ together with $0$. Note that $\epsilon_1 \in A_n(p)$ if and only if $p > 2n + 2$ except for $n=1$ where the condition is $p > 2$.
\end{remarkcounter}
\subsubsection{Type $D_n$}
Assume that $\dim V$ is even, say $\dim V = 2n$. We set again $G_n = O(V)$. The corresponding root system $R$ then has type $D_n$ and may be taken to consist of the elements
$$ R = \{\pm \epsilon_i \pm \epsilon_j | 1 \leq i \neq j \leq n \}$$
in $X_n =\{\lambda \in \oplus _{i=1}^n \frac{1}{2} {\mathbb Z}\epsilon_i | \lambda_i - \lambda_j \in {\mathbb Z} \text { for all } i, j\}$. The set of dominant weights is
$$X_n^+ = \{ \sum_i \lambda_i \epsilon_i \in X_n | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_{n-1} \geq |\lambda_n| \}.$$
In this case the bottom dominant alcove $A_n(p)$ is given by
$$ A_n(p) = \begin{cases} \{\lambda \epsilon_1 | \lambda \in {\mathbb Z}, 0 \leq \lambda \leq p-2\} \text { if } n = 1, \\\{\lambda \in X_2^+ | \lambda_1 \pm \lambda_2 \leq p - 2 \} \text { if } n = 2, \\\{\lambda \in X_n^+ | \lambda_1 + \lambda_2 \leq p - 2n + 2 \} \text { if } n > 2. \end{cases}$$
When $n>2$ we have $A_n(p) \neq \emptyset$ if and only if $p > 2n - 2$, whereas $A_1(p)$ and $A_2(p)$ are always non-empty.
\begin{remarkcounter} \label{A for type D}
$V = \Delta(\epsilon_1)$ is simple for all $p$ (its highest weight is minuscule). It has weights $\pm \epsilon_1, \cdots , \pm \epsilon_n$. Note that, for $n > 2$, we have $\epsilon_1 \in A_n(p)$ if and only if $p > 2n - 2$, whereas the condition for both $n=1$ and $n=2$ is $p > 2$.
\end{remarkcounter}
\section{Tilting modules for reductive algebraic groups}
We return to the situation of a general reductive group $G$ and use the notation from Section \ref{general}. We very briefly recall the basics about tilting modules for $G$ (referring to \cite[Section 2]{Do} or \cite[ChapterII.E]{RAG} for details), and prove the results which we then apply in the next two sections. Moreover, we recall from \cite[Section 4]{AST1} a few facts about the cellular algebra structure on endomorphism rings for tilting modules for $G$, which we also need.
\subsection {Tilting theory for $G$}
A $G$-module $M$ is called tilting if it has both a $\Delta$- and a $\nabla$-filtration. It turns out that for each $\lambda \in X^+$ there is a unique (up to isomorphisms) indecomposable tilting module $T(\lambda)$ with highest weight $\lambda$, and up to isomorphisms these are the only indecomposable tilting modules, see \cite[Theorem 1.1]{Do}. The Weyl module $\Delta(\lambda)$ is a submodule of $T(\lambda)$, while the dual Weyl module $\nabla(\lambda)$ is a quotient. The composite of the inclusion $\Delta(\lambda) \to T(\lambda)$ and the quotient map $T(\lambda) \to \nabla(\lambda)$ equals the homomorphism $c_\lambda$ from (\refeq{can}) (up to a non-zero constant in $k$).
We have the following elementary (and no doubt well-known) lemma.
\begin{lem} \label{quotient} Let $M$ be a $G$-module which contains two submodules $M_1$ and $M_2$ such that $M = M_1 \oplus M_2$. Denote by $i_j: M_j \rightarrow M$, respectively $\pi_j: M \rightarrow M_j$ the natural inclusion, respectively projection, $j = 1,2$. Suppose $f \circ g = 0$ for all $f \in \mathrm{Hom}_G(M_2, M_1)$ and $g \in \mathrm{Hom}_G(M_1, M_2)$. Then the natural map
$$\phi: \mathrm{End}_G(M) \rightarrow \mathrm{End}_G(M_1)$$
which takes $h \in \mathrm{End}_G(M)$ into $\pi_1 \circ h \circ i_1 \in \mathrm{End}_G(M_1)$
is a surjective algebra homomorphism.
\end{lem}
\begin{proof} The surjectivity of $\phi$ is obvious, so we just have to check that $\pi_1 \circ h' \circ h \circ i_1 = \pi_1 \circ h' \circ i_1 \circ \pi_1 \circ h \circ i_1$ for all $h', h \in \mathrm{End}_G(M)$. However, $h \circ i_1 = i_1 \circ \pi_1\circ h \circ i_1 + i_2 \circ \pi_2\circ h \circ i_1$, and by our assumption we see that $(\pi_1 \circ h' \circ i_2) \circ (\pi_2\circ h \circ i_1) = 0$. The desired equality follows.
\end{proof}
Let $Q$ be a tilting module for $G$. Then $Q$ splits into indecomposable summands as follows
$$ Q = \bigoplus_{\lambda \in X^+} T(\lambda)^{(Q:T(\lambda))}$$
for unique $(Q:T(\lambda)) \in {\mathbb Z}_{\geq 0}$.
Set now
$$Q^{{\mathcal F}}= \bigoplus_{\lambda \in A(p)} T(\lambda)^{(Q:T(\lambda))} \text { and } Q^{{\mathcal N}} = \bigoplus_{\lambda \in X^+\setminus A(p)}T(\lambda)^{(Q:T(\lambda))}.$$
For reasons explained in Section \ref{Fusion} we call $Q^{{\mathcal F}}$ the fusion summand of $Q$ and $Q^{{\mathcal N}}$ the negligible summand of $Q$. Then:
\begin{lem} \label{no composites}
If $f \in \mathrm{Hom}_G(Q^{{\mathcal F}}, Q^{{\mathcal N}})$ and $g \in \mathrm{Hom}_G(Q^{{\mathcal N}}, Q^{{\mathcal F}})$, then $g \circ f = 0$.
\end{lem}
\begin{proof} It is enough to check the lemma in the case where $Q^{{\mathcal F}} = T(\lambda)$ and $Q^{{\mathcal N}} = T(\mu)$ with $\lambda \in A(p)$ and $\mu \in X^+\setminus A(p)$. By Remark \ref{alcove A}a) we have $T(\lambda) = \Delta(\lambda) = L(\lambda)$. Hence, in this case $g \circ f$ is up to scalar the identity on $T(\lambda)$. If the scalar were non-zero, $T(\lambda)$ would be a summand of $T(\mu)$, which contradicts the indecomposability of $T(\mu)$.
\end{proof}
\begin{thm} \label{fusion-quotient}
The natural map $\phi: \mathrm{End}_G(Q) \rightarrow \mathrm{End}_G(Q^{{\mathcal F}})$ is a surjective algebra homomorphism. The kernel of $\phi$ equals
$$\{h \in \mathrm{End}_G(Q) | \mathrm{Tr}(i_\lambda \circ h \circ \pi_\lambda) = 0 \text { for all homomorphisms } i_\lambda: T(\lambda) \rightarrow Q, \; \pi_\lambda: Q \rightarrow T(\lambda),\; \lambda \in X^+\}.$$
\end{thm}
\begin{proof} The combination of Lemma \ref{quotient} and Lemma \ref{no composites} immediately gives the first statement. To prove the claim about the kernel of $\phi$ we first observe that the endomorphisms $i_\lambda \circ h \circ \pi_\lambda$ of $T(\lambda)$ are either nilpotent, or a constant times the identity. Now, nilpotent endomorphisms clearly have trace zero. If $ \lambda \notin A(p)$, then $\dim T(\lambda)$ is divisible by $p$. This holds when $\lambda$ is $p$-singular (i.e. if there exists a root $\beta$ with $\langle \lambda + \rho, \beta^{\vee} \rangle$ divisible by $p$), because then the linkage principle implies that $\mu$ is also $p$-singular for all $\mu \in X^+$ for which $\Delta(\mu)$ occurs in a $\Delta$-filtration of $T(\lambda)$. For a $p$-regular $\lambda$ it then follows by an easy translation argument, see \cite[Section 5]{A92} (the argument used there deals with the quantum case but applies just as well in the modular case). So when $\lambda \notin A(p)$ all endomorphisms of $T(\lambda)$ have trace zero. In particular, we have $\mathrm{Tr}(i_\lambda \circ h \circ \pi_\lambda) = 0$ for all $h \in \mathrm{End}_G(Q)$ when $\lambda \in X^+\setminus A(p)$.
On the other hand, if $\lambda \in A(p)$, then by Remark \ref{alcove A} we see that $T(\lambda)$ is simple, i.e. we have $T(\lambda) = L(\lambda) = \Delta(\lambda)$. So in this case any non-zero endomorphism of $T(\lambda)$ has trace equal to a non-zero constant times $\dim T(\lambda)$. By Weyl's dimension formula $\dim (\Delta(\lambda)) $ is prime to $p$. If $i_1$, respectively $\pi_1$, denotes the natural inclusion of $Q^{\mathcal F}$ into $Q$, respectively projection onto $Q^{\mathcal F}$, then this means that $h \in \mathrm{End}_G(Q)$ is in the kernel of $\phi$ if and only if $i_1 \circ h \circ \pi_1 = 0$ if and only if $i_\lambda \circ h \circ \pi_\lambda = 0$ for all $\lambda \in A(p)$ if and only if $ \mathrm{Tr}(i_\lambda \circ h \circ \pi_\lambda) = 0$ for all $\lambda \in X^+$.
\end{proof}
\subsection{Fusion} \label{Fusion}
Let $\mathcal T$ denote the category of tilting modules for $G$. As noted above, this is a tensor category. Inside $\mathcal T$ we consider the subcategory $\mathcal N$ consisting of all negligible modules, i.e. a tilting module $M$ belongs to $\mathcal N$ if and only if $\mathrm{Tr}(f) = 0$ for all $f \in \mathrm{End}_{G}(M)$. As each object in $\mathcal T$ is a direct sum of certain of the $T(\lambda)$'s and $\dim T(\lambda)$ is divisible by $p$ if and only if $\lambda \notin A(p)$ (as we saw in the proof of Theorem \ref{fusion-quotient}) we see that $M \in \mathcal N$ if and only if $(M:T(\lambda)) = 0$ for all $\lambda \in A(p)$.
We proved in \cite[Section 4]{A92} (in the quantum case - the arguments for $G$ are analogous) that $\mathcal N$ is a tensor ideal in $\mathcal T$. The corresponding quotient category $\mathcal T/ \mathcal N$ is then itself a tensor category. It is denoted ${\mathcal F}$ and called the fusion category for $G$. We may think of objects in $\mathcal F$ as the tilting modules $Q$ whose indecomposable summands are among the $T(\lambda)$'s with $\lambda \in A(p)$. Note that $\mathcal F$ is a semisimple category (with simple objects $(T(\lambda) = L(\lambda))_{\lambda \in A(p)}$), cf. \cite[Section 4]{A92}.
\begin{remarkcounter} \label{p=2} Note that for $p < h$ the alcove $A(p)$ is empty. This means that in this case $\mathcal N = \mathcal T$. In particular, if $p=2$ the fusion category is trivial except for the case $G=SL_2(k)$ in which case $A(2) =\{0\}$, so that $\mathcal F$ is the category of finite-dimensional vector spaces. For this reason we shall in the following tacitly assume $p > 2$.
\end{remarkcounter}
In order to distinguish it from the usual tensor product on $G$-modules we denote the tensor product in $\mathcal F$ by $\underline \otimes$. If $Q, Q' \in \mathcal F$ then $Q \underline \otimes Q = \mathrm{pr} (Q \otimes Q')$ where $\mathrm{pr}$ denotes the projection functor from $\mathcal T$ to $\mathcal F$ (on the right-hand side we consider $Q, Q'$ as modules in $\mathcal T$).
\begin{cor} \label{fusion} Let $T$ be an arbitrary tilting module for $G$. Then, for any $r \in {\mathbb Z}_{\geq 0}$, the natural homomorphism $\mathrm{End}_G(T^{\otimes r}) \rightarrow \mathrm{End}_G(T^{\underline \otimes r})$ is surjective.
\end{cor}
\begin{proof} Set $Q = T^{\otimes r}$. Then $Q$ is a tilting module and in the above notation $Q^{\mathcal F} = T^{\underline \otimes r}$ ($ = 0$ if $T \in \mathcal N$) . Hence the corollary is an immediate consequence of Theorem \ref{fusion-quotient}.
\end{proof}
\subsection{Cellular theory for endo-rings of tilting modules} \label{cellular}
Recall the notions of cellularity, cellular structure and cellular algebras from \cite{GL}. When $Q$ is a tilting module for $G$ its endomorphism ring $E_Q = \mathrm{End}_G(Q)$ has a natural cellular structure, see \cite[Theorem 3.9] {AST1}. The parameter set of weights for $E_Q$ is
$$ \Lambda = \{\lambda \in X^+ | (Q:\Delta(\lambda)) \neq 0 \}.$$
When $\lambda \in X^+$ the cell module for $E_Q$ associated with $\lambda$ is $C_Q(\lambda) = \mathrm{Hom}_G(\Delta(\lambda), Q)$. Then $\dim C_Q(\lambda) = (Q:\Delta(\lambda)) (= 0$ unless $\lambda \in \Lambda$). We set
$$ \Lambda_0 = \{\lambda \in \Lambda | (Q:T(\lambda)) \neq 0 \}.$$
If $\lambda \in \Lambda_0$ then $C_Q(\lambda)$ has a unique simple quotient which we in this paper denote $D_Q(\lambda)$. The set $\{D_Q(\lambda) | \lambda \in \Lambda_0\}$ is up to isomorphisms the set of simple modules for $E_Q$. We have
\begin{equation} \label{dim simple/tilting}
\dim D_Q(\lambda) = (Q: T(\lambda)), \end{equation}
see \cite[Theorem 4.12]{AST1}.
Finally, recall the following result on semisimplicity, see \cite[Theorem 4.13] {AST1}.
\begin{thm} \label{ss} $E_Q$ is a semisimple algebra if and only if $Q$ is a semisimple $G$-module. In that case we have $\Lambda = \Lambda_0$, $T(\lambda) = \Delta(\lambda) = L(\lambda)$ and $C_Q(\lambda) = D_Q(\lambda)$ for all $\lambda \in \Lambda$.
\end{thm}
\begin{examplecounter} \label{Ex1} Let $T$ be a tilting module for $G$ and set $Q = T^{\otimes r}$ as in the previous section. Then $E_Q= \mathrm{End}_G(T^{\otimes r})$ is a cellular algebra with cell modules $(C_Q(\lambda))_{\lambda \in \Lambda_T^r}$, where $\Lambda_T^r = \{\lambda \in X^+ | (T^{\otimes r} : \Delta(\lambda)) \neq 0\}$, and simple modules $(D_Q(\lambda))_{\lambda \in \Lambda_{0,T}^r}$, $\Lambda_{0,T}^r = \{\lambda \in \Lambda_T^r | (T^{\otimes r}: T(\lambda)) \neq 0\}$.
Denote by $Q_1$ the summand $T^{\underline \otimes r}$ of $Q$. Then the endomorphism ring
$$\overline E_Q = \mathrm{End}_G(Q_1),$$
is, according to Corollary \ref{fusion}, a quotient of $E_Q$, and, by Theorem \ref{ss}, it is a semisimple cellular algebra. In fact, $\overline E_Q$ is a direct sum of the matrix rings, namely
$$\overline E_Q \simeq \bigoplus_{\lambda \in A(p)} M_{(Q_1:T(\lambda))} (k).$$
The simple modules for $\overline E_Q$ are $\{D_Q(\lambda) | \lambda \in A(p) \cap \Lambda_0 \}$. We have
$$\dim D_Q(\lambda) = (Q_1:T(\lambda))$$
for all $\lambda \in A(p) \cap \Lambda_{0,T}^r$.
\end{examplecounter}
\section {Semisimple quotients of the group algebras $kS_r$}
In this section $G_n = GL(V)$ where $V$ is a vector space over $k$ of dimension $n$ with basis $\{v_1, v_2, \cdots , v_n\}$ as in Section \ref{GL}. As we will also look at various subspaces $V'$ of $V$ we shall from now on write $V_n = V$. We write $\Delta_n(\lambda)$ for the Weyl module for $G_n$, $T_n(\lambda)$ for the indecomposable tilting module for $G_n$ with highest weight $\lambda$, etc.
\subsection{Algebras associated with tensor powers of the natural module for $G$} \label{tensor powers}
We let $r \in {\mathbb Z}_{\geq 0}$ and consider the $G_n$-module $V_n^{\otimes r}$. As tensor products of tilting modules are again tilting we see that this is a tilting module for $G_n$. Consider the subset $I = \{\alpha_1, \alpha_2, \cdots , \alpha_{n-2}\} \subset S$. Then the corresponding Levi subgroup $L_I$ identifies with $G_{n-1} \times G_1$, where the first factor $G_{n-1} =GL(V_{n-1})$ is the subgroup fixing $v_n$, and the second factor $G_1$ is the subgroup fixing $v_i$ for $i < n$ and stabilizing the line $k v_n$. As an $L_I$-module we have $V_n = V_{n-1} \oplus k_{\epsilon_n}$. Here $k_{\epsilon_n}$ is the line $k v_n$ on which $G_1$ acts via the character $\epsilon_n$. This gives
\begin{equation} \label{restriction}
V_n^{\otimes r} \simeq \bigoplus _{s=0} ^r (V_{n-1}^{\otimes s} \otimes k_{(r-s)\epsilon_n})^{\oplus \binom{r}{s}} \text { (as $L_I$-modules)}.
\end{equation}
In particular, $V_{n-1}^{\otimes r}$ is an $L_I$-summand. Its weights (for the natural maximal torus in $L_I$ which is also the maximal torus $T_n$ in $G_n$) consist of $\lambda$'s with $\lambda_n = 0$ whereas any weight $\mu$ of the complement $C = \bigoplus _{s=0} ^{r-1} (V_{n-1}^{\otimes s} \otimes k_{(r-s)\epsilon_n})^{\oplus \binom{r}{s}}$ has $\mu_n > 0$. It follows that
\begin{equation} \label{no cross-homs}
\mathrm{Hom}_{L_I}(V_{n-1}^{\otimes r}, C) = 0 = \mathrm{Hom}_{L_I}(C, V_{n-1}^{\otimes r}).
\end{equation}
Moreover, since $G_1$ acts trivially on $V_{n-1}$ we have $\mathrm{End}_{L_I}(V_{n-1}^{\otimes r}) = \mathrm{End}_{G_{n-1}}(V_{n-1}^{\otimes r})$. Hence we get from Lemma \ref{quotient} (in which the assumptions are satisfied because of (\refeq{no cross-homs})):
\begin{prop}\label{surj GL} The natural algebra homomorphism
$$\mathrm{End}_{G_n}(V_n^{\otimes r}) \rightarrow \mathrm{End}_{G_{n-1}}(V_{n-1}^{\otimes r})$$
is surjective.
\end{prop}
Later on we shall use the following related result.
\begin{prop} \label{restriction of Specht}
Suppose $\lambda \in X^+$ has $\lambda_n = 0$. Then the natural homomorphism
$$\mathrm{Hom}_{G_n}(\Delta_n(\lambda), V_n^{\otimes r}) \rightarrow \mathrm{Hom}_{G_{n-1}}(\Delta_{n-1}(\lambda), V_{n-1}^{\otimes r})$$
is an isomorphism for all $r$.
\end{prop}
\begin{proof} In this proof we shall need the parabolic subgroup $P_I$ corresponding to $I$. We have $P_I = L_I U^{I}$ (semidirect product) where $U^{I}$ is the unipotent radical of $P_I$. We set $\nabla_I(\lambda) = \mathrm{ind}_B^{P_I}(k_\lambda)$. Our assumption that $\lambda_n = 0$ implies that as an $L_I$-module and as a $G_{n-1}$-module we have $\nabla_I(\lambda) = \nabla_{n-1}(\lambda)$.
We shall prove the proposition by proving the dual statement
$$\mathrm{Hom}_{G_n}( V_n^{\otimes r}, \nabla_n(\lambda)) \simeq \mathrm{Hom}_{G_{n-1}}(V_{n-1}^{\otimes r}, \nabla_{n-1}(\lambda)).$$
First, by Frobenius reciprocity \cite[Proposition I.3.4]{RAG}, we have
$$\mathrm{Hom}_{G_n}( V_n^{\otimes r}, \nabla_n(\lambda)) \simeq \mathrm{Hom}_{P_I}( V_n^{\otimes r}, \nabla_I(\lambda)).$$
Then restricting to $L_I$ gives an isomorphism to $\mathrm{Hom}_{L_I}( V_n^{\otimes r}, \nabla_I(\lambda))$. Finally, we use (\refeq{restriction}) and the weight arguments from the proof of Proposition \ref{surj GL} to see that this identifies with
$$\mathrm{Hom}_{L_I}( V_{n-1}^{\otimes r}, \nabla_I (\lambda)) \simeq \mathrm{Hom}_{G_{n-1}}( V_{n-1}^{\otimes r}, \nabla_{n-1}(\lambda)).$$
\end{proof}
We can, of course, iterate the statement in Proposition \ref{surj GL}: If we set $E_n^r = \mathrm{End}_{G_n}(V_n^{\otimes r})$, then we recover the following well-known fact (cf. \cite[E.17]{RAG}).
\begin{cor} \label{sequence of surjections} We have a sequence of surjective algebra homomorphisms
$$ E_n^r \rightarrow E_{n-1}^r \rightarrow \cdots \rightarrow E_2^r \rightarrow E_1^r.$$
\end{cor}
\vskip 1 cm
Set now $\overline E_n^r = \mathrm{End}_{G_n}(V_n^{\underline \otimes r})$. Note that these are the higher Jones algebras (see the introduction) in the case $G= GL_n$ corresponding to the tilting modules $V_n^{\otimes r}$. We get from Corollary \ref{fusion} that this is a quotient of $E_n^r$. It is a semisimple algebra (see Example \ref{Ex1}), so that by Corollary \ref{sequence of surjections} we get
\begin{thm} \label {ss quotients}
For all $n$ and all $r$ the algebras $\overline E_m^r$, $m=1, 2, \cdots , n$ are semisimple quotient algebras of $E_n^r$.
\end{thm}
\begin{remarkcounter} \label{p-term is 0} \begin{enumerate}
\item [a)] We have $\overline E_m^r = 0$ for all $r$ when $m \geq p$. This is clear for $m > p$, because then $A_m(p) = \emptyset$. If $m = p$, we have that $\epsilon_1$ belongs to the upper wall of $A_m(p)$, see Remark \ref{A for type A}. Hence, $V_p$ is negligible and therefore so are also all tensor powers $V_p^{\otimes r}$. This means that $V_p^{\underline \otimes r} = 0$ for all $r$.
\item [b)] We do not have surjections $\overline E_m^r \to \overline E_{m-1}^r$ analogous to the ones we found in Corollary \ref{sequence of surjections}. In fact, the alcove $A_m(p)$ becomes larger the smaller $m$ we consider. This means that the algebras $\overline E_m^r$ grow in size when $m$ decreases.
\end{enumerate}
\end{remarkcounter}
\subsection{A class of simple modules for symmetric groups} \label{class of simple}
The group algebra $kS_r$ of the symmetric group on $r$ letters is isomorphic to the algebra $E_n^r$ for all $n \geq r$, see e.g. \cite[3.1]{CL}. Hence, by Theorem \ref{ss quotients}, $kS_r$ has the following list of semisimple quotients: $\overline E_1^r, \overline E_2^r, \cdots , \overline E_r^r$. As observed in Remark \ref{p-term is 0} we have $\overline E_n^r = 0$ if $n \geq p$. On the other hand, $kS_r$ is itself semisimple if $p >r$, and its representation theory coincides with the well-known theory in characteristic $0$. So we shall assume in the following that
$p \leq r$.
In the special case $n=1$ we have $V_1^{\otimes r}= k_{r \epsilon_1}$, $r \in {\mathbb Z}$ and these modules together with their duals are the indecomposable tilting modules (as well as the simple modules) for $G_1$. The fusion category for $G_1$ coincides with the full category of finite-dimensional $G_1$-modules. We identify the trivial $1$-line partition of $r>0$ with the element $r \epsilon_1$ in $A_1(p)$. Clearly, we have
$\overline E_1^r = E_1^r = \mathrm{End}_{G_1}(k_{r \underline{\epsilon}silon_1}) = k$ for all $r$.
We shall explore the simple modules for $kS_r$ arising from the above quotients $\overline E_m^r$. Note that we just observed that the first algebra $\overline E_1^r$ equals $k$.
Consider the remaining quotients $\overline E_m^r$, $m= 2, \cdots , p-1$ of $kS_r$. We shall describe the simple modules for $kS_r$ arising from these. Recall that the simple modules for $kS_r$ are indexed by the $p$-regular partitions of $r$, i.e. partitions of $r$ with no $p$ rows having the same length. If $\lambda$ is such a partition, we denote the corresponding simple module for $kS_r$ by $D_r(\lambda)$.
Set $\Lambda^r$ equal to the set of partitions of $r$. This is the weight set for the cellular algebra $E_n^r$ whenever $n \geq r$. Define
$$\overline \Lambda^r(p) = \{ (\lambda_1, \lambda_2, \cdots ,\lambda_m) \in \Lambda^r | \lambda \in A_m(p) \text { for some } m < p \}.$$
So $\overline \Lambda^r(p)$ consists of those partitions of $r$ which, for some $m < p$, have at most $m$ non-zero terms and satisfy $\lambda_1 - \lambda_m \leq p - m$. Clearly, the partitions in $\overline \Lambda^r(p)$ are all $p$-regular. We shall now derive an algorithm which determines the dimensions of the simple modules $D_r(\lambda)$ when $\lambda \in \overline \Lambda^r(p)$.
We have the following Pieri-type branching formula, which is proved e.g. in \cite[(3.7)]{AS}.
\begin{prop} \label{inductive formula}
Let $m \geq 1$ and suppose $\lambda \in A_m(p)$. Then
$$(V_m^{\otimes r}: T(\lambda)) = \sum_{i: \lambda - \epsilon_i \in \Lambda^{r-1} \cap A_m(p)} (V_m^{\otimes (r-1)}: T(\lambda - \epsilon_i)).$$
\end{prop}
\begin{lem} \label {the p-1 algebra}
Suppose $1 \leq r = a (p-1) + b$ where $0 \leq b < p-1$. Then $V_{p-1}^{\underline \otimes r} = T(a\omega_{p-1} + \omega_b)$. Hence, $\overline E_{p-1}^r = k$.
\end{lem}
\begin{proof} The lemma is clearly true when $r =1$ where $V_{p-1} = T(\omega_1) = L(\omega_1)$. Observe that (with the notation in the lemma) $a\omega_{p-1} + \omega_b$ is the unique element in $\Lambda^r \cap A_{p-1}(p)$. Hence, for $r>1$ the statement follows by induction from Proposition \ref{inductive formula}.
\end{proof}
\begin{thm} \label{main symm}
Let $r > 0$ and suppose $\lambda \in \overline \Lambda^r(p)$. Then the dimension of the simple $kS_r$-module $D_r(\lambda)$ is recursively determined by
$$ \dim D_r(\lambda) = \sum_{i: \lambda - \epsilon_i \in \overline \Lambda^{(r-1)}(p)} \dim D_{r-1}(\lambda - \epsilon_i).$$
\end{thm}
\begin{proof} For any partition $\mu$ of $r$ the corresponding Specht module for $kS_r$ identifies with the cell module $C_r(\mu) =\mathrm{Hom}_{G_r}(\Delta_r(\mu), V_r^{\otimes r})$ for $E_r^r \simeq kS_r$. Now Proposition \ref{restriction of Specht} shows that, if $\mu$ has at most $m$ terms, then we have $C_r(\mu) \simeq C_m(\mu)$. The surjection of $V_m^{\otimes r}$ onto the fusion summand $V_m^{\underline \otimes r}$ then gives a surjection of $C_m(\mu)$ onto the cell module $\overline C_m(\mu) = \mathrm{Hom}_{G_m}(\Delta_m(\mu), V_m^{\underline \otimes r})$ for the semisimple quotient algebra $\overline E_m^r$ of $kS_r$. This latter module is only non-zero if $m < p$ and $\mu \in A_m(p)$. So if $\mu = \lambda$ with $\lambda$ as in the theorem, we see that $D_r(\lambda) = \overline C_m(\lambda)$. The theorem therefore follows from Proposition \ref{inductive formula} by observing that $\dim \overline C_m(\lambda) = (V_m^{\otimes r} : T(\lambda))$, cf. (\refeq{dim simple/tilting}).
\end{proof}
\begin{examplecounter}
Consider the case $p = 3$. Here we have
$$ \overline \Lambda ^r(3) = \begin{cases} \{(1)\} \text { if } r = 1, \\ \{(r), ( (r+1)/2, (r-1)/2)\} \text { if } r \geq 3 \text { is odd,} \\ \{ (r), ( r/2, r/2)\} \text { if } r \geq 2 \text { is even.} \end{cases}$$
The trivial partition $(r)$ of $r$ corresponds to the trivial simple module $D_r((r)) = k$ (this is true for all primes). For the unique $2$-parts partition $ \lambda$ in $\overline \Lambda ^r$ we get from Theorem \ref{main symm}
$$ \dim D_r(\lambda) = \begin{cases} \dim D_{r-1}(\lambda - \epsilon_1) \text { if $r$ is odd,} \\ \dim D_{r-1}(\lambda - \epsilon_2) \text { if $r$ is even.} \end{cases}$$
Hence we find $\dim D_r(\lambda) = 1$ for all $r$. This is of course also an immediate consequence of the fact that in this case $\overline E_2^r =k$, see Lemma \ref{the p-1 algebra}. Note that $\overline E_2^r$ is the modular Jones algebra appearing in \cite[Section 7] {A17} and it was observed there as well that the Jones algebras are all trivial in characteristic $3$.
\end{examplecounter}
\begin{examplecounter}
Consider now $p = 5$. Then for $r \geq 5$ we have exactly two partitions $\lambda^1(r)$ and $\lambda^2(r)$ of $r$ having $2$ non-zero parts, which belong to $A_2(5)$. Likewise, there are exactly $2$ partitions $\mu^1(r)$ and $\mu^2(r)$ of $r$ with $3$ non-zero parts, which belong to $A_3(p)$. Finally, there is a unique partition $\nu(r)$ of $r$ with $4$ non-zero parts which belongs to $A_4(p)$. To be precise we have
$$ \lambda^1(r) = ((r+2)/2, (r-2)/2) \text { and } \lambda^2(r) = (r/2, r/2), \text { if $r$ is even;}$$
whereas
$$ \lambda^1(r) = ((r+3)/2, (r-3)/2) \text { and } \lambda^2(r) = ((r+1)/2,(r-1)/2), \text { if $r$ is odd.}$$
We leave to the reader to work out the formulas for $\mu^1(r), \mu^2(r)$. The expression for $\nu(r)$ is given in Lemma \ref{the p-1 algebra}.
So $\overline \Lambda^r (p)
= \{(r), \lambda^1(r), \lambda^2(r), \mu^1(r), \mu^2(r), \nu(r) \}$. We choose the enumeration such that $\lambda^1(r) > \lambda^2(r)$ (in the dominance order) and likewise $\mu^1(r) > \mu^2(r)$. For each of these 6 weights we can easily compute the dimension of the corresponding simple $kS_r$-modules via Theorem \ref{main symm}. In Table 1 we have illustrated the results for $ r \leq 10$. In this table the numbers in row $r$ (listed in the above order) are the dimensions of these $6$ simple $kS_r$-modules. When $r$ is small
some weights are repeated, e.g. for $r = 3$ we have $(3) = \lambda^1(3)$, $\lambda^2(3) = \mu^1(3)$ and $\mu^2(3) = \nu(3)$.
\vskip .5 cm
\centerline {
{\it Table 1. Dimensions of simple modules for $kS_r$ when $p= 5$}}
\vskip .5cm
\centerline{
\begin{tabular} { r| c | c c |c c | cc }
r &(r) & $\lambda^1(r)$ & $\lambda^2(r)$ & $\mu^1(r)$ & $\mu^2(r)$& $\nu(r)$ \\
\hline
1 & 1 & &1 & 1& & 1\\
2 & 1 & 1 & 1 & 1 & 1 & 1 & \\
3 & 1& 1 & 2& 2 & 1 & 1 \\
4 & 1 &3 & 2 & 2 & 3 & 1\\
5 &1 & 3 & 5 & 3 & 5 &1\\
6 & 1 & 8& 5& 8 &5 &1 \\
7 &1 &8 &13 &8 & 13 & 1\\
8 & 1& 21&13 &13 & 21 & 1\\
9 & 1 &21&34&34 & 21 & 1\\
10 & 1 &55&34&34 & 55 & 1\\
\end{tabular}}
\vskip .5 cm
The table can easily be extended using the following formulas. Set $a^j(r) = \dim D_r(\lambda^j(r)), \; j=1, 2$. Then Theorem \ref{main symm} gives $a^1(1) = 0, a^2(1) = 1 = a^2(2)$ and the following recursion rules
$$ a^1(2r+1) = a^1(2r) = a^1(2r-1) + a^2(2r-1); \; a^2(2r+2) = a^2(2r+1) = a^1(2r) + a^2(2r)$$
for $r \geq 1$.
Another way of phrasing this is that $a^2(1), a^1(2), a^2(3), a^1(4), a^2(5), a^1(6), \cdots $ is the Fibonacci sequence. The first equations above then determine the remaining numbers $a^j(r)$.
Again we leave it to the reader to find the similar recursion for the dimension of the simple modules corresponding to the $\mu^j(r)$'s. Apart from the fact, that the recursion rules coincide, we see no obvious representation theoretic explanation for the ``symmetry" between the numbers involving $\lambda$'s and those involving $\mu$'s.
\end{examplecounter}
\vskip 1cm
\section{Semisimple quotients of the Brauer algebras} \label{Brauer}
In this section we shall apply our results from Section 2 to the symplectic and orthogonal groups. This will allow us via Schur--Weyl duality for these groups to obtain certain semisimple quotients of the Brauer algebras over $k$ and to give an algorithm for finding the dimensions of the corresponding simple modules.
The Brauer algebra $\mathcal B_r(\delta)$ with parameter $\delta \in k$ may be defined via generators and relations, see e.g. \cite[Introduction]{DDH}. Alternatively, we may consider it first as
just a vector space over $k$ with basis consisting of the so-called Brauer diagrams with $r$ strands. Then one defines a multiplication of two such diagrams by stacking the second diagram on top of the first, see e.g. \cite{B} or \cite[Section 4]{GL}. This gives an algebra structure on $\mathcal B_r(\delta)$.
We have Brauer algebras for an arbitrary parameter $\delta \in k$. However, the ones that are connected with our endomorphism algebras are those where $\delta$ is the image in $k$ of an integer, i.e. where $\delta$ belongs to the prime field $\mathbb{F}_p \subset k$. It follows from the various versions of the Schur--Weyl duality (see below) that in this case $\mathcal B_r(\delta)$ surjects onto the endomorphism algebra of the $r$'th tensor power of the natural modules for appropriate symplectic and orthogonal groups.
\subsection{Quotients arising from the symplectic groups} \label{quotients sp}
We shall use the notation from Section \ref{Sp}. In particular, $V$ will be a $2n$-dimensional symplectic vector space, which we from now on denote $V_n$. We set $G_n = SP(V_n)$ and $E_n^r = \mathrm{End}_{G_n}(V_n^{ \otimes r})$.
Consider now the fusion summand $V_n^{\underline \otimes r}$ of $V_n^{\otimes r}$ with endomorphism ring $\overline E_n^r = \mathrm{End}_{G_n}(V^{\underline \otimes r})$. Then exactly as in Proposition \ref{surj GL} we obtain:
\begin{prop} \label{quotients Sp} For all $n$ and $r$ the algebra $\overline E_n^r$ is a semisimple quotient of $E_n^r$.
\end{prop}
Recalling the description of $A_m(p)$ from Section 2.3 and using Remark 3 we see that $\overline E_n^r = 0$ unless $2n \leq p-1$.
In contrast with the $GL(V)$ case we usually do not have $\overline E_1^r = k$. In fact, $G_1 = SL_2(k)$ and the tensor powers of the natural module for $G_1$ therefore typically have many summands. On the other hand, the top non-zero term is always equal to $k$:
\begin{prop}
$\overline E_{(p-1)/2}^r = k$ for all $r$.
\end{prop}
\begin{proof}
In this proof we drop the subscript $ {(p-1)/2}$ on $V$ and $\Delta$. We have that $V \otimes V $ has a $\Delta$-filtration with factors $\Delta(2 \epsilon_1), \Delta(\epsilon_1 + \epsilon_2)$ and $\Delta(0) = k$. The first two of these have highest weights on the upper wall of $A_{(p-1)/2}(p)$ whereas the highest weight $0$ of the last term belongs to $A_{(p-1)/2}(p)$. It follows that
$V \underline \otimes V = k$. Hence,
$$V^{\underline \otimes r} = \begin{cases} V \text { if $r$ is odd,} \\ k \text { if $r $ is even.} \end{cases}$$
The claim follows.
\end{proof}
The analogue of Proposition \ref{inductive formula} is
\begin{prop} \label{inductive formula Sp}
Let $m \geq 1$ and suppose $\lambda \in A_m(p)$. Then
$$(V_m^{\otimes r}: T_m(\lambda)) = \sum_{i: \lambda \pm \epsilon_i \in A_m(p)} (V_m^{\otimes (r-1)}: T_m(\lambda \pm \epsilon_i)).$$
\end{prop}
\begin{proof} As $\epsilon_1$ is minuscule we have for any $\lambda \in X_n^+$ that the $\Delta$-factors in $\Delta(\lambda) \otimes V_m$ are those with highest weights $\lambda + \mu$ where $\mu$ runs through the weights of $V_m$ (ignoring possible $\mu$'s for which $\lambda + \mu$ belong to the boundary of $X_m^+$). Likewise, if $\lambda \in A_m(p)$ then the same highest weights all belong to the closure of $A_m(p)$. Hence the fusion product $\Delta(\lambda) \underline \otimes V$ is the direct sum of all $\Delta_m(\lambda + \mu)$ for which $\lambda + \mu \in A_m(p)$. As the possible $\mu$'s are the $\pm \epsilon_i$ (each having multiplicity $1$) we get the formula.
\end{proof}
Recall now the Schur--Weyl duality theorem for $SP(V)$, see \cite{DDH}.
\begin{thm} \label{Schur-Weyl Sp} There is an action of $\mathcal B_r(-2n)$ on $V_n^{\otimes r}$ which commutes with the action of $G_n$. The corresponding
homomorphism $\mathcal B_r(-2n) \rightarrow E_n^r$ is surjective for all $n$ and for $n\geq r$ it is an isomorphism.
\end{thm}
The simple modules for $\mathcal B_r(\delta)$ are parametrized by the $p$-regular partitions of $r, r-2, \cdots$, see \cite[Section 4]{GL}, and we shall denote them $D_{\mathcal B_r(\delta)}(\lambda)$.
This parametrization holds for any $\delta \in k$. However, in this section we only consider the case where $\delta$ is the image in $k$ of a negative even number. We identify $\delta$ with an integer in $[0, p-1]$.
Assume $\delta$ is odd and define the following subsets of weights
$$ \overline \Lambda^r(\delta, p) = (\Lambda^r \cup \Lambda^{r-2} \cup \cdots ) \cap A_{(p-\delta)/2}(p).$$
So if $\delta < p-2$ then $ \overline \Lambda^r(\delta, p)$ consists of partitions $\lambda = (\lambda_1, \lambda_2, \cdots \lambda_{(p-\delta)/2})$ with $|\lambda| = r - 2i$ for some $i \leq r/2$ which satisfy $\lambda_1 + \lambda_2 \leq \delta$. On the other hand,
$\overline \Lambda^r(p-2, p) = \{(r-2i) | r-p+2 \leq 2i \leq r\}$.
Note that all partitions in $\overline \Lambda^r(\delta, p)$ are $p$-regular.
\begin{thm} \label{main brauer Sp}
Let $r > 0$ and consider an odd number $\delta \in [0,p-1]$. Suppose $\lambda \in \overline \Lambda^r(\delta, p)$. Then the dimension of the simple $\mathcal B_r(\delta)$-module $D_{\mathcal B_r(\delta)}(\lambda)$ is recursively determined by
$$ \dim D_{\mathcal B_r(\delta)}(\lambda) = \sum_{i: \lambda \pm \epsilon_i \in \overline \Lambda^{(r-1)}(\delta, p)} \dim D_{\mathcal B_{r-1}(\delta)}(\lambda \pm \epsilon_i).$$
\end{thm}
\begin{proof} Combining Theorem \ref{Schur-Weyl Sp} with Proposition \ref{quotients Sp} we see that $\overline E_{(p-\delta)/2}^r$ is a semisimple quotient of $\mathcal B_r(\delta)$. Then the theorem follows from Proposition \ref{inductive formula Sp} by recalling that the dimensions of the simple modules for
$\overline E_{(p-\delta)/2}^r$ coincide with the tilting multiplicities in $V_{(p-\delta)/2}^{\otimes r}$, see (\refeq{dim simple/tilting}).
\end{proof}
\begin{remarkcounter} If $n \equiv (p-\delta)/2\; (\mathrm{mod } p)$ for some odd number $\delta \in [0,p-1]$, then $-2n \equiv \delta$. Hence, the theorem describes a class of simple modules for $\mathcal B_r(-2n)$ for all such $n$.
\end{remarkcounter}
\begin{examplecounter} \label{brauer p=7}
Consider $p = 7$. Then the relevant $\delta$'s are $5, 3$ and $1$. The weight set $\overline \Lambda^r(5,7)$ contains $3$ elements (except for $r < 4$ where there are fewer) $\lambda^1(r), \lambda^2(r), \lambda^3(r)$ listed in descending order, namely $(4), (2), (0)$, when $ r$ is even, and $(5), (3), (1)$, when $r$ is odd. Likewise, $\overline \Lambda^r(3,7)$ contains $3$ elements (except for $r = 1$) $\mu^1(r), \mu^2(r), \mu^3(r)$ listed in descending order, namely $(2,0), (1,1), (0,0)$, when $ r$ is even, and $(3,0), (2,1), (1,0)$, when $r$ is odd. Finally, $\overline \Lambda^r(1,7)$ consists of a unique element $\nu(r)$, namely $\nu(r) = (0,0,0)$, when $r$ is even, and $\nu(r) =(1,0,0)$, when $r$ is odd.
In Table 2 we have listed the dimensions of the simple modules for $\mathcal B_r(\delta) $ for $r \leq 10$. These numbers are computed recursively using Theorem \ref{main brauer Sp}.
\end{examplecounter}
\eject
\centerline {
{ \it Table 2. Dimensions of simple modules for $\mathcal B_r(\delta)$ when $p= 7$ and $\delta = 5, 3, 1$.
}}
\vskip .5cm
\centerline {
\begin{tabular}{ r| c c c |c c c |c| c c c c c c c c c c}
$\delta$ &&5&&&3&&1& \\ \hline
r & $\lambda^1(r)$ & $\lambda^2(r)$ & $\lambda^3(r)$ & $\mu^1(r)$ & $\mu^2(r)$ & $\mu^3(r)$ & $\nu(r)$ \\ \hline
1 & & & 1 & & &1 & 1& \\
2 & & 1 &1 & 1 & 1 & 1 & 1 \\
3 & & 1& 2& 1 & 2 & 3 & 1 \\
4 & 1 &3 & 2 & 6 & 5 & 3& 1 \\
5 &1 & 4 & 5 & 6 & 11 &14 & 1\\
6 & 5 & 9& 5& 31&25 &14 & 1 \\
7 &5 &14 &14 &31& 56 & 70 & 1\\
8 & 19& 28&14 &157 & 126 &70 & 1\\
9 & 19 &47&42&157 & 283 & 353 & 1\\
10 & 66 &89&42&793 & 636 & 353 & 1\\
\end{tabular}}
\vskip 1 cm
\subsection{Quotients arising from the orthogonal groups}
In this section we consider the orthogonal groups. Again we shall see that the very same methods as we used for general linear groups in Section 4 apply in this case.
We shall use the notation from Section \ref{O}. In particular, $V$ will be a vector space with a non-degenerate symmetric bilinear form. If $\dim V$ is odd, we write $\dim V = 2n +1$ and set $V_n = V$ and $G_n = O(V)$. Likewise, if $\dim V$ is even, we write $\dim V = 2n$ and set $V_n = V$ and $G_n = O(V_n)$. In both cases we denote by $E_n^r$ the endomorphism algebra $ \mathrm{End}_{G_n}(V_n^{ \otimes r})$ and by $\overline E_n^r$ the algebra $\mathrm{End}_{G_n}(V_n^{\underline \otimes r})$.
As in the general linear and the symplectic case we have:
\begin{prop} \label{quotients O} For all $n$ and $r$ the algebra $\overline E_n^r$ is a semisimple quotient of $E_n^r$.
\end{prop}
Recalling the description of $A_m(p)$ from Section \ref{O} we observe:
\begin{remarkcounter} \label{orthogonal barE} \begin{enumerate}
\item [a)] By Remarks \ref{A for type B} and \ref{A for type D} we have $\overline E_n^r = 0$ unless $2n < p-2$ in the odd case, respectively $2n < p+2$ in the even case.
\item [b)] In the even case we get $\overline E_{(p+1)/2}^r = k$ for all $r$ using the same argument as in the symplectic case. On the other hand, this argument does not apply to the odd case, where in fact the highest term $\overline E_{(p-3)/2}^r$ is usually not $k$ (this is illustrated in Example \ref{brauer2 p=7} below).
\end{enumerate}
\end{remarkcounter}
The Schur--Weyl duality for orthogonal groups \cite[Theorem 1.2]{DH} says in particular:
\begin{thm} \label{Schur-Weyl O} Set $\delta = \dim V_n$. There is an action of $\mathcal B_r(\delta)$ on $V_n^{\otimes r}$ which commutes with the action of $G_n$. The corresponding
homomorphism $\mathcal B_r(\delta) \rightarrow E_n^r$ is surjective for all $n$.
\end{thm}
\begin{remarkcounter}
The Schur--Weyl duality for orthogonal groups gives rise to isomorphisms for large enough $n$, see e.g. \cite[Section 3.4]{AST2}. We shall not need this here.
\end{remarkcounter}
We now divide our discussion into the odd and even cases.
\subsubsection{Type B}
In the odd case where $G_n$ has type $B_n$ our methods lead to the higher Jones quotient $\overline E_m^r$ of $\mathcal B_r(2m+1)$ for $1 \leq m \leq (p-3)/2$. Noting that the Brauer algebras in question are those with an odd $\delta $ lying between $3$ and $p-2$ which we have already dealt with in Section \ref{quotients sp}, we shall leave most details to the reader. However, we do want to point out that the inductive formula for
the dimensions of the simple modules for $\overline E_n^r$ in this case is more complicated than in the symplectic case. The reason is that for type $B$ the highest weight for the natural module is not minuscule. This means that instead of the direct analogue of Proposition \ref{inductive formula} we need to use the following general formula (with notation as in Section 2).
\begin{thm} (\cite[Equation 3.20(1)]{AP}).
Let $G$ be an arbitrary reductive group over $k$ and suppose $Q$ is a tilting module for $G$. If $\lambda$ is a weight belonging to the bottom dominant alcove $A(p)$, then
$$ (Q:T(\lambda)) = \sum_{w} (-1)^{\ell (w)} (Q:\Delta(w \cdot \lambda)),$$
where the sum runs over those $w \in W_p$ for which $w \cdot \lambda \in X^+$.
\end{thm}
\begin{examplecounter} \label{brauer2 p=7} Consider $p =7$. Then type $B$ leads to higher Jones algebras of $\mathcal B_r(3)$ and $\mathcal B_r(5)$. The reader may check that the recursively derived dimensions for the class of simple modules in these cases match (with proper identification of the labeling) with those listed in Table 2. Note in particular that to get those for $\mathcal B_r(3)$ we need to decompose $V_1^{\underline \otimes r}$ into simple modules for $G_1$. The Lie algebra for $G_1$ is $\mathfrak{sl}_2$ and the natural $G_1$-module $V_1$ identifies with the simple $3$-dimensional $SL_2$-module.
\end{examplecounter}
\subsubsection{Type D}
In the even case $G_n$ has type $D$. The module $V_n$ equals $\Delta_n(\epsilon_1)$ and its highest weight $\epsilon_1$ is minuscule. This means that we have
\begin{prop} \label{inductive formula O}
Let $n \geq 1$ and suppose $\lambda \in A_n(p)$. Then
$$(V_n^{\otimes r}: T_n(\lambda)) = \sum_{i: \lambda \pm \epsilon_i \in A_n(p)} (V_n^{\otimes (r-1)}: T_n(\lambda \pm \epsilon_i)).$$
\end{prop}
\begin{proof} Completely analogous to the proof of Proposition \ref{inductive formula Sp}.
\end{proof}
Assume now $\delta \in [2, p+1]$ is even and define the following subsets of weights
$$ \overline \Lambda^r(2, p) = \{(r-2i) | 0 \leq r-2i \leq p-2\},$$
$$ \overline \Lambda^r(4, p) =\{(\lambda_1, \lambda_2) \in X_2^+ | (\lambda_1,|\lambda_2|) \in \Lambda^{r-2i} \text { for some $i$ with } 0 \leq r-2i \leq p-2\}, $$
and for $\delta > 4$
$$ \overline \Lambda^r(\delta, p) = \{ (\lambda_1, \lambda_2, \cdots , \lambda_{\delta/2}) \in X_{\delta/2}^+ | (\lambda_1, \cdots , |\lambda_{\delta/2}|) \in \Lambda^{r-2i} \text { for some } i \leq r/2 \text { and } \lambda_1 + \lambda_2 \leq p-\delta + 2\}. $$
\vskip .3 cm
\begin{thm} \label{main brauer O}
Let $r > 0$ and consider an even number $\delta \in [2,p+1]$. Suppose $\lambda \in \overline \Lambda^r(\delta, p)$. Then the dimension of the simple $\mathcal B_r(\delta)$-module $D_{\mathcal B_r(\delta)}(\lambda)$ is recursively determined by
$$ \dim D_{\mathcal B_r(\delta)}(\lambda) = \sum_{i: \lambda \pm \epsilon_i \in \overline \Lambda^{(r-1)}(\delta, p)} \dim D_{\mathcal B_{r-1}(\delta)}(\lambda \pm \epsilon_i).$$
\end{thm}
\begin{proof} Combining Theorem \ref{Schur-Weyl O} with Proposition \ref{quotients O} we see that $\overline E_{\delta/2}^r$ is a semisimple quotient of $\mathcal B_r(\delta)$. Then the theorem follows from Proposition \ref{inductive formula O} by recalling that the dimensions of the simple modules for
$\overline E_{\delta/2}^r$ coincide with the tilting multiplicities in $V_{\delta/2}^{\otimes r}$, see (\refeq{dim simple/tilting}).
\end{proof}
\begin{remarkcounter} If $n \equiv \delta/2 \; (\mathrm{mod } \; p)$ for some even number $\delta \in [2,p+1]$, then $2n \equiv \delta \; (\mathrm{mod } \;p)$. Hence, the theorem describes a class of simple modules for $\mathcal B_r(2n)$ for all such $n$.
\end{remarkcounter}
\begin{examplecounter} Consider $p = 7$. Then the relevant $\delta$'s are $2, 4, 6, 8$. By Remark \ref{orthogonal barE}b we have that the higher Jones quotient algebra for $\mathcal B_r(8)$ is the trivial algebra $k$ (alternatively, observe that $\mathcal B_r(8) = \mathcal B_r(1)$ which we dealt with in Example \ref{brauer p=7}). At the other extreme the (higher) Jones quotient of $\mathcal B_r(2)$ is also a quotient of the Temperley--Lieb algebra $TL_r(2)$. This case is dealt with in \cite[Proposition 6.4]{A17}. So here we only consider the two remaining cases $\delta = 4$ and $\delta = 6$. We have
$$ \overline \Lambda^1(4,7) = \{(1,0)\},$$
$$ \overline \Lambda^2(4,7) = \{(2,0), (1,1), (1,-1), (0,0)\},$$
$$ \overline \Lambda^3(4,7) = \{(3,0), (2,1), (2,-1), (1,0)\},$$
$$ \overline \Lambda^r(4,7) = \begin{cases} \{(4,0), (2,2), (2,-2), (3,1), (3,-1), (2,0), (1,1), (1,-1), (0,0)\} \text { if $r \geq 4$ is even,} \\ \{(5,0), (3,2), (3,-2), (4,1), (4,-1), (3,0), (2,1), (2,-1), (1,0)\} \text { if $r \geq 5$ is odd.} \end{cases}$$
In Table 3 we have denoted these weights $\lambda^1(r), \cdots , \lambda^9(r)$.
Likewise, we have
$$ \overline \Lambda^1(6,7) = \{(1,0,0)\},$$
$$ \overline \Lambda^2(6,7) = \{(2,0,0), (1,1,0), (0,0,0)\},$$
$$ \overline \Lambda^r(6,7) = \begin{cases} \{(3,0,0), (2,1,0), (1,1,1), (1,1,-1), (1,0,0)\} \text { if $r \geq 3$ is odd,} \\ \{(2,1,1), (2,1,-1), (2,0,0), (1,1,0), (0,0,0)\} \text { if $r \geq 4$ is even.} \end{cases}$$
In Table 3 we have denoted these weights $\mu^1(r), \cdots , \mu^5(r)$. In this table we have then listed the dimensions (computed via the algorithm in Theorem \ref{main brauer O}) for the simple modules for $\mathcal B_r(4)$, respectively $\mathcal B_r(6)$ corresponding to these sets of weights for $r \leq 10$.
\eject
\centerline {{ \it Table 3. Dimensions of simple modules for $\mathcal B_r(\delta)$ when $p= 7$ and $\delta = 4 $ and $6$.}}
\vskip .5cm
\noindent
\begin{tabular}{ r| c c c c c c c c c | c c c c c c}
&&& &&$\delta =4$&&&&&&&$\delta = 6$&& \\ \hline
r & $\lambda^1(r)$ & $\lambda^2(r)$ & $\lambda^3(r)$&$\lambda^4(r)$&$\lambda^5(r)$&$\lambda^6(r)$&$\lambda^7(r)$&$\lambda^8(r)$&$\lambda^9(r)$ & $\mu^1(r)$& $\mu^2(r)$ & $\mu^3(r)$ &$\mu^4(r)$ & $\mu^5(r)$ \\ \hline
1 & & & & &&&&&1 & & &&& 1& \\
2 &&&&&&1 & 1 & 1 & 1 & &&1& 1 & 1 \\
3 &&&&&&1& 2&2 & 4 & 1&2&1&1& 3 \\
4 & 1 &2 & 2 & 3 & 3 & 9&6&6&4& 2 & 3 & 6 & 7 & 3 \\
5 &1 & 5 & 5 & 4 & 4& 16 &20 & 20 &25 &6 & 18 &9&10&16 \\
6 & 25& 25& 25& 45 & 45&81 & 45 & 45 & 25 &27&28&40&53 & 16& \\
7 &25 &70 &70 &70& 70 & 196& 196 & 196 & 196 &40&148&80 &81 &109 \\
8 & 361 & 266 & 266 & 532 & 532 & 784 & 392 & 392 & 196 &228 &229 &297 &418 &109 \\
9 & 361&798& 798 & 893 & 893 & 2209 & 1974 & 1974 & 1764 &297&1172&646&647&824 \\
10 & 4356 & 2772 & 2772 & 5874 & 5874 & 7921 & 3738 & 3738 & 1764 &1828&1829&2293&3289&824 \\
\end{tabular}
\vskip .5 cm
Together with Example \ref{brauer p=7} this example gives a class of simple modules for Brauer algebras with parameter $\delta$ equal to any non-zero element of $\mathbb {F}_7$.
\end{examplecounter}
Note that in the above example we were in type $D_1 = A_1$, $D_2 = A_1 \times A_1$ or $D_3 = A_3$ and we could have deduced the results from the Type A case treated in Section 4. We shall now give another example illustrating type $ D_n$ computations with $n > 3$.
\begin{examplecounter}
Consider $p = 11$ and take $\delta = 10$. Then $\mathcal B_r(10)$ has the higher Jones quotient $\overline E_5^r$. If $r \geq 5$ the weight set $\overline \Lambda^r(10, 11)$ contains $7$ elements, namely
$$ \{(1,1,1,1,-1), (2,1,1,1,0), (1,1,1,1,1), (3, 0,0,0,0), (2,1,0,0,0), (1,1,1,0,0), (1,0,0,0,0)\}$$
when $r$ is odd, and
$$ \{(2,1,1,1, -1), (2,1,1,1,1), (2,1,1,0,0), (1,1,1,1,0), (2,0,0,0,0), (1,1,0,0,0), (0,0,0,0,0)\}$$
when $r$ is even.
If $r \in \{1,2,3,4\}$, the set $\overline \Lambda^r(10, 11)$ consists of the last, the $3$ last, the $4$ last, and the $5$ last elements, respectively, in the above lists.
In Table 4 we have listed the dimensions of the corresponding simple modules for $\mathcal B_r(10)$ for $r \leq 10$ using Theorem \ref{main brauer O}. We have denoted the above $7$ weights $\lambda^1(r), \cdots , \lambda^7(r)$ (in the given order).
\end{examplecounter}
\eject
\centerline{
{ \it Table 4. Dimensions of simple modules for $\mathcal B_r(10)$ when $p= 11$.
}}
\vskip .5cm
\centerline {
\begin{tabular}{ r| c c c c c c c|ccc}
r & $\lambda^1(r)$ & $\lambda^2(r)$ & $\lambda^3(r)$ & $\lambda^4(r)$ & $\lambda^5(r)$ & $\lambda^6(r)$ &$\lambda^7(r)$ \\ \hline
1& & & & & & & 1& \\
2 & && & & 1 & 1 & 1 & \\
3 && & & 1& 2 & 1 & 3 & \\
4 & &&3 & 1 & 6 & 6 & 3& \\
5 &1&4 & 1 & 6 & 15 & 10 &15 & \\
6 & 5&5 & 29& 16& 36&40 &15 & \\
7 &21 &55 &21 &36 &105& 85 & 91 & \\
8 &76 & 76& 245&97 &232 & 281 &91 & \\
9 &173& 494 &173&232&568 & 623 & 604& \\
10 &667& 667 &1685&840&1404 & 1795 & 604 & \\
\end{tabular}}
\vskip 1 cm
\section{Quantum Groups}
In the remaining sections $k$ will denote an arbitrary field.
Let $\mathfrak g$ denote a simple complex Lie algebra. Then there is a quantum group $U_q = U_q(\mathfrak g)$ (a quantized enveloping algebra over $k$) associated with $\mathfrak g$. We shall be interested in the case where the quantum parameter $q$ is a root of unity in $k$ and we want to emphasize that the quantum group we are dealing with is the Lusztig version defined via $q$-divided powers, see e.g. \cite[Section 0]{APW}. This means that we start with the ``generic" quantum group $U_v = U_v(\mathfrak g)$ over ${\mathbb Q}(v)$ where $v$ is an indeterminate. Then we consider the ${\mathbb Z}[v,v^{-1}]$-subalgebra $U_{{\mathbb Z}[v,v^{-1}]}$ of $U_v$ generated by the quantum divided powers of the generators for $U_v$. When $q \in k\setminus 0$ we make $k$ into a ${\mathbb Z}[v,v^{-1}]$-algebra by specializing $v$ to $q$ and define $U_q$ as $U_q = U_{{\mathbb Z}[v,v^{-1}]} \otimes_{{\mathbb Z}[v,v^{-1}]} k$. This construction, of course, makes sense for arbitrary $q$, but if $q$ is not a root of unity all finite-dimensional $U_q$-modules are semisimple and our results are trivial. So in the following we always assume that $q$ is a root of unity and we denote by $\ell$ the order of $q$. When $\ell \in \{2, 3, 4, 6\}$ the (quantum) higher Jones algebras we introduce turn out to be trivial ($0$ or $k$) for all $\mathfrak g$ so we ignore these cases. We set $\ell' = \mathrm{ord}(q^2)$, i.e. $\ell' = \ell$, if $\ell$ is odd, and $\ell' = \ell/2$, if $\ell$ is even.
In this section we shall very briefly recall some of the key facts about $U_q$ and its representations relevant for our purposes. As the representation theory for $U_q$ is in many ways similar to the modular representation theory for $G$ that we have been dealing with in the previous sections, we shall leave most details to the reader. However, we want to emphasize one difference: if the root system associated with $\mathfrak g$ has two different root lengths then the case of even $\ell$ is quite different from the odd case (the affine Weyl groups in question are not the same). This phenomenon is illustrated in \cite[Section 6]{AS} where the fusion categories for type $B$ as well as the corresponding fusion rules visibly depend on the parity of $\ell$. The difference will also be apparent in Section 6.3 below where for instance the descriptions of the bottom dominant alcoves in the type $C$ case considered there depend on the parity of $\ell$.
Again, we start out with the general case and then specialize first to the general linear quantum groups, and then to the symplectic quantum groups. We omit treating the case of quantum groups corresponding to the orthogonal Lie algebras, because of the lack of a general version of Schur--Weyl duality in that case.
\subsection{Representation theory for Quantum Groups}
We have a triangular decomposition $U_q = U_q^- U_q^0U_q^+$ of $U_q$. If $n$ denotes the rank of $\mathfrak g$, then we set $X = {\mathbb Z}^n$ and identify each $\lambda \in X$ with a character of $U_q^0$ (see e.g. \cite[Lemma 1.1]{APW}). These characters extend to $B_q = U_q^-U_q^0$ giving us the $1$-dimensional $B_q$-modules $k_\lambda, \lambda \in X$. As in Section 1.1 we denote by $R$ the root system for $\mathfrak g$ and consider $R$ as a subset of $X$. The set $S$ of simple roots corresponds to the generators of $U_q^+$ and we define the dominant cone $X^+ \subset X$ as before. The Weyl group $W$ is still the group generated by the reflections $s_{\alpha}$ with $\alpha \in S$.
Define the bottom dominant alcove in $X^+$ by
$$ A(\ell) = \begin{cases} \{\lambda \in X^+ | \langle \lambda + \rho, \alpha_0^{\vee} \rangle < \ell \} \text { if $\ell$ is odd,} \\ \{\lambda \in X^+ | \langle \lambda + \rho, \beta_0^{\vee} \rangle < \ell' \} \text { if $\ell$ is even.} \end{cases}$$
Here $\alpha_0$ is the highest short root and $\beta_0$ is the highest long root.
The affine Weyl group $W_\ell$ for $U_q$ is then the group generated by the reflections in the walls of $A(\ell)$. Note that, when $\ell$ is odd, $W_\ell$ is the affine Weyl group (scaled by $\ell$) associated with the dual root system of $R$, whereas if $\ell$ is even, $W_\ell$ is the affine Weyl group (scaled by $\ell'$) for $R$, cf. \cite[Section 3.17]{AP}.
Suppose $\lambda \in X^+$. Then we have modules ${\mathbb D}elta_q(\lambda), \nabla_q(\lambda), L_q(\lambda)$ and $T_q(\lambda)$ completely analogous to the $G$-modules in Section 2 with the same notation without the index $q$.
The quantum linkage principle (see \cite{A03}) implies that if $L_q(\mu)$ is a composition factor of ${\mathbb D}elta_q(\lambda)$, then $\mu$ is strongly linked (by reflections from $W_\ell$) to $\lambda$. Likewise, if ${\mathbb D}elta_q(\mu)$ occurs in a Weyl filtration of $T_q(\lambda)$, then $\mu$ is strongly linked to $\lambda $.
The quantum linkage principle then gives the identities
$$ \Delta_q(\lambda) = \nabla_q(\lambda) = L_q(\lambda) = T_q(\lambda) \text { for all } \lambda \in A(\ell),$$
which will be crucial for us in the following.
Suppose $Q$ is a general tilting module for $U_q$. Imitating the definitions in Section \ref{Fusion} we define the fusion summand and the negligible summand of $Q$ as follows
$$ Q^{{\mathcal F}}= \bigoplus_{\lambda \in A(\ell)} T_q(\lambda)^{(Q:T_q(\lambda))} \text { and } Q^{\mathcal N} = \bigoplus_{\lambda \in X^+\setminus A(\ell)}T_q(\lambda)^{(Q:T_q(\lambda))}.$$
The exact same arguments as in the modular case then give us the quantum analogue of Theorem \ref{fusion-quotient}
\begin{thm} \label{q-fusion-quotient}
Let $Q$ be an arbitrary tilting module for $U_q$. Then the natural map $\phi: \mathrm{End}_{U_q}(Q) \rightarrow \mathrm{End}_{U_q}(Q^{{\mathcal F}})$ is a surjective algebra homomorphism. The kernel of $\phi$ equals
$$\{h \in \mathrm{End}_{U_q}(Q) | \mathrm{Tr}_q(i_\lambda \circ h \circ \pi_\lambda) = 0 \text { for all } i_\lambda \in \mathrm{Hom}_{U_q}( T_q(\lambda), Q), \; \pi_\lambda \in \mathrm{Hom}_{U_q}(Q, T_q(\lambda)),\; \lambda \in X^+\}.$$
\end{thm}
We also have a quantum fusion category (still denoted $\mathcal F$) and a fusion tensor product $\underline \otimes$ on it, see \cite[Section 4]{A92}. This leads to an analogue of Corollary 2.4.
\begin{cor} \label{q-fusion} Let $T$ be an arbitrary tilting module for $U_q$. Then for any $r \in {\mathbb Z}_{\geq 0}$ the natural homomorphism $\mathrm{End}_{U_q}(T^{\otimes r}) \rightarrow \mathrm{End}_{U_q}(T^{\underline \otimes r})$ is surjective.
\end{cor}
All of the above easily adapts to the case, where we replace the simple Lie algebra $\mathfrak g$ by the general linear Lie algebra $\mathfrak {gl}_n$ and we shall explore this case further in the next section.
Finally, the cellular algebra theory recalled in Section \ref{cellular} carries over verbatim. Alternatively, use the quantum framework from \cite[Section 5]{AST1} directly.
\subsection{The General Linear Quantum Group} \label{general linear q-group}
Let $n \geq 1$ and consider the general linear Lie algebra $\mathfrak {gl}_n$. The generic quantum group over ${\mathbb Q}(v)$ associated to $\mathfrak {gl}_n$ has a triangular decomposition in which the $0$ part identifies with a Laurent polynomial algebra ${\mathbb Q}(v)[K_1^{\pm 1}, \cdots , K_n^{\pm 1}]$. If $\lambda = (\lambda_1, \cdots , \lambda_n) \in X_n = {\mathbb Z}^n$ then $\lambda$ defines a character of this algebra which sends
$K_i$ into $v^{\lambda_i}$. In particular, the element $\epsilon_i \in X_n$ with $1$ as its $i$-th entry and $0$'s elsewhere defines the character which sends $K_i$ to $v$ and all other $K_j$'s to $1$. We then have $\lambda = \sum_i \lambda_i \epsilon_i$.
Set $U_{q,n}$ equal to the quantum group for $\mathfrak {gl}_n$ over $k$ with parameter a root of unity $q \in k$. Then we still get for $\lambda \in X_n$ a character of $U_q^0$, see \cite[Section 9]{APW}. If we denote by $V_{q,n}$ the $n$-dimensional vector representation of $U_{q,n}$, then (in analogy with the classical case) $V_{q,n}$ has weights $\epsilon_1, \cdots, \epsilon_n$, all with multiplicity $1$. Moreover, we may (for all $\ell$) identify $V_{q,n}$ with $\Delta_q(\epsilon_1) = \nabla_q(\epsilon_1) = L_q(\epsilon_1) = T_q(\epsilon_1)$.
The bottom alcove in $X_n$ is now denoted $A_n(\ell)$ and given by
$$ A_n(\ell) = \{\lambda \in X_n | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n \text { and } \lambda_1 - \lambda_n \leq \ell'
-n\}.$$
As noted above, $V_{q,n}$ is a tilting module. Hence, so are $V_{q,n}^{\otimes r}$ as well as the corresponding fusion summands $V_{q,n}^{\underline \otimes r}$ for all $r \in {\mathbb Z}_{\geq 0}$. We set $E_{q,n}^r = \mathrm{End}_{U_{q,n}}(V_{q,n}^{\otimes r})$ and $\overline E_{q,n}^r = \mathrm{End}_{U_{q,n}}(V_{q,n}^{\underline \otimes r})$. These endomorphism algebras are then cellular algebras, and $\overline E_{q,n}^r$ is in fact semisimple (because $V_{q,n}^{\underline \otimes r}$ is a semisimple $U_{q,n}$-module). Moreover, by Corollary \ref{q-fusion} we have:
\begin{equation} \label {E to barE}
\text {The natural homomorphism } E_{q,n}^r \rightarrow \overline E_{q,n}^r \text { is surjective.}
\end{equation}
Arguing as in Section \ref{tensor powers} we also get:
\begin{equation} \label{surj n>m}
\text {The ``restriction" homomorphisms }E_{q,n}^r \rightarrow E_{q,m}^r \text { are surjective for all } n \geq m \text { and all } r.
\end{equation}
\subsection{Quantum Symplectic Groups} \label{q-sp}
Set now $U_{q,n}$ equal to the quantum group corresponding to the simple Lie algebra $\mathfrak {sp}_{2n}$ of type $C_n$. The vector representation $V_{q,n} = \Delta_q(\epsilon_1)$ is then a tilting module for $U_{q,n}$. As in the corresponding classical case it has weights $\pm \epsilon_1, \cdots , \pm \epsilon_n$.
The bottom alcove in $X_n$ is now denoted $A_n(\ell)$ and given by
$$ A_n(\ell) = \begin{cases} \{\lambda \in X_n | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n \geq 0 \text { and } \lambda_1 + \lambda_2 \leq \ell - 2n \} \text { if $\ell$ is odd,}\\ \{\lambda \in X_n | \lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n \geq 0 \text { and } \lambda_1\leq \ell' -n-1\} \text { if $\ell$ is even.} \end{cases}$$
In both the even and the odd case we have $A_n(\ell) \neq \emptyset$ if and only if $\ell > 2n$. In the odd case $\epsilon_1$ belongs to $A_n(\ell)$ for $n = 1, 2, \cdots , (\ell -1)/2$, whereas in the even case the same is true for $n=1, 2, \cdots , (\ell -4)/2$.
Again in this case we get (with $E_{q,n}^r = \mathrm{End}_{U_{q,n}}(V_{q,n}^{\otimes r})$ and $\overline E_{q,n}^r = \mathrm{End}_{U_{q,n}}(V_{q,n}^{\underline \otimes r})$):
\begin{equation} \label{surj E to Ebar qsp}
\text {The natural homomorphisms } E_{q,n}^r \rightarrow \overline E_{q,n}^r \text { are surjective for all $n, r$.}
\end{equation}
\section{A class of simple modules for the Hecke algebras of symmetric groups}
We continue in this section to assume that $k$ is an arbitrary field and that $q \in k$ is a root of unity of order $\ell$.
Let $r$ be a positive integer and denote by $H_r(q)$ the Hecke algebra of the symmetric group $S_r$ with parameter $q\in k$.
Using the notation from Section \ref{general linear q-group} we then have the quantum Schur--Weyl duality:
\begin{thm} \label{q-Schur-Weyl}
The Hecke algebra $H_r(q)$ acts on the tensor power $V_{q,n}^{\otimes r}$ via the quantum $R$ matrix for $U_{q,n}$. This action commutes with the $U_{q,n}$-module structure on $V_{q,n}^{\otimes r}$ giving homomorphisms $H_r(q) \rightarrow E_{q,n}^r$ which are surjective for all $n$ and isomorphisms for $n \geq r$.
\end{thm}
This is the main result of \cite{DPS}.
\begin{cor} \label{q-Jones}
Suppose $r \geq \ell$. Then the Hecke algebra $H_r(q)$ has the following semisimple quotients $\overline E_{q,1}^r, \overline E_{q,2}^r, \cdots ,\overline E_{q, \ell -1}^r$.
\end{cor}
\begin{proof}
By Theorem \ref{q-Schur-Weyl} we have $H_r(q) \simeq E_{q,r}^r$. Then the corollary follows from (\refeq{surj n>m}) and (\refeq{E to barE}) .
\end{proof}
\begin{remarkcounter} \begin{enumerate}
\item
The semisimple quotients of $H_r(q)$ listed in Corollary \ref{q-Jones} are obvious generalisations of the Jones algebras introduced in \cite{ILZ}, and as explained in the introduction this is the reason why we use the name `higher Jones algebras' in this paper.
\item In analogy with
the modular case we see that $ E_{q,1}^r = k = \overline E_{q,\ell-1}^r $ for all $r$.
\end{enumerate}
\end{remarkcounter}
The simple modules for $H_r(q)$ are parametrized by the set of $\ell$-regular partitions of $r$. We denote the simple $H_r(q)$-module associated with such a partition $\lambda$ by $D_{q,r}(\lambda)$. Our aim is to derive an algorithm for computing the dimensions of a special class of simple $H_r(q)$-modules, namely those coming from the higher Jones algebras.
In analogy with the notation in Section \ref{class of simple} we set
$$\overline \Lambda^r(\ell) = \{\lambda = (\lambda_1, \lambda_2, \cdots ,\lambda_m) | \lambda \text { is a partition of $r$ and } \lambda \in A_m(\ell) \text { for some } m < \ell' \}.$$
So $\overline \Lambda^r(\ell) $ consists of those partitions of $r$ which have at most $m < \ell'$ non-zero terms and satisfy $\lambda_1 - \lambda_m \leq \ell' - m$. Clearly, the partitions in $\overline \Lambda^r(\ell)$ are all $\ell'$-regular.
The result in Proposition \ref{inductive formula} carries over unchanged to the quantum case and leads to the following analogue of Theorem \ref{main symm}.
\begin{thm} \label{main q-symm}
Let $r > 0$ and suppose $\lambda \in \overline \Lambda^r(\ell)$. Then the dimension of the simple $H_r(q)$-module $D_{q,r}(\lambda)$ is recursively determined by
$$ \dim D_{q,r}(\lambda) = \sum_{i: \lambda - \varepsilon_i \in \overline \Lambda^{(r-1)}(\ell)} \dim D_{q,r-1}(\lambda - \varepsilon_i).$$
\end{thm}
This theorem allows us to determine the dimensions of a class of simple modules for $H_r(q)$ just like we did for symmetric groups in Section \ref{class of simple}. The only difference is that $\ell$ in contrast to $p$ may now take any value in ${\mathbb Z}_{>0}$. We illustrate by a couple of examples.
\begin{examplecounter}
Let $\ell = 8$, i.e. $q$ is a root of unity of order $8$. In this case $\overline \Lambda^r(8)$ consists of the trivial partition of $r$ (corresponding to the trivial module for $H_r(q)$), the unique $3$-parts partition $\nu$ of $r$ with $\nu_1 - \nu_3 \leq 1$, and the $2$ parts partitions $(s+1,s-1)$ and $(s,s)$, if $r = 2s$ is even, respectively $(s, s-1)$, if $r = 2s-1$ is odd. It is easy to deduce from Theorem \ref{main q-symm} that the partitions with $2$ parts all correspond to simple $H_r(q)$-modules of dimension $2^s$.
\end{examplecounter}
\begin{examplecounter}
Consider the case $\ell = 12$. Here $\overline \Lambda^r(12)$ consists of the trivial partition $(r)$, the unique partition $\nu$ with $5$ parts satisfying $\nu_1 - \nu_5 \leq 1$, the partitions $\lambda^1(r), \lambda^2(r), \lambda^3(r)$ with $2$-parts
$$ \{\lambda^1(r), \lambda^2(r), \lambda^3(r)\} = \begin{cases} \{(s+2, s-2), (s+1, s-1), (s,s)\} \text { if } r = 2s, \\ \{(s+2, s-1), (s+1,s)\} \text { if } r = 2s +1; \end{cases}$$
the partitions $\mu^1(r), \mu^2(r), \mu^3(r), \mu^4(r)$ with $3$ parts
$$ \{\mu^1(r), \mu^2(r), \mu^3(r), \mu^4(r)\} = \begin{cases} \{(s+2,s-1,s-1),(s+1, s+1, s-2), (s+1, s, s-1), (s,s,s)\}\\ \text { if } r = 3s, \\ \{(s+2,s, s-1), (s+1,s+1, s-1), (s+1,s,s)\} \text { if } r = 3s +1,\\ \{(s+2,s+1, s-1), (s+2,s, s), (s+1,s+1,s)\} \text { if } r = 3s +2;\\ \end{cases}$$
and the partitions $\eta^1(r), \eta^2(r), \eta^3(r)$ with $4$ parts
$$ \{\eta^1(r), \eta^2(r), \eta^3(r)\} = \begin{cases} \{(s+1,s+1,s-1,s-1), (s+1,s,s, s-1), (s,s,s,s)\} \text { if } r = 4s, \\ \{(s+1,s+1,s, s-1), (s+1,s,s, s)\} \text { if } r = 4s +1,\\ \{(s+2,s, s,s), (s+1,s+1, s+1,s-1), (s+1,s+1,s,s)\} \text { if } r = 4s +2,\\ \{(s+2,s+1,s,s), (s+1,s+1,s+1,s) \} \text { if } r = 4s + 3.\end{cases}$$
Here the listed partitions involving a zero or a negative number (these occur only for small $r$) should be deleted. In these cases as well as in the cases where a set with only $2$ elements is listed it is understood that the corresponding or missing $\lambda$, $ \mu$ or $\eta$ does not occur.
We can use Theorem \ref{main q-symm} to compute the dimensions of the simple modules for $ H_r(q)$ where $q$ is a root of unity having order $12$. In Table 5 we have listed the results for the first few values of $r$. As we know that both the trivial partition and the partition $\nu$ always correspond to simple modules of dimension $1$ we have not included these two partitions in the table.
\end{examplecounter}
\eject
\centerline{
{ \it Table 5. Dimensions of simple modules for $H_r(q)$ with $\ell = 12$.}}
\vskip .5cm
\centerline
{\begin{tabular}{ r| c c c| c c c c | c c c| c}
 & $\lambda^1(r)$ & $\lambda^2(r)$ & $\lambda^3(r)$ & $\mu^1(r)$ & $\mu^2(r)$& $\mu^3(r)$& $\mu^4(r)$ &$\eta^1(r)$ & $\eta^2(r)$ & $\eta^3(r)$ \\ \hline
1 & &&1 & & & 1 && &&1 \\
2 & & 1 &1 & &1&1 & &1&&1& \\
3 && 1 &2 & 1 & & 2 & 1 &2 &1&&\\
4 &1& 3 & 2 & 3& 2 &3&&2& 3& 1&\\
5 & 4 & 5& & 5 &6 &5 &&5&4&& \\
6 &4 &9 &5& 6 & 5 &16&5 & 4 &5& 9&\\
7 & 13& 14&&22 & 5 &21 && 13 & 14&\\
8 & 13 &27&14&27&43 & 26 & & 13 &27 &14&\\
9 & 40 &41&&43 & 27 & 96 & 26 & 40 & 41&&\\
10 & 40 &81&41&139 & 123 & 122 & & 41 & 40 & 81\\
\end{tabular}}
\section{A class of simple modules for BMW-algebras}
Let $x, v, z$ be three indeterminates and set $R = {\mathbb Z}[v, v^{-1}, x, z]/((1-x)z + (v - v^{-1}))$. Let $r \geq 1$ be an integer and consider the general $3$-parameter $BMW$-algebra $BMW_r(R)$ over $R$ as in \cite[Definition 3.1]{Hu}.
As an $R$-module $BMW_r(R)$ is free of rank $(2r-1)!!$ (with basis indexed by Brauer diagrams).
As in the previous sections we denote by $k$ an arbitrary field containing a root of unity $q$ of order $\ell$. We make $k$ into an $R$-algebra by specializing $v$ to $-q^{2n+1}$, $z$ to $q-q^{-1}$, and $x$ to $1 - \sum_{i=-n}^n q^{2i}$. Then the $BMW$-algebra over $k$ that we shall work with is
$$ BMW_r(-q^{2n+1}, q) = BMW_r(R) \otimes_R k.$$
For $q = 1$ it turns out that $BMW_r(-q^{2n+1}, q)$ may be identified with the Brauer algebra $\mathcal B_r(-2n)$, see the remarks after Definition 3.1 in \cite{Hu}. We treated the Brauer algebras in Section \ref{Brauer} so in this section we shall assume $q \neq 1$.
Using the notation from Section \ref{q-sp} the quantum analogue \cite[Theorem 1.5]{Hu} of the Schur--Weyl duality for symplectic groups says:
\begin{thm} \label{qsp-Schur-Weyl}
The algebra $BMW_r(-q^{2n+1}, q)$ acts naturally on the tensor power $V_{q,n}^{\otimes r}$. This action commutes with the $U_{q,n}$-module structure on $V_{q,n}^{\otimes r}$ giving homomorphisms $BMW_r(-q^{2n+1},q) \rightarrow E_{q,n}^r$ which are surjective for all $n$.
\end{thm}
\begin{cor} \begin{enumerate}
\item If $\ell$ is odd,
then the $BMW$-algebra $BMW_r(-q^{2n+1},q)$ surjects onto the semisimple algebra $\overline E_{q,n}^r$ for $n= 1, 2, \cdots , (\ell - 1)/2$ and $r> 0$.
\item If $\ell$ is even,
then the $BMW$-algebra $BMW_r(-q^{2n+1},q)$ surjects onto the semisimple algebra $\overline E_{q,n}^r$ for $n= 1, 2, \cdots , (\ell - 4)/2$ and $r> 0$.
\end{enumerate}
\end{cor}
\begin{proof}
By Theorem \ref{qsp-Schur-Weyl} we have $BMW_r(-q^{2n+1}, q)$ surjects onto $ E_{q,n}^r$ for all $n, r$ and hence also on $\overline E_{q,n}^r$ by (\refeq {surj E to Ebar qsp}). In Section \ref{q-sp} we observed that these latter algebras are non-zero for the $n$'s listed in the corollary.
\end{proof}
Let $\lambda$ be a partition of $r-2i$ for some $i \leq r/2$. In analogy with the Brauer algebra case we denote the simple $BMW_r(-q^{2n+1}, q)$-module corresponding to $\lambda$ by $D_{BMW_r(-q^{2n+1}, q)}(\lambda)$.
Recall the definition of $A_n(\ell)$ from Section \ref{q-sp} and set for any $r>0$
$$ \overline \Lambda^r(n, \ell) = (\Lambda^r \cup \Lambda^{r-2} \cup \cdots ) \cap A_{n}(\ell).$$
Then arguments similar to the ones used above give
\begin{thm} \label{main BMW}
Let $r > 0$.
\begin{enumerate}
\item Suppose $\ell$ is odd. Let $n \in \{1, 2, \cdots (\ell - 1)/2\}$ and $\lambda \in \overline \Lambda^r(n, \ell )$. Then the dimension of the simple $BMW_r(-q^{2n+1}, q)$-module $D_{BMW_r(-q^{2n+1}, q)}(\lambda)$ is recursively determined by
$$ \dim D_{BMW_r(-q^{2n+1}, q)}(\lambda) = \sum_{i: \lambda \pm \varepsilon_i \in \overline \Lambda^{(r-1)}(n, \ell)} \dim D_{BMW_{r-1}(-q^{2n+1}, q)}(\lambda \pm \varepsilon_i).$$
\item Suppose $\ell$ is even. Let $n \in \{1, 2, \cdots (\ell - 4)/2\}$ and $\lambda \in \overline \Lambda^r(n, \ell )$. Then the dimension of the simple $BMW_r(-q^{2n+1}, q)$-module $D_{BMW_r(-q^{2n+1}, q)}(\lambda)$ is recursively determined by
$$ \dim D_{BMW_r(-q^{2n+1}, q)}(\lambda) = \sum_{i: \lambda \pm \varepsilon_i \in \overline \Lambda^{(r-1)}(n, \ell)} \dim D_{BMW_{r-1}(-q^{2n+1}, q)}(\lambda \pm \varepsilon_i).$$
\end{enumerate}
\end{thm}
\begin{examplecounter}
We shall illustrate Theorem \ref{main BMW} in the case $\ell$ is even (the odd case is equivalent to the Brauer case in Section \ref{Brauer}). So we take $\ell = 10$. Then the relevant values of $n$ are $1, 2$ and $3$. The weight set $\overline \Lambda^r(1,10)$ contains $2$ elements (except for $r =1$) $\lambda^1(r), \lambda^2(r)$, namely $ (2), (0)$, when $ r$ is even, and $ (3), (1)$, when $r$ is odd. Likewise, $\overline \Lambda^r(2,10)$ contains $2$ elements when $r$ is odd (except for $r=1$) and $4$ elements, when $r$ is even (except for $r = 2$). We denote these weights $\mu^1(r), \mu^2(r), \mu^3(r), \mu^4(r)$. They are $(2,2), (2,0), (1,1), (0,0)$, when $ r$ is even, and $ (2,1), (1,0)$, when $r$ is odd. Finally, $\overline \Lambda^r(3,10)$ consists of $2$ elements $\nu^1(r), \nu^2(r)$, namely $(1,1,0), (0,0,0)$, when $r$ is even, and $(1,1,1), (1,0,0)$, when $r$ is odd (except $r=1$).
In Table 6 we have in row $r$ listed (in the order given above) the dimensions of the simple modules for $ BMW_r(-q^{2n+1}, q) $ for $r \leq 10$. These numbers are computed recursively using Theorem \ref{main BMW}.
\end{examplecounter}
{ \it Table 6. Dimensions of simple modules for $BMW_r(-q^{2n+1}, q)$ when $\ell = 10$ and $n = 1, 2, 3$.}
\vskip .5cm
\centerline {
\begin{tabular}{ r| c c |c c c c |c c| c }
&$n=1$&&&$n=2$&&&$n=3$& \\ \hline
r & $\lambda^1(r)$ & $\lambda^2(r)$ & $\mu^1(r)$ & $\mu^2(r)$ & $\mu^3(r)$& $\mu^4(r)$ & $\nu^1(r)$ & $ \nu^2(r)$ \\ \hline
1 & & 1& & 1 & &&&1 & \\
2 & 1 & 1 & 1& 1 & 1 & 1 & 1 & 1 \\
3 & 1&2& 2& 3& & & 1 & 2 \\
4 & 3 &2 & 2& 5 & 5 &3& 3& 2 \\
5 &3 & 5 & 12 & 13 & & &3 & 5\\
6 & 8 & 5& 12& 25&25 &13&8 & 5 \\
7 &8 &13 &62 &63&& & 8 & 13\\
8 & 21& 13&62 &125 & 125 &63& 21 & 13\\
9 & 21 &44&312&313 && & 21 & 34\\
10 & 65 &44&312&625 & 625 & 313&55 & 34\\
\end{tabular}}
\vskip 1 cm
\end{document} |
\begin{document}
\title{Positive weight function and classification of g-frames}
\begin{abstract}
Given a positive weight function and an isometry map on a Hilbert space ${\mathbb H}$, we study a class of linear maps and characterize when it forms a $g$-frame, a $g$-Riesz basis, or a $g$-orthonormal basis for ${\mathbb H}$ with respect to $\C$ in terms of the weight function. We apply our results to study frames for shift-invariant subspaces on the Heisenberg group.
\end{abstract}
\section{Introduction and preliminaries}
A frame for a Hilbert space is a countable set of overcompleted vectors such that each vector in the Hilbert space can be represented in a non-unique way in terms of the frame elements. The redundancy and the flexibility in the representation of a Hilbert space vector by the frame elements make the frames a useful tool in mathematics as well as in interdisciplinary fields such as sigma-delta quantization \cite{ben06}, neural networks \cite{can99}, image processing \cite{can05}, system modelling \cite{dud98}, quantum measurements \cite{eld02}, sampling theory \cite{fei94}, wireless communications \cite{str03} and many other well known fields.
Given a Hilbert space $\mathcal H$, a countable family of vectors $\{x_j\}_{j\in J}\subset \mathcal H$ is called a {\it frame} for $\mathcal H$ if there are positive constants $A$ and $B$ such that for any $x\in \mathcal H$,
$$A\|x\|^2 \leq \sum_{j\in J} |\langle x, x_j\rangle|^2 \leq B \|x\|^2 .$$
Frames were first introduced by Duffin and Schaeffer \cite{duf52} in the context of nonharmonic Fourier series \cite{you01}. The notion of a frame was extended to that of a g-frame by Sun \cite{sun06} in 2006 to generalize all the existing frames such as bounded quasi-projectors \cite{for04}, frames of subspaces \cite{cas04}, pseudo-frames \cite{li04}, oblique frames \cite{chr04}, outer frames \cite{ald04}, and time-frequency localization operators \cite{dor06}. Here, we recall the definition of a g-frame.
\begin{definition}
Let $\mathcal H$ be a Hilbert space and $\{\mathcal K_j\}_{j\in J}$ be a countable family of Hilbert spaces with associated norm $\| \cdot\|_{\mathcal K_j}$.
A countable family of linear and bounded operators $\{\Lambda_j: {\mathbb H} \to \K_j\}_{j \in J}$ is called a {\it $g$-frame} for $\mathcal H$ with respect to $\{\mathcal K_j\}_{j\in J}$ if there are two positive constants $A$ and $B$ such that for any $f\in \mathcal H$ we have
\begin{equation}\label{eq01}
A\|f\|_{\mathcal H}^2 \leq \sum_{j\in J} \|\Lambda_j(f)\|_{\mathcal K_j}^2 \leq B \|f\|_{\mathcal H}^2.
\end{equation}
\end{definition}
The constants $A$ and $B$ are called {\it $g$-frame lower and upper bounds}, respectively. If $A=B=1$, then it is called a {\it Parseval $g$-frame}. For example, by the Riesz representation theorem, every $g$-frame is a frame if $\mathcal K_j= \Bbb C$ for all $j\in J$. And, every frame is a $g$-frame with respect to $\Bbb C$. If the right-hand side of (\ref{eq01}) holds, it is said to be a {\it $g$-Bessel sequence} with bound $B$. The family $\{\Lambda_j \}_{j \in J}$ is called {\it $g$-complete}, if for any vector $f\in \mathcal H$ with $\Lambda_j(f)=0$ for $j \in J$, we have $f=0$. If $\{\Lambda_j \}_{j \in J}$ is $g$-complete and there are positive constants $A$ and $B$ such that for any finite subset $J_1 \subset J$ and $g_j \in \K_j, \; j \in J_1$,
\[ A \sum_{j \in J_1} \|g_j \|^2 \leq \bigg\|\sum_{j \in J_1} \Lambda_j^* (g_j) \bigg\|^2 \leq B \sum_{j \in J_1}\|g_j \|^2, \]
then $\{\Lambda_j \}_{j \in J}$ is called a {\it $g$-Riesz basis} for ${\mathbb H}$ with respect to $\{\K_j\}_{j \in J}$. Here, $\Lambda_j^*$ denotes the adjoint operator. We say $\{\Lambda_j \}_{j \in J}$ is a $g$-orthonormal basis for ${\mathbb H}$ with respect to $\{\K_j\}_{j \in J}$ if it satisfies the following:
\begin{align}\label{onb1}
\langle \Lambda^*_{i} g, \Lambda^*_{j} h \rangle &= 0 \quad \forall g \in \K_i, \; h \in \K_j, i\neq j \\\label{onb2}
\| \Lambda^*_{i} g\|^2 &= \|g \|^2, \quad \forall i, \; g \in \K_i \\\label{onb3}
\sum_{j \in J}\|\Lambda_jf \|^2 &=\|f \|^2, \quad \forall f \in {\mathbb H}.
\end{align}
Before we state the main results of this paper, let us consider the following example. For a function $\phi\in L^2(\Bbb R^d)$, $d\geq 1$ and $m, n\in \Bbb Z^d$, the modulation and translation of $\phi$ by multi-integers $m$ and $n$ are defined by
$$M_m\phi(x) = e^{2\pi i \langle m, x\rangle} \phi(x), \quad T_n\phi(x) =\phi(x-n) .$$
The Gabor (Weil-Heisenberg) system generated by $\phi$ is
$$\mathcal G(\phi):=\{M_m T_n\phi: \ m, n\in \Bbb Z^d\} .$$
It is well-known that the ``basis\rq\rq{} property of the Gabor system $\mathcal G(\phi)$ for its spanned vector space can be studied by the Zak transform of $\phi$
$$Z\phi(x, \xi) = \sum_{k\in \Bbb Z^d} \phi(x+k) e^{2\pi i \langle \xi, k\rangle} .$$
For example, the Gabor system $\mathcal G(\phi)$ is a Riesz basis for the spanned vector space if and only if there are positive constants $A$ and $B$ such that $A\leq |Z\phi(x, \xi)|\leq B$ a.e. $(x,\xi)\in [0,1]^d\times [0,1]^d$ (\cite{HSWW}). The purpose of this paper is to show that the above result is a particular case of a more general theory involving $g$-frames.
For the rest of the paper we shall assume the following. $\Omega$ is a measurable set with measure $dx$. We assume that $\Omega$ has finite measure $|\Omega|$ and $|\Omega|=1$. We let $w:\Omega \to (0,\infty)$ be a measurable map with $\int_\Omega w(x)dx<\infty$. Let $\U$ be a Hilbert space over the field $\Bbb F$ ($\Bbb R$ or $\Bbb C$) with associated inner product $\langle \cdot , \cdot \rangle_{\U} $. We denote by $L^2_w(\Omega, \U)$ the weighted Hilbert space of all measurable functions $f:\Omega\to \U$ such that
$$\|f\|_{L^2_w(\Omega, \U)}^2: = \int_\Omega \|f(x)\|_\U^2 w(x) dx<\infty .$$
The associated inner product of any two functions $f, g$ in $L_w^2(\Omega, \U)$ is then given by
$$\langle f, g\rangle_{L_w^2(\Omega, \U)} = \int_\Omega \langle f(x), g(x)\rangle_\U w(x) dx. $$
A countable family of unit vectors $\{f_k\}_{k\in K}$ in ${L_w^2(\Omega, \U)}$ constitute an ONB (orthonormal basis) for ${L_w^2(\Omega, \U)}$ with respect to the weight function $w$ if the family is orthogonal and for any $g\in {L_w^2(\Omega, \U)}$ the Parseval identity holds:
$$\| g \|^2 = \sum_{k\in K} \left|\langle g, f_k\rangle_{L_w^2(\Omega, \U)}\right|^2 =\sum_{k\in K}\left|
\int_\Omega \langle g(x), f_k(x)\rangle_{\U} w(x) dx\right|^2 .$$
To avoid any confusion, in the sequel, we shall use subscripts for all inner products and associated norms for Hilbert spaces, when necessary.
For the rest, we assume that $S: \mathcal H \to L^2_w(\Omega, \U)$ is a linear and unitary map. Thus for any $f\in \mathcal H$
$$ \|f\|^2 = \int_\Omega \|Sf(x)\|_{\U}^2 w(x) dx. $$
We fix an ONB $\{f_n\}_{n\in K}$ for $L^2(\Omega)$ and ONB $\{g_m\}_{m\in J}$ for the Hilbert space $\U$, and define
$$G_{m,n}(x): = f_n(x) g_m, \quad \forall x\in \Omega, \ (m,n)\in K\times J.$$
And,
$$\tilde \Lambda_{(m,n)}(f)(x) = \langle S(f)(x), G_{(m,n)}(x) \rangle_{\U} \quad \forall \ f\in \mathcal H, x\in \Omega .$$
Our main results are the following.
\begin{theorem}\label{th1}
Let $\{f_n\}_{n\in K}\subset L^2(\Omega)$, $\{g_m\}_{m\in J} \subset \U$ and $\{\tilde \Lambda_{m,n}\}$ be as in above. Assume that $|f_n(x)|=1$ for a.e. $x\in \Omega$.
Then the following hold:
\begin{itemize}
\item [(a)] $\{\tilde\Lambda_{(m,n)}\}_m $ is a Parseval $g$-frame for $\mathcal H$. Thus $\{\Lambda_{(m,n)}\}_m$ is a Bessel sequence.
\item [(b)] For any ${(m,n)}$, the linear map $\Lambda_{m,n}: \mathcal H \to \C$ defined by
$$\Lambda_{m,n}(f) =\int_\Omega \tilde\Lambda_{m,n}(f)(x) \ w(x) dx $$
is well-defined. And, $\{\Lambda_{m,n}\}$ is a frame for ${\mathbb H}$ if and only if there are positive finite constants $A$ and $B$ such that $A\leq w(x)\leq B$ for a.e. $x\in \Omega$.
\item [(c)] The family $\{\Lambda_{m,n}\}$ is a Riesz basis for ${\mathbb H}$ if and only if there are positive finite constants $A$ and $B$ such that $A\leq w(x)\leq B$ for a.e. $x\in \Omega$.
\end{itemize}
\end{theorem}
\begin{corollary}\label{main result}
Let $\{\lambda_k\}_{k\in J}$ be an orthonormal basis (or a Parseval frame) for $L^2(\Omega)$ such that $|\lambda_k(x)|=1$ for all $x\in \Omega$. Assume that $S: \mathcal H \to L^2_w(\Omega)$ is a unitary map. Then the sequence of operators $\{\Lambda_k\}_{k\in J}$ defined by
$$\Lambda_k(f) =\int_\Omega S(f)(x) \overline{ \lambda_k(x) } w(x) dx $$
is a frame for ${\mathbb H}$ if and only if there are positive finite constants $A$ and $B$ such that $A\leq w(x)\leq B$ for a.e. $x\in \Omega$.
\end{corollary}
\begin{theorem}\label{th3}
Let $\{f_n\}_{n\in K}\subset L^2(\Omega)$, $\{g_m\}_{m\in J} \subset \U$ and $\{\tilde \Lambda_{m,n}\}$ be as in above. Assume that $|f_n(x)|=1$ for a.e. $x\in \Omega$. The family $\{\Lambda_{m,n}\}$ is an ONB for ${\mathbb H}$ if and only if $w(x)=1$ for a.e. $x\in \Omega$.
\end{theorem}
\section{Proof of Theorem \ref{th1}}
First we prove the following lemmas which we need for the proof of Theorem \ref{th1}.
\begin{lemma}\label{technical lemma}
Let $\{f_n\}$ be an ONB for the weighted Hilbert space $L_w^2(\Omega)$. Let $\{g_m\}$ be an ONB for a Hilbert space $\U$. Define $G_{m,n}(x) = f_n(x) g_m$. Then the family $\{G_{m,n}\}_{J\times K}$ is an ONB for $L_w^2(\Omega, \U)$.
\end{lemma}
In order to prove the lemma, we shall recall the following result from \cite{iosevich-mayeli-14} and prove it here for the sake of completeness.
\begin{lemma}\label{mixed orthonormal bases}
Let $(X,\mu)$ be a measurable space, and $\{f_n\}_n$ be an orthonormal basis for $L^2(X):=L^2(X, d\mu)$. Let $Y$ be a Hilbert space and $\{g_m\}_m$ be a family in $Y$. For any $m, n$ and $x\in X$ define $G_{m,n}(x) := f_n(x) g_m$. Then $\{G_{m,n}\}_{m,n}$ is an orthonormal basis for the Hilbert space $L^2(X,Y,d\mu)$ if and if $\{g_m\}_m$ is an orthonormal basis for $Y$.
\end{lemma}
\begin{proof}
For any $m, n$ and $m\rq{}, n\rq{}$ we have
\begin{align}\label{orthogonality-relation}
\langle G_{m,n}, G_{m\rq{},n\rq{}} \rangle &= \int_X \langle f_n(x) g_m, f_{n\rq{}}(x) g_{m\rq{}} \rangle_Y \ d\mu(x)\\\notag
& = \langle f_n,f_{n\rq{}}\rangle_{L^2(X)} \langle g_m, g_{m\rq{}}\rangle_Y \\\notag
&= \delta_{n,n\rq{}} \langle g_m, g_{m\rq{}}\rangle_Y.
\end{align}
This shows that the orthogonality of $\{G_{m,n}\}_{m,n}$ is equivalent to the orthogonality of $\{g_m\}_m$. And, $\|G_{m,n}\| =1$ if and only if $\|g_m\|=1$.
Let $\{g_m\}_m$ be an orthonormal basis for $Y$. To prove the completeness of $\{G_{m,n}\}$ in $L^2(X,Y,d\mu)$, let $F\in L^2(X,Y,d\mu)$ such that $\langle F, G_{m,n}\rangle =0$, $\forall \ m, n$. We claim $F=0$. By the definition of the inner product we have
\begin{align}\label{inner-product}
0=\langle F, G_{m,n}\rangle &=\int_X \langle F(x), G_{m,n}(x)\rangle_Y d\mu(x)\\\notag
&= \int_X \langle F(x), f_n(x) g_m\rangle_Y d\mu(x) \\\notag
&= \int_X \langle F(x), g_m\rangle_Y \overline{f_n(x)} d\mu(x) \\\notag
&= \langle A_m, f_n\rangle
\end{align}
where
$$A_m: X\to \C; \ \ x \mapsto \langle F(x), g_m\rangle_Y. $$
$A_m$ is a measurable function and lies in $L^2(X)$ with $\|A_m\|\leq \|F\|$. Since $\langle A_m, f_n\rangle_{L^2(X)}=0$ for all $n$, then $A_m=0$ by the completeness of $\{f_n\}$. On the other hand, by the definition of $A_m$ we have $\langle F(x), g_m\rangle_{Y}=0$ for a.e. $x\in X$. Since $\{g_m\}$ is complete in $Y$, then $F(x)=0$ for a.e. $x\in X$. This proves the claim.
Conversely, assume that $\{G_{m,n}\}_{m,n}$ is an orthonormal basis for the Hilbert space $ L^2(X,Y,d\mu)$. Therefore by (\ref{orthogonality-relation}), $\{g_m\}$ is an orthonormal set. We prove that if for $g\in Y$ and $\langle g, g_m\rangle=0$ for all $m$, then $g$ must be identical to zero. For this, for any $n$ define the map
$$B_n: X\to Y; \ \ x\mapsto f_n(x)g.$$
Then $B_n$ is measurable and it belongs to $ L^2(X,Y,d\mu)$ and $\|B_n\| = \|g\|_Y$. Thus
\begin{align}
B_n&=\sum_{n\rq{},m} \langle B_n, G_{m,n\rq{}}\rangle_{L^2(X,Y,d\mu)} G_{m,n\rq{}}\\\notag
&= \sum_{n\rq{},m} \langle f_n, f_{n\rq{}}\rangle_{L^2(X)} \langle g, g_m\rangle_Y G_{m,n\rq{}}\\\notag
&= \sum_{m} \langle g, g_m\rangle_Y G_{m,n}.
\end{align}
By the assumption that $ \langle g, g_m\rangle_Y=0$ for all $m$, we get $B_n=0$. This implies that $B_n(x)= f_n(x) g=0$ for a.e. $x$. Since, $f_n\neq 0$, then $g$ must be a zero vector, and hence we are done.
\end{proof}
\begin{lemma}\label{boundedness}
$\tilde \Lambda_{(m,n)}: \mathcal H\to L^2_w(\Omega)$ is a bounded operator and $\|\tilde \Lambda_{(m,n)}(f)\|_{L^2_w(\Omega)}\leq \|f\|$.
\end{lemma}
\begin{proof}
Let $f\in {\mathbb H}$. Then for any $m\in J$ and $n\in K$,
\begin{align*}
\| \tilde\Lambda_{m,n}(f)\|_{L^2_w(\Omega)}^2 &= \int_\Omega |\tilde\Lambda_{m,n}(f)(x)|^2 w(x) dx \\
&= \int_\Omega |\langle Sf(x), f_n(x)g_m\rangle_{\U}|^2 w(x) dx\\
&= \int_\Omega |\langle Sf(x), g_m\rangle_{\U}|^2 w(x) dx .
\end{align*}
Using the Cauchy--Schwarz inequality in the preceding line, we get
\begin{align*}
\| \tilde\Lambda_{m,n}(f)\|_{L^2_w(\Omega)}^2 &\leq \int_\Omega \|Sf(x)\|^2 w(x) dx = \|f\|^2 .
\end{align*}
\end{proof}
Here, we first calculate the adjoint of $\tilde\Lambda_{m,n}$. Since $S$ is a unitary map, for any $f\in {\mathbb H}$ and $h\in L_w^2(\Omega,\U)$ we have
\begin{align}
\int_\Omega \langle Sf(x), h(x)\rangle_{\U} w(x)dx= \langle Sf, h\rangle_{L_w^2(\Omega,\U)} =\langle f, S^{-1}h\rangle .
\end{align}
Therefore for any $\phi\in L_w^2(\Omega)$ we get
\begin{align}\label{adjoint}
\langle \tilde \Lambda_{m,n} f, \phi\rangle = \langle f, S^{-1}((f_n \phi)g_m)\rangle ,
\end{align}
where $(f_n \phi)g_m \in L_w^2(\Omega, \U)$ and $(f_n \phi)g_m(x) = f_n(x)\phi(x) g_m$. The relation (\ref{adjoint}) indicates that
$$\tilde \Lambda_{m,n}^*(\phi) = S^{-1}((f_n \phi)g_m).$$
Notice, for any $f\in {\mathbb H}$,
$$\Lambda_{m,n} f = \langle f, S^{-1}(f_ng_m)\rangle_{{\mathbb H}} .$$
Thus $\Lambda_{m,n}^*: \C \to {\mathbb H}$ is given by $c\to cS^{-1}(f_ng_m)$.
\begin{proof}[Proof of Theorem \ref{th1}]
$(a)$: Observe that $|\tilde \Lambda_{m,n}(f)(x)|\leq \| Sf(x)\|$ and $S$ is an isometry map. For any $f \in {\mathbb H}$ and $n\in K$ we have
\begin{align*}
\sum_m \| \tilde \Lambda_{m,n} (f)\|^2_{L^2_w(\Omega)} & =\sum_m \int_{\Omega} | \tilde \Lambda_{m,n} (f)(x)|^2 w(x) dx \\
& = \sum_m \int_{\Omega} | \langle G_{m,n}(x) , S(f)(x)\rangle_{\U} |^2 w(x) dx \\
& = \int_{\Omega} \sum_m| \langle G_{m,n}(x) , S(f)(x)\rangle_{\U} |^2 w(x) dx \\
& = \int_{\Omega} \sum_m| \langle g_m , w^{1/2}S(f)(x)\rangle_{\U} |^2 dx.
\end{align*}
By the assumptions of the theorem, for a.e. $x\in \Omega$, the sequence $\{g_m\}_m$ is an ONB for $\U$. Invoking this along the isometry property of $S$ in the last equation above, we get
\begin{align}\label{parseval-proprty}
\sum_m \| \tilde \Lambda_{m,n} (f)\|^2_{L^2_w(\Omega)}
= \int_{\Omega} \| S(f)(x) \|^2_{\U} \; w(x) dx
= \| f \|^2.
\end{align}
Therefore, $\{\tilde \Lambda_{m,n}\}_m$ is a Parseval $g$-frame for ${\mathbb H}$ with respect to $L^2_w(\Omega)$. To prove that $\{\Lambda_{m,n}\}_m$ is a Bessel sequence, note that by H{\"o}lder's inequality in the weighted Hilbert space $L^2_w(\Omega)$ we can write
\begin{align*}
|\Lambda_{m,n}(f)| \leq \int_\Omega |\tilde \Lambda_{m,n} (f)(x)| w(x) dx & \leq \bigg( \int_\Omega |\tilde \Lambda_{m,n} (f)(x)|^2 w(x) dx \bigg)^{\frac{1}{2}} \bigg( \int_\Omega w(x) dx \bigg)^{\frac{1}{2}}.
\end{align*}
By Lemma \ref{boundedness}, the first integral on the right is finite. Therefore by summing the square of the terms over $m$ we get
\[ \sum_{m \in J} |\Lambda_{m,n}(f)|^2 \leq C \sum_{m \in J} \int_\Omega |\tilde \Lambda_{m,n} (f)(x) |^2 w(x) dx =C \sum_{m \in J} \| \tilde \Lambda_{m,n} (f) \|_{L^2_w(\Omega)}^2 =C\| f \|^2, \]
where $C:= \int_\Omega w(x) dx$ is a non-zero constant. Thus $\{\Lambda_{m,n}\}_{m \in J}$ is a Bessel sequence for ${\mathbb H}$ with bound $C$. Notice, in the last equality we used (\ref{parseval-proprty}).
$(b)$ The map $\Lambda_{m,n}:{\mathbb H} \to \C$ is linear, well-defined and bounded. Indeed, for any $f\in {\mathbb H}$,
\begin{align*}
\int_\Omega |\tilde \Lambda_{m,n} (f)(x)| w(x) dx &\leq \left(\int_\Omega w(x) dx\right)^{1/2} \left(\int_\Omega\|Sf(x)\|^2 w(x) dx\right)^{1/2} \\
&= \|f\| \left(\int_\Omega w(x) dx\right)^{1/2}.
\end{align*}
Assume that $A\leq w(x)\leq B$ for almost every $x\in \Omega$. Let $f\in \mathcal H$. Then
\begin{align}\notag
\sum_{m,n} |\Lambda_{m,n}(f)|^2 &= \sum_{m,n} \left| \int_\Omega \langle G_{m,n}(x) , S(f)(x)\rangle_{\U} w(x) dx\right|^2 \\\notag
&= \sum_{m,n} \left| \int_\Omega \langle G_{m,n}(x) , S(f)(x)w(x)\rangle_{\U} dx\right|^2 \\\label{eq1}
&=\sum_{m,n} \left| \langle G_{m,n} , S(f)w\rangle_{L^2(\Omega, \U)}\right|^2 .
\end{align}
The countable family $\{ G_{m,n}\}_{m,n}$ is an ONB for $L^2(\Omega, \U)$. Thus
\begin{align}\notag
(\ref{eq1})&= \Vert S(f) w \Vert^2_{L^2(\Omega, \U)} \\\label{equ2}
& = \int_\Omega \Vert S(f)(x)\Vert^2_{\U} \ w(x)^2 dx.
\end{align}
By invoking the assumption that $w(x)\leq B$ for a.e. $x\in \Omega$ in (\ref{equ2}) we obtain
\begin{align}\notag
(\ref{equ2})
\leq B \int_\Omega \Vert S(f)(x)\Vert^2_{\U} \ w(x) dx
= B \|S(f)\|^2_{L^2_w(\Omega, \U)}= B \| f\|^2.
\end{align}
This proves that the sequence $\{\Lambda_{m,n}\}_{m,n}$ is a Bessel sequence for ${\mathbb H}$. An analogous argument also proves the frame lower bound condition for $\{\Lambda_{m,n}\}_{m,n}$.
For the converse, assume that $\{\Lambda_{m,n}\}_{m,n}$ is a frame for $\mathcal H$ with the frame bounds $0<A\leq B<\infty$. Therefore for any $f\in \mathcal H$
$$A\|f\|^2 \leq \sum_{m,n} |\Lambda_{m,n}(f)|^2 \leq B \|f\|^2.$$
Assume that there is a set $E\subset \Omega$ with positive measure such that $w(x)<A$ for all $x\in E$. We will prove that there exists a function in $\mathcal H$ for which the lower frame condition does not hold. To this end, let $e_0 \in \U$ be a unit vector and let $\vec{0}$ denote the zero vector in $\U$. Define $\chi_E(x):= 1_E(x) e_0$. By the assumption, $w \in L^1(E)$. Thus $\chi_E \in L^2_w(\Omega, \U)$. $S$ is unitary, therefore there is a function $\phi_E\in {\mathbb H}$ such that $S(\phi_E)=\chi_E$, and we have
\begin{align}\label{iso}
\| S(\phi_E) \|_{L^2_w(\Omega, \U)}= \|\phi_E \|_{{\mathbb H}}=\| \chi_E \|_{L^2_w(\Omega, \U)}.
\end{align}
On the other hand, the sequence $\{G_{m,n}\}_{m,n}$ is an ONB for $L^2(\Omega, \U)$. Thus
\begin{align*}
\sum_{m,n} |\Lambda_{m,n}(\phi_E)|^2 &= \sum_{m,n} \left| \langle G_{m,n}, S(\phi_E)w \rangle_{L^2(\Omega, \U)} \right|^2 \\
&= \Vert \chi_E w \Vert^2_{L^2(\Omega, \U)} \\
& = \int_\Omega \Vert \chi_E(x) \Vert^2_{\U} \ w(x)^2 dx \\
&< A \int_\Omega \Vert \chi_E(x) \Vert^2_{\U} \ w(x) dx \\
& = A \|\chi_E\|^2_{L_w^2(\Omega,\U)} \\
&= A \| \phi_E \|^2_{{\mathbb H}} \hspace{1in} \text{by \ (\ref{iso})}.
\end{align*}
The preceding calculation shows that the lower frame bound condition fails for $\phi_E$. This contradicts our assumption that $\{\Lambda_{m,n}\}_{m,n}$ is a frame for ${\mathbb H}$, therefore $w(x)\geq A$ a.e. $x\in \Omega$. The argument for the upper bound for $w$ follows similarly.
$(c)$ Assume that $A\leq w(x)\leq B$ for almost every $x\in \Omega$. Let $\{c_{m,n}\}_{m,n}$ be any finite sequence in $\C$. Then
\begin{align*}
\left\| \sum_{m,n} \Lambda_{m,n}^*(c_{m,n}) \right\|_{\mathbb H}^2 &= \left\| \sum_{m,n} c_{m,n} S^{-1}(f_ng_m) \right\|_{\mathbb H}^2 \\
& = \left\| S^{-1}\left(\sum_{m,n} c_{m,n} f_ng_m\right) \right\|_{\mathbb H}^2 \\
& = \left\| \sum_{m,n} c_{m,n} f_ng_m\right\|_{L^2_w(\Omega,\U)}^2 & \text{($S$ is unitary)}\\
&= \int_\Omega \left\| \sum_{m,n} c_{m,n} f_n(x)g_m\right\|_{\U}^2 w(x) dx\\
& = \int_\Omega \sum_{m,n} |c_{m,n}|^2 w(x) dx & \text{(by orthogonality of $g_m$)}\\
&\leq B \sum_{m,n} |c_{m,n}|^2 &\text{(since $w(x)\leq B$ a.e. $x\in \Omega$).}
\end{align*}
We also have
\begin{align*}
\left\| \sum_{m,n} \Lambda_{m,n}^*(c_{m,n}) \right\|_{\mathbb H}^2 & =
\int_\Omega \sum_{m,n} |c_{m,n}|^2 w(x) dx \geq A \sum_{m,n} |c_{m,n}|^2 &\text{(since $w(x) \geq A$ a.e. $x\in \Omega$).}
\end{align*}
These show that $\{\Lambda_{m,n}\}_{K\times J}$ is a Riesz basis for ${\mathbb H}$ with lower and upper Riesz bounds $A$ and $B$, respectively.
Now assume that $\{\Lambda_{m,n}\}_{K\times J}$ is a Riesz basis for ${\mathbb H}$ with Riesz bounds $A$ and $B$. Therefore, for any sequence $\{c_{m,n}\}_{m,n}$ the inequalities hold:
\begin{align}\label{Riesz inequality}
A\sum_{m,n} |c_{m,n}|^2 \leq \left\| \sum_{m,n}\Lambda_{m,n}^*(c_{m,n})\right\|^2 \leq B \sum_{m,n} |c_{m,n}|^2.
\end{align}
We show that there are positive constants $A$ and $B$ such that $A\leq w(x)\leq B$ for a.e. $x\in \Omega$. On the contrary, without loss of generality, assume that there is a measurable subset $E\subset \Omega$ with positive measure such that $w(x)<A$ for all $x\in E$. Let $e$ be any unit vector in the Hilbert space $\U$ and define the function ${\bf 1}_E(x) = e$ if $x\in E$ and otherwise ${\bf 1}_E(x)=0$. It is clear that ${\bf 1}_E\in L^2(\Omega, \U)$. Thus, there are coefficients $\{c_{m,n}\}_{K\times J}$ such that ${\bf 1}_E= \sum_{m,n} c_{m,n} f_n g_m$ and $\|{\bf 1}_E\|^2= \sum_{m,n}|c_{m,n}|^2$. Then $\|{\bf 1}_E(x)\|=1$ for all $x\in E$ and we get
\begin{align*}
\left\| \sum_{m,n}\Lambda_{m,n}^*(c_{m,n})\right\|^2 &= \left\| \sum_{m,n} c_{m,n} f_ng_m\right\|_{L^2_w(\Omega,\U)}^2 \\
&= \int_\Omega \left\| \sum_{m,n} c_{m,n} f_n(x)g_m\right\|_{\U}^2 w(x) dx\\
&= \int_\Omega \left\| {\bf 1}_E(x)\right\|_{\U}^2 w(x) dx\\
&= \int_E w(x) dx\\
&< A \int_\Omega \left\| {\bf 1}_E(x)\right\|_{\U}^2 dx \quad \text{(since $w(x)<A$ on $E$ and $E$ has positive measure)}\\
&= A \int_\Omega \left\| \sum_{m,n} c_{m,n} f_n(x)g_m\right\|_{\U}^2 dx \\
&= A \int_\Omega \sum_{m,n} |c_{m,n}|^2 dx \\
&= A \sum_{m,n} |c_{m,n}|^2 .
\end{align*}
This is contrary to the lower bound condition in (\ref{Riesz inequality}).
\end{proof}
\section{Proof of Corollary \ref{main result}}
\begin{proof}
Assume that $A\leq w(x)\leq B$ for almost every $x\in \Omega$. Let $f\in \mathcal H$. Then
\begin{align}\label{first line}
\sum_{k \in J} |\Lambda_k(f)|^2 &= \sum_{k \in J} \left| \int_\Omega S(f)(x) \overline{\lambda_k(x)} w(x) dx\right|^2.
\end{align}
By the fact that $\{\lambda_k\}_{k \in J}$ is an orthonormal basis for $L^2(\Omega)$, using the Plancherel\rq{}s theorem we continue as follows:
\begin{align*}
(\ref{first line})&= \int_\Omega |S(f)(x) w(x)|^2 dx
\leq B \int_\Omega |S(f)(x)|^2 w(x) dx
= B \|f\|^2.
\end{align*}
The frame boundedness from below by $A$ also follows with a similar calculation.
For the converse, we shall use a contradiction argument. Assume that $\{\Lambda_k\}_{k \in J}$ is a frame for $\mathcal H$ with the frame bounds $0<A\leq B<\infty$. Therefore for any $f\in \mathcal H$ we have
$$A\|f\|^2 \leq \sum_{k \in J} |\Lambda_k(f)|^2 \leq B \|f\|^2.$$
Let $E\subset \Omega$ be a measurable set with positive measure such that $w(x)< A$ for all $x\in E$. We shall show that the lower bound condition for the frame $\{\Lambda_k\}_{k \in J}$ must then fail for the lower bound $A$.
By the assumptions, $w \in L^1(E)$. Thus $1_E\in L^2_w(\Omega)$. Since $S$ is an onto map, assume that $\phi_E$ is the pre-image of $1_E$ in $\mathcal H$. Therefore $S(\phi_E) = 1_E$ and $\|\phi_E\|_\mathcal H = \|1_E\|_{L^2_w(\Omega)}$ and we have
\begin{align}\label{E1}
\sum_{k \in J} |\Lambda_k(\phi_E)|^2 = \sum_{k \in J} \left|\int_\Omega S(\phi_E)(x) \overline{\lambda_k(x)} w(x)dx \right|^2
= \int_\Omega |1_E(x)|^2 |w(x)|^2 dx
= \int_E |w(x)|^2 dx.
\end{align}
Since $w(x)<A$ for all $x\in E$, then from the last integral we obtain the following:
\begin{align*}
(\ref{E1}) < A \int_E w(x) dx
= A \int_\Omega |1_E(x)|^2 w(x) dx
= A\int_\Omega |S(\phi_E)(x)|^2 w(x) dx
= A\|\phi_E\|^2.
\end{align*}
The preceding calculation shows that the frame lower bound condition fails for $\phi_E$. This contradicts our assumption that $\{\Lambda_k\}_{k \in J}$ is a frame, therefore $w(x)\geq A$ a.e. $x\in \Omega$. The argument for the upper bound follows similarly.
\end{proof}
\section{Proof of Theorem \ref{th3}}
\begin{proof}
Assume that $w(x)=1$ a.e. $x\in \Omega$. By the equations (\ref{eq1}) and (\ref{equ2}), for any $f\in {\mathbb H}$ we have
\begin{align}\notag
\sum_{m,n} |\Lambda_{m,n}(f)|^2
&=\sum_{m,n} \left| \langle G_{m,n} , S(f)\rangle_{L^2(\Omega, \U)}\right|^2 = \| Sf\|^2 = \|f\|^2 .
\end{align}
This proves (\ref{onb3}). Let $c_1, c_2\in \C$. For any $m,m\rq{}\in J$ and $n, n\rq{}\in K$ we have
\begin{align*}
\langle \Lambda_{m,n}^*(c_1),\Lambda_{m\rq{},n\rq{}}^*(c_2)\rangle &= c_1\overline{c_2} \langle S^{-1}(f_ng_m), S^{-1}(f_{n\rq{}}g_{m\rq{}})\rangle_{{\mathbb H}} \\
& = c_1\overline{c_2} \langle f_ng_m, f_{n\rq{}}g_{m\rq{}}\rangle_{L^2(\Omega, \U)} \\
&= c_1\overline{c_2} \delta_{m,m\rq{}}\delta_{n,n\rq{}}.
\end{align*}
This proves the relations (\ref{onb1}) and (\ref{onb2}).
To prove the converse, we argue by contradiction. Assume that there is a subset $E\subset \Omega$ of positive measure on which $w(x)<1$. As in the proof of Theorem \ref{th1}, one can show the existence of a function $\phi_E$ for which, by a calculation analogous to the one following the relation (\ref{iso}), the following holds:
$$\sum_{m,n} |\Lambda_{m,n}(\phi_E)|^2 < \|\phi_E\|^2 .$$
This shows that the relation (\ref{onb3}) does not hold for $\phi_E$, which contradicts the assumption.
\end{proof}
\section{Examples}
\begin{example}\label{example 1}
Let $\Omega=D$ be a fundamental domain in $\Bbb R^d$ with Lebesgue measure one. Assume that $\Gamma=M\Bbb Z^d$ where $M$ is a $d\times d$ invertible matrix and the exponentials $\{e_n(x):= e^{2\pi i \langle n,x\rangle}: \ n\in \Gamma\}$ form an orthonormal basis for $L^2(D)$.
For $0\neq\phi\in L^2(\Bbb R^d)$, define ${\mathbb H}:= \overline{\text{span}\{\phi(\cdot-n): n\in \Bbb Z^d\}}$ and the weight function $w$ by $w(x) := \sum_{n\in \Gamma^\perp} |\hat\phi(x+n)|^2$, a.e. $x\in D$. We claim that $\int_D w(x) dx= \|\phi\|^2$, thus is finite. To this end, notice $D$ is a fundamental domain for the lattice $\Gamma$. By a result of Fuglede \cite{Fug74}, $D$ tiles $\Bbb R^d$ by the dual lattice $\Gamma^\perp=M^{-t}\Bbb Z^d$, $M^{-t}$ the inverse transpose of $M$. Therefore, we have
\begin{align*}
\int_D w(x) dx &= \int_D \sum_{n\in \Gamma^\perp} |\hat\phi(x+n)|^2 dx
= \int_{\mathbb R^d} |\hat\phi(x)|^2 dx
= \|\hat \phi\|^2
= \|\phi\|^2 .
\end{align*}
Let $E_w:= \{x\in D: w(x)>0\}$ and for any $f\in {\mathbb H}$ define
$$S(f)(x):= 1_{E_w}(x) {w(x)}^{-1} \sum_{n\in \Gamma^\perp} \hat f(x+n) \overline{\hat \phi(x+n)} \quad \text{a.e.}\ x\in E_w.$$
Then $S$ is a unitary map from ${\mathbb H}$ onto the weighted Hilbert space $L_w^2(D)$ with $\U=\C$. Note that $Sf(x)= 0$ a.e. $x\in D\setminus E_w$ (Theorem 3.1 (i) \cite{HSWW}).
For $k\in \Gamma$ define
$$\Lambda_k(f):= \int_{E_w} \left(\sum_{n\in \Gamma^\perp} \hat f(x+n)\overline{\hat\phi(x+n)}\right) e^{-2\pi i \langle x, k\rangle } dx.$$
By Corollary \ref{main result}, the operators $\{\Lambda_k\}_{k\in \Gamma}$ constitute a frame for ${\mathbb H}$ if there are positive constants $A$ and $B$ such that $A\leq \sum_{n\in \Gamma^\perp} |\hat\phi(x+n)|^2\leq B$ a.e. $x\in E_w$. By the well-known periodization method, it is obvious that $\Lambda_k(f) = \langle T_k \phi, f\rangle$ for any $f\in {\mathbb H}$ and $k\in \Gamma$, with $T_k\phi(x)= \phi(x-k)$. Thus, $\{\Lambda_k\}_{\Gamma}$ is the translation family $\{T_k \phi\}_{\Gamma}$.
For example, if $\phi\in L^2(\mathbb R)$ such that $\hat\phi= 1_{[0,1]}$, the indicator function of the unit interval, then the inequalities for $w$ hold for $A=1$ and $B=2$, hence $\{\Lambda_k\}_{k\in \Gamma}$ is a frame with lower and upper frame bounds $1$ and $2$, respectively.
\end{example}
\section{Application: Frames for shift-invariant subspaces on the Heisenberg group}
In this section we shall revisit the example of a function in $L^2(\Bbb H^d)$ that was introduced in \cite{BHM14} and exploit our current results to study the frame and Riesz property of the lattice translations of the function for a shift-invariant subspace of $L^2(\Bbb H^d)$.
\subsection{The Heisenberg group}
The $d$-dimensional Heisenberg group $\Bbb H^d$ is identified with $\mathbb R^d \times \mathbb R^d \times \mathbb R$ and the noncommutative group law is given by
\begin{equation}\label{equ:Hlaw}
(p,q,t) (p',q',t') = (p + p', q + q', t + t' + p\cdot q').
\end{equation}
The inverse of an element is given by $(p,q,t)^{-1}= (-p,-q, -t+p\cdot q)$. Here, $x\cdot y$ is the inner product of two vectors in $\mathbb R^d$. The Haar measure of the group is the Lebesgue measure on $\Bbb R^{2d+1}$.
The class of non-zero measure irreducible representations of $\Bbb H^d$ is identified by non-zero elements $\lambda \in \mathbb R^*:=\mathbb R \setminus \{0\}$ (see \cite{F1995}). Indeed, for any $\lambda\neq 0$, the associated irreducible representation $\rho_\lambda$ of the Heisenberg group is equivalent to Schr\"odinger representation into the class of unitary operators on $L^2(\Bbb R^d)$, such that for any $(p,q,t)\in \Bbb H^d$ and $f\in L^2(\Bbb R^d)$
\begin{align}\label{definition-of-schroedinger-representation}
\rho_\lambda(p,q,t)f(x) = e^{2\pi i t \lambda} e^{-2\pi i \lambda\, q\cdot x} f(x-p) .
\end{align}
Notice $\rho_\lambda(p,q,0)f(x) = M_{\lambda q} T_p f(x)$ is the unitary frequency-translation operator, where $M_x$ and $T_y$ are the modulation and translation operators, respectively. For $\varphi\in L^2(\Bbb H^d)$, we denote by $\hat \varphi$ the operator valued Fourier transform of $\varphi$ which is defined by
\begin{equation}\label{equ:Hfourier}
\hat \varphi(\lambda) = \int_{\Bbb H^d} \varphi(x) \rho_\lambda(x) dx \quad \forall \lambda\in \mathbb R \setminus \{0\} .
\end{equation}
The operator $\hat \varphi(\lambda)$ is a Hilbert-Schmidt operator on $L^2(\Bbb R^d)$ such that for any $f\in L^2(\Bbb R^d)$
\begin{equation}\notag
\hat \varphi(\lambda)f(y) = \int_{\Bbb H^d} \varphi(x) \rho_\lambda(x)f(y) \ dx \quad \forall \lambda\in \mathbb R \setminus \{0\} ,
\end{equation}
and the equality is understood in $L^2$-norm sense.
For any $\psi$ and $\varphi$ in $L^2(\Bbb H^d)$ and $\lambda \in \mathbb R \setminus \{0\}$, the Hilbert-Schmidt inner product $\langle \hat \varphi(\lambda), \hat\psi(\lambda)\rangle_{\mathcal{HS}}$ is the trace of an operator. Indeed,
\begin{equation}\label{eq:HS}
\langle\hat\varphi(\lambda), \hat\psi(\lambda)\rangle_{\mathcal{HS}} = \textnormal{trace}_{L^2(\mathbb R^d)} \left(\hat\varphi (\lambda) \hat\psi(\lambda)^*\right) .
\end{equation}
(Here, $\hat\psi(\lambda)^*$ denotes the $L^2(\mathbb R^d)$ adjoint of the operator $\hat\psi(\lambda)$.) It is easy to see that $\hat\varphi (\lambda) \hat\psi(\lambda)^*$ is a kernel operator. Thus $\langle\hat\varphi(\lambda), \hat\psi(\lambda)\rangle_{\mathcal{HS}}$ is the trace of a kernel operator (\cite{F1995}). The Plancherel formula for the Heisenberg group is given by
\begin{equation}\label{eq:Plancherel}
\langle \varphi, \psi\rangle_{L^2(\Bbb H^d)} = \int_\mathbb R \langle\hat\varphi(\lambda), \hat\psi(\lambda)\rangle_{\mathcal{HS}} |\lambda|^d d\lambda .
\end{equation}
The measure $|\lambda|^d d\lambda$ is the Plancherel measure on the non-zero measure class of irreducible representations of the Heisenberg group (\cite{F1995}) and $d\lambda$ is the Lebesgue measure on $\Bbb R^*$. By the periodization method, the integral in (\ref{eq:Plancherel}) can be equivalently written as
\begin{equation}\notag
\int_\mathbb R \langle\hat\varphi(\lambda), \hat\psi(\lambda)\rangle_{\mathcal{HS}} |\lambda|^d d\lambda = \int_0^1
\sum_{j\in \Bbb Z} \langle\hat\varphi(\alpha+j), \hat\psi(\alpha+j)\rangle_{\mathcal{HS}} |\alpha+j|^d d\alpha .
\end{equation}
Thus, for any $\varphi\in L^2(\Bbb H^d)$, by the Plancherel formula we deduce the following:
\begin{equation}\label{periodization}
\|\varphi\|^2 = \int_0^1 \sum_{j\in \Bbb Z} \|\hat \varphi(\alpha+j)\|^2_{\mathcal{HS}} |\alpha+j|^d d\alpha .
\end{equation}
\subsection{Frames for a shift-invariant subspace}
Let $u={\bf 1}_{[0,1]^d}\in L^2(\Bbb R^d)$ be the indicator function of the unit cube $[0,1]^d$. For $\alpha\neq 0$, define the $L^2$-unitary dilation of $u$ with respect to $\alpha$ by
$u_\alpha(x)= |\alpha|^{-d/2}u(\alpha x)$. Let $a$ and $b$ be two real numbers such that $0\neq ab\in \Bbb Z$. Then the family consisting of translations and modulations of $u_\alpha$ by $a\Bbb Z^d$ and $b \Bbb Z^d$, respectively, is given by
$$ \left\{|\alpha|^{d/2}e^{-2\pi i\alpha b\langle m, x\rangle} {\bf 1}_{(0,\frac{1}{\alpha})^d}(x-an): \ \ m, n\in \Bbb Z^d\right\} .$$
It is known that the family is an orthonormal basis for $L^2(\Bbb R^d)$ and is called orthonormal Gabor or Weyl-Heisenberg basis for $L^2(\Bbb R^d)$ with the window function $u_\alpha$.
Fix $0<\epsilon<1$ (for the rest of the paper) and define the projector map $\Psi_\epsilon$ from $(0,1)$ into the class of Hilbert-Schmidt operators of rank one on $L^2(\Bbb R^d)$ by
$$\Psi_\epsilon(\alpha) := (u_\alpha\otimes u_\alpha) 1_{(\epsilon,1]}(\alpha),$$
where for any $f, g, h\in L^2(\Bbb R^d)$ we have $(f\otimes g)h:= \langle h, g\rangle f$.
By the definition of the Hilbert-Schmidt norm, we then have
\begin{align}\label{value of HS-norm}
\|\Psi_\epsilon(\alpha)\|_{\mathcal{HS}} = 1_{(\epsilon,1]}(\alpha) .
\end{align}
Thus $\|\Psi_\epsilon\|^2 = (d+1)^{-1}(1-\epsilon^{d+1})$. This implies that $\Psi_\epsilon\in L^2(\mathbb R^*,\mathcal{HS}(L^2(\mathbb R^d)),|\lambda|^d d\lambda)$. Therefore, by the inverse Fourier transform for the Heisenberg group, there is a function in $L^2(\Bbb H^d)$ whose Fourier transform is identical to $\Psi_\epsilon$ in $L^2$-norm. We let $\psi_\epsilon$ denote this function in $L^2(\Bbb H^d)$.
Let $\Bbb A$ and $\Bbb B$ be any $d\times d$ matrices in $GL(\Bbb R, d)$ such that $\Bbb A\Bbb B^t\in GL(\Bbb Z,d)$. Define $\Gamma :=\Bbb A\Bbb Z^d \times \Bbb B \Bbb Z^d\times\Bbb Z$. Then $\Gamma$ is a lattice subgroup of the Heisenberg group, a discrete and co-compact subgroup. For any $\gamma=(p,q,t)\in \Gamma$, we denote by $T_\gamma \psi_{\epsilon}$ the $\gamma$-translation of $\psi_\epsilon$ which is given by
$$T_\gamma \psi_{\epsilon}(x,y,z)= \psi_{\epsilon}(\gamma^{-1}(x,y,z)).$$
Our goal here is to employ the current results and study the frame property of the family $\{T_\gamma\psi_{\epsilon}\}_{\gamma\in \Gamma}$ for its spanned vector space $V_{\Gamma, \psi_\epsilon}= \overline{\text{span}\{T_\gamma \psi_\epsilon: \gamma\in \Gamma\}}$. It is obvious that $V_{\Gamma, \psi_\epsilon}$ is a $\Gamma$-translation-invariant subspace of $L^2(\Bbb H^d)$.
For fixed $\epsilon$, define $w_\epsilon$ on $\Bbb R$ by
$$w_\epsilon(\alpha) = \sum_{j \in \Z} \| \Psi_\epsilon (\alpha + j)\|^2_{\mathcal{HS}(L^2(\mathbb R^d))}|\alpha + j|^d.$$
The function $w_\epsilon$ is a nonnegative and periodic function. Let $E_{w_\epsilon}:=\{\alpha\in (0,1): \ w_\epsilon(\alpha)>0\}$. The definition of $\Psi_\epsilon$ together with (\ref{value of HS-norm}) yields the following result.
\begin{lemma}\label{lem:example1}
For any $\alpha \in E_{w_\epsilon}$
\begin{align}\label{inequality for the toy weight}
\epsilon^d \leq w_\epsilon(\alpha) \leq 1 .
\end{align}
\end{lemma}
For $k\in \Bbb Z$, write $k:=(0,0,k)\in \Bbb H^d$ and let $T_k\psi_\epsilon$ denote the translation of $\psi_\epsilon$ in the center direction of the Heisenberg group:
$$T_k \psi_\epsilon(p,q,t) =\psi_\epsilon (p,q,t-k), \quad (p,q,t)\in \Bbb H^d .$$
Let ${\mathbb H}= \overline{\text{span}\{T_k \psi_\epsilon: \ k\in \Bbb Z\}}$ and $f\in {\mathbb H}$. For any $\alpha\in (0,1)$ define
\begin{align}\label{isometry S}
S(f)(\alpha) := 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-1} \sum_{j \in \Z} \langle \hat \psi_\epsilon (\alpha + j), \hat f(\alpha+j)\rangle_{\mathcal{HS}(L^2(\mathbb R^d))}|\alpha + j|^d.
\end{align}
\begin{lemma}\label{isometry lemma}
The map $S: {\mathbb H} \to L_{w_\epsilon}^2(0,1)$ defined in (\ref{isometry S}) is a unitary map.
\end{lemma}
\begin{proof}
First we prove that $S$ is a bounded map on $L^2(\Bbb H^d)$. Let $f\in L^2(\Bbb H^d)$.
Then
\begin{align}\notag
& \int_0^1 |Sf(\alpha)|^2 w_\epsilon(\alpha)d\alpha \\\notag
& = \int_0^1 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-2} \left(\sum_{j \in \Z} |\langle \hat \psi_\epsilon (\alpha + j), \hat f(\alpha+j)\rangle_{\mathcal{HS}(L^2(\mathbb R^d))}||\alpha + j|^d \right)^2 w_\epsilon(\alpha) d\alpha \\\notag
& \leq \int_0^1 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-1} \left(\sum_{j \in \Z} \|\hat \psi_\epsilon (\alpha + j)\|_{\mathcal{HS}} \|\hat f(\alpha+j)\|_{\mathcal{HS}} |\alpha + j|^d \right)^2 d\alpha \\\notag
&\leq \int_0^1 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-1} B_{\psi_\epsilon}(\alpha) B_f(\alpha) d\alpha\\\notag
&= \int_0^1 1_{E_{w_\epsilon}}(\alpha) B_f(\alpha) d\alpha
\end{align}
where $B_g(\alpha) := \sum_j \|\hat g(\alpha+j)\|^2 |\alpha+j|^d$ for $g\in L^2(\Bbb H^d)$ and $\alpha\in (0,1)$. By the definition of $w_\epsilon$, it is immediate that $w_\epsilon(\alpha)^{-1} B_{\psi_\epsilon}(\alpha) = 1$ for a.e. $\alpha\in (0,1)$.
Therefore
\begin{align}\notag
\int_0^1 |Sf(\alpha)|^2 w_\epsilon(\alpha)d\alpha \leq
\int_0^1 1_{E_{w_\epsilon}}(\alpha) B_f(\alpha) d\alpha
= \| f\|^2 \quad \quad \text{by (\ref{periodization})}.
\end{align}
This proves that $S$ is a bounded operator. Next we prove that $S$ is an isometry on ${\mathbb H}$. Assume that $f= \sum_k a_k T_k \psi_\epsilon$ is a finite linear combination of the functions $T_k\psi_\epsilon$, $k\in\Bbb Z$. Thus
\begin{align}\notag
& \int_0^1 |Sf(\alpha)|^2 w_\epsilon(\alpha)d\alpha \\\notag
& = \int_0^1 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-2} \left|\sum_{j \in \Z} \langle \hat \psi_\epsilon (\alpha + j), \hat f(\alpha+j)\rangle_{\mathcal{HS}(L^2(\mathbb R^d))}|\alpha + j|^d \right|^2 w_\epsilon(\alpha) d\alpha \\\notag
& =\int_0^1 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-1} \left|\sum_{j \in \Z} \langle \hat \psi_\epsilon (\alpha + j), \sum_k a_k \widehat{T_k \psi_\epsilon}(\alpha+j)\rangle_{\mathcal{HS}(L^2(\mathbb R^d))}|\alpha + j|^d \right|^2 d\alpha \\\notag
& =\int_0^1 1_{E_{w_\epsilon}}(\alpha) w_\epsilon(\alpha)^{-1}
\left|\sum_k a_k e^{-2\pi i k \alpha}\right|^2 \left|\sum_{j \in \Z} \langle \hat \psi_\epsilon (\alpha + j), \hat\psi_\epsilon(\alpha+j)\rangle_{\mathcal{HS}(L^2(\mathbb R^d))}|\alpha + j|^d \right|^2 d\alpha\\\notag
& =\int_0^1 1_{E_{w_\epsilon}}(\alpha) \left|\sum_k a_k e^{-2\pi i k \alpha}\right|^2 w_\epsilon(\alpha) d\alpha \quad \text{(by the definition of $w_\epsilon$).}\\\notag
\end{align}
Notice we can write
$$\left|\sum_k a_k e^{-2\pi i k \alpha}\right|^2 w_\epsilon(\alpha)= \sum_j \left\| \sum_k a_k \widehat{T_k \psi_\epsilon}(\alpha+j) \right\|_{\mathcal{HS}}^2 |\alpha + j|^d .$$
Applying this in above we get
\begin{align}\notag
\int_0^1 |Sf(\alpha)|^2 w_\epsilon(\alpha)d\alpha
&= \int_0^1 1_{E_{w_\epsilon}}(\alpha) \sum_j \left\| \sum_k a_k \widehat{T_k \psi_\epsilon}(\alpha+j)\right\|_{\mathcal{HS}}^2 |\alpha + j|^d d\alpha\\\notag
&= \int_0^1 1_{E_{w_\epsilon}}(\alpha) \sum_j \| \hat f(\alpha+j)\|_{\mathcal{HS}}^2 |\alpha + j|^d d\alpha\\\notag
&= \|f\|^2 \quad \quad \text{by (\ref{periodization})}.
\end{align}
This completes the proof of the lemma.
\end{proof}
\begin{theorem}
For any $k\in \Bbb Z$ and $f\in {\mathbb H}$ define
$$\Lambda_k(f) = \int_{E_{w_\epsilon}} \sum_{j \in \Z} \langle \hat \psi_\epsilon (\alpha + j), \hat f(\alpha+j)\rangle_{\mathcal{HS}(L^2(\mathbb R^d))}|\alpha + j|^d e^{-2\pi i \alpha k} d\alpha .$$
Then $\Lambda_k (f) = \langle T_k \psi_{\epsilon}, f\rangle $
and $\{\Lambda_k\}_{k\in\Z}$ is a frame for ${\mathbb H}$ with respect to $\Bbb C$.
\end{theorem}
\begin{proof}
The equation $\Lambda_k (f)= \langle T_k \psi_{\epsilon}, f\rangle $ is a result of the Parseval identity. The family $\{\Lambda_k\}_{k\in\Z}$ is a frame for ${\mathbb H}$ with respect to $\Bbb C$ by Lemmas \ref{lem:example1}, \ref{isometry lemma} and Theorem \ref{th1} (b).
\end{proof}
\section*{Acknowledgments}
The author is deeply indebted to Dr. Azita Mayeli for several fruitful discussions and generous comments. The author wishes to thank the anonymous referees for their helpful comments and suggestions that helped to improve the quality of the paper.
\end{document} |
\begin{document}
\title{Conditional stability for backward parabolic operators with Osgood continuous coefficients}
\begin{abstract}
We prove continuous dependence on initial data for a backward parabolic operator whose leading coefficients are Osgood continuous in time. This result fills the gap between uniqueness and continuity results obtained so far.
\end{abstract}
\section{Introduction}
Backward parabolic equations are known to generate ill-posed (in the sense of Hadamard~\cite{Had_53,Had_64}) Cauchy problems. Due to the smoothing effects of the parabolic operator, in fact, it is not possible, in general, to guarantee existence of the solution for initial data which are not suitably regular. In addition, even when solutions possibly exist, uniqueness cannot be ensured without additional assumptions on the operator. Nevertheless, also for problems which are not well-posed the study of the conditional stability of the solution -- the surrogate of the notion of ``continuous dependence'' when existence of a solution is not guaranteed -- is of some interest. Such kind of study can be performed by resorting to the notion of \emph{well-behaved problem} introduced by John~\cite{John}: a problem is \emph{well-behaved} if ``only a fixed percentage of the significant digits need be lost in determining the solution from the data''. More precisely, a problem is well behaved if its solutions in a space $\mathcal{H}$ depend continuously on the data belonging to a space $\mathcal{K}$, provided they satisfy a prescribed bound in a space $\mathcal{H}^\prime$ (possibly different from $\mathcal{H}$).
In this paper we give a contribution to the study of the (\emph{well}) behaviour of the Cauchy problem associated with a backward parabolic operator. In particular, we consider the operator $\mathcal{L}$ defined, on the strip $[0,T]\times \mathbb{R}^n$, by
\begin{equation}\label{eq_L}
\mathcal{L}u=\partial_t u +\sum_{i,j=1}^n \partial_{x_i}\left( a_{i,j}(t)\partial_{x_j}u \right)+\sum_{j=1}^n b_j(t)\partial_{x_j}u+c(t)u\,,
\end{equation}
where all the coefficients are bounded. We suppose that $a_{i,j}(t)=a_{j,i}(t)$ for all $i,j=1,\ldots,n$ and for all $t\in [0,T]$. We also suppose that $\mathcal{L}$ is backward parabolic, i.e. there exists $k_A\in ]0,1[$ such that, for all $(t,\xi)\in[0,T]\times \mathbb{R}^n$,
\begin{equation}
k_A\vert \xi\vert^2\le \sum_{i,j=1}^n a_{i,j}(t)\xi_i\xi_j\le k_A^{-1}\vert\xi\vert^2\,.
\end{equation}
We show that if the coefficients of the principal part of $\mathcal{L}$ are at least Osgood regular, then there exists a function space in which the associated Cauchy problem
\begin{equation}\label{eq_probl_cauchy}
\left\{\begin{array}{ll}
\mathcal{L}u=f\,,\qquad & \textnormal{ in }(0,T)\times \mathbb{R}^n\,,\\
u\vert_{t=0}=u_0\,,\qquad & \textnormal{ in }\mathbb{R}^n\,,
\end{array}\right.
\end{equation}
has a stability property.
To collocate the new result in the framework of the existing literature, the contents of some publications on the subject are preliminarily recalled. They show that, as one could expect, the function space in which the stability property holds is related to the degree of regularity of the coefficients of $\mathcal{L}$. Weaker requirements on the regularity of the coefficients must be balanced, for the stability property to hold, by stronger \emph{a priori} requirements on the regularity of the solution, hence stability holds in a smaller function space.
The overview on available works helps to lead the reader to the new result, claimed in the final part of the paper, concerning operators with Osgood-continuous coefficients. This kind of regularity is critical since it is the minimum required regularity to have uniqueness of the solution and can therefore be considered as a sort of lower limit. Although the proof of the claim is based on the theoretical scheme followed to achieve previous results~\cite{MATAN}, the modifications needed to obtain an analogous proof in the case of Osgood coefficients are by no means trivial.
The paper is organised as follows. In Section~\ref{sec_uniq} we give an overview on uniqueness and non-uniqueness results for (\ref{eq_probl_cauchy}). Moreover, we introduce the notion of modulus of continuity and define the Osgood condition. Section~\ref{sec_stab} is dedicated to the notion of conditional stability; after recalling some known results, we state and prove the main result of the paper (Theorem~\ref{teo_nuovo}). In Section~\ref{sec_part} we consider the particular case of Log-Log-Lipschitz coefficients, where the dependence on initial data can be explicitly determined.
\section{Uniqueness and non-uniqueness results}\label{sec_uniq}
This section recalls some results on the uniqueness and non-uniqueness of the solution of the problem (\ref{eq_probl_cauchy}) for an operator like (\ref{eq_L}) with coefficients depending also on $x$. Consider the space
\begin{equation}\label{eq_insieme}
\mathcal{H}_0\triangleq C([0,T],L^2)\cap C([0,T[,H^1)\cap C^1([0,T[,L^2)\,.
\end{equation}
One of the first results concerning uniqueness is due to Lions and Malgrange~\cite{Lio_Mal} who consider an equation associated to a sesquilinear operator defined in a Hilbert space. In our context, this result can be read as follows.
\begin{teor}\label{teo_Lio_Mal}
If the coefficients of the principal part of $\mathcal{L}$ are Lipschitz continuous with respect to $t$ and $x$, $u\in\mathcal{H}_0$ and $u_0=0$, then $\mathcal{L}u=0$ implies $u\equiv 0$.$
\square$
\end{teor}
The Lipschitz continuity of the coefficients is a crucial requirement for the claim, as shown some years later by Pli\'s~\cite{Pli} who proved the following theorem.
\begin{teor}
There exist $u$, $b_1$, $b_2$ and $c\in C^\infty(\mathbb{R}^3)$, bounded with bounded derivatives and periodic in the space variables and there exist $l:[0,T]\to\mathbb{R}$, H\"older-continuous of order $\delta$ for all $\delta<1$ but not Lipschitz-continuous, such that $1/2\le l(t)\le 3/2$ for all $t$, the support of $u$ is the set $\{t\ge 0\}\times\mathbb{R}^2$, and
\begin{multline}\label{eq_cauchy_plis}
\partial_t^2u(t,x_1,x_2)+\partial_{x_1}^2u(t,x_1,x_2)+l(t)\partial_{x_2}^2u(t,x_1,x_2)+\\
\qquad +b_1(t,x_1,x_2)\partial_{x_1}u(t,x_1,x_2)+b_2(t,x_1,x_2)\partial_{x_2}u(t,x_1,x_2)+\\
\qquad\qquad +c(t,x_1,x_2)u(t,x_1,x_2)=0 \qquad\qquad \textnormal{in }\mathbb{R}^3\,.
\end{multline}
$
\square$
\end{teor}
Note that the differential operator in (\ref{eq_cauchy_plis}) is elliptic. However, the same idea developed by Pli\'s to prove the claim can be exploited to obtain a counterexample for the backward parabolic operator
$$
\mathcal{L}_P\triangleq\partial_t+\partial_{x_1}^2+l(t)\partial_{x_2}^2+b_1(t,x_1,x_2)\partial_{x_1}+b_2(t,x_1,x_2)\partial_{x_2}+c(t,x_1,x_2)\,.
$$
Moreover, the result can be extended to the operator $\mathcal{L}$ by considering the problem solved by $u(t,x_1,x_2)e^{-x_1^2-x_2^2}$, thus obtaining the following theorem.
\begin{teor}
There exist coefficients $a_{i,j}$, depending only on $t$, which are H\"older continuous of every order but not Lipschitz continuous and there exist $u\in\mathcal{H}_0$ such that the solution of problem (\ref{eq_probl_cauchy}) with $u_0=0$ and $f=0$ is not identically zero.$
\square$
\end{teor}
In view of the previous results, a question naturally arises: what is the \emph{minimal} regularity (between Lipschitz continuity and H\"older continuity) of the coefficients of the principal part of $\mathcal{L}$ guaranteeing uniqueness of the solution of (\ref{eq_probl_cauchy})? To answer this question, the definition of \emph{modulus of continuity}, which can be exploited to measure the degree of regularity of a function, is useful.
\begin{defi}
A \emph{modulus of continuity} is a function $\mu:[0,1]\to[0,1]$ which is continuous, increasing, concave and such that $\mu(0)=0$. A function $f:\mathbb{R}\to\mathbb{R}$ has \emph{regularity $\mu$} if
$$
\sup_{0<\vert t-s\vert<1}\frac{\vert f(t)-f(s)\vert}{\mu(\vert t-s\vert)}<+\infty\,.
$$
The set of all functions having regularity $\mu$ is denoted by $C^\mu$.
\end{defi}
As particular cases, the Lipschitz continuity, the $\tau$-H\"older continuity ($\tau\in]0,1[$) and the \emph{logarithmic Lipschitz} (in short \emph{Log-Lipschitz}) continuity are obtained for $\mu(s)=s$, $\mu(s)=s^{\tau}$ and $\mu(s)=s\log(1+1/s)$, respectively.
A further characterization of the modulus of continuity is the so called \emph{Osgood condition} which is crucial in most of the results on uniqueness and stability that are described in the rest of the article. A modulus of continuity $\mu$ satisfies the Osgood condition if
$$
\int_0^1 \frac{1}{\mu(s)}ds = +\infty\,.
$$
This characterization is used, for instance, in~\cite{JMPA} to obtain the following result concerning an operator whose coefficients in the principal part depend also on $x$.
\begin{teor}\label{teo_delpri_2005}
Let $\mu$ be a modulus of continuity that satisfies the Osgood condition. Let
\begin{equation}\label{eq_H_1}
\mathcal{H}_1\triangleq H^1([0, T],L^2(\mathbb{R}^n))\cap L^2([0, T],H^2(\mathbb{R}^n))
\end{equation}
and let the coefficients $a_{i,j}$ be such that, for all $i,j=1,\ldots,n$,
$$
a_{i,j}\in C^\mu([0,T],\mathscr{C}_b(\mathbb{R}^n))\cap \mathscr{C}([0,T],\mathscr{C}_b^2(\mathbb{R}^n))\,,
$$
where $\mathscr{C}_b$ is the space of bounded functions and $\mathscr{C}_b^2$ is the space of the bounded functions whose first and second derivatives are bounded. If $u\in \mathcal{H}_1$, $\mathcal{L}u=0$ on $[0,T]\times \mathbb{R}^n$ and $u(0,x)=0$ on $\mathbb{R}^n$, then $u\equiv 0$ on $[0,T]\times\mathbb{R}^n$.
\end{teor}
More recently, by using Bony's para-multiplication, the result has been improved as far as the regularity with respect to $x$ is concerned, i.e. replacing $\mathscr{C}^2$ regularity with Lipschitz regularity~\cite{AMPA}.
Note that the claim of Theorem~\ref{teo_delpri_2005} refers to the function space defined by (\ref{eq_H_1}), however, it is not difficult to extend it to the function space $\mathcal{H}_0$ defined by (\ref{eq_insieme}).
\section{Conditional stability results}\label{sec_stab}
For Cauchy problems related to backward parabolic differential operators, which in general are not well posed, the notion of continuous dependence on initial data is replaced by the notion of (conditional) stability, which is associated with the property of a problem to be well behaved, as defined by John~\cite{John}. The question about conditional stability can be stated as follows. Suppose that two functions $u$ and $v$, defined in $[0,T]\times\mathbb{R}^n$, are solutions of the same equation; suppose, in addition, that $u$ and $v$ satisfy a fixed bound in a space $\mathcal{K}$ and that $\Vert u(0,\cdot)-v(0,\cdot)\Vert_{\mathcal{H}}$ is small (less than some $\epsilon$). Given these assumptions, can we say something about the quantity $\sup_{t\in[0,T^\prime]}\Vert u(t,\cdot)-v(t,\cdot)\Vert_{\mathcal{K}}$ for some $T^\prime<T$? Does it remain small as well (e.g. less than a value related to $\epsilon$)? In this section some results that give an answer to the above questions are reported.
\subsection{Stability with Lipschitz-continuous (with respect to $t$) coefficients}
One of the first results on conditional stability has been proven by Hurd~\cite{Hurd} in the same theoretical framework considered by Lions and Malgrange.
\begin{teor}\label{teor_LioMal}
Suppose that the coefficients $a_{i,j}$ are Lipschitz continuous both in $t$ and in $x$. For every $T^\prime\in]0,T[$ and for every $D>0$ there exist $\rho>0$, $\delta\in]0,1[$ and $M>0$ such that if $u\in \mathcal{H}_0$ is a solution of $\mathcal{L}u=0$ on $[0,T]\times\mathbb{R}^n$ with $\Vert u(t,\cdot)\Vert_{L^2}\le D$ on $[0,T]$ and $\Vert u(0,\cdot)\Vert_{L^2}\le\rho$, then
\begin{equation}\label{eq_teo_hurd}
\sup_{t\in[0,T^\prime]}\Vert u(t,\cdot)\Vert_{L^2}\le M\Vert u(0,\cdot)\Vert_{L^2}^\delta\,.
\end{equation}
The constants $\rho$, $\delta$ and $M$ depend only on $T^\prime$ and $D$, on the ellipticity constant of $\mathcal{L}$, on the $L^\infty$ norms of the coefficients $a_{i,j}$, $b_j$, $c$, on the $L^\infty$ norms of their spatial derivatives, and on the Lipschitz constant of the coefficients $a_{i,j}$ with respect to time.$
\square$
\end{teor}
The result expressed by (\ref{eq_teo_hurd}) implies uniqueness of the solution to the Cauchy problem, so that a necessary condition to this kind of conditional stability is that the coefficients $a_{i,j}$ fulfil the Osgood condition with respect to time. Hence a natural question arises: is Osgood condition also a sufficient condition for (\ref{eq_teo_hurd}) to hold? Del Santo and Prizzi~\cite{MATAN} have given a negative answer to this question. In particular, mimicking Pli\'s counterexample, they have shown that if the coefficients $a_{i,j}$ are not Lipschitz-continuous but only Log-Lipschitz-continuous then Hurd's result does not hold. Moreover, they have proven that if the coefficients are Log-Lipschitz-continuous then a conditional stability property, weaker than (\ref{eq_teo_hurd}), does hold. More recently, the result has been further improved~\cite{NONLINAL}.
\subsection{Stability with Log-Lipschitz-continuous (with respect to $t$) coefficients}
As mentioned above, Osgood condition is not sufficient for H\"older conditional stability of the solution expressed by (\ref{eq_teo_hurd}). The following paragraph specifies this claim.
\subsubsection{Counterexample to H\"older stability in the Log-Lipschitz case}\label{par_controesempio_uno}
The counterexample relies on the fact that it is possible~\cite{MATAN} to construct
\begin{itemize}
\item a sequence $\{\mathcal{L}_k\}_{k\in \mathbb{N}}$ of backward uniformly parabolic operators with uniformly Log-Lipschitz-continuous coefficients (not depending on the space variables) in the principal part and space-periodic uniformly bounded smooth coefficients in the lower order terms,
\item a sequence $\{u_k\}_{k\in\mathbb{N}}$ of space-periodic smooth uniformly bounded solutions of $\mathcal{L}_ku_k=0$ on $[0,1]\times\mathbb{R}^2$,
\item a sequence $\{t_k\}_{k\in\mathbb{N}}$ of real numbers, with $t_k\to 0$,
\end{itemize}
such that
$$
\lim_{k\to\infty}\Vert u_k(0,\cdot,\cdot)\Vert_{L^2([0,2\pi]\times[0,2\pi])}=0
$$
and
$$
\lim_{k\to\infty}\frac{\Vert u_k(t_k,\cdot,\cdot)\Vert_{L^2([0,2\pi]\times[0,2\pi])}}{\Vert u_k(0,\cdot,\cdot)\Vert^\delta_{L^2([0,2\pi]\times[0,2\pi])}}=+\infty
$$
for every $\delta>0$.
\subsubsection{Stability result in the Log-Lipschitz case}
In the case of Log-Lipschitz coefficients a result weaker than (\ref{eq_teo_hurd}) is valid. Consider the equation $\mathcal{L}u=0$ on $[0,T]\times\mathbb{R}^n$, with $\mathcal{L}$ defined in (\ref{eq_L}), and suppose that for all $i,j=1,\ldots,n$, $a_{i,j}\in \textnormal{LogLip}([0,T])$, in particular
$$
\sup_{0<\vert\tau\vert<1}\frac{\vert a_{i,j}(t+\tau)-a_{i,j}(t)\vert}{\vert\tau\vert\left(\log\left(1+\frac{1}{\vert\tau\vert}\right)\right)}<+\infty\,;\label{condizione_terza}
$$
let $b_j$ and $c$ belong to $L^\infty([0,T])$.
\begin{teor}\label{teo_matan}
{\bf\cite{MATAN}}
Suppose that the above hypotheses hold. For all $T^\prime\in]0,T[$ and for all $D>0$ there exist $\rho>0$, $M>0$, $N>0$ and $\delta\in]0,1[$ such that, if $u\in\mathcal{H}_0$ is a solution of $\mathcal{L}u=0$ on $[0,T]\times\mathbb{R}^n$ with $\Vert u(t,\cdot)\Vert_{L^2}\le D
$ on $[0,T]$ and $\Vert u(0,\cdot)\Vert_{L^2}\le \rho$, then
\begin{equation}\label{eq_teo_del_pri}
\sup_{t\in[0,T^\prime]}\Vert u(t,\cdot)\Vert_{L^2}\le M e^{-N\vert\log\Vert u(0,\cdot)\Vert_{L^2}\vert^\delta}\,,
\end{equation}
where the constants $\rho$, $\delta$, $M$ and $N$ depend only on $T^\prime$, on $D$, on the ellipticity constant of $\mathcal{L}$, on the $L^\infty$ norms of the coefficients $a_{i,j}$, on the $L^\infty$ norms of their spatial first derivatives, and on the Log-Lipschitz constant of the coefficients $a_{i,j}$ with respect to time.
\end{teor}
Using Bony's para-product the result can be extended to the case in which the coefficients depend also on the space variable and are Lipschitz continuous with respect to it~\cite{NONLINAL}.
\subsection{Stability with Osgood-continuous (with respect to time) coefficients}
Let us finally come to the new result contained in this paper. As in the previous section we first present a counterexample to the stability condition (\ref{eq_teo_del_pri}) and then a new weaker stability result.
\subsubsection{Counterexample to stability estimate (\ref{eq_teo_del_pri}) in the LogLog-Lipschitz case}\label{par_controesempio_due}
Consider the modulus of continuity $\omega$ defined, near $0$, by
$$
\omega(s)=s\log\left(1+\frac{1}{s}\right)\log\left(\log\left(1+\frac{1}{s}\right)\right)
$$
and note that $\omega$ satisfies the Osgood condition but $\mathscr{C}^\omega$ functions are not Log-Lipschitz continuous. As in Paragraph~\ref{par_controesempio_uno}, it is possible~\cite{mia_tesi} to construct
\begin{itemize}
\item a sequence $\{\mathcal{L}_k\}_{k\in \mathbb{N}}$ of backward uniformly parabolic operators with uniformly $\mathscr{C}^\omega$-continuous coefficients in the principal part and space-periodic uniformly bounded smooth coefficients in the lower order terms,
\item a sequence $\{u_k\}_{k\in\mathbb{N}}$ of space-periodic smooth uniformly bounded solutions of $\mathcal{L}_ku_k=0$ on $[0,1]\times\mathbb{R}^2$,
\item a sequence $\{t_k\}_{k\in\mathbb{N}}$ of real numbers, with $t_k\to 0$,
\end{itemize}
such that
$$
\lim_{k\to\infty}\Vert u_k(0,\cdot,\cdot)\Vert_{L^2([0,2\pi]\times[0,2\pi])}=0
$$
but (\ref{eq_teo_del_pri}) does not hold for all $\delta$; more precisely
$$
\lim_{k\to\infty}\frac{\Vert u_k(t_k,\cdot,\cdot)\Vert_{L^2([0,2\pi]\times[0,2\pi])}}{e^{-N\vert \log\Vert u_k(0,\cdot,\cdot)\Vert_{L^2([0,2\pi]\times[0,2\pi])}\vert^\delta}}=+\infty
$$
for every $\delta>0$.
\subsection{Stability result in the Osgood-continuous case}
From now on, the following conditions are assumed to hold.
\begin{ipot}\label{ipot_princ}
The operator $\mathcal{L}$ defined in (\ref{eq_L}) is such that
\begin{itemize}
\item for all $t\in[0,T]$ and for all $i,j=1,\ldots,n$,
$$
a_{i,j}(t)=a_{j,i}(t)\,;
$$
\item there exists $k_A>0$ such that, for all $(t,\xi)\in [0,T]\times \mathbb{R}^n$,
$$
k_A\vert\xi\vert^2\le \sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\le k_A^{-1}\vert\xi\vert^2\,;
$$
\item there exists $k_B>0$ such that, for all $t\in [0,T]$ and for all $i=1,\ldots,n$, $\vert b_i(t)\vert \le k_B$;
\item there exists $k_C>0$ such that, for all $t\in [0,T]$, $\vert c(t)\vert\le k_C$;
\item for all $i,j=1,\ldots,n$, $a_{i,j}\in \mathscr{C}^{\omega}([0,T])$, where $\omega$ is a modulus of continuity that satisfies the Osgood condition.
\end{itemize}
\end{ipot}
We can now state our main result.
\begin{teor}\label{teo_nuovo}
For all $T^\prime\in]0,T[$ and for all $D>0$ there exist $\rho^\prime>0$, and an increasing continuous function $G:[0,+\infty[\to[0,+\infty[$, with $G(0)=0$, such that, if $u\in\mathcal{H}_0$ is a solution of $\mathcal{L}u=0$ on $[0,T]$ with $\Vert u(t,\cdot)\Vert_{L^2}\le D$ on $[0,T]$ and $\Vert u(0,\cdot)\Vert_{L^2}\le\rho^\prime$, then
\begin{equation}\label{eq_in_L2}
\sup_{t\in[0,T^\prime]}\Vert u(t,\cdot)\Vert_{L^2}^2\le G(\Vert u(0,\cdot)\Vert_{L^2}^2)\,.
\end{equation}
The constant $\rho^\prime$ and the function $G$ depend on $k_A,k_B,k_C,\omega,n,T,T^\prime$ and $D$.$
\square$
\end{teor}
\begin{defi}\label{def_Gev_Sob}
\cite{Hua_Rod} Given $a\ge 0$, $d\in\mathbb{R}$ and $\epsilon>1$, the \emph{Gevrey-Sobolev} function space $H^d_{a,\epsilon}$ is the space of the functions $u:\mathbb{R}^n\to\mathbb{R}$ such that
$$
\Vert u\Vert_{H^d_{a,\epsilon}}\triangleq\int_{\mathbb{R}^n}\left(1+\vert\xi\vert^2\right)^de^{2a\vert\xi\vert^{1/\epsilon}}\left\vert \hat{u}(\xi)\right\vert^2d\xi<+\infty\,,
$$
where $\hat{u}$ is the Fourier transform of $u$.
\end{defi}
\begin{defi}\label{def_OGS}
Let $a>0$, $d\in\mathbb{R}$ and $\omega$ a modulus of continuity satisfying the Osgood condition. We denote by $H_{a,\omega}^d$ the set of the functions $u:\mathbb{R}^n\to\mathbb{R}$ such that
$$
\Vert u\Vert^2_{H_{a,\omega}^d}\triangleq \int_{\mathbb{R}^n}\left(1+\vert\xi\vert^2\right)^de^{a\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}\vert\hat{u}(\xi)\vert^2d\xi<+\infty\,.
$$
We call it \emph{Osgood-Sobolev} function space.
\end{defi}
\begin{remark}
From Definitions~\ref{def_Gev_Sob} and~\ref{def_OGS} it is easy to see that, for all moduli of continuity $\omega$, for all $\epsilon>1$, for all $a>0$ and for all $d\in\mathbb{R}$,
$$
H_{a,\omega}^d \subset H_{a,\epsilon}^d\,.
$$
\end{remark}
Theorem~\ref{teo_nuovo} is a consequence of the following local result.
\begin{teor}\label{teo_finale_stab_norme_classiche}
There exists $\alpha_1>0$ and, for any $T^{\prime\prime}:0<T^{\prime\prime}<T$, there exist constants $\rho>0$, $C>0$ and a function $g:[0,k_A]\to[0,+\infty[$, such that, if $u\in\mathcal{H}_0$ is a solution of
\begin{equation}\label{eq_lu=0}
\mathcal{L}u=0\,,
\end{equation}
with $\mathcal{L}$ fulfilling Assumption~\ref{ipot_princ} and $\Vert u(0,\cdot)\Vert^2_{H_{\nu,\epsilon}^0}<\rho$ for some $\nu>0$ and some $\epsilon>1$, then
\begin{equation}\label{eq_u_settima_ultima_norme_classiche}
\sup_{z\in[0,\bar{\sigma}]}\Vert u(z,\cdot)\Vert^2_{H^1}\le C e^{-\sigma g\left(\Vert u(0,\cdot)\Vert^2_{H_{\nu,\epsilon}^0}\right)}\left[1+\Vert u(\sigma,\cdot)\Vert^2_{H^1}\right]\,,
\end{equation}
where $\sigma=\min\{T^{\prime\prime},1/\alpha_1\}$ and $\bar{\sigma}=\sigma/8$. The constant $\alpha_1$ depends only on $k_A,k_B,k_C,\omega$ and $n$ while the constants $\rho$ and $C$ depend also on $T$ and $T^{\prime\prime}$. The function $g$ is a strictly decreasing function; it depends on $k_A,k_B,k_C,\omega,n,T,T^{\prime\prime},\epsilon$ and $\nu$ and satisfies $\lim_{y\to 0}g(y)=+\infty$.$
\square$
\end{teor}
Theorem~\ref{teo_finale_stab_norme_classiche} will be proven with the help of partial results expressed in terms of estimates of some integral quantities. The following Lemma~\ref{lem_tutti gevrey} guarantees that all the integral quantities that will be introduced are finite, so that the obtained estimates make sense.
\begin{lemma}\label{lem_derivata_limitata}
Let $u:[0,T]\to\mathbb{R}$ a $C^1$ function. If $u^\prime(t)\ge M u(t)$, then $u(t)\le e^{M(t-T)}u(T)$.
\end{lemma}
\textbf{Proof. }It is sufficient to note that:
\begin{multline*}
u^\prime(t)\ge Mu(t)\,\,\Rightarrow\,\,u^\prime(t)e^{-M(t-T)}-Mu(t)e^{-M(t-T)}\ge 0\,\,\Rightarrow\\[2mm]
\Rightarrow\,\,\frac{d}{dt}\left(u(t)e^{-M(t-T)}\right)\ge 0\,\,\Rightarrow\,\, u(t)e^{-M(t-T)}\le u(T)\,\,\Rightarrow\\
\Rightarrow\,\,u(t)\le e^{M(t-T)}u(T)\,.
\end{multline*}
$
\blacksquare$
\begin{lemma}\label{lem_tutti gevrey}
Let $M>0$ and let $u\in\mathcal{H}_0$ be a solution of
\begin{equation}\label{eq_lemma_maggiorazioni}
\partial_t u+\sum_{i,j=1}^na_{i,j}(t)\partial_{x_i}\partial_{x_j}u+\sum_{i=1}^nb_i(t)\partial_{x_i}u+c(t)u=0\,,
\end{equation}
on $[0,T]$, such that $\Vert u(t,\cdot)\Vert_{L^2}\le M$, for all $t\in[0,T]$. Let $l>0$ and extend the coefficients $a_{i,j}$, $b_i$ and $c$ to $[-l,T]$ by setting $a_{i,j}(t)=a_{i,j}(0)$, $b_i(t)=b_i(0)$ and $c(t)=c(0)$ for all $t\in[-l,0[$. Then $u$ can be extended to a solution of (\ref{eq_lemma_maggiorazioni}) on $[-l,T]$ for which there exists $\hat{M}$ such that $\Vert u(t,\cdot)\Vert_{L^2}\le \hat{M}$ on $[-l,T]$. The constant $\hat{M}$ depends only on $n$, $k_A$, $k_B$, $k_C$, $T$, $l$ and $M$. Moreover,
\begin{enumerate}
\item $u\in C^0([-l,T[,H_{a,\epsilon}^d)$ for all $a\ge 0$, $\epsilon>1$ and $d\in\mathbb{R}$;
\item $u\in C^0([-l,T[,H^1)$ and there exists $C$, which depends on $n$, $k_A$, $k_B$, $k_C$, $T$ and $l$, such that
$$
\Vert u(t,\cdot)\Vert_{H^1}\le C(T-t)^{-1/2}\Vert u(T,\cdot)\Vert_{L^2}
$$
for all $t\in[-l,T[$;
\item there exists $\hat{C}$, which depends on $n$, $k_A$, $k_B$, $k_C$, $l$, $\nu$ and $\epsilon$ and which tends to $+\infty$ when $l$ tends to zero, such that
$$
\Vert u(-l,\cdot)\Vert_{H^0_{\nu,\epsilon}}\le \hat{C}\Vert u(0,\cdot)\Vert_{L^2}\,.
$$
$
\square$
\end{enumerate}
\end{lemma}
\textbf{Proof. }It is easy to see that for all $t\in[0,T]$ and for almost all $\xi\in\mathbb{R}^n$,
\begin{equation}\label{identita_trasf_four}
\partial_t \hat{u}(t,\xi)-\sum_{i,j=1}^n a_{i,j}(t)\xi_i\xi_j \hat{u}(t,\xi)+\imath\sum_{i=1}^nb_i(t)\xi_i\hat{u}(t,\xi)+c(t)\hat{u}(t,\xi)=0\,.
\end{equation}
Multiplying both terms of (\ref{identita_trasf_four}) by $\bar{\hat{u}}$ yields
\begin{equation}\label{eq_ut_e_u_bar}
\partial_t \hat{u}(t,\xi)\bar{\hat{u}}(t,\xi)\!=\!\sum_{i,j=1}^n a_{i,j}(t)\xi_i\xi_j \vert\hat{u}(t,\xi)\vert^2\!-\!\imath\sum_{i=1}^nb_i(t)\xi_i\vert\hat{u}(t,\xi)\vert^2\!-\!c(t)\vert\hat{u}(t,\xi)\vert^2\,.
\end{equation}
By adding to (\ref{eq_ut_e_u_bar}) its complex conjugate, we obtain
\begin{multline}\label{eq_u_tutti_una}
\partial_t\vert\hat{u}(t,\xi)\vert^2=2\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\vert\hat{u}(t,\xi)\vert^2+2\sum_{i=1}^n\Im\{b_i(t)\}\xi_i\vert \hat{u}(t,\xi)\vert^2+\\[2mm]
-2\Re\{c(t)\}\vert\hat{u}(t,\xi)\vert^2\,,
\end{multline}
hence, recalling the bounds for the coefficients of $\mathcal{L}$ (see Assumption~\ref{ipot_princ}),
\begin{equation*}
\partial_t\vert\hat{u}(t,\xi)\vert^2\ge2 k_A\vert\xi\vert^2\vert\hat{u}(t,\xi)\vert^2-2n k_B\vert\xi\vert\vert\hat{u}(t,\xi)\vert^2-2k_C\vert\hat{u}(t,\xi)\vert^2\,,
\end{equation*}
i.e.
\begin{equation*}
\partial_t\vert\hat{u}(t,\xi)\vert^2\ge(2k_A\vert\xi\vert^2-2n k_B\vert\xi\vert-2k_C)\vert\hat{u}(t,\xi)\vert^2\,.
\end{equation*}
Lemma~\ref{lem_derivata_limitata} allows one to write
\begin{equation}\label{tre_asterischi}
\vert\hat{u}(t,\xi)\vert^2\le e^{(2k_A\vert\xi\vert^2-2nk_B\vert\xi\vert-2k_C)(t-T)}\vert \hat{u}(T,\xi)\vert^2\,.
\end{equation}
Therefore, for a fixed $t\in[-l,T[$,
\begin{multline*}
\int_{\mathbb{R}^n}\left(1+\vert\xi\vert^2\right)^{d}e^{2a\vert\xi\vert^{\frac{1}{\epsilon}}}\vert\hat{u}(t,\xi)\vert^2d\xi\le\\
\le\int_{\mathbb{R}^n}\left(1+\vert\xi\vert^2\right)^{d}e^{2a\vert\xi\vert^{\frac{1}{\epsilon}}+(2k_A\vert\xi\vert^2-2nk_B\vert\xi\vert-2k_C)(t-T)}\vert\hat{u}(T,\xi)\vert^2d\xi<+\infty\,,
\end{multline*}
where the last inequality comes from the fact that $u\in\mathcal{H}_0$ and therefore, in particular, $u\in\mathscr{C}^0([0,T],L^2(\mathbb{R}^n))$, and, since $t<T$,
$$
\lim_{\vert \xi \vert\to\infty}\left(1+\vert\xi\vert^2\right)^{d}e^{2a\vert\xi\vert^{\frac{1}{\epsilon}}+(2k_A\vert\xi\vert^2-2nk_B\vert\xi\vert-2k_C)(t-T)}=0
$$
for all $a>0$ and all $\epsilon>1$. The first claim is then proven. The second claim is proven easily by choosing $d=1$ and $a=0$. To prove the third claim it is sufficient to rewrite equation (\ref{tre_asterischi}) replacing $T$ with $0$.$
\blacksquare$
\subsection{Preliminary results and definitions}\label{sec_omega}
In this section some functions that are used in the rest of the article are defined. Let $\omega$ be a modulus of continuity satisfying the Osgood condition. For a given $\rho>1$ define the function $\theta:[1,+\infty[\to[0,+\infty[$ as
\begin{equation}\label{eq_def_theta}
\theta(\rho)=\int_{1/\rho}^1\frac{1}{\omega(s)}ds\,.
\end{equation}
It is easy to see that $\theta$ is bijective and strictly increasing. As a consequence, it can be inverted. For $y\in(0,1]$, for $q>0$ and for $\lambda>0$, let $\psi_{\lambda,q}:]0,1]\to[1,+\infty[$ be defined by
$$
\psi_{\lambda,q}(y)\triangleq \theta^{-1}\left(-\lambda q \log y\right)\,.
$$
The relation
$$
\theta\left(\psi_{\lambda,q}(y)\right)=-\lambda q \log y
$$
immediately follows from the definitions; hence
$$
\theta^\prime\left(\psi_{\lambda,q}(y)\right)\psi^\prime_{\lambda,q}(y)=-\frac{\lambda q}{y}\,.
$$
Now, let the function $\phi_{\lambda, q}:(0,1]\to(-\infty,0]$ be defined as
\begin{equation}\label{eq_def_phi}
\phi_{\lambda,q}(y)\triangleq q\int_1^y\psi_{\lambda,q}(z)dz\,.
\end{equation}
The function $\phi_{\lambda, q}$ is bijective and strictly increasing; moreover,
\begin{equation}\label{eq_diff_phi_prima}
\phi_{\lambda,q}^{\prime\prime}(y)=q\psi^\prime_{\lambda,q}(y)=\frac{q}{\theta^\prime\left(\psi_{\lambda,q}(y)\right)}\left(-\frac{\lambda q}{y}\right)\,.
\end{equation}
On the other hand, equation (\ref{eq_def_theta}), with the change of variable $\eta=1/s$, becomes
$$
\theta(\rho)=-\int_\rho^1\frac{1}{\omega\left(\frac{1}{\eta}\right)}\frac{1}{\eta^2}d\eta=\int_1^\rho\frac{1}{\eta^2\omega\left(\frac{1}{\eta}\right)}d\eta
$$
from which
\begin{equation}\label{eq_da_sostituire}
\frac{1}{\theta^\prime\left(\psi_{\lambda,q}(y)\right)}=\psi_{\lambda,q}(y)^2\omega\left(\frac{1}{\psi_{\lambda,q}(y)}\right)\,.
\end{equation}
Substituting (\ref{eq_da_sostituire}) into (\ref{eq_diff_phi_prima}) and recalling that $\psi_{\lambda,q}(y)=\phi^\prime_{\lambda,q}(y)/q$, it is easy to see that $\phi_{\lambda,q}$ satisfies the equation
\begin{equation}\label{eq_diff_per_phi}
y\phi^{\prime\prime}_{\lambda,q}(y)=-\lambda\left(\phi^\prime_{\lambda,q}(y)\right)^2\omega\left(\frac{q}{\phi_{\lambda,q} ^\prime(y)}\right)\,.
\end{equation}
Note that for all $\lambda>0$, for all $q>0$ and for all $y\in(0,1]$, $\psi_{\lambda,q}\in(1,+\infty)$ and, consequently,
$$
\frac{q}{\phi^\prime_{\lambda,q}(y)}\in(0,1)\,.
$$
\subsection{A pointwise estimate}
The first result shows that, once fixed $\xi$, namely the value of the frequency argument of $\hat{u}$, it is possible to find a bound for a particular time-integral, in an interval $[0,\sigma]$, of a function of $\vert\hat{u}(t,\xi)\vert$. This bound consists of the sum of two terms depending on $\hat{u}(0,\xi)$ and $\hat{u}(\sigma,\xi)$, respectively.
\begin{prop}\label{prop_con_lettere}
Let $T^{\prime\prime}\in \; ]0,T[$. There exist $\alpha_1>0$, $\bar{\lambda}$ and $\bar{\gamma}>0$ such that, setting $\alpha\triangleq\max\{\alpha_1,1/T^{\prime\prime}\}$, defining $\sigma=1/\alpha$, fixing $\tau\in\;]0,\sigma/4]$, and letting $\beta\ge \sigma+\tau$, whenever $u\in \mathcal{H}_0$ is a solution of (\ref{eq_lu=0}), one has
\begin{multline}\label{eq_u_hat_seconda}
\frac{1}{4}\left(k_A\vert\xi\vert^2+\gamma\right)\int_0^\sigma e^{(1-\alpha t)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{2\gamma t}e^{-2\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\vert\hat{u}(t,\xi)\vert^2dt\le\\
\le \phi^\prime_\lambda\left(\frac{\tau}{\beta}\right)\tau e^{\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{-2\beta\phi_\lambda\left(\frac{\tau}{\beta}\right)}\vert\hat{u}(0,\xi)\vert^2+\\
+(\sigma+\tau)(\gamma+k_A^{-1}\vert\xi\vert^2)e^{2\gamma\sigma}e^{-2\beta\phi_\lambda\left(\frac{\sigma+\tau}{\beta}\right)}\vert\hat{u}(\sigma,\xi)\vert^2\,,
\end{multline}
for all $\lambda\ge\bar{\lambda}$ and all $\gamma\ge\bar{\gamma}$, where $\phi_\lambda\triangleq \phi_{\lambda,k_A}$ (see (\ref{eq_def_phi})).
The constant $\alpha_1$ depends only on $n$, $k_A$, $k_B$, $k_C$ and $\omega$, while $\bar{\gamma}$ and $\bar{\lambda}$ depend on $n$, $k_A$, $k_B$, $k_C$, $\omega$, $T$ and $T^{\prime\prime}$.$
\square$
\end{prop}
\textbf{Proof. } Let $T^{\prime\prime}\in \; ]0,T[$ and let $\alpha\geq1/T^{\prime\prime}$, $\gamma>0$, $\lambda>0$, $\tau\in\;]0,T^{\prime\prime}[$, $\sigma=1/\alpha$ and $\beta\geq \tau+\sigma$. Consider the function $\hat{v}:[0,\sigma]\times\mathbb{R}^n\to\mathbb{R}$ defined by
\begin{equation}\label{eq_v_hat}
\hat{v}(t,\xi)=e^{\left(\frac{1-\alpha t}{2}\right)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{\gamma t}e^{-\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\hat{u}(t,\xi)\,.
\end{equation}
The time-derivative of $\hat{v}$ is
\begin{multline*}
\partial_t\hat{v}(t,\xi)=-\frac{\alpha}{2}\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)e^{\left(\frac{1-\alpha t}{2}\right)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi^2\vert+1}\right)}e^{\gamma t}e^{-\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\hat{u}(t,\xi)+\\
+\gamma e^{\left(\frac{1-\alpha t}{2}\right)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{\gamma t}e^{-\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\hat{u}(t,\xi)+\\
-\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)e^{\left(\frac{1-\alpha t}{2}\right)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{\gamma t}e^{-\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\hat{u}(t,\xi)+\\
+e^{\left(\frac{1-\alpha t}{2}\right)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{\gamma t}e^{-\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\partial_t\hat{u}(t,\xi)
\end{multline*}
which may be rewritten as
\begin{multline}\label{eq_per_v_hat}
\partial_t\hat{v}+\frac{\alpha}{2}\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\hat{v}-\gamma\hat{v}+\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\hat{v}-\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\hat{v}+\\
+\imath\sum_{i=1}^nb_i(t)\xi_i\hat{v}+c(t)\hat{v}=0\,,
\end{multline}
where the dependency of $\hat{v}$ and $\partial_t\hat{v}$ on $t$ and on $\xi$ has been neglected for the sake of a simple notation and where the identity (\ref{identita_trasf_four}) has been exploited. The complex conjugate equation of (\ref{eq_per_v_hat}) is
\begin{multline}\label{eq_per_v_hat_con}
\partial_t\bar{\hat{v}}+\frac{\alpha}{2}\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\bar{\hat{v}}-\gamma\bar{\hat{v}}+\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\bar{\hat{v}}-\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\bar{\hat{v}}+\\
-\imath\sum_{i=1}^n\bar{b}_i(t)\xi_i\bar{\hat{v}}+\bar{c}(t)\bar{\hat{v}}=0\,.
\end{multline}
Multiplying (\ref{eq_per_v_hat}) by $(t+\tau)\partial_t\bar{\hat{v}}$ and (\ref{eq_per_v_hat_con}) by $(t+\tau)\partial_t\hat{v}$ and summing the two terms yields
\begin{multline}\label{eq_sommata}
2(t+\tau)\vert\partial_t\hat{v}\vert^2+\frac{\alpha}{2}(t+\tau)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})-\gamma(t+\tau)(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})+\\
+(t+\tau)\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})-(t+\tau)\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})+\\
-2(t+\tau)\sum_{i=1}^n\xi_i\Im\{b_i(t)\hat{v}\partial_t\bar{\hat{v}}\}+2(t+\tau)\Re\left\{c(t)\hat{v}\partial_t\bar{\hat{v}}\right\}=0\,.
\end{multline}
Substituting in the second term the explicit expressions of $\partial_t\hat{v}$ and $\partial_t\bar{\hat{v}}$, that may be obtained from (\ref{eq_per_v_hat}) and (\ref{eq_per_v_hat_con}), one obtains
\begin{multline}\label{eq_prima_della_grande}
2(t+\tau)\vert\partial_t\hat{v}\vert^2-\frac{\alpha^2}{2}(t+\tau)\vert\xi\vert^4\left[\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\right]^2\vert\hat{v}\vert^2+\\
+\alpha\gamma(t+\tau)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\vert\hat{v}\vert^2-\alpha(t+\tau)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\vert\hat{v}\vert^2+\\
+\alpha(t+\tau)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\vert\hat{v}\vert^2\left(\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j-c(t)\right)+\\
-\gamma(t+\tau)(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})+(t+\tau)\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})+\\
-(t+\tau)(\hat{v}\partial_t\bar{\hat{v}}+\bar{\hat{v}}\partial_t\hat{v})\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j+\\
-2(t+\tau)\sum_{i=1}^n\xi_i\Im\{b_i(t)\hat{v}\partial_t\bar{\hat{v}}\}+2(t+\tau)\Re\left\{c(t)\hat{v}\partial_t\bar{\hat{v}}\right\}=0\,.
\end{multline}
Integrating (\ref{eq_prima_della_grande}) between $0$ and $s$, with $s\le\sigma=1/\alpha$, yields
\begin{multline}\label{eq_grande_prima}
2\int_0^s(t\!+\!\tau)\vert\partial_t\hat{v}(t,\xi)\vert^2dt-\frac{\alpha^2}{2}\vert\xi\vert^4\left[\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\right]^2\int_0^s(t\!+\!\tau)\vert\hat{v}(t,\xi)\vert^2dt+\\
+\underbrace{\alpha\gamma\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)\vert\hat{v}(t,\xi)\vert^2dt}_{(A)}+\\
-\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)\phi_\lambda^\prime\left(\frac{t\!+\!\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2dt+\\
+\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\vert\hat{v}(t,\xi)\vert^2dt+\\
-\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)c(t)\vert\hat{v}(t,\xi)\vert^2dt+\\
+\gamma\int_0^s\vert\hat{v}(t,\xi)\vert^2dt-\gamma(s\!+\!\tau)\vert\hat{v}(s,\xi)\vert^2+\\+\underbrace{\gamma\tau\vert\hat{v}(0,\xi)\vert^2}_{(B)}+\int_0^s\left[-\phi_\lambda^{\prime\prime}\left(\frac{t\!+\!\tau}{\beta}\right)\left(\frac{t\!+\!\tau}{\beta}\right)-\phi_\lambda^\prime\left(\frac{t\!+\!\tau}{\beta}\right)\right]\vert\hat{v}(t,\xi)\vert^2dt+\\
+\underbrace{\phi_\lambda^\prime\left(\frac{s\!+\!\tau}{\beta}\right)(s\!+\!\tau)\vert\hat{v}(s,\xi)\vert^2}_{(C)}-\phi_\lambda^\prime\left(\frac{\tau}{\beta}\right)\tau\vert\hat{v}(0,\xi)\vert^2+\\
-\underbrace{\int_0^s(t\!+\!\tau)[\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)+\bar{\hat{v}}(t,\xi)\partial_t\hat{v}(t,\xi)]\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_jdt}_{(D)}+\\
-2\sum_{i=1}^n\xi_i\int_0^s(t+\tau)\Im\{b_i(t)\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)\}dt+\\
+2\int_0^s(t\!+\!\tau)\Re\{c(t)\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)\}dt=0\,,
\end{multline}
where, to ease the following reasoning, some terms have been identified with capital letters from $A$ to $D$. Terms $(A)$ and $(B)$ are positive and, since $\phi$ is strictly increasing, also $(C)$ is positive. To obtain the final estimate, equation (\ref{eq_grande_prima}) needs to be slightly modified. In particular, extend functions $a_{i,j}$ to the whole real axis by setting $a_{i,j}(t)=a_{i,j}(0)$ for $t<0$ and $a_{i,j}(t)=a_{i,j}(T)$ if $t>T$ and define
$$
a_{i,j}^{\epsilon}(t)\triangleq (\rho_{\epsilon}\ast a_{i,j})(t)=\int_{\mathbb{R}}\rho_{\epsilon}(t-s)a_{i,j}(s)ds\,
$$
where $\rho_{\epsilon}$ is a $\mathscr{C}^\infty$ mollifier.
From (\ref{eq_grande_prima}), replacing, in $(D)$, $a_{i,j}(t)$ with $a_{i,j}(t)+a_{i,j}^\epsilon(t)-a_{i,j}^\epsilon(t)$, yields
\begin{multline}\label{eq_grande_prima_e_mezza}
2\underbrace{\int_0^s(t\!+\!\tau)\vert\partial_t\hat{v}(t,\xi)\vert^2dt}_{(E)}-\underbrace{\frac{\alpha^2}{2}\vert\xi\vert^4\left[\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\right]^2\int_0^s(t\!+\!\tau)\vert\hat{v}(t,\xi)\vert^2dt}_{(F)}+\\
-\underbrace{\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)\phi_\lambda^\prime\left(\frac{t\!+\!\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2dt}_{(G)}+\\
+\underbrace{\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\sum_{i,j=1}^n\xi_i\xi_j\int_0^s(t\!+\!\tau)a_{i,j}(t)\vert\hat{v}(t,\xi)\vert^2dt}_{(H)}+\\
-\underbrace{\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)c(t)\vert\hat{v}(t,\xi)\vert^2dt}_{(I)}+\underbrace{\gamma\int_0^s\vert\hat{v}(t,\xi)\vert^2dt}_{(L)}+\\
-\underbrace{\gamma(s\!+\!\tau)\vert\hat{v}(s,\xi)\vert^2}_{(M)}+\underbrace{\int_0^s\left[-\phi_\lambda^{\prime\prime}\left(\frac{t\!+\!\tau}{\beta}\right)\!\!\left(\frac{t\!+\!\tau}{\beta}\right)\!-\!\phi_\lambda^\prime\left(\frac{t\!+\!\tau}{\beta}\right)\right]\vert\hat{v}(t,\xi)\vert^2dt}_{(N)}+\\
-\underbrace{\phi_\lambda^\prime\left(\frac{\tau}{\beta}\right)\tau\vert\hat{v}(0,\xi)\vert^2}_{(O)}+\underbrace{2\sum_{i,j=1}^n\xi_i\xi_j\int_0^s(t\!+\!\tau)\Re\{\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)\}\widetilde{a}_{i,j}^\epsilon(t)dt}_{(P)}+\\
+\underbrace{\sum_{i,j=1}^n\xi_i\xi_j\int_0^s\vert\hat{v}(t,\xi)\vert^2\frac{\partial}{\partial t}[(t\!+\!\tau)a^\epsilon_{i,j}(t)]dt}_{(Q)}+\underbrace{\tau\sum_{i,j=1}^na^\epsilon_{i,j}(0)\xi_i\xi_j\vert \hat{v}(0,\xi)\vert^2}_{(R)}+\\
-\underbrace{(s\!+\!\tau)\sum_{i,j=1}^na^\epsilon_{i,j}(s)\xi_i\xi_j\vert \hat{v}(s,\xi)\vert^2}_{(S)}-\underbrace{2\sum_{i=1}^n\xi_i\!\int_0^s(t\!+\!\tau)\Im\{b_i(t)\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)\}dt}_{(T)}\\
+\underbrace{2\int_0^s(t+\tau)\Re\{c(t)\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)\}dt}_{(U)}\le 0\,,
\end{multline}
where $\widetilde{a}_{i,j}^\epsilon =a_{i,j}^\epsilon- a_{i,j}$ for all $i,j=1,\ldots,n$.
In the following each term is considered individually, beginning with $(P)$. The properties of the modulus of continuity $\omega$ guarantee that there exists a constant $C_0$ such that
$$
\vert a_{i,j}^\epsilon(t)-a_{i,j}(t)\vert\le C_0 \omega(\epsilon)\,,
$$
for all $\epsilon$, for all $i$, for all $j$ and for all $t$. Hence
$$
\left\vert\sum_{i,j=1}^n[a_{i,j}^\epsilon(t)-a_{i,j}(t)]\xi_i\xi_j\right\vert\le \sum_{i,j=1}^n\vert a_{i,j}^\epsilon(t)-a_{i,j}(t)\vert\vert\xi_i\xi_j\vert\le C_0n^2 \omega(\epsilon)\vert\xi\vert^2\,,
$$
where the property that, for all $i$, $\vert \xi_i\vert\le\vert \xi\vert$ has been exploited. As a consequence, if
$$
\epsilon=\frac{1}{\vert\xi\vert^2+1}\,,
$$
then
$$
\vert (P)\vert\le 2C_0n^2\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)\vert\hat{v}(t,\xi)\partial_t\bar{\hat{v}}(t,\xi)\vert dt\,.
$$
Young's inequality yields
$$
\vert(P)\vert\le\int_0^s(t+\tau)\vert\partial_t\hat{v}(t,\xi)\vert^2dt+C_0^2n^4\vert\xi\vert^4\left[\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\right]^2\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt
$$
and, consequently, since $\omega(s)\in[0,1]$ for all $s\in[0,1]$ and, in turn, $-\omega(s)^2>-\omega(s)$ for all $s\in[0,1]$,
$$
(P)\ge -\underbrace{\int_0^s(t+\tau)\vert\partial_t\hat{v}(t,\xi)\vert^2dt}_{(P_1)}-\underbrace{C_0^2n^4\vert\xi\vert^4\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt}_{(P_2)}\,.
$$
Let us consider now the term $(Q)$. For the properties of the modulus of continuity, there exists $C_1$ such that
$$
\vert (a_{i,j}^\epsilon)^\prime(t)\vert\le C_1\frac{\omega(\epsilon)}{\epsilon}\,,
$$
for all $\epsilon$, for all $i$, for all $j$ and for all $t$. As a consequence, if
$$
\epsilon=\frac{1}{\vert\xi\vert^2+1}\,,
$$
then
\begin{multline*}
(Q)=\sum_{i,j=1}^n\xi_i\xi_j\int_0^s\vert\hat{v}(t,\xi)\vert^2(t+\tau)(a_{i,j}^\epsilon)^\prime(t)dt+\\
+\sum_{i,j=1}^n\xi_i\xi_j\int_0^s\vert\hat{v}(t,\xi)\vert^2a_{i,j}^\epsilon(t)dt\ge\\
\ge -\underbrace{C_1n^2\vert\xi\vert^2(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt}_{(Q_1)}+\\
+\underbrace{\sum_{i,j=1}^n\xi_i\xi_j\int_0^s a_{i,j}^\epsilon(t)\vert\hat{v}(t,\xi)\vert^2dt}_{(Q_2)}\,.
\end{multline*}
As far as the terms (T) and (U) are concerned,
$$
(U)-(T)\ge -(U_1)-(U_2)-(T_1)-(T_2)\,,
$$
where
$$
(U_1)=2k_C^2\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt\,,\quad (U_2)=\frac{1}{2}\int_0^s(t+\tau)\vert\partial_t\hat{v}(t,\xi)\vert^2dt\,,
$$
$$
(T_1)=2n^2k_B^2\vert\xi\vert^2\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt\,,\quad (T_2)=\frac{1}{2}\int_0^s(t+\tau)\vert\partial_t\hat{v}(t,\xi)\vert^2dt\,.
$$
Note, moreover, that
$$
(H)\ge \alpha k_A \vert\xi\vert^4\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t+\tau)\vert \hat{v}(t,\xi)\vert^2 dt\,,
$$
and
$$
(Q_2)\ge k_A\vert\xi\vert^2\int_0^s\vert\hat{v}(t,\xi)\vert^2dt\,.
$$
\noindent
We claim now that there exist two positive constants $\alpha_1$ and $\gamma_1$ such that, for all $\xi\in{\mathbb R}^n$,
\begin{multline}
\frac{\gamma_1}{4T}+\frac{\alpha_1}{2}k_A\vert\xi\vert^4\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)-C_0^2n^4\vert\xi\vert^4\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)+\\[2mm]
-C_1n^2\vert\xi\vert^2\left(\vert\xi\vert^2+1\right)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)-2n^2k_B^2\vert\xi\vert^2-2k_C^2+\\[2mm]
-\frac{\alpha_1^2}{2}\vert\xi\vert^4\left(\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\right)^2-\alpha_1\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)k_C\ge 0\,.\label{eq_per_alfa_e_gamma_prima}
\end{multline}
Leaving the proof of (\ref{eq_per_alfa_e_gamma_prima}) to the reader, we
remark that it relies on the following facts:
when $\vert\xi\vert\ge 1$, the function
$$
\xi\to\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)
$$
is bounded from below by a positive quantity and
$$
\lim_{\vert\xi\vert\to+\infty}\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)=0\,.
$$
We remark also that, taking a constant $\alpha\geq \alpha_1$, the inequality (\ref{eq_per_alfa_e_gamma_prima}) remains true with $\alpha$ in place of $\alpha_1$, provided a possibly larger $\gamma_1$ is chosen.
As a consequence, if $\alpha=\max\{\alpha_1,1/T^{\prime\prime}\}$ and $\gamma\ge\gamma_1$, then
\begin{equation}\label{eq_diseg}
\frac{1}{2}(L)+\frac{1}{2}(H)-(P_2)-(Q_1)-(T_1)-(U_1)-(F)-(I)\ge 0\,.
\end{equation}
By using (\ref{eq_diseg})
into (\ref{eq_grande_prima_e_mezza}) and taking into account that $(E)=(T_2)+(U_2)=(P_1)$ and that $(R)\ge 0$, yields
\begin{multline}\label{eq_grande_terza}
\frac{1}{2}(H)+(Q_2)-\underbrace{\alpha\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t\!+\!\tau)\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2dt}_{(G)}+\frac{1}{2}(L)+\\
-\underbrace{\gamma(s+\tau)\vert\hat{v}(s,\xi)\vert^2}_{(M)}+\underbrace{\int_0^s\left[-\phi_\lambda^{\prime\prime}\left(\frac{t\!+\!\tau}{\beta}\right)\left(\frac{t\!+\!\tau}{\beta}\right)-\phi_\lambda^\prime\left(\frac{t\!+\!\tau}{\beta}\right)\right]\vert\hat{v}(t,\xi)\vert^2dt}_{(N)}+\\
-\underbrace{\phi_\lambda^\prime\left(\frac{\tau}{\beta}\right)\tau\vert\hat{v}(0,\xi)\vert^2}_{(O)}-\underbrace{(s+\tau)\sum_{i,j=1}^na^\epsilon_{i,j}(s)\xi_i\xi_j\vert \hat{v}(s,\xi)\vert^2}_{(S)}\le 0\,.
\end{multline}
Recall, now, that $\phi_\lambda$ is a solution of equation (\ref{eq_diff_per_phi}) with $q=k_A$. Since $\omega(z)/z>1$ for all $z\in(0,1)$, equation (\ref{eq_diff_per_phi}) implies
\begin{equation}\label{seconda_eq_per_phi}
-\frac{1}{2}y\phi_\lambda^{\prime\prime}(y)>\frac{\lambda k_A}{2}\phi_\lambda^\prime(y)\,,\quad\textnormal{ for all }y\in(0,1)\,.
\end{equation}
Hence, if $\phi_\lambda$ is a solution of (\ref{eq_diff_per_phi}) with $\lambda>2/k_A$,
$$
(N)\ge -\frac{1}{2}\int_0^s\phi_\lambda^{\prime\prime}\left(\frac{t+\tau}{\beta}\right)\left(\frac{t+\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2 dt\,,
$$
provided that $(t+\tau)/\beta\in(0,1)$ for all $t\in(0,s)$.
Consider, now, the following two cases.
\begin{enumerate}
\item If
$$
\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\le \frac{(\vert\xi\vert^2+1)k_A}{4}\,,
$$
then
$$
(G)\le\frac{1}{4}\alpha k_A\vert\xi\vert^2(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt
$$
and hence, if
\begin{equation}\label{eq_nuova_gamma}
\gamma > \bar{\gamma}\triangleq\max\left\{\gamma_1,8T\alpha k_A\omega\left(\frac{1}{2}\right)\right\}\,,
\end{equation}
then
$$
\frac{1}{2}(H)+\frac{1}{4}(L)\ge (G)\,.
$$
In fact if $\vert\xi\vert>1$, then
$$
\frac{1}{4}\alpha k_A\vert\xi\vert^2(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t+\tau)\vert\hat{v}(t,\xi)\vert^2dt\le \frac{1}{2}(H)\,.
$$
If $\vert\xi\vert\le 1$, then
$$
(\vert\xi\vert^2+1)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\le 2\omega\left(\frac{1}{2}\right)
$$
and choosing $\gamma$ according to (\ref{eq_nuova_gamma}) guarantees $(G)\le (L)/4$.
\item On the contrary, if
$$
\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)> \frac{(\vert\xi\vert^2+1)k_A}{4}\,,
$$
then, since the function $h:(0,1)\to\mathbb{R}$ defined by $h(y)=\omega(y)/y$ is decreasing,
\begin{multline*}
(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)=\frac{\displaystyle \omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}{\displaystyle \frac{1}{\vert\xi\vert^2+1}}\le\\[2mm]
\le\frac{\displaystyle \omega\left(\frac{k_A}{4\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)}\right)}{\displaystyle \frac{k_A}{4\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)}}=\frac{4}{k_A}\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\omega\left(\frac{k_A}{4\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)}\right)
\end{multline*}
and, since $\omega$ is increasing,
$$
(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\le\frac{4}{k_A}\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\omega\left(\frac{k_A}{\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)}\right)\,.
$$
As a consequence, if $\phi_\lambda$ is solution of (\ref{eq_diff_per_phi}) with $\lambda>4/k_A$, then
\begin{multline}
(N)\ge-\frac{1}{2}\int_0^s\phi_\lambda^{\prime\prime}\left(\frac{t+\tau}{\beta}\right)\left(\frac{t+\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2dt=\\
=\frac{\lambda}{2}\int_0^s\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\left(\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\omega\left(\frac{k_A}{\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)}\right)\right)\vert\hat{v}(t,\xi)\vert^2dt\ge\\
\ge\frac{\lambda k_A}{8}(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2dt\,.
\end{multline}
Moreover, if
$$
\lambda>\bar{\lambda}\triangleq\max\left(\frac{4}{k_A},\frac{16T\alpha}{k_A}\right)\,,
$$
then
$$
(N)\ge\alpha(\vert\xi\vert^2+1)\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)\int_0^s(t+\tau)\phi_\lambda^\prime\left(\frac{t+\tau}{\beta}\right)\vert\hat{v}(t,\xi)\vert^2dt\ge(G)\,.
$$
\end{enumerate}
In conclusion, taking into account that $(N)\ge 0$, $(H)\ge 0$, $(L)\ge 0$ and $(G)\ge 0$, leads to the inequality
\begin{equation}\label{eq_access_finale}
\frac{1}{2}(H)+\frac{1}{4}(L)+(N)-(G)\ge 0\,.
\end{equation}
Furthermore, using (\ref{eq_access_finale}) into (\ref{eq_grande_terza}) and taking into account that
$$
\frac{1}{2}(Q_2)\ge \frac{1}{2}k_A\vert\xi\vert^2\int_0^s\vert\hat{v}(t,\xi)\vert^2dt\,,
$$
yields
\begin{multline}\label{eq_finale_v}
\left(\frac{k_A\vert\xi\vert^2}{2}+\frac{\gamma}{4}\right)\int_0^s\vert\hat{v}(t,\xi)\vert^2dt\le\\
\le \phi_\lambda^\prime\left(\frac{\tau}{\beta}\right)\tau\vert\hat{v}(0,\xi)\vert^2+(s+\tau)(\gamma+k_A^{-1}\vert\xi\vert^2)\vert\hat{v}(s,\xi)\vert^2\,.
\end{multline}
Finally, substituting (\ref{eq_v_hat}) into (\ref{eq_finale_v}) yields
\begin{multline}\label{eq_u_hat_iniziale}
\frac{1}{4}\left(k_A\vert\xi\vert^2+\gamma\right)\int_0^se^{(1-\alpha t)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{2\gamma t}e^{-2\beta\phi_\lambda\left(\frac{t+\tau}{\beta}\right)}\vert\hat{u}(t,\xi)\vert^2dt\le\\
\le \phi_\lambda^\prime\left(\frac{\tau}{\beta}\right)\tau e^{\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{-2\beta\phi_\lambda\left(\frac{\tau}{\beta}\right)}\vert\hat{u}(0,\xi)\vert^2+\\
+(s+\tau)(\gamma+k_A^{-1}\vert\xi\vert^2)e^{(1-\alpha s)\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}e^{2\gamma s}e^{-2\beta\phi_\lambda\left(\frac{s+\tau}{\beta}\right)}\vert\hat{u}(s,\xi)\vert^2\,.
\end{multline}
Equation (\ref{eq_u_hat_iniziale}) holds for all $s\in(0,\sigma]$; choosing $s=\sigma$ one obtains (\ref{eq_u_hat_seconda}).$
\blacksquare$
\subsection{An integral estimate}
Proposition~\ref{prop_con_lettere} provides a pointwise estimate of the Fourier transform of $u$ which will allow us to obtain, by integration, an analogous estimate on the norm of $u$. To obtain this result the following lemma and Definition~\ref{def_OGS} are auxiliary.
\begin{lemma}\label{lemma_gamma}
If $u\in\mathcal{H}_0$ is solution of (\ref{eq_L}), then there exists $\bar{\gamma}$, not depending on $\xi$, such that, for all $\xi$, $e^{2\bar{\gamma} t}\vert\hat{u}(t,\xi)\vert^2$ is (weakly) increasing in $t$.$
\square$
\end{lemma}
\textbf{Proof. }We want to show that there exists $\bar{\gamma}$ such that
$$
\partial_t(e^{2\bar{\gamma} t}\hat{u}(t,\xi)\bar{\hat{u}}(t,\xi))\ge 0\,.
$$
Note that
\begin{multline}\label{eq_modulo_crescente}
\partial_t(e^{2\bar{\gamma} t}\hat{u}(t,\xi)\bar{\hat{u}}(t,\xi))=2\bar{\gamma} e^{2\bar{\gamma} t}\vert\hat{u}(t,\xi)\vert^2+\\
+e^{2\bar{\gamma} t}\partial_t(\hat{u}(t,\xi))\bar{\hat{u}}(t,\xi)+e^{2\bar{\gamma} t}\hat{u}(t,\xi)\partial_t(\bar{\hat{u}}(t,\xi))\,.
\end{multline}
From (\ref{identita_trasf_four}), multiplying by $\bar{\hat{u}}(t,\xi)$ we obtain
\begin{multline*}
\bar{\hat{u}}(t,\xi)\partial_t \hat{u}(t,\xi)=\\
=\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\vert\hat{u}(t,\xi)\vert^2-\imath \sum_{i=1}^nb_i(t)\xi_i\vert\hat{u}(t,\xi)\vert^2+c(t)\vert\hat{u}(t,\xi)\vert^2
\end{multline*}
and also, taking the complex conjugate of both sides,
\begin{multline*}
\hat{u}(t,\xi)\partial_t \bar{\hat{u}}(t,\xi)=\\
=\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\vert\hat{u}(t,\xi)\vert^2+\imath \sum_{i=1}^n\bar{b}_i(t)\xi_i\vert\hat{u}(t,\xi)\vert^2+\bar{c}(t)\vert\hat{u}(t,\xi)\vert^2
\end{multline*}
and, consequently,
\begin{multline}\label{eq_per_lemma_1}
\partial_t(e^{2\bar{\gamma} t}\hat{u}(t,\xi)\bar{\hat{u}}(t,\xi))=2\bar{\gamma} e^{2\bar{\gamma} t}\vert\hat{u}(t,\xi)\vert^2+2e^{2\bar{\gamma} t}\sum_{i,j=1}^na_{i,j}(t)\xi_i\xi_j\vert\hat{u}(t,\xi)\vert^2+\\
+2e^{2\bar{\gamma} t}\sum_{i=1}^n\Im\{b_i(t)\}\xi_i\vert\hat{u}(t,\xi)\vert^2+2e^{2\bar{\gamma} t}\Re\{c(t)\}\vert\hat{u}(t,\xi)\vert^2\ge\\
2e^{2\bar{\gamma} t}\vert\hat{u}(t,\xi)\vert^2(\bar{\gamma}+k_A\vert\xi\vert^2-nk_B\vert\xi\vert-k_C)\,.
\end{multline}
Now, if $\vert \xi\vert\ge nk_B/k_A$, then $k_A\vert\xi\vert^2>nk_B\vert\xi\vert$ and hence, if $\bar{\gamma}>k_C$, we have
$$
\bar{\gamma}+k_A\vert\xi\vert^2-nk_B\vert\xi\vert-k_C\ge 0\,.
$$
On the other hand, if $\vert \xi\vert< nk_B/k_A$, then $-\vert\xi\vert>-nk_B/k_A$ and hence $-nk_B\vert\xi\vert>-n^2k_B^2/k_A$. In conclusion, the claim holds for any $\bar{\gamma}$ such that $\bar{\gamma}>2\max\{k_C,n^2k_B^2/k_A\}$. $
\blacksquare$
Let us, now, come back to equation (\ref{eq_u_hat_seconda}). By integrating it with respect to $\xi$, the following result can be obtained.
\begin{prop}\label{prop_norma_nuova}
Let $\sigma$ and $\tau$ be as in Proposition~\ref{prop_con_lettere}. Set $\bar{\sigma}\triangleq \sigma/8$. There exists $C>0$ such that, whenever $u\in\mathcal{H}_0$ is a solution of (\ref{eq_L}), with $\mathcal{L}$ fulfilling Assumption~\ref{ipot_princ}, one has, for all $\beta\ge \sigma+\tau$,
\begin{multline}\label{eq_u_settima}
\sup_{z\in[0,\bar{\sigma}]}\Vert u(z,\cdot)\Vert^2_{H_{\frac{1}{2},\omega}^1}\le\\[2mm]
\le C e^{-\sigma\phi^\prime\left(\frac{\sigma+\tau}{\beta}\right)}\left[\phi^\prime\left(\frac{\tau}{\beta}\right)e^{-2\beta\phi\left(\frac{\tau}{\beta}\right)}\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}+\Vert u(\sigma,\cdot)\Vert^2_{H^1}\right]\,,
\end{multline}
\end{prop}
where $\phi=\phi_{\bar{\lambda},k_A}$ with $\bar{\lambda}$ given by Proposition~\ref{prop_con_lettere}. The constant $C$ depends on $n$, $k_A$, $k_B$, $k_C$, $\omega$, $T$ and $T^{\prime\prime}$.
$
\square$
\textbf{Proof. }In the hypotheses of the claim, Proposition~\ref{prop_con_lettere} guarantees the existence of $\sigma$, $\alpha$, $\gamma$ and $\phi_{\lambda}$ such that (\ref{eq_u_hat_seconda}) holds. The integrand function in (\ref{eq_u_hat_seconda}) is positive and, consequently, the term on the left hand side can be bounded from below by integrating on an interval contained in $[0,\sigma]$. Let $\tau\le \sigma/4$ and let $z$ be a value such that $0<z\le \bar{\sigma}$; we have
$$
[z,2z+\tau]\subset [0,\sigma/2]\,;
$$
by integrating with respect to $\xi$ and taking into account that, since $\sigma=1/\alpha$,
$$
1-\alpha t\ge 1-\alpha \frac{\sigma}{2}\ge\frac{1}{2}\,,
$$
for all $t\in[0,\sigma/2]$, one obtains
\begin{multline}\label{eq_u_hat_terza}
\frac{1}{4}\int_{\mathbb{R}^n}\left(k_A\vert\xi\vert^2+\gamma\right)e^{\frac{1}{2}\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}\int_z^{2z+\tau}e^{2\gamma t}e^{-2\beta\phi_{\lambda}\left(\frac{t+\tau}{\beta}\right)}\vert\hat{u}(t,\xi)\vert^2dtd\xi\le\\
\le \tau \phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)e^{-2\beta\phi_{\lambda}\left(\frac{\tau}{\beta}\right)}\int_{\mathbb{R}^n} e^{\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}\vert\hat{u}(0,\xi)\vert^2d\xi+\\
+(\sigma+\tau)e^{2\gamma\sigma}e^{-2\beta\phi_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)}\int_{\mathbb{R}^n}(\gamma+k_A^{-1}\vert\xi\vert^2)\vert\hat{u}(\sigma,\xi)\vert^2d\xi\,.
\end{multline}
Now, let $\bar{\bar{\gamma}}$ be a value of $\gamma$ fulfilling equation (\ref{eq_nuova_gamma}), let $\bar{\gamma}$ be the value provided by Lemma~\ref{lemma_gamma} and let
$$
\gamma>\max\{\bar{\bar{\gamma}},\bar{\gamma}\}\,.
$$
Since $\phi_{\lambda}$ is increasing, we have that
$$
e^{-2\beta\phi_{\lambda}\left(\frac{t+\tau}{\beta}\right)}\ge e^{-2\beta\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)}
$$
for all $t<2z+\tau$. As a consequence, using also the fact that $e^{2\gamma z}\ge 1$, equation (\ref{eq_u_hat_terza}) yields
\begin{multline}\label{eq_u_hat_quarta}
c_1(z+\tau)\int_{\mathbb{R}^n}(\vert\xi\vert^2+1)e^{\frac{1}{2}\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}\vert\hat{u}(z,\xi)\vert^2d\xi\le\\
\le \tau\phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)e^{2\beta\left[\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)-\phi_{\lambda}\left(\frac{\tau}{\beta}\right)\right]}\int_{\mathbb{R}^n} e^{\vert\xi\vert^2\omega\left(\frac{1}{\vert\xi\vert^2+1}\right)}\vert\hat{u}(0,\xi)\vert^2d\xi+\\
+c_2(\sigma+\tau)e^{2\gamma\sigma}e^{2\beta\left[\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)-\phi_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)\right]}\int_{\mathbb{R}^n}(1+\vert\xi\vert^2)\vert\hat{u}(\sigma,\xi)\vert^2d\xi\,,
\end{multline}
where the constant values
$$
c_1\triangleq\frac{1}{4}\min\left\{k_A,\gamma\right\}\,,\qquad c_2\triangleq \max\left\{\gamma,k_A^{-1}\right\}
$$
have been introduced.
Dividing by $\tau$ and taking into account that $(z+\tau)/\tau>1$ and that $\phi_{\lambda}$ is negative, it is easy to see that (\ref{eq_u_hat_quarta}) implies
\begin{multline}\label{eq_u_quinta}
c_1\Vert u(z,\cdot)\Vert^2_{H_{\frac{1}{2},\omega}^1}\le \phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)e^{2\beta\left[\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)-\phi_{\lambda}\left(\frac{\tau}{\beta}\right)\right]}\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}+\\
+c_2\frac{\sigma+\tau}{\tau}e^{2\gamma\sigma}e^{2\beta\left[\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)-\phi_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)\right]}\Vert u(\sigma,\cdot)\Vert^2_{H^1}\le\\
\le\phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)e^{2\beta\left[\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)-\phi_{\lambda}\left(\frac{\tau}{\beta}\right)-\phi_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)\right]}\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}+\\
+c_2\frac{\sigma+\tau}{\tau}e^{2\gamma\sigma}e^{2\beta\left[\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)-\phi_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)\right]}\Vert u(\sigma,\cdot)\Vert^2_{H^1}\,.
\end{multline}
Moreover, with respect to $\phi_{\lambda}$, note that since $\phi_{\lambda}$ is increasing,
$$
2(z+\tau)\le\frac{\sigma}{2}+\tau\quad\Rightarrow\quad\phi_{\lambda}\left(\frac{2(z+\tau)}{\beta}\right)\le\phi_{\lambda}\left(\frac{\frac{\sigma}{2}+\tau}{\beta}\right)\,.
$$
In addition, since $\phi_{\lambda}$ is also concave,
$$
\phi_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)-\phi_{\lambda}\left(\frac{\frac{\sigma}{2}+\tau}{\beta}\right)\ge\frac{\sigma}{2\beta}\phi_{\lambda}^\prime\left(\frac{\sigma+\tau}{\beta}\right)\,.
$$
As a consequence, from (\ref{eq_u_quinta}) one obtains
\begin{multline}\label{eq_u_sesta}
c_1\Vert u(z,\cdot)\Vert^2_{H_{\frac{1}{2},\omega}^1}\le\\
\le e^{-\sigma\phi_{\lambda}^\prime\left(\frac{\sigma+\tau}{\beta}\right)}\left[\phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)e^{-2\beta\phi_{\lambda}\left(\frac{\tau}{\beta}\right)}\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}+c_2\frac{\sigma+\tau}{\tau}e^{2\gamma\sigma}\Vert u(\sigma,\cdot)\Vert^2_{H^1}\right]\,,
\end{multline}
namely
\begin{multline}\label{eq_u_settima_seconda}
\Vert u(z,\cdot)\Vert^2_{H_{\frac{1}{2},\omega}^1}\le\\
\le C e^{-\sigma\phi_{\lambda}^\prime\left(\frac{\sigma+\tau}{\beta}\right)}\left[\phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)e^{-2\beta\phi_{\lambda}\left(\frac{\tau}{\beta}\right)}\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}+\Vert u(\sigma,\cdot)\Vert^2_{H^1}\right]\,,
\end{multline}
where
$$
C=\max\left\{\frac{1}{c_1},\frac{c_2(\sigma+\tau)e^{2\gamma\sigma}}{c_1\tau}\right\}\,.
$$
Equation (\ref{eq_u_settima_seconda}) holds for all $z\in[0,\bar{\sigma}]$ and hence equation (\ref{eq_u_settima}) immediately follows.$
\blacksquare$
\subsection{Proof of Theorem~\ref{teo_finale_stab_norme_classiche}}
Proposition~\ref{prop_norma_nuova} states, in particular, that the norm of $u$ in any instant of the sub-interval $[0,\bar{\sigma}]\subset[0,\sigma]$ is bounded by a quantity depending on the value of the norm in the initial and final instants, i.e. on $\Vert u(0,\cdot)\Vert_{H_{1,\omega}^0}$ and $\Vert u(\sigma,\cdot)\Vert_{H^1}$. Nevertheless, to obtain a stability result, the right hand side term in equation (\ref{eq_u_settima_seconda}) must tend to zero when $\Vert u(0,\cdot)\Vert_{H_{1,\omega}^0}$ tends to zero, which is not immediately evident. The following lemma allows one to choose $\beta$ in such a way that (\ref{eq_u_settima_seconda}) can be written in a form from which the stability property can be obtained more easily.
\begin{lemma}\label{lem_finale}
Let $\phi$ be a solution of (\ref{eq_diff_per_phi}) with $\lambda>0$ and $q>0$ and let $\tau>0$. Let $h:]0,1[\to]q,+\infty[$ be defined by
$$
h(z)\triangleq e^{-2\tau \phi(z)/z}\phi^\prime(z)\,.
$$
The function $h$ so defined is strictly decreasing with
$$
\lim_{z\to 0}h(z)=+\infty\,,\quad \lim_{z\to 1}h(z)=q\,.
$$
$
\square$
\end{lemma}
\textbf{Proof. }The claim is easily proven by computing $h^\prime$.$
\blacksquare$
As a consequence of Lemma~\ref{lem_finale}, $h$ can be inverted and its inverse $h^{-1}:]q,+\infty[\to ]0,1[$ is strictly decreasing and
$$
\lim_{y\to+\infty}h^{-1}(y)=0\,.
$$
Now the main stability result can be proven.
\textbf{Proof of Theorem~\ref{teo_finale_stab_norme_classiche}.}
In (\ref{eq_u_settima}) of Proposition~\ref{prop_norma_nuova} we want to choose $\beta>\sigma+\tau$ in such a way that
$$
\phi^\prime\left(\frac{\tau}{\beta}\right)e^{-2\beta\phi\left(\frac{\tau}{\beta}\right)}=\Vert u(0,\cdot)\Vert_{H_{1,\omega}^0}^{-2}\,.
$$
This goal is achieved by taking
$$
\beta=\frac{\tau}{h^{-1}\left(\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}\right)}\,,
$$
provided that $\Vert u(0,\cdot)\Vert_{H_{1,\omega}^0}<q^{-1/2}$ and $\Vert u(0,\cdot)\Vert_{H^0_{1,\omega}}<h\left(\frac{\tau}{\sigma+\tau}\right)^{-1/2}$. With this choice of $\beta$, one obtains, from (\ref{eq_u_settima}),
\begin{equation}\label{eq_u_settima_ultima}
\sup_{z\in[0,\bar{\sigma}]}\Vert u(z,\cdot)\Vert_{H_{\frac{1}{2},\omega}^1}^2\le C e^{-\sigma \widehat{g}\left(\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}\right)}\left[1+\Vert u(\sigma,\cdot)\Vert^2_{H^1}\right]\,,
\end{equation}
where $\widehat{g}$ is defined by
$$
\widehat{g}(y)=\phi^\prime\left(\frac{\sigma+\tau}{\tau}h^{-1}(y^{-1})\right)\,,
$$
so that
$$
\lim_{y\to 0}\widehat{g}(y)=+\infty\,.
$$
Note, in particular, that taking $\tau=\sigma/4$ the condition $\Vert u(0,\cdot)\Vert_{H^0_{1,\omega}}<h(\tau/(\sigma+\tau))^{-1/2}$ yields $\Vert u(0,\cdot)\Vert_{H^0_{1,\omega}}<\widehat{\rho}$ where
$$
\widehat{\rho}\triangleq\min\{e^{-\tau\frac{4}{5}\phi\left(\frac{5}{4}\right)}\phi^\prime\left(\frac{5}{4}\right)^{1/2},q^{-1/2}\}\,.
$$
Note, now, that
\begin{equation}\label{eq_ultime_1}
\Vert u(z,\cdot)\Vert_{H^1}^2\le \Vert u(z,\cdot)\Vert_{H_{\frac{1}{2},\omega}^1}^2
\end{equation}
and that, for all $\nu>0$ and all $\epsilon>0$, there exists $\widetilde{C}_{\nu,\epsilon}$ such that
$$
\Vert u(0,\cdot)\Vert_{H_{1,\omega}^0}^2\le\widetilde{C}_{\nu,\epsilon}\Vert u(0,\cdot)\Vert_{H_{\nu,\epsilon}^0}^2\,.
$$
It follows that
\begin{equation}\label{eq_ultima_questa}
\sup_{z\in[0,\bar{\sigma}]}\Vert u(z,\cdot)\Vert^2_{H_1}\le C e^{-\sigma \widehat{g}\left(\widetilde{C}_{\nu,\epsilon}\Vert u(0,\cdot)\Vert^2_{H^0_{\nu,\epsilon}}\right)}\left[1+\Vert u(\sigma,\cdot)\Vert^2_{H^1}\right]\,,
\end{equation}
provided that
$$
\Vert u(0,\cdot)\Vert_{H^0_{\nu,\epsilon}}<\frac{\widehat{\rho}}{\widetilde{C}^{1/2}_{\nu,\epsilon}}\,.
$$
By defining $g(y)=\widehat{g}(\widetilde{C}_{\nu,\epsilon}y)$, equation (\ref{eq_ultima_questa}) allows one to easily obtain (\ref{eq_u_settima_ultima_norme_classiche}).$
\blacksquare$
The claim of Theorem~\ref{teo_finale_stab_norme_classiche} can be extended to the whole interval $[0,T]$.
\subsection{Proof of Theorem~\ref{teo_nuovo}}
Theorem~\ref{teo_nuovo} is proven iterating a finite number of times the estimate given by the following lemma.
\begin{lemma}\label{lemma_iterativo}
Under the same hypotheses of Theorem~\ref{teo_finale_stab_norme_classiche},
$$
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le C^\prime Ce^{-\sigma g\left(C^{\prime\prime}\Vert u(0,\cdot)\Vert^2_{L^2}\right)}\left[1+\Vert u(\sigma,\cdot)\Vert^2_{L^2}\right]\,.
$$
The constants $C^\prime$ and $C^{\prime\prime}$ depend on $n$, $k_A$, $k_B$, $k_C$, $\nu$, $\epsilon$ and $\sigma$ and tend to $+\infty$ as $\sigma$ tends to zero.$
\square$
\end{lemma}
\textbf{Proof. }Analogously to Lemma~\ref{lem_tutti gevrey}, extend $a_{i,j}$, $b_i$ and $c$ on $[-\sigma/2,T]$ and $u$ to a solution of $\mathcal{L}$ on $[-\sigma/2,T]$. Then the results of Theorem~\ref{teo_finale_stab_norme_classiche} on $[-\bar{\sigma}/2,T-\bar{\sigma}/2]$ gives
$$
\sup_{z\in[-\bar{\sigma}/2,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{H_1}\le C e^{-\sigma g\left(\Vert u(-\bar{\sigma}/2,\cdot)\Vert^2_{H^0_{\nu,\epsilon}}\right)}\left[1+\Vert u(\sigma-\bar{\sigma}/2,\cdot)\Vert^2_{H_1}\right]\,.
$$
By Lemma~\ref{lem_tutti gevrey} we obtain
\begin{multline}
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le C e^{-\sigma g\left(C^{\prime\prime}\Vert u(0,\cdot)\Vert^2_{L^2}\right)}\left[1+\Vert u(\sigma-\frac{\sigma}{16},\cdot)\Vert^2_{H^1}\right]\le\\[2mm]
\le C^\prime C e^{-\sigma g\left(C^{\prime\prime}\Vert u(0,\cdot)\Vert^2_{L^2}\right)}\left[1+\Vert u(\sigma,\cdot)\Vert^2_{L^2}\right]\,.
\end{multline}
$
\blacksquare$
Now set $G(y)\triangleq (1+D)C^\prime C e^{-\sigma g\left(C^{\prime\prime} y\right)}$ and note that $\lim_{y\to 0}G(y)=0$. We have just proven that
\begin{equation}\label{eq_con_la_G}
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le G\left(\Vert u(0,\cdot)\Vert^2_{L^2}\right)\,.
\end{equation}
Finally, let $T^\prime:0<T^\prime<T$; take $T^{\prime\prime}=(T+T^\prime)/2$ (so that $T^\prime<T^{\prime\prime}<T$). Note that $\bar{\sigma}/2=\sigma/16$ and recall that $\sigma=\min\{1/\alpha_1,T^{\prime\prime}\}$. To complete the proof of Theorem~\ref{teo_nuovo} it is sufficient to iterate inequality (\ref{eq_con_la_G}) a finite number of times. Indeed, set $T_0=0$ and, for $i\ge 0$,
$$
T_{i+1}=T_i+\frac{1}{16}\min\left\{\frac{1}{\alpha_1},T^{\prime\prime}-T_i\right\}\,.
$$
For all $i$ inequality (\ref{eq_con_la_G}) provides
$$
\sup_{z\in[T_i,T_{i+1}]}\Vert u(z,\cdot)\Vert^2_{L^2}\le G_i\left(\Vert u(T_i,\cdot)\Vert^2_{L^2}\right)\,.
$$
The result follows by noting that
$$
T_{i+1}-T_i=\frac{1}{16}\min\left\{\frac{1}{\alpha_1},T^{\prime\prime}-T_i\right\}\,,
$$
and that, for all $j$
$$
T_{j+1}=\sum_{i=0}^j\frac{1}{16}\min\left\{\frac{1}{\alpha_1},T^{\prime\prime}-T_i\right\}\,.
$$
The sequence $\left\{T_j\right\}_{j\in\mathbb{N}}$ is increasing and bounded from above by $T^{\prime\prime}$; hence it admits a limit. Let this limit be $T^\ast$; we want to show that $T^\ast=T^{\prime\prime}$. Obviously, $T^\ast\le T^{\prime\prime}$; suppose that $T^\ast<T^{\prime\prime}$, then $T^{\prime\prime}-T_i\ge T^{\prime\prime}-T^\ast>0$ and, consequently,
$$
T_{j+1}\ge \sum_{i=0}^j\frac{1}{16}\min\left\{\frac{1}{\alpha_1},T^{\prime\prime}-T^\ast\right\}
$$
for all $j$, yielding $\lim_{j\to\infty}T_j=+\infty$, which is a contradiction. Therefore it must be $T^\ast=T^{\prime\prime}$ which means that $T_j>T^\prime$ for some $j$.$
\blacksquare$
\section{A specific case}\label{sec_part}
In this section the explicit expression of the function $G$ appearing in the statement of Theorem~\ref{teo_nuovo} is computed when the modulus of continuity $\omega:]0,e^{1-e}]\to\mathbb{R}$ is defined by
$$
\omega(s)=s(1-\log s)\log(1-\log s)\,.
$$
Note that $\omega$ is increasing, fulfils the Osgood condition but is not a Log-Lipschitz function. Consider, now, the function $\theta:[e^{e-1},+\infty[\to [0,+\infty[$ defined by
\begin{equation*}
\theta(\tau)=\int_{1/\tau}^{e^{1-e}}\frac{1}{\omega(s)}ds=\log(\log(1+\log\tau))
\end{equation*}
and the function $\psi_{\lambda,q}:]0,1]\to [e^{e-1},+\infty[$ defined by
\begin{equation}\label{eq_def_psi}
\psi_{\lambda,q}(y)=\theta^{-1}(-\lambda q\log y)=\exp(e^{y^{-\lambda q}}-1)\,.
\end{equation}
From the definition of $\psi_{\lambda,q}$, one can easily check that it is strictly decreasing and that
\begin{equation}
\psi_{\lambda,q}^\prime(y)=\exp\left(e^{y^{-\lambda q}}-1\right)e^{y^{-\lambda q}}(-\lambda q)y^{-\lambda q-1}=-\frac{\lambda q}{y}\left(\psi_{\lambda,q}(y)\right)^2\omega\left(\frac{1}{\psi_{\lambda,q}(y)}\right)\,,
\end{equation}
hence the function $\phi_{\lambda,q}:]0,1]\to]-\infty,0]$ defined by
\begin{equation*}\label{eq_phi_lambda_integrale}
\phi_{\lambda,q}(y)=-q\int_y^1 \psi_{\lambda,q}(z)dz
\end{equation*}
is such that
$$
\phi_{\lambda,q}^{\prime\prime}(y)=-\frac{\lambda}{y}\left(\phi^\prime_{\lambda,q}(y)\right)^2\omega\left(\frac{q}{\phi_{\lambda,q}^\prime(y)}\right)
$$
i.e. $\phi_{\lambda,q}$ is a solution of equation (\ref{eq_diff_per_phi}). Note, as an accessory result, that
$$
\phi_{\lambda,q}^\prime(y)=q\psi_{\lambda,q}(y)\ge q e^{e-1}\,.
$$
From now on, we choose $q=k_A$ and $\lambda\ge\bar{\lambda}$ as in the proof of Proposition~\ref{prop_con_lettere} and, for the sake of a simpler notation, we write $\phi_\lambda$ and $\psi_\lambda$ instead of $\phi_{\lambda,q}$ and $\psi_{\lambda,q}$, respectively. Proposition~\ref{prop_norma_nuova} then, gives
\begin{equation}\label{eq_stima_seconda}
\sup_{z\in[0,\bar{\sigma}]}\Vert u(z,\cdot)\Vert_{L^2}^2\le C e^{-\sigma\phi_\lambda^\prime\left(\frac{\sigma+\tau}{\beta}\right)}\phi^\prime_\lambda\left(\frac{\tau}{\beta}\right)\left[e^{-2\beta\phi_\lambda\left(\frac{\tau}{\beta}\right)}\Vert u(0,\cdot)\Vert^2_{H_{1,\omega}^0}+\Vert u(\sigma,\cdot)\Vert_{H^1}^2\right]\,.
\end{equation}
Arguing as in Lemma~\ref{lemma_iterativo} one may obtain
\begin{equation}\label{eq_sessanta}
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le C e^{-\sigma\phi^\prime_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)}\phi_{\lambda}^\prime\left(\frac{\tau}{\beta}\right)\left[e^{-2\beta\phi_\lambda\left(\frac{\tau}{\beta}\right)}\Vert u(0,\cdot)\Vert^2_{L^2}+\Vert u(\sigma,\cdot)\Vert^2_{L^2}\right]\,.
\end{equation}
We, now, introduce the function $\Lambda:[0,+\infty[\to]-\infty,0]$ defined by
\begin{equation}\label{eq_def_Lambda}
\Lambda(y)=y\phi_\lambda\left(\frac{1}{y}\right)
\end{equation}
which is strictly decreasing and, hence, invertible. Its inverse, $\Lambda^{-1}:]-\infty,0]\to[1,+\infty[$ is also strictly decreasing. We want to find a value of $\beta>\sigma+\tau$ such that
$$
e^{-2\tau\Lambda\left(\frac{\beta}{\tau}\right)}=\Vert u(0,\cdot)\Vert^{-2}_{L^2}\,.
$$
Easy computations yield
\begin{equation}\label{eq_espressione_beta}
\beta=\tau\Lambda^{-1}\left(\frac{1}{\tau}\log\Vert u(0,\cdot)\Vert_{L^2}\right)\,.
\end{equation}
Note that this value of $\beta$ is larger than $\sigma+\tau$ if and only if
$$
\Vert u(0,\cdot)\Vert_{L^2}<e^{\tau\Lambda\left(\frac{\sigma+\tau}{\tau}\right)}\triangleq \rho\,.
$$
In particular, if $\tau=\sigma/4$ then $\rho=e^{\tau\Lambda\left(5/4\right)}$; we show below that a smaller value of $\tau$ performs better. Note, now, that for $\zeta>1$ and $y<1/\zeta$
$$
\log\left(\psi_{\lambda,q}(\zeta y)\right)=\left(\log\left(\psi_{\lambda,q}(y)\right)+1\right)^{\zeta^{-\lambda q}}-1\,;
$$
therefore
\begin{equation}\label{eq_sessanta4}
\phi^\prime_{\lambda}\left(\frac{\sigma+\tau}{\beta}\right)=\frac{k_A}{e}\exp\left[\left(\log\left(\psi_{\lambda,k_A}\left(\frac{\tau}{\beta}\right)\right)+1\right)^{\delta_1}\right]\,,
\end{equation}
where $\delta_1=((\sigma+\tau)/\tau)^{-\lambda k_A}$. From (\ref{eq_sessanta}), (\ref{eq_espressione_beta}) and (\ref{eq_sessanta4}) one obtains
\begin{multline}\label{eq_con_log_esp_prima}
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le C k_A \psi_{\lambda,k_A}\left(\frac{1}{\Lambda^{-1}\left(\frac{1}{\tau}\log\Vert u(0,\cdot)\Vert_{L^2}\right)}\right)\times\\[2mm]
\times \exp\left\{-\frac{\sigma k_A}{e}\exp\left[\left(\log\left(\psi_{\lambda,k_A}\left(\frac{1}{\Lambda^{-1}\left(\frac{1}{\tau}\log \Vert u(0,\cdot)\Vert_{L^2}\right)}\right)\right)+1\right)^{\delta_1}\right]\right\}\times\\[2mm]
\times\left(1+\Vert u(\sigma,\cdot)\Vert^2_{L^2}\right)\,.
\end{multline}
Consider, now, the function $F$ defined by
$$
F(\zeta)\triangleq (1+D)C k_A \zeta \exp\left\{-\frac{\sigma k_A}{2e}\exp\left[\left(\log\zeta+1\right)^{\delta_1}\right]\right\}
$$
and note that
$$
\lim_{\zeta\to+\infty}F(\zeta)=0\,.
$$
Indeed, let $\epsilon>0$. It is easy to check that
\begin{multline*}
F(\zeta)<\epsilon \Leftrightarrow\exp\left\{-\frac{\sigma k_A}{2e}\exp\left[\left(\log \zeta+1\right)^{\delta_1}\right]\right\}\le \frac{\epsilon \zeta^{-1}}{C k_A(1+D)}\Leftrightarrow\\[2mm]
\Leftrightarrow -\frac{\sigma k_A}{2e}\exp\left[\left(\log \zeta+1\right)^{\delta_1}\right]\le -\log\zeta+\log\frac{\epsilon}{C k_A(1+D)}\Leftrightarrow\\[2mm]
\Leftrightarrow\frac{\sigma k_A}{2e}\exp\left[\left(\log\zeta+1\right)^{\delta_1}\right]\ge \log \zeta-\log\frac{\epsilon}{Ck_A(1+D)}\Leftrightarrow\\[2mm]
\Leftrightarrow \exp\left[\left(\log\zeta+1\right)^{\delta_1}\right]\ge \frac{2e}{\sigma k_A}\log\zeta-\frac{2e}{\sigma k_A}\log\frac{\epsilon}{C k_A(1+D)}\Leftrightarrow\\[2mm]
\Leftrightarrow\left(\log\zeta+1\right)^{\delta_1}\ge \log\left(\frac{2e}{\sigma k_A}\log\zeta-\frac{2e}{\sigma k_A}\log\frac{\epsilon}{C k_A(1+D)}\right)\,,
\end{multline*}
which is true for sufficiently large $\zeta$. Analogously, for sufficiently small $\Vert u(0,\cdot)\Vert_{L^2}$, one has
\begin{multline*}
(1+D)C k_A\psi_{\lambda,k_A}\left(\frac{1}{\Lambda^{-1}\left(\frac{1}{\tau}\log\Vert u(0,\cdot)\Vert_{L^2}\right)}\right)\times\\[2mm]
\times\exp\left\{-\frac{\sigma k_A}{2e}\exp\left[\left(\log\left(\psi_{\lambda,k_A}\left(\frac{1}{\Lambda^{-1}\left(\frac{1}{\tau}\log \Vert u(0,\cdot)\Vert_{L^2}\right)}\right)\right)+1\right)^{\delta_1}\right]\right\}\le 1\,.
\end{multline*}
So, if $\Vert u(0,\cdot)\Vert_{L^2}\le \widetilde{\rho}$, one has
\begin{equation}\label{eq_sessantasei}
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le \exp\left\{-\frac{\sigma k_A}{2e}\exp\left[\left(\log\left(\psi_{\lambda,k_A}\left(\frac{1}{\Lambda^{-1}\left(\frac{1}{\tau}\log\Vert u(0,\cdot)\Vert\right)}\right)\right)+1\right)^{\delta_1}\right]\right\}\,.
\end{equation}
Now, since
$$
\lim_{\zeta\to +\infty}\frac{\psi_{\lambda,k_A}\left(\frac{1}{\zeta}\right)}{\vert\Lambda(\zeta)\vert}=+\infty
$$
(see Lemma~\ref{lemma_per_il_logaritmo} in Appendix) for $\Vert u(0,\cdot)\Vert_{L^2}$ sufficiently small one has
$$
\psi_{\lambda,k_A}\left(\frac{1}{\Lambda^{-1}\left(\frac{1}{\tau}\left(\log\Vert u(0,\cdot)\Vert_{L^2}\right)\right)}\right)\ge\frac{1}{\tau}\left\vert \log\Vert u(0,\cdot)\Vert_{L^2}\right\vert\,.
$$
As a consequence, (\ref{eq_sessantasei}) yields
\begin{equation}\label{eq_sessantasette}
\sup_{z\in[0,\bar{\sigma}/2]}\Vert u(z,\cdot)\Vert_{L^2}^2\le \exp\left\{-\frac{\sigma k_A}{2e}\exp\left[\left(\log\left(\frac{1}{\tau}\left\vert \log\Vert u(0,\cdot)\Vert_{L^2}\right\vert\right)\right)^{\delta_1}\right]\right\}
\end{equation}
which may also be rewritten as
\begin{equation}\label{eq_sessantotto}
\sup_{z\in[0,\bar{\sigma}_1]}\Vert u(z,\cdot)\Vert_{L^2}^2\le \exp\left\{-\frac{\sigma_1 k_A}{2e}\exp\left[\left(\log\left(\frac{1}{2\tau_1}\left\vert \log\Vert u(0,\cdot)\Vert_{L^2}^2\right\vert\right)\right)^{\delta_1}\right]\right\}\,,
\end{equation}
where $\bar{\sigma}_1=\sigma_1/16$. Now, choose
$$
\tau_1=\min\left\{\frac{\sigma_1}{4},\frac{\sigma_1 k_A}{4e}\right\}
$$
and iterate the above arguments on $[\bar{\sigma}_1,T]$, finding
\begin{equation}\label{eq_sessantanove}
\sup_{z\in[\bar{\sigma}_1,\bar{\sigma}_2]}\Vert u(z,\cdot)\Vert_{L^2}^2\le \exp\left\{-\frac{\sigma_2 k_A}{2e}\exp\left[\left(\log\left(\frac{1}{2\tau_2}\left\vert \log\Vert u(\bar{\sigma}_1,\cdot)\Vert_{L^2}^2\right\vert\right)\right)^{\delta_2}\right]\right\}\,,
\end{equation}
where $\bar{\sigma}_2=\sigma_2/16$ and $\tau_2=\min\left\{\frac{\sigma_2}{4},\frac{\sigma_2 k_A}{4e}\right\}$. Note that
$$
\sigma_1=\min\left\{\frac{1}{\alpha_1},T^{\prime\prime}\right\}\,,\quad \sigma_2=\min\left\{\frac{1}{\alpha_1},T^{\prime\prime}-\sigma_1\right\}\,;
$$
hence $\sigma_2\le\sigma_1$ and $\tau_2\le\tau_1$. As a consequence,
\small
\begin{multline*}
\sup_{z\in[\bar{\sigma}_1,\bar{\sigma}_2]}\Vert u(z,\cdot)\Vert_{L^2}^2\le\\[2mm]
\exp\left\{-\frac{\sigma_2 k_A}{2e}\exp\left[\left(\log\left\vert\frac{1}{2\tau_2}\log\left(\exp\left\{-\frac{\sigma_1 k_A}{2e}\exp\left[\left(\log\frac{\left\vert\log\Vert u(0,\cdot)\Vert^2_{L^2}\right\vert}{2\tau_1}\right)^{\delta_1}\right]\right\}\right)\right\vert\right)^{\delta_2}\right]\right\}=\\[2mm]
=\exp\left\{-\frac{\sigma_2 k_A}{2e}\exp\left[\left(\log\left\vert-\frac{\sigma_1 k_A}{4e\tau_2}\exp\left[\left(\log\frac{\left\vert\log\Vert u(0,\cdot)\Vert^2_{L^2}\right\vert}{2\tau_1}\right)^{\delta_1}\right]\right\vert\right)^{\delta_2}\right]\right\}=\\[2mm]
=\exp\left\{-\frac{\sigma_2 k_A}{2e}\exp\left[\left(\log\frac{\sigma_1 k_A}{4e\tau_2}+\left(\log\frac{\left\vert\log\Vert u(0,\cdot)\Vert^2_{L^2}\right\vert}{2\tau_1}\right)^{\delta_1}\right)^{\delta_2}\right]\right\}\le\\[2mm]
\le \exp\left\{-\frac{\sigma_2 k_A}{2e}\exp\left[\left(\log\frac{1}{2\tau_1}\left\vert \log\Vert u(0,\cdot)\Vert^2_{L^2}\right\vert\right)^{\delta_1\delta_2}\right]\right\}\,,
\end{multline*}
\normalsize
where the last inequality holds since $\sigma_1 k_A\ge 4e\tau_2$. Merging the estimates obtained for the two intervals, yields
$$
\sup_{[0,\bar{\sigma}_2]}\Vert u(z,\cdot)\Vert^2_{L^2}\le \exp\left\{-\frac{\sigma_2 k_A}{2e}\exp\left[\log\frac{1}{2\tau_1}\left\vert\log\Vert u(0,\cdot)\Vert^2_{L^2}\right\vert\right]\right\}\,,
$$
which has the same form as the inequality obtained in $[0,\bar{\sigma}_1]$. Hence, if $T^{\prime\prime}$ is such that $0<T^\prime<T^{\prime\prime}<T$, iterating a finite number of times one obtains an estimate on $[0,T^\prime]$ of the form
$$
\sup_{[0,T^\prime]}\Vert u(z,\cdot)\Vert^2_{L^2}\le\exp\left\{-\widetilde{\sigma}\exp\left[\left(\log\frac{1}{2\tilde{\tau}}\left\vert\log\Vert u(0,\cdot)\Vert^2_{L^2}\right\vert\right)^{\widetilde{\delta}}\right]\right\}\,.
$$
\section*{Appendix}
\begin{lemma}\label{lemma_per_il_logaritmo}
The functions $\psi_{\lambda, k_A}$ (equation (\ref{eq_def_psi})) and $\Lambda$ (equation (\ref{eq_def_Lambda})) are such that
$$
\lim_{\zeta\to+\infty}\frac{\psi_{\lambda, k_A}\left(\frac{1}{\zeta}\right)}{\vert\Lambda(\zeta)\vert}=+\infty\,.
$$
{\bf Proof. }Note that
\begin{multline*}
\lim_{\zeta\to+\infty}\frac{\psi_{\lambda, k_A}\left(\frac{1}{\zeta}\right)}{\vert\Lambda(\zeta)\vert} = \lim_{\rho\to 0}-\frac{\rho\psi_{\lambda,k_A}(\rho)}{\phi_{\lambda,k_A}(\rho)}=\lim_{\rho\to 0}-\frac{\psi_{\lambda,k_A}(\rho)+\rho\psi^\prime_{\lambda,k_A}(\rho)}{k_A \psi_{\lambda,k_A}(\rho)}=\\[2mm]
=-\frac{1}{k_A}-\lim_{\rho\to 0}\frac{\rho \psi_{\lambda, k_A}^\prime(\rho)}{k_A \psi_{\lambda, k_A}(\rho)}=-\frac{1}{k_A}+\lim_{\rho\to 0}\frac{1}{k_A}\lambda \psi_{\lambda, k_A}(\rho)\omega\left(\frac{1}{\psi_{\lambda, k_A}(\rho)}\right)=\\[2mm]
=-\frac{1}{k_A}+\lim_{q\to 0}\frac{\lambda}{k_A}\frac{\omega(q)}{q}=-1+\lim_{q\to 0}(1-\log q)\log(1-\log q)=+\infty\,.
\end{multline*}
\end{lemma}
\end{document}
\begin{document}
\begin{frontmatter}
\title{Coexistence of full which-path information and interference in Wheeler's delayed choice experiment with photons}
\author[embd]{K. Michielsen},
\author[rug]{S. Yuan},
\author[rug]{S. Zhao},
\author[rug]{F. Jin},
\author[rug]{H. De Raedt\thanksref{thank1}}
\address[embd]{EMBD, Vlasakker 21, 2160 Wommelgem, Belgium}
\address[rug]{Department of Applied Physics, Zernike Institute for Advanced Materials,
University of Groningen, Nijenborgh 4, 9747 AG Groningen, The Netherlands}
\thanks[thank1]{
Corresponding author.
E-mail: [email protected]}
\begin{abstract}
We present a computer simulation model
that is a one-to-one copy of an experimental realization of Wheeler's delayed choice experiment
that employs a single photon source and a Mach-Zehnder interferometer composed of a 50/50 input beam splitter and
a variable output beam splitter with adjustable reflection coefficient $R$ (V. Jacques {\sl et al.}, Phys. Rev. Lett. 100, 220402 (2008)).
For $0\le R\le 0.5$, experimentally measured values of the interference visibility $V$ and the path
distinguishability $D$, a parameter quantifying the which-path information WPI, are found to fulfill the complementary relation
$V^2+D^2\le 1$, thereby allowing to obtain partial WPI while keeping interference with limited visibility.
The simulation model that is solely based on experimental facts,
that satisfies Einstein's criterion of local causality and that does not rely on any concept of quantum theory or of probability theory,
reproduces quantitatively the averages calculated from quantum theory.
Our results prove that it is possible to give a particle-only description of the experiment, that one can have full WPI
even if $D=0$, $V=1$ and therefore that the relation $V^2+D^2\le 1$ cannot be regarded as quantifying the notion of complementarity.
\end{abstract}
\begin{keyword}
Wheeler's delayed choice \sep complementarity \sep wave-particle duality \sep
computational techniques \sep quantum theory
\PACS 02.70.-c \sep 03.65.-w
\end{keyword}
\end{frontmatter}
\section{Introduction}
Particle-wave duality, a concept of quantum theory, attributes to photons the
properties of both wave and particle behavior depending upon the circumstances of
the experiment~\cite{HOME97}.
The particle behavior of photons has been shown in an experiment composed of a
single beam splitter (BS) and a source emitting single photons and pairs of
photons~\cite{GRAN86}. The wave character has been demonstrated in a single-photon Mach-Zehnder
interferometer (MZI) experiment~\cite{GRAN86}.
In 1978, Wheeler proposed a gedanken
experiment~\cite{WHEE83}, a variation on Young's double slit experiment, in which the
decision to observe wave or particle behavior is made after the photon has
passed the slits. The pictorial description of this experiment defies common sense:
The behavior of the photon in the past is said to be changing from a particle to a wave or vice versa.
\begin{figure*}
\caption{Schematic diagram of the experimental setup for Wheeler's delayed-choice gedanken
experiment~\cite{JACQ08}.}
\label{wheeler}
\end{figure*}
Recently, Jacques {\sl et al.}
reported on an experimental realization of Wheeler's delayed choice experiment using
a single photon source and a MZI composed of a 50/50 input BS and a variable output
BS with adjustable reflection coefficient $R$~\cite{JACQ08}, a modification of the experiment presented in \cite{JACQ07} in
which two 50/50 BSs were used. A schematic picture of
the experimental set-up is shown in Fig.~\ref{wheeler}. The reflection coefficient $R$
of the variable beam splitter (BS$_{output}$) can be controlled by a
voltage applied to an electro-optic modulator (EOM), making it act as a variable wave plate.
This can be done after each photon has entered the MZI.
The phase-shift $\Phi$ between the two arms of the MZI is varied by tilting the polarizing beam splitter (PBS)
of the variable output BS.
For $0\leq R\leq 0.5$ measured values of the interference
visibility $V$~\cite{BORN64} and the path distinguishability $D$~\cite{JACQ08}, a parameter that quantifies the which-path information (WPI),
were found to fulfill the complementary relation $V^2+D^2\leq 1$.
The extreme situations ($V=0$, $D=1$) and ($V=1$, $D=0$), obtained for $R=0$ and $R=0.5$, give full and no WPI, associated
with particlelike and wavelike behavior, respectively.
By choosing $0<R<0.5$ Jacques {\sl et al.} claim to have obtained partial WPI while keeping interference with limited visibility~\cite{JACQ08}, thereby
having accomplished an affirmative delayed choice test of complementarity or wave-particle duality as it is often phrased.
Although the detection events (detector ``clicks'') are the only experimental facts,
the pictorial description of Jacques {\sl et al.}~\cite{JACQ08,JACQ07} is as follows:
Linearly polarized single photons are sent through a 50/50 PBS (BS$_{input}$),
spatially separating photons with S polarization (path 0) and P polarization (path 1) with equal frequencies.
After the photon has passed BS$_{input}$, but before the photon
enters the variable BS$_{output}$ the decision to apply a voltage to the EOM is made.
The PBS of BS$_{output}$ merges the paths of the orthogonally polarized photons travelling paths 0 and 1 of the MZI, but
afterwards the photons can still be unambiguously identified by their polarizations.
If no voltage is applied to the EOM then $R=0$ and the EOM can be regarded as doing nothing to the photons.
Because the polarization eigenstates of the Wollaston prism correspond to the P and S polarization
of the photons travelling path 0 and 1 of the MZI, each detection event
registered by one of the two detectors $D_0$ or $D_1$
is associated with a specific path (path 0 or 1, respectively).
Both detectors register an equal amount of detection events, independent of the phase shift $\Phi$ in the MZI.
This experimental setting, corresponding to the open configuration of the MZI, clearly gives full WPI about the photon within
the interferometer (particle behavior), characterized by $D=1$.
In this case no interference effects are observed, corresponding with a zero interference visibility ($V=0$).
When a voltage is applied to the EOM, then $R\ne 0$ (see Eq.~(2) in ~\cite{JACQ08}) and
the EOM acts as a wave plate rotating the polarization of the incoming photon by an angle depending on $R$.
The Wollaston prism partially recombines the polarization of the photons that have travelled along
different optical paths with phase difference $\Phi$ (closed configuration), and interference appears ($V\ne0$), a result
expected for a wave. The WPI is partially washed out, up to being totally erased when $R=0.5$ ($D=0$, $V=1$).
The outcome of delayed-choice experiments~\cite{JACQ08,JACQ07,HELL87,BALD89,LAWS96,KAWA98,KIM00}, that is the average results of many detection
events, is in agreement with wave theory (Maxwell or quantum theory).
However, the pictorial description explaining the experimental facts~\cite{JACQ07} defies common sense: The decision to apply a voltage
to the EOM after the photon left BS$_{input}$ but before it passes BS$_{output}$,
influences the behavior of the photon in the past and changes the
representation of the photon from a particle to a wave~\cite{JACQ07}.
Although on one hand quantum theory can be used to describe the final outcome of this type of experiments
(the average results of many detection events),
on the other hand it does not describe single events~\cite{HOME97}.
Therefore, it should not be a surprise that the application of concepts
of quantum theory to the domain of individual events may lead to conclusions
that are at odds with common sense.
Although not applying this reasoning to describe this type of experiments could prevent us from making nonsensical
conclusions, this unfortunately would not give us a single clue as how to explain the fact that individual events are observed
experimentally and, when collected over a sufficiently long time, yield averages that agree with quantum theory.
Since no theory seems to exist that can give a sensical description of the ``whole'' experiment,
we adopted the idea to search for algorithms that could mimic (simulate) the detection events
and experimental processes, including for example the random switching of the EOM for each photon sent into the interferometer.
We moreover require that the algorithms used to simulate the action of an optical element, such as a BS or wave plate, on a photon should
be independent of the experimental setup. In other words, the algorithms to simulate an optical component should be the same for all
identical optical components within the same experiment but also within a different experiment.
Hence, first solving the Schr\"odinger equation for a given experimental configuration and then simply generating events according
to the resulting probability distribution is not what we have in mind when we perform an event-by-event simulation of the experiment.
Similarly, first calculating the quantum potential (which requires the solution of the Schr\"odinger equation)
and then solving for the Bohm trajectories
is a different kind of event-by-event simulation than the one we describe in this paper.
In this paper, the event-by-event simulation algorithm reproduces the results of quantum theory,
without first solving a wave equation.
In this paper, we describe a model that, when implemented
as a computer program, performs an event-by-event simulation
of Wheeler's delayed-choice experiment.
Every essential component of the laboratory experiment
(PBS, EOM, HWP, Wollaston prism, detector) has a counterpart in the
algorithm.
The data is analyzed by counting detection events, just like in the experiment~\cite{JACQ08,JACQ07}.
The simulation model is solely based on experimental facts, satisfies Einstein's criterion
of local causality and does not rely on any concept of quantum theory or of probability theory.
Nevertheless, our simulation model reproduces the averages obtained from the quantum theoretical
description of Wheeler's delayed choice experiment but as our approach does
not rely on concepts of quantum theory and gives a description on the
level of individual events, it provides a description of the experimental facts that
does not defy common sense.
In a pictorial description of our simulation model, we may speak about ``photons'' generating
the detection events. However, these so-called photons, as we will call them in the sequel,
are elements of a model or theory for the real laboratory experiment only.
The experimental facts are the settings of the various apparatuses and
the detection events. What happens in between activating the source and the registration of the detection
events is not measured and is therefore not known.
Although we always have full WPI of the photons
in the closed configuration of the interferometer (we can always track the photons during the simulation),
the photons build up an interference
pattern at the detector. The appearance of an interference pattern is
commonly considered to be characteristic for a wave. In this paper, we demonstrate
that, as in experiment, it can also be built up by many photons.
These photons have full WPI, never directly communicate with each other
and arrive one by one at a detector.
The work described in this paper elaborates on the work described in~\cite{ZHAO08a} to simulate the experiment
reported in~\cite{JACQ07}.
The simulation model is built on earlier work~\cite{RAED05d,RAED05b,RAED05c,MICH05,RAED06c,RAED07a,RAED07b,RAED07c,ZHAO08b,ZHAO08c} that
demonstrated that it may be possible to simulate quantum phenomena on the level of individual events
without invoking a single concept of quantum theory.
Specifically, we have demonstrated that locally-connected networks of processing units
with a primitive learning capability can simulate event-by-event,
the single-photon beam splitter and MZI experiments of Grangier {\sl et al.}~\cite{GRAN86}
and Einstein-Podolsky-Rosen experiments with photons~\cite{ASPE82a,ASPE82b,WEIH98}.
Furthermore, we have shown that this approach can be generalized
to simulate universal quantum computation by an event-by-event process~\cite{RAED05c,MICH05}.
Our event-by-event simulation approach rigorously satisfies
Einstein's criterion of local causality and builds up the final outcome that agrees
with quantum theory event-by-event, as observed in real experiments.
\section{Simulation model}
The simulation algorithm can be viewed as a message-processing and message-passing
process: It routes messengers one by one through a network of units that process messages.
The messengers may be regarded as ``particles''.
These messengers carry a message which contains information about for example the relative time the particle traveled,
its polarization, its color, its velocity, and so on.
In other words, the message represents a so-called variable property of the particle that can
be manipulated and measured given particular experimental settings.
The components of the experimental setup
such as the BS, the wave plates, the Wollaston prism and so on are so-called processing units
that interpret and manipulate the messages carried by the particles. These processing
units are put in a network that represents the complete experimental setup.
Since at any given time there is only one messenger being routed through the whole network,
there is no direct communication between the messengers. The only form of communication
is through the processing units when the messengers are routed through the network.
The model satisfies the intuitive notion of local causality.
In general, processing units consist of an input stage,
a transformation stage, an output stage and have an internal vector representing their internal state.
The input (output) stage may have several channels
at (through) which messengers arrive (leave).
Some processing units are simpler in the sense that the input stage
is not necessary for the proper functioning of the device.
As a messenger arrives at an input channel of a processing unit,
the input stage updates its internal vector, and
sends the message, represented by a vector, together with its internal vector
to the transformation stage that implements the operation of the particular device.
Then, a new message is sent to the output stage,
using a pseudo-random number to select the output channel
through which the messenger will leave the unit.
We use pseudo-random numbers to mimic the apparent unpredictability of the
experimental data only. The use of pseudo-random numbers is merely convenient, not essential.
In the experimental realization of Wheeler's delayed choice experiment by
Jacques {\sl et al.}~\cite{JACQ08} linearly polarized single photons
are sent through a PBS that
together with a second, movable, variable output PBS with adjustable reflectivity $R$ forms an interferometer (see Fig.~\ref{wheeler}).
The basic idea now is that we have to construct a model for the messengers representing the photons and
for the processing units representing the optical components in the experimental setup.
We require that the processing units for identical optical components should be reusable within the same and within different experiments.
The network of processing units is a one-to-one
image of the experimental setup~\cite{JACQ08} and is shown in Fig.~\ref{wheeler}.
In what follows we describe some elements of our model in more detail.
Additional information can be found in~\cite{ZHAO08a}.
\subsection{Messengers}
In a pictorial description of the experiment the photons can be
regarded as particles playing the role of messengers.
Each messenger carries a (variable) message which contains information about its
phase and polarization.
The phase combines information about the frequency of the light source and
the time that particles need to travel a given path.
However, no explicit information about distances and frequencies is required
since we can always work with relative phases.
\begin{figure*}
\caption{Diagram of a DLM-based processing unit that performs an event-based simulation of a
polarizing beam splitter (PBS). The solid lines represent the
input and output channels of the PBS. The presence of a message is indicated
by an arrow on the corresponding channel line. The dashed lines indicate the
data flow within the PBS.}
\label{PBS}
\end{figure*}
The information carried by the messenger can be represented by a
six-dimensional unit vector ${\bf y}_{k,n}=( \cos \psi
_{k,n}^{H},\sin \psi _{k,n}^{H},\cos \psi _{k,n}^{V},\sin \psi
_{k,n}^{V},\cos \xi _{k,n},\sin \xi _{k,n}) $.
The superscript H (V) refers to the horizontal (vertical)
component of the polarization
and $\psi _{k,n}^{H}$, $\psi _{k,n}^{V}$, and $\xi _{k,n}$
represent the phases and polarization of the photon, respectively.
It is evident that the representation used here maps one-to-one
to the plane-wave description of a classical electromagnetic field~\cite{BORN64},
except that we assign these properties to each individual photon, not to a wave.
The subscript $n\geq 0$ numbers
the consecutive messages and $k=0,1$ labels the channel of the PBS
at which the message arrives (see below).
Since in this paper we will demonstrate explicitly that in our model photons always have full WPI even if
interference is observed, we give the messengers one extra label, the path label having the value 0 or 1.
The information contained in this label is not accessible in the experiment~\cite{JACQ08}.
We only use it to track the photons in the network. The path label is set in the input BS and remains
unchanged until detection. Therefore we do not consider this label in the description of the processing units
but take it into account when we detect the photons.
\subsection{Polarizing beam splitter}
From classical electrodynamics we know that if an electric field is applied to a dielectric material
the material becomes polarized~\cite{BORN64}. The polarization ${\bf P({\bf k},t)}$ is given by
\begin{equation}
{\bf P}({\bf k},t)=\int_0^t\chi({\bf k},u){\bf E}({\bf k},t-u) du,
\label{polarization}
\end{equation}
where ${\bf E}({\bf k},t)$ denotes the electric field vector,
${\bf k}$ is the wave vector, and $\chi$ is the linear response function~\cite{BORN64}.
From Eq.~(\ref{polarization}) it is evident
that the dielectric material shows some kind of memory effect because the response (the polarization)
of the material to the applied electric field is a function of both present and past values of the electric field.
We use this kind of memory effect in our algorithm to model the PBS.
The processor that performs the event-by-event simulation of a PBS is depicted in Fig.~\ref{PBS}.
It consists of an input stage, a simple deterministic learning machine (DLM)~\cite{RAED05d,RAED05b,RAED05c,MICH05},
a transformation stage (T), an output stage (O) and
has two input and two output channels labeled with $k=0,1$.
We now define the operation of each stage explicitly.
\begin{itemize}
\item{Input stage: The DLM receives a message on either input channel 0 or 1,
never on both channels simultaneously. The arrival of a message on channel 0
(1) is named a 0 (1) event. The input events are represented by the vectors
${\bf v}_{n}=(1,0)$ or ${\bf v}_{n}=(0,1)$ if the $n$th
event occurred on channel 0 or 1, respectively.
The DLM has six internal registers ${\bf Y}_{k,n}^{H}=(
C_{k,n}^{H},S_{k,n}^{H}) ,$ ${\bf Y}_{k,n}^{V}=(
C_{k,n}^{V},S_{k,n}^{V}) ,$ ${\bf Y}_{k,n}^{P}=(
C_{k,n}^{P},S_{k,n}^{P}) $ and one internal vector ${\bf x}
_{n}=( x_{0,n},x_{1,n}) $, where $x_{0,n}+x_{1,n}=1$
and $x_{k,n}\geq 0$ for $k=0,1$ and all $n$. These seven two-dimensional vectors are labeled by the
message number $n$ because their contents are updated every time the DLM
receives a message.
Note that the DLM stores information about the last message only.
The information carried by earlier messages is overwritten by
updating the internal registers.
Upon receiving the $(n+1)$th input event, the DLM performs the following steps:
It stores the first two elements of message ${\bf y}_{k,n+1}$ in
its internal register ${\bf Y}_{k,n+1}^{H}=(
C_{k,n+1}^{H},S_{k,n+1}^{H}) $, the middle two elements
of ${\bf y}_{k,n+1}$ in
${\bf Y}_{k,n+1}^{V}=( C_{k,n+1}^{V},S_{k,n+1}^{V})$,
and the last two elements of ${\bf y}_{k,n+1}$
in
${\bf Y}_{k,n+1}^{P}=( C_{k,n+1}^{P},S_{k,n+1}^{P})$.
Then, it updates its internal vector according to the rule~\cite{RAED05d}
\begin{equation}
x_{i,n+1}=\alpha x_{i,n}+( 1-\alpha ) \delta _{i,k},
\label{update}
\end{equation}
where $0<\alpha <1$ is a parameter that controls the learning process~\cite{RAED05d}.
Note that by construction $x_{0,n+1}+x_{1,n+1}=1$, $x_{0,n+1}\geq 0$ and $x_{1,n+1}\geq 0$.
From the solution of Eq.~(\ref{update}),
\begin{equation}
{\bf x}_n=\alpha^n {\bf x}_0 +(1-\alpha)\sum_{j=1}^{n-1}\alpha^{n-2-j}{\bf v}_j,
\end{equation}
the correspondence to the expression for the polarization in classical electrodynamics Eq.~(\ref{polarization})
can be seen. The vector ${\bf v}$ plays the role of the electric field vector ${\bf E}$
and the internal vector ${\bf x}$ plays the role of the polarization ${\bf P}$.
Hence, one could say that the internal vector ${\bf x}$ is the response of the PBS to the incoming messages (photons)
represented by the vectors ${\bf v}$. Therefore the PBS ``learns'' so to speak from the information carried
by the photons. The characteristics of the learning process depend on the parameter $\alpha$
(corresponding to the response function).
Equation~(\ref{update}) is the simplest learning rule we could think of.
If experimental measurements for a single PBS would require another maybe more complicated rule to simulate the experimental outcome
then we could modify the learning rule but given the information we have right now Eq.~(\ref{update}) suffices.
}
\item{Transformation stage:
The second stage (T) of the DLM-based processor takes as input the
data stored in the six internal registers ${\bf Y}
_{k,n+1}^{H}=( C_{k,n+1}^{H},S_{k,n+1}^{H}) $, ${\bf Y}
_{k,n+1}^{V}=( C_{k,n+1}^{V},S_{k,n+1}^{V}) $, ${\bf Y}
_{k,n+1}^{P}=( C_{k,n+1}^{P},S_{k,n+1}^{P}) $ and in the internal
vector ${\bf x}_{n+1}=( x_{0,n+1},x_{1,n+1}) $ and
combines the data into an eight-dimensional vector (see Fig.~\ref{PBS}).
Rewriting this vector as
\begin{equation}
\left(
\begin{array}{c}
\left( C_{0,n+1}^{H}+iS_{0,n+1}^{H}\right) C_{0,n+1}^{P}x_{0,n+1}^{1/2} \\
i\left( C_{1,n+1}^{V}+iS_{1,n+1}^{V}\right) S_{1,n+1}^{P}x_{1,n+1}^{1/2} \\
\left( C_{1,n+1}^{H}+iS_{1,n+1}^{H}\right) C_{1,n+1}^{P}x_{1,n+1}^{1/2} \\
i\left( C_{0,n+1}^{V}+iS_{0,n+1}^{V}\right) S_{0,n+1}^{P}x_{0,n+1}^{1/2}
\end{array}
\right)
\equiv
\left(
\begin{array}{c}
a_{0}^{H} \\
ia_{1}^{V} \\
a_{1}^{H} \\
ia_{0}^{V}
\end{array}
\right) ,
\end{equation}
shows that the operation performed by the transformation stage T corresponds
to the matrix-vector multiplication in the quantum theoretical description
of a PBS, namely
\begin{equation}
\left(
\begin{array}{c}
b_{0}^{H} \\
b_{0}^{V} \\
b_{1}^{H} \\
b_{1}^{V}
\end{array}
\right) =
\left(
\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & 0 & 0 & i \\
0 & 0 & 1 & 0 \\
0 & i & 0 & 0
\end{array}
\right) \left(
\begin{array}{c}
a_{0}^{H} \\
a_{0}^{V} \\
a_{1}^{H} \\
a_{1}^{V}
\end{array}
\right),
\end{equation}
where $(a_{0}^{H},a_{0}^{V},a_{1}^{H},a_{1}^{V})$ and
$(b_{0}^{H},b_{0}^{V},b_{1}^{H},b_{1}^{V})$ denote the input and output
amplitudes of the photons with polarization $H$\ and $V$ in the 0 and 1
channels of a PBS, respectively.
Note that in our simulation model there is no need to introduce the concept of a vacuum field,
a requirement in the quantum optical description of a PBS.
}
\item{Output stage: The final stage (O) sends the message
${\bf w}=\left(w_0,w_1,w_2,w_3,w_4,w_5\right)^T$,
where
\begin{eqnarray}
w_0 &=&C_{0,n+1}^{H}C_{0,n+1}^{P}\sqrt{x_{0,n+1}}/uw_4,\nonumber \\
w_1 &=&S_{0,n+1}^{H}C_{0,n+1}^{P}\sqrt{x_{0,n+1}}/uw_4,\nonumber \\
w_2 &=&-S_{1,n+1}^{V}S_{1,n+1}^{P}\sqrt{x_{1,n+1}}/uw_5,\nonumber \\
w_3 &=&C_{1,n+1}^{V}S_{1,n+1}^{P}\sqrt{x_{1,n+1}}/uw_5,\nonumber \\
w_4 &=&\sqrt{w_0^{2}+w_1^{2}}/u,\nonumber \\
w_5 &=&\sqrt{w_2^{2}+w_3^{2}}/u,\nonumber\\
u &=&\sqrt{w_0^{2}+w_1^{2}+w_2^{2}+w_3^{2}},\nonumber \\
&&
\end{eqnarray}
through output channel 0 if $u^{2}>r$ where
$0<r<1$ is a uniform pseudo-random number.
Otherwise, if $u^{2}\le r$, the output stage sends
through output channel 1 the message
${\bf z}=\left(z_0,z_1,z_2,z_3,z_4,z_5\right)^T$,
where
\begin{eqnarray}
z_0 &=&C_{1,n+1}^{H}C_{1,n+1}^{P}\sqrt{x_{1,n+1}}/vz_4,\nonumber \\
z_1 &=&S_{1,n+1}^{H}C_{1,n+1}^{P}\sqrt{x_{1,n+1}}/vz_4,\nonumber \\
z_2 &=&-S_{0,n+1}^{V}S_{0,n+1}^{P}\sqrt{x_{0,n+1}}/vz_5,\nonumber \\
z_3 &=&C_{0,n+1}^{V}S_{0,n+1}^{P}\sqrt{x_{0,n+1}}/vz_5,\nonumber \\
z_4 &=&\sqrt{z_0^{2}+z_1^{2}}/v,\nonumber \\
z_5 &=&\sqrt{z_2^{2}+z_3^{2}}/v,\nonumber \\
v &=&\sqrt{z_0^{2}+z_1^{2}+z_2^{2}+z_3^{2}}.\nonumber \\
&&
\end{eqnarray}
}
\end{itemize}
Any other algorithm
that selects the output channel in a systematic manner might be employed as well.
This will change the order in which messages are being processed but the content
of the messages will be left intact and the resulting averages do not change significantly.
\subsection{Remaining optical components}
The Wollaston prism is a PBS with one input channel and two output channels
and is simulated as the PBS described earlier.
In contrast to the PBS, the HWP and the EOM are passive devices.
As can be seen from the wave mechanical description, a HWP
does not only change the polarization of the photon but also its phase~\cite{BORN64}.
When a voltage is applied to the EOM, $R\neq 0$ (see Eq. (2) in~\cite{JACQ08}) and the EOM acts as a wave plate that rotates the
polarization of the incoming photons by an angle depending on $R$. In the simulation a pseudo-random number is used
to decide to apply a voltage to the EOM or not.
Also here we use a pseudo-random number to mimic the experimental procedure to control the EOM~\cite{JACQ08,JACQ07}.
Any other (systematic) sequence to control the EOM can be used as well.
\subsection{Detection and data analysis procedure}Detector $D_0$ ($D_1$) registers
the output events at channel 0 (1).
During a run of $N$ events, the algorithm generates the data set
\begin{equation}
\Gamma (R)=\left\{x_{n},y_{n},A_{n}|n=1,...,N;\Phi =\Phi_{1}-\Phi _{0}\right\} ,
\end{equation}
where $x_{n}=0,1$ indicates which detector fired ($D_{0}$ or $D_{1}$),
$y_{n}=0,1$ indicates through which arm of the MZI the messenger (photon)
came that generated the detection event (note that $y_{n}$ is only
measured in the simulation, not in the experiment),
and $A_{n}=0,1$ is a pseudo-random number that is chosen
after the $n$th messenger has passed the first PBS,
determining whether or not a voltage is applied to the
EOM (hence whether the MZI configuration is open or closed).
Note that in one run of $N$ events a choice is made between no voltage or a particular
voltage corresponding to a certain reflectivity $R$ of the output BS
(see Eq. (2) in~\cite{JACQ08}).
The angle $\Phi $ denotes the phase shift between the two interferometer arms.
This phase shift is varied by applying a plane rotation on the phase of the particles entering
channel 0 of the second PBS. This corresponds to tilting the second PBS in
the laboratory experiment~\cite{JACQ08}.
For each $\Phi $ and
MZI configuration the number of 0 (1) output
events $N_{0}$ ($N_{1}$) is calculated.
\setlength{\unitlength}{1cm}
\begin{figure}
\caption{(Color online) Event-by-event simulation results of the interference visibility $V$
for $R=0.5$.
Markers give the results for the normalized intensity $N_0/N$
as a function of the phase shift $\Phi$, $N_0$
denoting the number of events registered at detector D$_0$.
Circles (triangles) represent the detection events generated by photons that followed path 0 (1)
and squares represent the total number of detection events.
For each value of $\Phi$,
the number of input events $N=10000$.
The total number of detection events per data point (squares) is approximately the same as in experiment.
The solid line represents the results of quantum theory.
}
\label{malus}
\end{figure}
\setlength{\unitlength}{1cm}
\begin{figure*}
\caption{(Color online) Event-by-event simulation results of the interference visibility $V$
for different values of $R$ ((a)-(c)) and of $V^2$, $D^2$ and $V^2+D^2$ as a function of the EOM voltage (d).
(a)-(c) Circles give the results for the normalized
intensities $N_{0}/N$ as a function of the phase shift $\Phi$.}
\label{fig}
\end{figure*}
\section{Simulation results}
The algorithm described above directly translates
into a simple computer program that simulates the messenger routing
in a network that contains all the optical components of the
laboratory experiment~\cite{JACQ08}.
Before the simulation starts we set ${\bf x}
_{0}=( x_{0,0},x_{1,0}) =( r,1-r) $, where $r$ is a
uniform pseudo-random number. In a similar way we use pseudo-random numbers
to initialize ${\bf Y}_{0,0}^{H}$, ${\bf Y}_{0,0}^{V}$, $
{\bf Y}_{0,0}^{P}$, ${\bf Y}_{1,0}^{H}$, $
{\bf Y}_{1,0}^{V}$ and ${\bf Y}_{1,0}^{P}$.
In this simulation, we send messengers to one input channel of the input PBS only
(see Fig.~\ref{wheeler}).
The HWP in BS$_{output}$ changes the phases and also interchanges the roles of channels 0 and 1.
Disregarding a few exceptional events, the PBS in BS$_{output}$ generates messages in one of the channels only.
For a fixed set of input parameters, each simulation takes a few seconds on a
present-day PC.
In all simulations, $\alpha=0.99$~\cite{RAED05d}.
We first demonstrate that our model yields full WPI of the photons.
Figure~\ref{malus} shows the number of detection events at $D_0$ as a function of $\Phi$ for $R=0.5$.
The events generated by photons following path 0 and path 1 of the MZI are counted separately.
It is clear that the number of photons that followed path 0 and path 1 is equal and that the total intensity in output channel 0
obeys Malus' law. Hence, although the photons have full WPI for all $\Phi$ they can build an interference pattern by arriving one by one at a detector.
Next, we calculate for $R=0,0.05,0.43$~\cite{JACQ08} and for each phase shift $\Phi$ and configuration (open or closed) of the MZI the number of
events registered by the two detectors behind the output BS, just like in the experiment.
Figure~\ref{fig}(a)-(c) depicts the interference visibility $V$.
The simulation data quantitatively agree with the averages calculated from quantum theory and
qualitatively agree with experiment (see Fig.3 in \cite{JACQ08}).
Calculation of $D$ as described in \cite{JACQ08} gives the results for $D^2$ and $V^2$ shown in Fig.~\ref{fig}(d).
Comparison with Fig.4 in \cite{JACQ08} shows excellent qualitative agreement.
\section{Conclusion}
In this paper, we have presented a simulation model that is solely based on experimental facts,
that satisfies Einstein's criterion of local causality, that does not rely
on any concept of quantum theory or of probability theory,
and that provides a description of the experimental observations in~\cite{JACQ08} on the level of
individual events.
In a pictorial description of our simulation model, we may speak about ``photons'' generating the detection events.
In the simulation we can always track the photons, even in the closed configuration of the MZI. The photons always have full WPI,
never directly communicate with each other, arrive one by one at a detector but nevertheless build up an interference pattern at the detector in
the case of the closed configuration of the MZI.
Hence, although for $0<R\le 0.5$ we find that $0\le D< 1$ and $D^2+V^2\leq 1$ with values for $D$ and $V$ in qualitative agreement
with the experimental results, we always have access to full WPI, even in the case $D=0$, $V=1$.
Our model thus provides a counterexample to the claim that full WPI would correspond to $D=1$.
A further consequence is that the relation $V^2+D^2\le 1$ cannot be regarded as quantifying the notion of complementarity: Our model
allows a particle-only description for both the open and closed configuration of the MZI.
\end{document} |
\begin{document}
\author[Lee and Yu]{Zachary Lee and Xueying Yu}
\address{Zachary Lee
\newline
\indent Department of Mathematics, MIT \indent
\newline \indent 77 Massachusetts Ave, Cambridge, MA 02139,\indent }
\email{[email protected]}
\address{Xueying Yu
\newline \indent Department of Mathematics, University of Washington\indent
\newline \indent C138 Padelford Hall Box 354350, Seattle, WA 98195,\indent }
\email{[email protected]}
\title[]{A note on recovering the nonlinearity for generalized higher-order Schr\"odinger equations}
\subjclass[2020]{}
\keywords{}
\begin{abstract}
In this note, we generalize the nonlinearity-recovery result in \cite{HMG} for classical cubic nonlinear Schr\"odinger equations to higher-order Schr\"odinger equations with a more general nonlinearity. More precisely, we consider a spatially-localized nonlinear higher-order Schr\"odinger equation and recover the spatially-localized coefficient by the solutions with data given by small-amplitude wave packets.
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\tableofcontents
\parindent = 10pt
\parskip = 8pt
\section{Introduction}
In this note, we consider the following spatially localized nonlinear higher-order Schr\"odinger equations with a general nonlinearity
\begin{align}\label{eq NLS}
\begin{cases}
(i \partial_t + \frac{1}{2n} (-\Delta)^{n}) u = \beta (x) G (\abs{u}^2) u , \\
u(-T, x) = u_0 .
\end{cases}
\end{align}
Here the solution $u : {\mathbb{R}}_t \times {\mathbb{R}}_x^d \to {\mathbb{C}}$ is a complex-valued function of time and space, we take $T>0$, and $\beta \in C_c^{\infty} ({\mathbb{R}}^d)$ is nonnegative. The operator $(-\Delta)^n$ with $n \in {\mathbb{N}}_+$ is a generalized higher order Laplacian. In addition, the nonlinearity $G:{\mathbb{R}}_{\ge 0} \to{\mathbb{R}}_{\ge 0}$ vanishes at and is real-analytic in a neighborhood of the origin. In other words, we can write
\begin{align}
G(x)=\sum_{k\ge 1}\frac{a_k}{k!} x^k
\end{align}
converging uniformly for $|x|<R$ for some possibly small $R>0$.
The goal of this paper is to determine the nonlinear coefficient $\beta$ in \eqref{eq NLS}. Before we start the proof, let us briefly review the history. Hogan, Murphy, and Grow \cite{HMG} previously studied a similar problem for the second order case nonlinear Schr\"odinger equation (NLS) with a cubic nonlinearity ($n=1$, $G(x) =x$ in \eqref{eq NLS}). We employ a similar approach in their work, which itself is adapted from S\'a Barreto and Stefanov \cite{BS1, BS2}, who considered a similar recovery problem for the cubic wave equation (and more general nonlinear wave models). We show that solutions to \eqref{eq NLS} with data given by small-amplitude wave packets generate phase that determines the X-ray transform of $\beta$, essentially the integrals of $\beta$ on all possible lines in its domain.
Now we state the main result in this note.
\begin{thm}[Main result]\label{thm Main}
Let $d$ be any dimension, and $p>n$, $a_0, \beta\in \mathcal{S}(\mathbb{R}^d)$ with $\beta$ compactly supported. Fix $T>0$ large enough that the support of $\beta$ is contained in $\{x: |x|< T\}$. Given $\xi\in\mathbb{R}^d$ with $|\xi|=1$, define
\begin{align}
a (t,x) = a_0 (x+ \varepsilon^{1-2n} t \xi) e^{-i \varepsilon^{-2n} G(\varepsilon^{2p} \abs{a_0(x + \varepsilon^{1-2n} t \xi) }^2) \int_0^t \beta (\varepsilon^{-1} x + \varepsilon^{-2n} (t-s)\xi -T\xi) \, ds} .
\end{align}
and
\begin{align}
v (t,x) : = \varepsilon^p a (\varepsilon^{2n} (t+T), \varepsilon (x + T\xi)) e^{ i (x \cdot \xi + \frac{1}{2n} t)} .
\end{align}
Let $u_0(x)=v(-T,x)$. Then for $\varepsilon>0$ sufficiently small, the solution $u$ to \eqref{eq NLS} exists and satisfies
\begin{align}\label{error}
\norm{u - v}_{L_t^{\infty} \mathcal{F}L^1 ([-T,T] \times {\mathbb{R}}^d)} \lesssim \max \{ \varepsilon^{p+2}, \varepsilon^{3p-2n- } \}.
\end{align}
\end{thm}
\begin{rmk}
We can obtain the same result by replacing the space $\mathcal{F}L^1({\mathbb{R}}^d)$ with $H^{d/2+}({\mathbb{R}}^d)$, if desired, using essentially the same argument. This is because the key property we rely on for estimating solutions is that the space is an algebra, which is also true for the latter space.
\end{rmk}
Theorem \ref{thm Main} shows that by approximately solving \eqref{eq NLS}, we can recover the X-ray transform of $\beta$,
\begin{align}
X(x, \theta)=\int_{\mathbb{R}} \beta(x+t\theta)\,dt , \quad \theta, x\in{\mathbb{R}}^d, \quad |\theta|=1 .
\end{align}
In other words, the X-ray transform of $\beta$ is the integral of $\beta$ on various straight lines in space, which in turn is enough to reconstruct $\beta$ itself (see e.g.\ \cite{Deans}). Indeed, as explained in \cite{HMG}, we first see that the approximate solution $v(T,x)$ contains the integrals of $\beta$ over all lines if we let $\xi$ range over $\mathbb{S}^{d-1}$. Additionally, in the regime $p>n$ and $\varepsilon\ll 1$, we have that the error in \eqref{error} is much smaller than the size of both the solution and the approximate solution. \par
Part of the motivation for this work originates in \cite{BS1}, where the authors proved a similar result for the following nonlinear wave equation
\begin{align*}
\partial_{tt} u -\Delta u +\beta |u|^2 u=0.
\end{align*}
The authors probed the equation with a wave of the form
\begin{align*}
u(t,x)=h^{-1/2} e^{(x\cdot\xi -t)/h} \chi(x\cdot\xi -t), \quad \text{where }0<h\ll 1
\end{align*}
outside of the support of $\beta$ and devised a geometric optics approximation similar to the one appearing in \cite{HMG} as well as in Theorem \ref{thm Main}. Our construction of an approximate solution is a modification of the ones in \cite{HMG},\cite{Carles_2010}, which are based on geometric optics solutions of NLS equations. We use the same Fourier-Lebesgue space $\mathcal{F}L^1$ to establish well-posedness and stability. As is done in \cite{HMG}, our approximation controls the stronger norm $L^\infty_t\mathcal{F}L^1$. \par
Theorem \ref{thm Main} is a generalization of the result in \cite{HMG} (which itself establishes well-posedness and stability for a cubic NLS as well as constructs an approximate solution) with a follow-up in \cite{Mur}. The work in \cite{HMG} is closely related to that done in \cite{Wa}, where the authors worked on reconstructing nonlinearities of the form $q(x)|u|^{p-1} u$ from the scattering map data. In this paper, we generalize the order of the NLS to any $n\ge 1$ as well as the nonlinearity to a function $G:{\mathbb{R}}_{\ge 0}\to{\mathbb{R}}_{\ge 0}$ vanishing at and real analytic in a neighborhood of the origin.
We require essentially the same smoothness as in \cite{Wa}, though the compact support assumption is needed in the argument since $\varepsilon\to 0$ if we let $T\to\infty$. We are able to obtain this result for rather general nonlinearities $G(x)$. We need the real-analyticity of $G$ to obtain an estimate needed to apply the Banach contraction mapping theorem. Such functions include polynomials but also transcendental functions like $x^2\cos(x)$. \par
We briefly describe the approach we take to prove Theorem \ref{thm Main}. The key approach is to consider an approximate solution to \eqref{eq NLS} of the following ansatz
\begin{align}\label{ansatz}
v (t,x) : = \varepsilon^p a (\varepsilon^{2n} (t+T), \varepsilon (x + T\xi)) e^{ i (x \cdot \xi + \frac{1}{2n} t)}.
\end{align}
After substituting this ansatz into \eqref{eq NLS}, it can be shown through a direct calculation that $v(t,x)$ approximately solves \eqref{eq NLS} (with an error term that involves derivatives of $a$ up to order $2n$) if a certain nonlinear transport equation is satisfied by $a$. To complete the proof, it is necessary to establish a theory of well-posedness and stability for \eqref{eq NLS} in the $\mathcal{F}L^1$ norm and to obtain suitable estimates for the error term in the same norm. The restriction $p>n$ is required to demonstrate that the error term is small relative to the amplitude of the solution, which is on the order of $\varepsilon^p$.
Let us remark that in \cite{HMG}, a similar ansatz as in \eqref{ansatz} was considered, which is inspired by the scaling symmetry and Galilean invariance of the linear Schr\"odinger equation. This ansatz provides a solution to the NLS with only one error term to control and yields an exact transport equation for $a$. However, when considering higher order Schr\"odinger equations of the form \eqref{eq NLS}, although the scaling symmetry is still present, Galilean invariance breaks down completely. Therefore, additional approximate analysis is required to account for this missing symmetry and to control the many error terms arising from the ansatz form.
We organize the rest of the paper as such: In Section \ref{prelim} we give some notation and collect some various estimates. In Section \ref{WPS}, we prove well-posedness and stability of \eqref{eq NLS} in the space $\mathcal{F} L^1$. In Section \ref{Approx}, we construct the approximate solution $v$ to \eqref{eq NLS} appearing in Theorem \ref{thm Main} and prove estimates for the error term involving $a$ and its derivatives. Finally, in Section~\ref{Main}, we carry out the proof of Theorem \ref{thm Main}.
\section{Preliminaries}\label{prelim}
In this section, we define the function spaces that will be used in the rest of this paper.
\subsection{Notations}
We use the usual notation that $A \lesssim B$ to denote an estimate of the form $A \leq C B$, for some constant $0 < C < \infty$ depending only on the {\it a priori} fixed constants of the problem. We also use $a+$ and $a-$ to denote expressions of the form $a + \sigma$ and $a - \sigma$, for any $0 < \sigma \ll 1$.
\subsection{Fourier Transforms and Function Spaces}
\begin{defn}[Fourier transform]
Let $\widehat{u} $ or $\mathcal{F} u$ be the Fourier transform of $u$ defined as follows
\begin{align}
\widehat{u} (y) = \mathcal{F} u (y)= \int_{{\mathbb{R}}^d} e^{-i x \cdot y} u(x) \, dx ,
\end{align}
and $\mathcal{F}^{-1} u$ be the inverse Fourier transform
\begin{align}
\mathcal{F}^{-1} v (x) = \int_{{\mathbb{R}}^d} e^{i x \cdot y} v(y) \, dy .
\end{align}
\end{defn}
\begin{defn}[Fourier-Lebesgue space]\label{defn FL}
We recall the Fourier-Lebesgue space $\mathcal{F}L^1$ equipped with the norm
\begin{align}
\norm{u}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} : = \norm{\widehat{u}}_{L^1 ({\mathbb{R}}^d)} .
\end{align}
\end{defn}
\begin{prop}[Embedding property]
Using the Hausdorff--Young inequality and Definition \ref{defn FL}, we have
\begin{align}
\norm{u}_{L^{\infty} ({\mathbb{R}}^d)} = \norm{\mathcal{F}^{-1} \widehat{u}}_{L^{\infty} ({\mathbb{R}}^d)} \lesssim \norm{\widehat{u}}_{L^1 ({\mathbb{R}}^d)} = \norm{u}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}.
\end{align}
By the Cauchy--Schwarz inequality, the integrability of $(1 + \abs{y}^2)^{-\frac{d}{4}-}$ in $L^2 ({\mathbb{R}}^d)$, and the Plancherel theorem, we obtain
\begin{align}\label{FL1}
\norm{u}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \lesssim \norm{(1 + \abs{y}^2)^{\frac{d}{4}+} \widehat{u}}_{L^2 ({\mathbb{R}}^d)} \lesssim \norm{u}_{H^{\frac{d}{2}+} ({\mathbb{R}}^d)}.
\end{align}
\end{prop}
\begin{prop}[Algebra property]\label{prop Algebra}
Using Definition \ref{defn FL} and Young's inequality, we write
\begin{align}
\norm{u v}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} = \norm{\mathcal{F} [uv]}_{L^1 ({\mathbb{R}}^d)} = \norm{\widehat{u} * \widehat{v}}_{L^1 ({\mathbb{R}}^d)} \leq \norm{\widehat{u}}_{L^1 ({\mathbb{R}}^d)} \norm{\widehat{v}}_{L^1 ({\mathbb{R}}^d)} = \norm{u }_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \norm{v}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} .
\end{align}
Hence we conclude that $\mathcal{F} L^1$ is an algebra.
\end{prop}
\section{Well-posedness and Stability in $\mathcal{F}L^1$}\label{WPS}
In this section, we present a local well-posedness argument (Proposition \ref{prop WP}) and a stability result (Proposition \ref{prop Stab}).
\begin{prop}[Well-posedness in $\mathcal{F}L^1$]\label{prop WP}
Let $d$ be any dimension, $ \beta \in \mathcal{F}L^1 ({\mathbb{R}}^d) $, and $T>0$. There exists $\delta_0 = \delta_0 (T , \norm{\beta}_{\mathcal{F}L^1 ({\mathbb{R}}^d) }) >0$ such that for any $u_0 \in \mathcal{F}L^1 ({\mathbb{R}}^d)$ with
\begin{align}
\norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} < \delta_0 ,
\end{align}
there exists a unique solution $u \in L_t^{\infty} ([-T, T] ; \mathcal{F}L^1 )$ to \eqref{eq NLS} satisfying
\begin{align}\label{WP1}
\norm{u}_{L_t^{\infty} \mathcal{F}L^1 ([-T, T] \times {\mathbb{R}}^d)} \leq 2 \norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}
\end{align}
\end{prop}
\begin{proof}[Proof of Proposition \ref{prop WP}]
We will establish a solution to the Duhamel formula for \eqref{eq NLS},
\begin{align}
u(t)=e^{i(t+T) (-\Delta)^{n}/2n}u_0-i\int_{-T}^t e^{i(t-s) (-\Delta)^{n}/2n} \beta (x)[G(|u|^2) u](s)\,ds.
\end{align}
Fix $T>0$ and $\beta \in \mathcal{F}L^1$. Let $\delta_0>0$ be determined later and let $u_0\in\mathcal{F}L^1$ satisfy
\begin{align}
\norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}<\delta_0.
\end{align}
Define the complete metric space $(B,d)$ by
\begin{align}
B=\{u\in L^\infty([-T,T]; \mathcal{F}L^1(\mathbb{R}^d)): \norm{u}_{L^\infty_t \mathcal{F}L^1([-T,T] \times \mathbb{R}^d)} \le 2\norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}\}
\end{align}
and
\begin{align}
d(u,v)= \norm{u -v}_{L^\infty_t \mathcal{F}L^1([-T,T] \times \mathbb{R}^d)}.
\end{align}
We also define
\begin{align}
\Psi[u](t):=e^{i(t+T) (-\Delta)^{n}/2n}u_0-i\int_{-T}^t e^{i(t-s) (-\Delta)^{n}/2n}[ \beta (x) G(|u(s)|^2) u(s)]\,ds.
\end{align}
We will show that for small enough $\delta_0$, $\Psi$ is a contraction on $B$. We notice that we also have $\norm{u}_{L_x^\infty} < 2\delta_0$. Hence, we have that, for small enough $\delta_0$,
\begin{align}
G(|u|^2)(x)=\sum_{k\ge 1} \frac{a_k}{k!}|u|^{2k}(x)\quad \text{for any } x\in{\mathbb{R}}^d.
\end{align}
Hence, taking the $\mathcal{F}L^1$ norm and utilizing the triangle inequality and the algebra property, we have for all $u\in B$,
\begin{align}
\norm{G(|u|^2)}_{\mathcal{F}L^1} &\le \sum_{k\ge 1} \frac{\abs{a_k}}{k!} \norm{u}_{\mathcal{F}L^1}^{2k} \le \sum_{k\ge 1} \frac{\abs{a_k}}{k!} 2^{2k} \delta_0^{2k} =O(\delta_0^2).
\end{align}
The last step above is due to $G$ being holomorphic near the origin.
Now, let $u\in B$. For each $t\in[-T,T]$, we use $e^{it (-\Delta)^n/2n}=\mathcal{F}^{-1}e^{it|y|^{2n}/2n}\mathcal{F}$, the triangle inequality, $\norm{u}_{L_t^\infty \mathcal{F}L^1} \le 2 \norm{u_0}_{\mathcal{F}L^1}<2\delta_0$ and the algebra property (Proposition \ref{prop Algebra}) to estimate
\begin{align}
\norm{\mathcal{F}[\Psi u](t)}_{L^1 ({\mathbb{R}}^d)} &\le \norm{\widehat{u}_0}_{L^1 ({\mathbb{R}}^d)} + \norm{\int_{-T}^t e^{i(t-s)|y|^{2n}/{2n}}\mathcal{F}[ \beta (x) G(|u(s)|^2) u(s)]\,ds}_{L^1 ({\mathbb{R}}^d) } \\
&\le \norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} + \int_{-T}^t \norm{\beta}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}\norm{G(|u(s)|^2)}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}\norm{u(s)}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \, ds \\
&\le \norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} + T \,\,O(\delta_0^2)\norm{\beta}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}\norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \\
&\le 2 \norm{u_0}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}
\end{align}
for small enough $\delta_0$ (where $O(\delta_0^2) \le C\delta_0^2$ for some $C>0$). Taking the supremum over $t\in[-T, T]$, we see that $\Psi$ maps $B$ to itself.
Next, we let $u, v \in B$ and write
\begin{align}
u\,G(|u|^2)-v\,G(|v|^2)=\sum_{k\ge 1}\frac{a_k}{k!}\left(u\,|u|^{2k}-v\,|v|^{2k}\right).
\end{align}
Now, notice that by factorizing, the expression $u\,|u|^{2k}-v\,|v|^{2k}$ can be written as $(u-v)P(u,v)$ with $P $ a polynomial in $u,v$ with $P=O(\delta_0^{2k})$ for $u,v$ in a neighborhood of the origin. Hence, we have that, utilizing the algebra property of the Fourier-Lebesgue norm, for $\norm{u}_{\mathcal{F}L^1}, \norm{v}_{\mathcal{F}L^1} <2\delta_0$, and the analyticity of $G$ near the origin
\begin{align}
\bigg\Vert u\,G(|u|^2)-v\,G(|v|^2)\bigg\Vert_{\mathcal{F} L^1 ({\mathbb{R}}^d)} & \le \sum_{k\ge 1}\frac{|a_k|}{k!} \big\Vert P(u,v)\big\Vert_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \big\Vert u-v\big\Vert_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \\
&\le C \big\Vert u-v\big\Vert_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \sum_{k\ge 1}\frac{|a_k|}{k!} \delta_0^{2k} \\
&\le O(\delta_0^2) \big\Vert u-v\big\Vert_{\mathcal{F}L^1 ({\mathbb{R}}^d)}.
\end{align}
Hence, we have
\begin{align}
\norm{\mathcal{F}[\Psi u](t) - \mathcal{F}[\Psi v](t)}_{L^1 ({\mathbb{R}}^d)} &\le \int_{-T}^t \norm{\beta}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \norm{G(|u|^2) u - G(|v|^2) v}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}\,ds \\
&\lesssim T \delta_0^2 \norm{\beta}_{\mathcal{F}L^1 ({\mathbb{R}}^d) }\norm{u-v}_{L^\infty_t \mathcal{F}L^1([-T,T] \times \mathbb{R}^d)} \\
&\le \frac{1}{2}\norm{u-v}_{L^\infty_t \mathcal{F}L^1([-T,T] \times \mathbb{R}^d)}
\end{align}
for small enough $\delta_0$.
Taking the supremum over $t$, we see that $\Psi$ is a contraction mapping, and thus by the Banach fixed point theorem, has a unique fixed point $u\in B$, yielding the desired solution to \eqref{eq NLS}.
\end{proof}
\begin{prop}[Stability in $\mathcal{F}L^1$]\label{prop Stab}
Let $d$ be any dimension, $\beta \in \mathcal{F}L^1 ({\mathbb{R}}^d)$, and $T>0$. Let $\delta_0 >0$ be as in Proposition \ref{prop WP}.
If $v : [-T, T] \times {\mathbb{R}}^d \to {\mathbb{C}}$ satisfies
\begin{align}
\norm{v (-T,x)}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} < \delta_0 \label{stability1}, \\
\norm{v}_{L_t^{\infty} \mathcal{F}L^1 ([-T,T] \times {\mathbb{R}}^d)} \lesssim \delta_0 \label{stability2},
\end{align}
and
\begin{align}
\norm{\int_{-T}^t e^{i(t-s) (-\Delta)^{n}/2n} [(i \partial_t + \frac{1}{2n}(-\Delta)^n)v - \beta (x) G(\abs{v}^2) v] (s) \, ds}_{L_t^{\infty}\mathcal{F}L^1 ([-T,T] \times {\mathbb{R}}^d)} = \delta \label{eq2},
\end{align}
then the solution $u$ to \eqref{eq NLS} with $u(-T,x) = v(-T ,x) $ exists on $[-T,T]$ and satisfies
\begin{align}
\norm{u - v}_{L_t^{\infty} \mathcal{F}L^1 ([-T,T] \times {\mathbb{R}}^d)} \lesssim \delta \label{stability3}.
\end{align}
\end{prop}
\begin{proof}[Proof of Proposition \ref{prop Stab}]
We fix $T>0$ and $\beta \in \mathcal{F} L^{1}$. Assume that $v:[-T, T] \times \mathbb{R}^{d} \rightarrow \mathbb{C}$ satisfies \eqref{stability1}, \eqref{stability2}. We let $u:[-T, T] \times \mathbb{R}^{d} \rightarrow \mathbb{C}$ be the solution to \eqref{eq NLS} with $\left.u\right|_{t=-T}=\left.v\right|_{t=-T}$ satisfying \eqref{WP1}, whose existence is guaranteed by Proposition \ref{prop WP} and \eqref{stability1}.
We now observe that the difference $u-v$ satisfies the Duhamel formula
\begin{align}
u(t)-v(t)=-i \int_{-T}^{t} e^{i(t-s) (-\Delta)^{n} / 2n}\left(\beta(x)\left[G(|u|^{2}) u-G(|v|^{2}) v\right](s)-E(s)\right) d s,
\end{align}
where
\begin{align}
E(t):=\left(i \partial_{t}+\frac{1}{2n} (-\Delta)^{n} \right) v- \beta(x) G(|v|^{2}) v .
\end{align}
We now estimate as we did in the proof of Proposition \ref{prop WP}. Using \eqref{eq NLS}, \eqref{eq2}, \eqref{stability2}, and \eqref{WP1}, we have
\begin{align}
\norm{u(t) - v(t)}_{\mathcal{F}L^1 ({\mathbb{R}}^d)}
& \lesssim T(\delta_{0}^{2}) \norm{\beta}_{\mathcal{F}L^1 ({\mathbb{R}}^d)} \norm{u-v}_{L_{t}^{\infty} \mathcal{F}L^1 ({\mathbb{R}}^d)}+\delta
\end{align}
for some $C>0$, where all space-time norms are over $[-T, T] \times \mathbb{R}^{d}$. Taking the supremum over $t \in[-T, T]$ and choosing $\delta_{0}$ smaller if necessary (so that $T C_{u,v} (\delta_{0}^{2})\|\beta\|_{\mathcal{F}L^1 ({\mathbb{R}}^d)}<\frac{1}{2}$, say), we deduce that
\begin{align}
\norm{u-v}_{L_{t}^{\infty} \mathcal{F}L^{1}\left([-T, T] \times \mathbb{R}^{d}\right)} \leq \frac{1}{2} \norm{u-v}_{L_{t}^{\infty} \mathcal{F}L^{1}\left([-T, T] \times \mathbb{R}^{d}\right)}+\delta,
\end{align}
which yields \eqref{stability3}, as desired.
\end{proof}
\section{Approximation Solutions}\label{Approx}
In this section, we consider an approximate solution to \eqref{eq NLS} and show the estimates needed to apply the stability result (Proposition \ref{prop Stab}).
\begin{prop}\label{prop Appx}
Let $d$ be any dimension. Fix $T>0$, $0 < \varepsilon \ll 1$, and $\xi \in {\mathbb{R}}^d$ with $\abs{\xi} =1$. Let $a_0 \in \mathcal{F}L^1$ and define
\begin{align}
a (t,x) = a_0 (x+ \varepsilon^{1-2n} t \xi) e^{-i \varepsilon^{-2n} G(\varepsilon^{2p} \abs{a_0(x + \varepsilon^{1-2n} t \xi) }^2) \int_0^t \beta (\varepsilon^{-1} x + \varepsilon^{-2n} (t-s)\xi -T\xi) \, ds} .
\end{align}
and
\begin{align}
v (t,x) : = \varepsilon^p a (\varepsilon^{2n} (t+T), \varepsilon (x + T\xi)) e^{ i (x \cdot \xi + \frac{1}{2n} t)} .
\end{align}
Then we have the following
\begin{enumerate}
\item
The function $v$ satisfies the initial condition
\begin{align}
v(-T, x) = u_0 (x) := \varepsilon^p a_0 (\varepsilon (x+ T\xi)) e^{ i (x \cdot \xi - \frac{1}{2n} T)} ;
\end{align}
\item
We have the following identity
\begin{align}
(i \partial_t + \frac{1}{2n} (-\Delta)^n) v - \beta (x) G(\abs{v}^2) v & = \varepsilon^{p+2n} e^{i \Phi} \sq{ i \partial_t a - i \varepsilon^{1-2n} \xi \cdot \nabla a - \varepsilon^{-2n} \beta(x) G(\varepsilon^{2p} \abs{a}^2) a } \\
& \quad + \varepsilon^{p+2n} e^{i \Phi} \sq{\sum_{j=2}^{2n} \mathcal{O} (\varepsilon^{j-2n} e^{i\Phi} D_x^j a ) } ,
\end{align}
where $D_x^j = \sum\limits_{ \substack{ (j_1, j_2 , \ldots , j_d) \\ j_1 + j_2 + \cdots + j_d = j}} \partial_{x_1}^{j_1} \partial_{x_2}^{j_2} \cdots \partial_{x_d}^{j_d} $;
\item
The function $a$ satisfies the following estimates
\begin{align}
\norm{a}_{\mathcal{F}L^1} & \lesssim \norm{a}_{H_x^{\frac{d}{2}+} ({\mathbb{R}}^d)} \lesssim \max \{ 1 , \varepsilon^{2p - 2n - }\} ,\\
\norm{D_x^j a}_{\mathcal{F}L^1} & \lesssim \max \{ 1 , \varepsilon^{2p - 2n -j - }\} .
\end{align}
\end{enumerate}
\end{prop}
\begin{proof}[Proof of Proposition \ref{prop Appx}]
Let
\begin{align}
\Phi = \Phi (t,x) = x \cdot \xi + \frac{1}{2n} t ,
\end{align}
and consider an approximate solution of the following form
\begin{align}
v (t,x) = \varepsilon^p a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) e^{i\Phi} .
\end{align}
Then we compute
\begin{align}
\partial_t v & = \varepsilon^p e^{i\Phi} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) (\frac{i}{2n}) + \varepsilon^p e^{i\Phi} \partial_t a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) \varepsilon^{2n} \\
\partial_{x_i} v & = \varepsilon^p e^{i\Phi} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) (i \xi_i) + \varepsilon^p e^{i\Phi} \partial_{x_i} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) \varepsilon \\
\partial_{x_i}^2 v & = \varepsilon^p e^{i\Phi} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) (i \xi_i)^2 + 2 \varepsilon^p e^{i\Phi} \partial_{x_i} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) (i \xi_i) \varepsilon \\
& \quad + \varepsilon^p e^{i\Phi} \partial_{x_i}^2 a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) \varepsilon^2
\end{align}
and
\begin{align}
(i \partial_t + \frac{1}{2n} (-\Delta)^{n} ) v & = \cancel{-\frac{1}{2n} \varepsilon^p e^{i\Phi} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) } + i \varepsilon^{p+2n} e^{i\Phi} \partial_t a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) \\
& \quad + \cancel{\frac{1}{2n} \varepsilon^p e^{i\Phi} a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) } \\
& \quad - i \varepsilon^{p+1} e^{i\Phi} \xi \cdot \nabla a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi))\\
& \quad + \sum_{j=2}^{2n} \mathcal{O} (\varepsilon^{p+j} e^{i\Phi} D_x^j a ) \\
& = i \varepsilon^{p+2n} e^{i\Phi} \partial_t a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi)) - i \varepsilon^{p+1} e^{i\Phi} \xi \cdot \nabla a (\varepsilon^{2n} (t+T) , \varepsilon (x +T\xi))\\
& \quad + \sum_{j=2}^{2n} \mathcal{O} (\varepsilon^{p+j} e^{i\Phi} D_x^j a ) .
\end{align}
Combining
\begin{align}
G(\abs{v}^2) v = \varepsilon^p e^{i\Phi} G(\varepsilon^{2p} \abs{a}^2) a ,
\end{align}
we have
\begin{align}
(i \partial_t + \frac{1}{2n} (-\Delta)^n) v - \beta (x) G(\abs{v}^2) v & = \varepsilon^{p+2n} e^{i \Phi} \sq{ i \partial_t a - i \varepsilon^{1-2n} \xi \cdot \nabla a - \varepsilon^{-2n} \beta(x) G(\varepsilon^{2p} \abs{a}^2) a } \label{eq Trans}\\
& \quad + \varepsilon^{p+2n} e^{i \Phi} \sq{\sum_{j=2}^{2n} \mathcal{O} (\varepsilon^{j-2n} e^{i\Phi} D_x^j a ) } , \label{eq Err}
\end{align}
where $a$ and its derivatives are evaluated at $(t' , x') = (\varepsilon^{2n} (t+T) , \varepsilon (x + T\xi))$.
Now we wish to find $a$ such that $a$ solves,
\begin{align}
\begin{cases}
i \partial_t a - i \varepsilon^{1-2n} \xi \cdot \nabla a - \varepsilon^{-2n} \beta (\varepsilon^{-1} x - T\xi) G(\varepsilon^{2p}\abs{a}^2) a =0 ,\\
a(0,x) = a_0 ,
\end{cases}
\end{align}
which is the first term in \eqref{eq Trans}, and as a result, we can treat \eqref{eq Err} as an error term during this approximation process.
Using the method of characteristics, we set $x(t) = x_0 - \varepsilon^{1-2n} t\xi$ for some $x_0 \in {\mathbb{R}}^d$ and $b(t) = a (t, x(t))$.
We reduce the equation along the characteristic in the following form
\begin{align}\label{eq 2}
\frac{d}{dt} b = -i \varepsilon^{-2n} \beta(\varepsilon^{-1} x(t) - T\xi) G(\varepsilon^{2p} \abs{b}^2) b .
\end{align}
Notice that
\begin{align}
\frac{d}{dt} \abs{b}^2 = 2\re [ - i \varepsilon^{-2n} \beta(\varepsilon^{-1} x(t) - T\xi) G(\varepsilon^{2p} \abs{b}^2) \abs{b}^2 ] = 0 ,
\end{align}
hence $\abs{b}^2 = \abs{a_0(x_0)}^2 $ is a constant.
Then \eqref{eq 2} becomes
\begin{align}
\frac{d}{dt} b & = - i \varepsilon^{-2n} \beta(\varepsilon^{-1}x (t) - T\xi) G(\varepsilon^{2p} \abs{a_0(x_0)}^2) b ,
\end{align}
then
\begin{align}
b & = a_0 (x_0) e^{ - i \varepsilon^{-2n} G(\varepsilon^{2p} \abs{a_0(x_0)}^2) \int_0^t \beta(\varepsilon^{-1} x(s) - T\xi) \, ds} .
\end{align}
Hence
\begin{align}
a (t,x) = a_0 (x+ \varepsilon^{1-2n} t \xi) e^{-i \varepsilon^{-2n} G(\varepsilon^{2p} \abs{a_0(x + \varepsilon^{1-2n} t \xi) }^2) \int_0^t \beta (\varepsilon^{-1} x + \varepsilon^{-2n} (t-s)\xi -T\xi) \, ds} .
\end{align}
Next, we will compute $H^N$ norms of $a$. When computing the derivative of $a$, we have the following two cases
\begin{enumerate}
\item
if $\nabla$ falls on $a_0$, this is a good case, which gives a constant multiple of $a$
\item
if $\nabla$ falls on the exponential term, there are two subcases
\begin{enumerate}
\item
$\nabla$ hits $G$ in the exponential term, and it contributes $\varepsilon^{-2n}$ from the phase and $\varepsilon^{2p}$ from the chain rule in $G$.
\item
$\nabla$ hits the integral in the exponential term, and it contributes $\varepsilon^{-2n}$ from the phase and $\varepsilon^{-1}$ from $\beta$, also due to $\beta$ being localized in a ball of radius $\varepsilon T$, we have an $\varepsilon^{\frac{d}{2}}$. Notice that we have the following property of $G$
\begin{align}
\abs{G (\varepsilon^{2p} x)} \lesssim \varepsilon^{2p} |x|
\end{align}
for small enough $\varepsilon$ and bounded $x$.
Hence we have an $\varepsilon^{2p}$ from $G$.
\end{enumerate}
\end{enumerate}
Hence
\begin{align}
\norm{a}_{H_x^1} \lesssim \max \{ 1 , \varepsilon^{-2n +2p} , \varepsilon^{-2n + 2p - 1 + \frac{d}{2}} \} .
\end{align}
These terms are fine, since we may require $p > n$.
Moreover, we can compute higher Sobolev norms of $a$ following the same idea
\begin{align}
\norm{a}_{H_x^1} & \lesssim \max \{ 1 , \varepsilon^{-2n +2p} , \varepsilon^{-2n + 2p - 1 + \frac{d}{2}} \} , \\
\norm{a}_{H_x^2} & \lesssim \max \{ 1 , \varepsilon^{-2n +2p} , \varepsilon^{-2n + 2p - 2 + \frac{d}{2}} \} , \\
\norm{a}_{H_x^N} & \lesssim \max \{ 1 , \varepsilon^{-2n +2p} , \varepsilon^{ -2n + 2p -N + \frac{d}{2}}\} .
\end{align}
In particular, we have
\begin{align}
\norm{a}_{H_x^{\frac{d}{2}+}} \lesssim \max \{ 1 , \varepsilon^{- 2n +2p}, \varepsilon^{- 2n +2p + \frac{d}{2} - \frac{d}{2} -}\} = \max \{ 1 , \varepsilon^{2p - 2n -}\} ,
\end{align}
which implies
\begin{align}
\norm{a}_{\mathcal{F}L^1} & \lesssim \norm{a}_{H_x^{\frac{d}{2}+}} \lesssim \max \{ 1 , \varepsilon^{2p - 2n - }\} , \\
\norm{D_x^j a}_{\mathcal{F}L^1} & \lesssim \max \{ 1 , \varepsilon^{2p - 2n -j - }\} .
\end{align}
\end{proof}
\section{Proof of Main Theorem}\label{Main}
In this section, we prove the main result of this note, Theorem \ref{thm Main}, by combining the well-posedness and stability results with the approximation theorem.
\begin{proof}[Proof of Theorem \ref{thm Main}]
We define the function $v(t,x)$ satisfying the initial condition
\begin{align}
v(-T, x) = u_0 (x) := \varepsilon^p a_0 (\varepsilon (x+ T\xi)) e^{ i (x \cdot \xi - \frac{1}{2n} T)}.
\end{align}
given in Proposition \ref{prop Appx}.
We first observe that
\begin{align}
\norm{v}_{L_t^{\infty} \mathcal{F}L^1} \lesssim \varepsilon^p \norm{a}_{L_t^{\infty} \mathcal{F}L^1}\lesssim \varepsilon^p
\end{align}
so that \eqref{stability2} is satisfied for sufficiently small $\varepsilon$. Next, we use the estimates in Proposition \ref{prop Appx} for the norm $L_t^{\infty} \mathcal{F}L^1$ of $a$ and its derivatives to estimate
\begin{align}
& \quad \norm{\int_{-T}^t e^{i(t-s) (-\Delta)^n/2n} [(i \partial_t + \frac{1}{2n}(-\Delta)^n)v - \beta (x) G(\abs{v}^2) v] (s) \, ds}_{L_t^{\infty}\mathcal{F}L^1 ([-T,T] \times {\mathbb{R}}^d)} \\
& \lesssim \varepsilon^{p+2n} \sum_{j=2}^{2n} \varepsilon^{j-2n} \norm{D_x^j a}_{L_t^{\infty} \mathcal{F}L^1} \\
& \lesssim \sum_{j=2}^{2n} \varepsilon^{p+2n} \max\{\varepsilon^{j-2n} , \varepsilon^{j-2n} \varepsilon^{2p-2n-j-} \} \\
& \leq \max \{ \varepsilon^{p+2}, \varepsilon^{3p-2n- } \}.
\end{align}
where we need $3p -2n - \geq p$, hence $p>n$.
Thus, \eqref{eq2} is satisfied for sufficiently small $\varepsilon$ and $p>n$. Using Proposition \ref{prop Stab}, we deduce the existence of $u$ such that
\begin{align}
\norm{u - v}_{L_t^{\infty} \mathcal{F}L^1 ([-T,T] \times {\mathbb{R}}^d)} \lesssim \max \{ \varepsilon^{p+2}, \varepsilon^{3p-2n- } \}.
\end{align}
and Theorem \ref{thm Main} is proven.
\end{proof}
\nocite{*}
\end{document} |
\begin{document}
\begin{abstract}
We study the mean curvature flow of graphs both with Neumann boundary
conditions and transport terms. We derive boundary gradient estimates
for the mean curvature flow. As an application, the existence of the
mean curvature flow of graphs is presented. A key argument is a boundary
monotonicity formula of a Huisken type derived using reflected backward
heat kernels. Furthermore, we provide regularity conditions for the
transport terms.
\end{abstract}
\title{Gradient estimates for mean curvature flow with Neumann boundary
conditions}
\section{Introduction}
We consider the mean curvature flow of graphs with transport terms and Neumann
boundary conditions:
\begin{equation}
\label{eq:1.1}
\left\{
\begin{aligned}
\frac{\partial_tu}{\sqrt{1+|du|^2}}
&=\Div\left(\frac{du}{\sqrt{1+|du|^2}}\right)
+\vec{f}(x,u,t)\cdot
\vec{n}&\quad &x\in\Omega,\ t>0, \\
du\cdot\nu\big|_{\partial\Omega}&=0,& &t>0, \\
u(x,0)&=u_0(x),& &x\in\Omega,
\end{aligned}
\right.
\end{equation}
where $\Omega\subset\mathbb{R}^n$ is a bounded domain with a smooth boundary,
$\nu$ is an outer unit normal vector on $\partial\Omega$,
$u=u(x,t):\Omega\times[0,\infty)\rightarrow\mathbb{R}$ is an unknown function,
$du:=(\partial_{x_1}u,\ldots,\partial_{x_n}u)$ is the tangential
gradient of $u$, $u_0=u_0(x):\Omega\rightarrow\mathbb{R}$ is given initial
data, $\vec{f}:\Omega\times\mathbb{R}\times[0,\infty)\rightarrow\mathbb{R}^{n+1}$ is
a given transport term, and $\vec{n}=\frac1{\sqrt{1+|du|^2}}(-du,1)$.
For a
solution $u$ of \eqref{eq:1.1} and $t>0$, the graph of $u(x,t)$, which
is
\begin{equation}
\label{eq:1.2}
\Gamma_t:=\{(x,u(x,t)):x\in\Omega\},
\end{equation}
satisfies the mean curvature flow with the transport term, which is
subjected to right angle boundary conditions given by
\begin{equation}
\label{eq:1.3}
\left\{
\begin{aligned}
\vec{V}&=\vec{H}+(\vec{f}\cdot\vec{n})\vec{n},&\quad \text{on}\
\Gamma_t,\ &t>0, \\
\Gamma_t&\perp\partial(\Omega\times \mathbb{R}),& &t>0, \\
\end{aligned}
\right.
\end{equation}
where
$\vec{n}:=\frac1{\sqrt{1+|du|^2}}(-du,1)$ is the unit normal vector of
$\Gamma_t$,
$\vec{V}:=\frac{\partial_tu}{\sqrt{1+|du|^2}}\vec{n}$ is the normal
velocity vector of $\Gamma_t$, and
$\vec{H}:=\Div(\frac{du}{\sqrt{1+|du|^2}})\vec{n}$ is the mean curvature
vector of $\Gamma_t$ (see Figure~\ref{fig:1.1}).
It is interesting to derive the regularity criterion of the transport
term to obtain the classical solution of
\eqref{eq:1.1}.
Liu-Sato-Tonegawa~\cite{MR2956324} studied the following incompressible and viscous non-Newtonian two-phase fluid flow:
\begin{equation}
\label{eq:1.4}
\left\{
\begin{aligned}
&\frac{\partial \vec{f}}{\partial t} +\vec{f} \cdot \nabla \vec{f}
= \Div ( T^{\pm} (\vec{f},\Pi)) , \ \Div \vec{f} =0,
& \quad \text{on} \ \Omega ^{\pm} _t , \ & t>0,\\
&\vec{n} \cdot (T^+ (\vec{f} ,\Pi) -T^- (\vec{f} ,\Pi)) =\vec{H},
&\quad \text{on} \ \Gamma_t,\ &t>0, \\
&\vec{V}=\vec{H}+(\vec{f}\cdot\vec{n})\vec{n},&\quad \text{on}\
\Gamma_t,\ &t>0,
\end{aligned}
\right.
\end{equation}
where $\Omega ^+ _t \cup \Omega ^- _t \cup \Gamma _t =\mathbb{T}^{n+1}=(\mathbb{R} / \mathbb{Z})^{n+1}$, $\vec{f}$ is the velocity vector of the fluids, $T^\pm$ is the stress tensor of the fluids, and $\Pi$ is the pressure of the fluids. The physical background of \eqref{eq:1.4} was studied by Liu-Walkington~\cite{MR1857673}.
The phase
boundary $\Gamma _t$ moves by the fluid flow and its mean curvature. In
\eqref{eq:1.3}, the transport term is corresponding to the fluid
velocity of \eqref{eq:1.4}. Since the regularity of non-Newtonian fluid flow is still
a difficult problem, it is important to study regularity conditions to
obtain the classical solution of \eqref{eq:1.1}.
\begin{figure}
\caption{Mean curvature flow with the transport term $\vec{f}$.}
\label{fig:1.1}
\end{figure}
To study the behavior of $\Gamma_t$, we need to investigate
$v:=\sqrt{1+|du|^2}$, which is the volume element of $\Gamma_t$. Thus,
it is important to derive gradient estimates for
\eqref{eq:1.1}. Interior gradient estimates for \eqref{eq:1.1} under
$\vec{f}\equiv0$ were studied by Ecker-Huisken~\cite{MR1117150} when the
initial surface is $C^1$, and by Colding-Minicozzi II~\cite{MR2099114}
when $u_0$ is bounded. It is difficult to apply their arguments to
\eqref{eq:1.1} under non-smooth transport terms because their arguments
essentially use the comparison arguments.
Ecker-Huisken~\cite{MR1025164} also derived the interior gradient
estimates for \eqref{eq:1.1} under $\vec{f}\equiv0$. In their arguments,
monotonicity formula is crucial to show the gradient estimates.
Takasao~\cite{MR3058702} studied the interior gradient estimates for
\eqref{eq:1.1} when $u_0$ is $C^1$ and the transport $\vec{f}$ is
bounded in time and space variables. An essential part of Takasao's
proof is to derive the monotonicity formula of the Huisken type with the
bounded transport terms.
Huisken~\cite{MR0983300} studied \eqref{eq:1.1} with the Neumann
boundary condition and without the transport $\vec{f}$. He showed the
existence of a classical solution of \eqref{eq:1.1} under
$\vec{f}\equiv0$. To show the existence of the solution, it is important
to derive up-to-boundary a priori gradient estimates of
\eqref{eq:1.1}. Huisken showed the gradient estimates when the initial
data $u_0$ is $C^{2,\alpha}$ up to boundary and $\partial\Omega$ is of
class $C^{2,\alpha}$ via the Schauder estimates. Stahl~\cite{MR1393271}
also considered the gradient estimates of \eqref{eq:1.1} without the
transport and obtained some blow-up criterion of the classical solution
of~\eqref{eq:1.1} under $\vec{f}\equiv0$. Up-to-boundary a priori
gradient estimates of the mean curvature flow with the Neumann boundary
condition are studied by many researchers and we mention
\cite{MR1384396, MR2180601, MR3479560, 1602.03614, MR2886118,
MR1402731, MR1393271, MR1425580, MR2691040, 1405.7774, MR3200337,
MR3150205}.
Our arguments are similar to Ecker's or Takasao's work~\cite{MR2024995,
MR3058702}. In our setting, we need to derive a boundary monotonicity
formula for \eqref{eq:1.1}. From this point, Buckland~\cite{MR2180601}
obtained the boundary monotonicity formula for \eqref{eq:1.1} under
$\vec{f}\equiv0$. Takasao derived the monotonicity formula for
\eqref{eq:1.1} with transport terms in~\cite{MR3058702}, but the
condition was not optimal. On the other hand, reasonable conditions for
the transport terms for the regularity of weak mean curvature flow were
obtained in \cite{MR3194675, MR3176585}. In this paper, we obtain a
priori gradient estimates with reasonable conditions for the transport
terms.
Our problem \eqref{eq:1.1} imposes Neumann boundary conditions; thus,
up-to-the-boundary gradient estimates are also important. Mizuno and
Tonegawa~\cite{MR3348119} studied weak mean curvature flow with Neumann
boundary conditions via phase field methods. To study boundary behavior,
it was important to derive an $\varepsilon$-diffused boundary
monotonicity formula of a Huisken type via reflected backward heat
kernels (cf. \cite{MR1030675}, \cite{MR1237490}). Thus, it is also
important to derive the boundary monotonicity formula for \eqref{eq:1.1}
and determine the optimal regularity condition for the transport
terms. In this paper, we derive the boundary monotonicity formula for
\eqref{eq:1.1} and as an application, we derive a priori boundary
gradient estimates and prove the existence of a classical solution of
\eqref{eq:1.1}.
This paper is organized as follows. In section \ref{sec:2}, we present
basic notation and the main results. In section \ref{sec:3}, we derive
the boundary monotonicity formula for \eqref{eq:1.1}. In section
\ref{sec:4}, we derive the up-to-boundary gradient estimates for
\eqref{eq:1.1} and some integral estimates for the transport terms. In
section \ref{sec:5}, we prove the existence of the classical solution of
\eqref{eq:1.1}. In section \ref{sec:6}, we discuss some optimality for
the transport terms to obtain the gradient estimates for \eqref{eq:1.1}.
\section{Preliminaries and main results}
\label{sec:2}
\subsection{Notation}
Let $\vec{\nu}$ be an outer unit normal vector on
$\partial(\Omega\times\mathbb{R})$; $\vec{\nu}=(\nu,0)$. For $n$-dimensional
symmetric matrices $A$ and $B$, define the inner product $A:B$ as
$A:B=\tr(AB)$. Set $Q_T:=\Omega \times (0,T)$ and $Q_T^\varepsilon
:=\Omega \times (\varepsilon,T)$ for $0<\varepsilon <T$. Let $d$ and $D$
be the gradients on $\Omega$ and $\Omega\times\mathbb{R}$, respectively. Let
$D_{\Gamma_t}$ and $\Delta_{\Gamma_t}$ be the covariant differentiation and
Laplace-Beltrami operator on $\Gamma_t$, respectively. For a solution
$u$ of \eqref{eq:1.1}, let
$h:=-\Div\Big(\frac{du}{\sqrt{1+|du|^2}}\Big)$,
$v:=\sqrt{1+|du|^2}$. Then, equation \eqref{eq:1.1} becomes
\begin{equation}
\label{eq:2.4}
\partial_tu=-vh+(\vec{f}\cdot\vec{n})v.
\end{equation}
\subsection{Main results}
Let $T_0>0$ be fixed. We impose a regularity assumption on the transport term
such that for $p,q\geq1$ satisfying $\frac{n}p+\frac2q<1$,
\begin{equation}
\label{eq:2.3}
\|\vec{f}\|_{L^q_tL^p_x(0,T_0,\Gamma_t)}
:=
\left(
\int_0^{T_0}\left(
\int_{\Gamma_t}|\vec{f}(X,t)|^p\,d\mathscr{H}^n
\right)^\frac{q}{p}
dt
\right)^\frac1q
<\infty.
\end{equation}
\begin{remark}
Using the Meyer-Ziemer inequality (cf.
\cite[p. 266, Theorem 5.12.4]{MR1014685}),
\[
\int_{\Gamma_t}|\vec{f}(X,t)|^p\,d\mathscr{H}^n
\leq C\|\vec{f}(\cdot,t)\|^p_{W^{1,p}(\Omega\times\mathbb{R})};
\]
hence, our assumption \eqref{eq:2.3} is fulfilled if
$\vec{f}\in L^q([0,T_0]:W^{1,p}(\Omega\times\mathbb{R}))$.
\end{remark}
First we derive a priori gradient estimates for \eqref{eq:1.1}.
\begin{theorem}[A priori estimates for the gradient]
\label{thm:1}
Let $u$ be a classical solution of \eqref{eq:1.1} on
$\Omega\times(0,T_0)$. Assume that $\Omega$ is convex, $u_0\in
W^{1,\infty}(\Omega)$, and the transport term $\vec{f}$ satisfies
\eqref{eq:2.3}. Then there exists $T>0$
depending only on $n$, $p$, $q$, $T_0$, $\|du_0\|_\infty$,
$\|\vec{f}\|_{L^q_tL^p_x(0,T_0,\Gamma_t)}$, and $\Omega$
such that
\begin{equation}
\label{eq:2.2}
\sup_{0<t<T,x\in\overline{\Omega}}\sqrt{1+|du(x,t)|^2}
\leq 4
(1+\|du_0\|_\infty^2).
\end{equation}
\end{theorem}
The regularity assumption \eqref{eq:2.3} is reasonable from
blow-up arguments. Indeed, using the scale transform
\[
x=\lambda y,\quad t=\lambda^2s,\quad w(y,s)=\frac1\lambda u(x,t),
\]
we obtain
\[
\left\{
\begin{aligned}
\frac{\partial_sw}{\sqrt{1+|dw|^2}}
&=\Div\left(\frac{dw}{\sqrt{1+|dw|^2}}\right)
+\lambda\vec{f}(\lambda y,\lambda w,\lambda^2s)\cdot\dfrac{(-dw,1)}{\sqrt{1+|dw|^2}},
\\
dw&=du.
\end{aligned}
\right.
\]
Then
\[
\|\lambda\vec{f}(\lambda y,\lambda w,\lambda^2s)\|_{L^q_sL^p_y}
=\lambda^{1-\frac{n}{p}-\frac2q}\|\vec{f}\|_{L^q_tL^p_x}
\]
and $\|\lambda\vec{f}(\lambda y,\lambda
w,\lambda^2s)\|_{L^q_sL^p_y}\rightarrow0$ as $\lambda\rightarrow0$ if
\eqref{eq:2.3} is fulfilled; that is, the transport is a small
perturbation for blow-up arguments. Note that the regularity assumption
\eqref{eq:2.3} is the same as the assumption for the parabolic Allard's
regularity theory developed by
Kasai-Tonegawa~\cite{MR3194675,MR3176585}. Furthermore, our results
include results from the study by Takasao~\cite{MR3058702} because our
argument also applies to interior gradient estimates. We further explain
in Section \ref{sec:6} that
if \eqref{eq:2.3} is fulfilled for $\frac{n}{p}+\frac{2}{q}>1$, then
there is a solution of \eqref{eq:1.1} such that the gradient of the
solution is unbounded.
From the regularity estimate \eqref{eq:2.2}, the graph $\Gamma_t$
subjected to \eqref{eq:1.2} is a $C^1$-Riemannian manifold up to the
boundary. Furthermore, the graph $\Gamma_t$ is perpendicular to
$\partial\Omega\times\mathbb{R}$, which is the boundary of a cylinder
$\Omega\times\mathbb{R}$. In terms of partial differential equations, Theorem
\ref{thm:1} can be regarded as an up-to-the-boundary parabolic smoothing
effect for
$\partial_tu-\sqrt{1+|du|^2}\Div\Bigl(\frac{du}{\sqrt{1+|du|^2}}\Bigr)$.
The non-divergence elliptic differential operator
$-\sqrt{1+|du|^2}\Div\Bigl(\frac{du}{\sqrt{1+|du|^2}}\Bigr)$ is
degenerate; hence, regularity for solutions of \eqref{eq:1.1} is not
clear. When the gradient of solutions is bounded, the Schauder
estimates for \eqref{eq:1.1} are applicable; thus, the higher regularity of
solutions and the existence of a solution of \eqref{eq:1.1} can be
deduced. Theorem \ref{thm:1} also can be regarded as a parabolic
smoothing effect for the mean curvature operator. To summarize,
\eqref{eq:2.2} determines how we obtain regularity of the mean curvature
flow.
To obtain the gradient estimates via the comparison arguments, the
boundedness of $\|D\vec{f}\|_\infty$ is needed. On the other hand, to
obtain the gradient estimates via the monotonicity formula of the
Huisken type, the boundedness of $\|D\vec{f}\|_\infty$ is not needed.
Note that the idea using the weighted monotonicity formula is
called the noncompact maximum principle~\cite[Proposition 4.27]{MR2024995}. To
show the up-to-boundary monotonicity formula, we introduce reflected
backward heat kernels to compute the boundary integrals and derive
integral estimates for the transport terms under the assumption
\eqref{eq:2.3}.
Next, we demonstrate the existence of a classical solution of
\eqref{eq:1.1}. We assume parabolic H\"older continuity for $\vec{f}$;
that is, there is $\alpha\in(0,1]$ such that
\begin{equation}
\label{eq:2.1}
K:= \sup_{(X,t),(Y,s) \in (\Omega \times \mathbb{R}) \times (0,T_0) } \frac{|\vec{f}(X,t) -\vec{f}(Y,s)|}{|X-Y|^\alpha +|t-s|^{\alpha/2}}< \infty .
\end{equation}
\begin{theorem}[Existence of a classical solution]
\label{thm:2}
Assume that $\Omega$ is convex, $u_0\in W^{1,\infty}(\Omega)$ with
$du_0\cdot\nu\equiv0$ on $\partial\Omega$ and the transport term
$\vec{f}$ satisfies \eqref{eq:2.1} with some $\alpha\in(0,1]$. Then,
there exists a time local unique solution $u\in C(\overline{Q_T})\cap
C^{2,\alpha}(Q_T^{\varepsilon})$ for all $\varepsilon>0$ of
\eqref{eq:1.1} with $u(0)=u_0$ for some $0<T<T_0$. Furthermore, for
any $\varepsilon >0$ there exists $\Cl[const]{c-1.2}>0$ depending
only on $n,\alpha,L ,K $ such that
\begin{equation}
\|u\|_{C^{ 2,\alpha}(Q _T^\varepsilon )} \leq \Cr{c-1.2}.
\label{schauder}
\end{equation}
\end{theorem}
Theorem \ref{thm:2} is deduced from the Leray-Schauder fixed point
theorem for the linearized problem of \eqref{eq:1.1}. Theorem
\ref{thm:1} is employed as a priori gradient estimates for the
Leray-Schauder fixed point theorem. As a result of the gradient bounds,
the linearized problem of \eqref{eq:1.1}
can be computed in the same class as the uniformly elliptic operator;
hence, we can derive the Schauder estimates for \eqref{eq:1.1} and apply
the Leray-Schauder fixed point theorem.
\section{Monotonicity of the metric}
\label{sec:3}
Our first task is to establish the up-to-the-boundary
monotonicity formula of the Huisken type.
\begin{lemma}
\label{lem:3.2}
Let $u$ be a classical solution of \eqref{eq:1.1} and
$v:=\sqrt{1+|du|^2}$. Then
\begin{equation}
\label{eq:3.2}
\partial_tv-\Delta_{\Gamma_t}v
-\left(\frac{du}{v}\cdot dv\right)\frac{\partial_tu}{v}
=-|A_t|^2v-\frac{2|D_{\Gamma_t}v|^2}{v}
+du\cdot d(\vec{f}\cdot\vec{n}),
\end{equation}
where $D_{\Gamma_t}$, $\Delta_{\Gamma_t}$ and $|A_t|$ denote the covariant
differentiation on $\Gamma_t$, the Laplace-Beltrami operator, and the norm of the
second fundamental form of $\Gamma_t$, respectively.
\end{lemma}
\begin{proof}
According to Ecker-Huisken~\cite{MR0998603},
\[
-\Delta_{\Gamma_t}v+|A_t|^2v
+\frac{2|D_{\Gamma_t}v|^2}{v}
-v^2(D_{\Gamma_t}h\cdot \vec{e}_{n+1})=0,
\]
where $\vec{e}_{n+1}=(0,\ldots,0,1)$. Because
\[
\begin{split}
v^2(D_{\Gamma_t}h\cdot \vec{e}_{n+1})
&=v^2(Dh\cdot
\vec{e}_{n+1}-(Dh\cdot\vec{n})(\vec{n}\cdot\vec{e}_{n+1})) \\
&=dh\cdot du \\
&=-d\left(\frac{\partial_tu}{v}\right)\cdot du
+d(\vec{f}\cdot\vec{n})\cdot du \quad (\because \eqref{eq:1.1})\\
&=-\partial_tv
+\left(\frac{du}{v}\cdot dv\right)\frac{\partial_tu}{v}
+d(\vec{f}\cdot\vec{n})\cdot du,
\end{split}
\]
we obtain \eqref{eq:3.2}.
\end{proof}
Let
\[
R:=\frac{1}{\|\text{principal curvature of }\partial\Omega\|_{L^\infty(\partial\Omega)}}.
\]
Because $\partial\Omega$ is smooth and compact, $0<R<\infty$. For
$r<R$, let $N_r$ denote the interior tubular neighborhood of
$\partial\Omega$;
\[
N_r:=\{x\in\Omega:\dist(x,\partial\Omega)<r\}.
\]
For $x\in N_r$, there uniquely exists $\zeta(x)\in\partial\Omega$ such
that $\dist(x,\partial\Omega)=|x-\zeta(x)|$. Thus, define the reflection
point $\tilde{x}$ with respect to $\partial\Omega$ as
$\tilde{x}=2\zeta(x)-x$ (see Figure \ref{fig:3.1}). We fix a radially
symmetric cut-off function $\eta_R=\eta_R (|X|)\in C^\infty(\mathbb{R}^{n+1})$
such that
\[
0\leq \eta_R \leq 1,\quad
\eta'_R\leq 0,\quad
\supp\eta_R \subset B_{R/8},\quad
\eta_R =1\ \text{on}\ B_{R/16}.
\]
\begin{figure}
\caption{The reflection point of $x\in\Omega\cap N_r$ with respect to
$\partial\Omega$ is denoted by $\tilde{x}$.}
\label{fig:3.1}
\end{figure}
For $0<t<s<T_0$ and $X=(x,x_{n+1}),\ Y=(y,y_{n+1})\in N_R\times\mathbb{R}$, we
define the $n$-dimensional backward heat kernel $\rho_{(Y,s)}(X,t)$ and
reflected backward heat kernel $\tilde\rho_{(Y,s)}(X,t)$ as
\begin{equation}
\begin{split}
\rho_{(Y,s)}(X,t)
:=\frac{1}{(4\pi(s-t))^\frac{n}{2}}
\exp\left(-\frac{|X-Y|^2}{4(s-t)}\right), \\
\tilde\rho_{(Y,s)}(X,t)
:=\frac{1}{(4\pi(s-t))^\frac{n}{2}}
\exp\left(-\frac{|\tilde{X}-Y|^2}{4(s-t)}\right)
\end{split}
\end{equation}
where $\tilde{X}=(\tilde{x},x_{n+1})$. For fixed $0<t<s$ and $X,Y\in
N_R\times\mathbb{R}$, we define a truncated version of $\rho$ and $\tilde\rho$ as
\begin{equation}
\begin{split}
\rho_1
&=\rho_1(X,t)
:=\eta_R (X-Y)\rho_{(Y,s)}(X,t), \\
\rho_2
&=\rho_2(X,t)
:=\eta_R (\tilde{X}-Y)\tilde\rho_{(Y,s)}(X,t).
\end{split}
\end{equation}
To derive Huisken's monotonicity formula,
\begin{equation}
\label{eq:3.4}
\frac{(\vec{w}\cdot D\rho)^2}{\rho}
+((I-\vec{w}\otimes\vec{w}):D^2\rho)
+\partial_t\rho=0
\end{equation}
is the crucial identity, where $\rho=\rho_{(Y,s)}(X,t)$,
$\vec{w}\in\mathbb{R}^{n+1}$ is any unit vector, $I$ is the identity matrix,
$\vec{w}\otimes\vec{w}$ is tensor product, and
$(I-\vec{w}\otimes\vec{w}):D^2\rho$ is
$\tr((I-\vec{w}\otimes\vec{w})D^2\rho)$. In \cite{MR3348119}, a
similar identity for the reflected backward heat kernel
$\tilde\rho_{(Y,s)}$ was obtained.
\begin{lemma}
[\cite{MR3348119}]
\label{lem:3.1}
There is a constant $\Cl[const]{c-3.8}>0$ depending on $\Omega$
such that for $\vec{w}=(w_i)\in\mathbb{R}^{n+1}$ with $|\vec{w}|=1$ and
$\tilde\rho=\tilde\rho_{(Y,s)}(X,t)$,
\begin{equation}
\label{eq:3.5}
\frac{(\vec{w}\cdot D\tilde{\rho})^2}{\tilde{\rho}}
+((I-\vec{w}\otimes \vec{w}): D^2\tilde{\rho})
+\partial_t\tilde{\rho}
\leq\Cr{c-3.8}\left(
\frac{|\tilde{X}-Y|}{s-t}
+\frac{|\tilde{X}-Y|^3}{(s-t)^2}
\right)
\tilde{\rho}
\end{equation}
for $0<t<s$ and $X,Y\in N_{R/2}\times\mathbb{R}$.
\end{lemma}
To prove Lemma \ref{lem:3.1}, we use the following lemma.
\begin{lemma}
[cf.\cite{MR0397520,MR0863638}]
Let
\[
Q(X):=D\zeta(X)-(I-\vec{\nu}\otimes\vec{\nu}),
\]
where $\vec\nu$ is the unit normal vector at
$\zeta(X)\in\partial\Omega$. Then
\begin{enumerate}
\item $Q(X)$ is symmetric;
\item $Q(X)\vec{\nu}=\vec{0}$ for all $X\in N_{R/2}\times\mathbb{R}$;
\item $Q(X)\vec{e}_{n+1}=\vec{0}$ for all $X\in N_{R/2}\times\mathbb{R}$, where $\vec{e}_{n+1}=(0,\ldots,0,1)$;
\item $|Q(X)|\leq 2|X-\zeta(X)|$ for all $X\in N_{R/2}\times\mathbb{R}$;
\item If $\partial\Omega\in C^3$, then $|D Q|$ is bounded.
\end{enumerate}
\end{lemma}
For $X,Y\in N_{R/2}\times\mathbb{R}$, by convexity
\[
|X-\zeta(X)|
=\frac12|X-\tilde{X}|
\leq\frac12(|X-Y|+|Y-\tilde{X}|)
\leq|\tilde{X}-Y|,
\]
thus $|Q(X)|\leq 2|\tilde{X}-Y|$.
\begin{proof}
[Proof of Lemma \ref{lem:3.1}]
Since $D \zeta(X)=I-\vec{\nu}\otimes\vec{\nu} +Q(X)$ and $\tilde
X=2\zeta(X)-X$, we have
\begin{equation}
\label{eq:3.15}
\begin{split}
D|\tilde{X}-Y|^2&=2(I-2\vec{\nu}\otimes\vec{\nu}+2Q(X))(\tilde{X}-Y), \\
|D|\tilde{X}-Y|^2|^2&=4|(I-2\vec{\nu}\otimes\vec{\nu}+2Q(X))(\tilde{X}-Y)|^2 \\
&\leq4|\tilde{X}-Y|^2+\Cr{c-3.9}|\tilde{X}-Y|^3, \\
D_{ij}
|\tilde{X}-Y|^2&=2\delta_{ij}-4\sum_{k=1}^n (\partial_{X_j}(\nu_i
\nu_k)-\partial_{X_j}q_{ik})(\tilde X_k-Y_k) \\
&\quad+8q_{ij}+8\sum_{k=1}^{n}(q_{ik}q_{jk}-\nu_i\nu_kq_{jk}-\nu_j\nu_kq_{ik}),
\end{split}
\end{equation}
where $Q(X)=(q_{ij})$ and $\Cl[const]{c-3.9}>0$ is some constant
depending on $\Omega$. By direct calculation, we have
\begin{equation}
\label{eq:3.16}
\begin{split}
\partial_t\tilde\rho&=\left(\frac{n}{2(s-t)}-\frac{|\tilde{X}-Y|^2}{4(s-t)^2}\right)\tilde\rho, \ \
D\tilde\rho=-\frac{D|\tilde{X}-Y|^2}{{4(s-t)}}\tilde\rho, \\
D^2\tilde\rho
&=\left(\frac{D|\tilde{X}-Y|^2\otimes D|\tilde{X}-Y|^2}{16(s-t)^2}
-\frac{\tr(D^2|\tilde{X}-Y|^2)}{4(s-t)}
\right)\tilde\rho.
\end{split}
\end{equation}
Using \eqref{eq:3.15} and \eqref{eq:3.16}, we obtain \eqref{eq:3.5}.
\end{proof}
We next prove a weighted boundary monotonicity inequality.
\begin{lemma}
\label{lem:3.4}
Let $\phi\in C^1([0,\infty):C^2(\Omega))$ be a non-negative
function. Then there exist positive numbers $\Cl[const]{c-3.1}$,
$\Cl[const]{c-3.2}$ and $\Cl[const]{c-3.3}>0$ depending on $n$,
$\Omega$ such that
\begin{equation}
\label{eq:3.10}
\begin{split}
\frac{d}{dt}\int_{\Gamma_t}\phi(\rho_1+\rho_2)\,d\mathscr{H}^n
&\leq \int_{\Gamma_t}(\rho_1+\rho_2)
\left(\partial_t\phi-\Delta_{\Gamma_t}\phi-
\left(d\phi\cdot\frac{du}{v}\right)
\frac{\partial_tu}{v}\right)\,d\mathscr{H}^n \\
&\quad
+\frac14\int_{\Gamma_t}\phi(\rho_1+\rho_2)(\vec{f}\cdot\vec{n})^2
\,d\mathscr{H}^n \\
&\quad
+\Cr{c-3.1}\mathscr{H}^n (\Gamma _t)+\Cr{c-3.2}(s-t)^{-\frac34}\int_{\Gamma_t}\phi\rho_2
\,d\mathscr{H}^n \\
&\quad
+\Cr{c-3.3}\int_{\Gamma_t\cap\supp\rho_2}\phi
\,d\mathscr{H}^n
+\int_{\partial\Gamma_t}(\rho_1+\rho_2)(D_{\Gamma_t}\phi\cdot\vec{\nu})
\,d\mathscr{H}^{n-1}.
\end{split}
\end{equation}
\end{lemma}
\begin{proof}
For $i=1,2$
\begin{equation}
\label{eq:3.3}
\begin{split}
\frac{d}{dt}\int_{\Gamma_t}\phi\rho_i\,d\mathscr{H}^n
&=\frac{d}{dt}\int_{\Omega}\phi(x,t)\rho_i(x,u(x,t),t)v(x,t)\,dx \\
&=\int_{\Omega}
\Bigl(
\partial_t\phi(x,t)\rho_i(x,u(x,t),t) \\
&\qquad
+\phi(x,t)\partial_{x_{n+1}}\rho_i(x,u(x,t),t)\partial_tu(x,t)
+\partial_{t}\rho_i(x,u(x,t),t)
\Bigr)v(x,t)\,dx \\
&\quad+\int_{\Omega}\phi(x,t)
\rho_i(x,u(x,t),t)\partial_{t}v(x,t)\,dx \\
&=\int_{\Gamma_t}(\partial_t\phi\rho_i
+\phi\partial_t\rho_i
+\phi\partial_{x_{n+1}}\rho_i\partial_tu)\,
d\mathscr{H}^n \\
&\quad+\int_{\Omega}\phi(x,t)\rho_i(x,u(x,t),t)\frac{du(x,t)\cdot d(\partial_tu(x,t))}{v(x,t)}\,dx. \\
\end{split}
\end{equation}
We consider the last term of the equation
\eqref{eq:3.3}. Using integration by parts, we obtain
\[
\begin{split}
&\quad\int_{\Omega}\phi(x,t)\rho_i(x,u(x,t),t)
\frac{du(x,t)\cdot d(\partial_tu(x,t))}{v(x,t)}\,dx \\
&=-\int_\Omega\Div\left(
\phi(x,t)\rho_i(x,u(x,t),t)\frac{du(x,t)}{v(x,t)}\right)
\partial_tu(x,t)\,dx \\
&=-\int_\Omega
d\phi(x,t)\cdot\frac{du(x,t)}{v(x,t)}\rho_i(x,u(x,t),t)
\frac{\partial_tu(x,t)}{v(x,t)}v(x,t)\,dx \\
&\quad-\int_\Omega
\phi(x,t)\bigl(d\rho_i(x,u(x,t),t)
+\partial_{x_{n+1}}\rho_i(x,u(x,t),t)du(x,t)\bigr)
\cdot\frac{du(x,t)}{v(x,t)}
\frac{\partial_tu(x,t)}{v(x,t)}v(x,t)\,dx\\
&\quad-\int_\Omega
\phi(x,t)\rho_i(x,u(x,t),t)\Div\left(\frac{du(x,t)}{v(x,t)}\right)
\frac{\partial_tu(x,t)}{v(x,t)}v(x,t)\,dx \\
&=-\int_{\Gamma_t}\Bigl(d\phi\cdot\frac{du}{v}\Bigr)
\rho_i\frac{\partial_tu}{v}\,d\mathscr{H}^n
-\int_{\Gamma_t}\phi
\Bigl((d\rho_i+\partial_{x_{n+1}}\rho_idu)\cdot\frac{du}{v}\Bigr)
\frac{\partial_tu}{v}\,d\mathscr{H}^n \\
&\quad+\int_{\Gamma_t}\phi\rho_ih\frac{\partial_tu}{v}\,d\mathscr{H}^n.
\end{split}
\]
We note that
\[
\begin{split}
\partial_{{x_{n+1}}}\rho_iv
-\partial_{{x_{n+1}}}\rho_i\frac{|du|^2}{v}
-d\rho_i\cdot\frac{du}{v}
&=\partial_{{x_{n+1}}}\rho_i\frac{1}{v}
-d\rho_i\cdot\frac{du}{v}=(D\rho_i\cdot\vec{n}).
\end{split}
\]
Hence, we obtain
\[
\begin{split}
\frac{d}{dt}\int_{\Gamma_t}\phi\rho_i\,d\mathscr{H}^n
&=\int_{\Gamma_t}\partial_t\phi\rho_i\,d\mathscr{H}^n
+\int_{\Gamma_t}\phi\partial_t\rho_i\,d\mathscr{H}^n
+\int_{\Gamma_t}\phi(D\rho_i\cdot\vec{n})\frac{\partial_tu}{v}
\,d\mathscr{H}^n \\
&\quad-\int_{\Gamma_t}\Bigl(d\phi\cdot\frac{du}{v}\Bigr)\rho_i\frac{\partial_tu}{v}
\,d\mathscr{H}^n
+\int_{\Gamma_t}\phi\rho_ih\frac{\partial_tu}{v}\,d\mathscr{H}^n.
\end{split}
\]
Using \eqref{eq:1.1} or \eqref{eq:2.4},
\[
\begin{split}
\left(\phi(D\rho_i\cdot\vec{n})
+\phi\rho_ih\right)\frac{\partial_tu}{v}
&=\left(\phi(D\rho_i\cdot\vec{n})
+\phi\rho_ih\right)(-h+\vec{f}\cdot\vec{n}) \\
&=\phi(D\rho_i\cdot\vec{H})
-\phi\rho_i|\vec{H}|^2
+\phi\rho_i\left(
\left(\frac{D^\perp\rho_i}{\rho_i}-\vec{H}\right)\cdot\vec{n}\right)
(\vec{f}\cdot\vec{n}) \\
&=-\phi\rho_i\left|\vec{H}-\frac{D^\perp\rho_i}{\rho_i}\right|^2
+\phi\frac{|D^\perp\rho_i|^2}{\rho_i}
-\phi(D^\perp\rho_i\cdot\vec{H}) \\
&\quad+\phi\rho_i\left(
\left(\frac{D^\perp\rho_i}{\rho_i}-\vec{H}\right)\cdot\vec{n}\right)
(\vec{f}\cdot\vec{n}) \\
&\leq\phi\frac{|D^\perp\rho_i|^2}{\rho_i}
-\phi(D^\perp\rho_i\cdot\vec{H})
+\frac14\phi\rho_i(\vec{f}\cdot\vec{n})^2,
\end{split}
\]
where $\vec{H}=-h\vec{n}$ and $D^\perp \rho _i = (D\rho _i \cdot \vec{n}) \vec{n}$ are used. Therefore,
\[
\begin{split}
\frac{d}{dt}\int_{\Gamma_t}\phi\rho_i\,d\mathscr{H}^n
&\leq\int_{\Gamma_t}\partial_t\phi\rho_i\,d\mathscr{H}^n
-\int_{\Gamma_t}\Bigl(d\phi\cdot\frac{du}{v}\Bigr)\rho_i\frac{\partial_tu}{v}
\,d\mathscr{H}^n \\
&\quad
+\int_{\Gamma_t}\phi
\left(\partial_t\rho_i+\frac{|D^\perp\rho_i|^2}{\rho_i}
-(D^\perp\rho_i\cdot\vec{H})\right)
\,d\mathscr{H}^n \\
&\quad
+\frac14\int_{\Gamma_t}\phi\rho_i(\vec{f}\cdot\vec{n})^2\,d\mathscr{H}^n.
\end{split}
\]
According to the divergence theorem on $\Gamma_t$,
\[
\begin{split}
-\int_{\Gamma_t}\phi(D^\perp\rho_i\cdot\vec{H})\,d\mathscr{H}^n
&=-\int_{\Gamma_t}\phi(D\rho_i\cdot\vec{H})\,d\mathscr{H}^n \\
&=\int_{\Gamma_t}\Div_{\Gamma_t}(\phi D\rho_i)\,d\mathscr{H}^n
-\int_{\partial\Gamma_t}\phi(D\rho_i\cdot\vec{\nu})\,d\mathscr{H}^{n-1} \\
&=\int_{\Gamma_t}D_{\Gamma_t}\phi\cdot D\rho_i\,d\mathscr{H}^n
+\int_{\Gamma_t}\phi
((I-\vec{n}\otimes\vec{n}):D^2\rho_i)\,d\mathscr{H}^n \\
&\quad
-\int_{\partial\Gamma_t}\phi(D\rho_i\cdot\vec{\nu})\,d\mathscr{H}^{n-1} \\
&=-\int_{\Gamma_t}\rho_i\Delta_{\Gamma_t}\phi\,d\mathscr{H}^n
+\int_{\Gamma_t}\phi
((I-\vec{n}\otimes\vec{n}):D^2\rho_i)\,d\mathscr{H}^n \\
&\quad
+\int_{\partial\Gamma_t}(\rho_i(D_{\Gamma_t}\phi\cdot\vec{\nu})-\phi(D\rho_i\cdot\vec{\nu}))\,d\mathscr{H}^{n-1}. \\
\end{split}
\]
Using \eqref{eq:3.4} and \eqref{eq:3.5}, we obtain
\begin{equation}
\label{eq:3.6}
\frac{|D^\perp\rho_1|^2}{\rho_1}
+((I-\vec{n}\otimes\vec{n}):D^2\rho_1)
+\partial_t\rho_1\leq \Cr{c-3.4}
\end{equation}
and
\begin{equation}
\label{eq:3.7}
\frac{|D^\perp\rho_2|^2}{\rho_2}
+((I-\vec{n}\otimes\vec{n}):D^2\rho_2)
+\partial_t\rho_2
\leq
\Cr{c-3.5}
\left(
\frac{|\tilde{X}-Y|}{s-t}
+\frac{|\tilde{X}-Y|^3}{(s-t)^2}
\right)
\rho_2
+\Cr{c-3.6}
\end{equation}
for some constants $\Cl[const]{c-3.4}$, $\Cl[const]{c-3.5}$,
$\Cl[const]{c-3.6}>0$ depending on $\Omega$.
To compute the integration of \eqref{eq:3.7}, we decompose the
integration as
\[
\begin{split}
&\quad\int_{\Gamma_t}\phi\frac{\Cr{c-3.5}|\tilde{X}-Y|}{s-t}\rho_2\,d\mathscr{H}^n \\
&\leq\int_{\Gamma_t\cap\{|\tilde{X}-Y|\leq(s-t)^\frac14\}}
\phi\frac{\Cr{c-3.5}|\tilde{X}-Y|}{s-t}\rho_2\,d\mathscr{H}^n \\
&\quad+\int_{\Gamma_t\cap\{|\tilde{X}-Y|\geq(s-t)^\frac14\}}
\phi\frac{\Cr{c-3.5}|\tilde{X}-Y|}{s-t}\rho_2\,d\mathscr{H}^n \\
&=:I_1+I_2,
\end{split}
\]
and
\[
\begin{split}
&\quad\int_{\Gamma_t}\phi\frac{\Cr{c-3.5}|\tilde{X}-Y|^3}{(s-t)^2}\rho_2\,d\mathscr{H}^n \\
&\leq\int_{\Gamma_t\cap\{|\tilde{X}-Y|\leq(s-t)^\frac{5}{12}\}}
\phi\frac{\Cr{c-3.5}|\tilde{X}-Y|^3}{(s-t)^2}\rho_2\,d\mathscr{H}^n \\
&\quad+\int_{\Gamma_t\cap\{|\tilde{X}-Y|\geq(s-t)^\frac{5}{12}\}}
\phi\frac{\Cr{c-3.5}|\tilde{X}-Y|^3}{(s-t)^2}\rho_2\,d\mathscr{H}^n \\
&=:I_3+I_4.
\end{split}
\]
$I_1$ is estimated by
\begin{equation}
\label{eq:3.8}
I_1
\leq \Cr{c-3.5}(s-t)^{-\frac34}
\int_{\Gamma_t\cap\{|\tilde{X}-Y|\leq(s-t)^\frac14\}}
\phi\rho_2\,d\mathscr{H}^n
\leq \Cr{c-3.5}(s-t)^{-\frac34}\int_{\Gamma_t}\phi\rho_2\,d\mathscr{H}^n.
\end{equation}
$I_2$ is estimated by
\begin{equation}
\label{eq:3.9}
I_2
\leq\frac{\Cr{c-3.5}}{(s-t)^{1+\frac{n}{2}}}e^{-\frac{1}{4\sqrt{s-t}}}
\int_{\Gamma_t\cap\supp\rho_2}
\phi|\tilde{X}-Y|\,d\mathscr{H}^n
\leq\Cr{c-3.7}R
\int_{\Gamma_t\cap\supp\rho_2}
\phi
\,d\mathscr{H}^n
\end{equation}
for some constant $\Cl[const]{c-3.7}>0$ depending on $n$ and
$\Omega$. $I_3$ and $I_4$ are estimated in a similar manner.
Using \eqref{eq:3.8}, \eqref{eq:3.9},
$D(\rho_1+\rho_2)\cdot\vec{\nu}\big|_{\partial\Omega}\equiv0$, and $|\tilde{X}-Y|\leq R$ when $X \in \supp{\rho _2}$, we compute
\[
\begin{split}
&\quad\frac{d}{dt}\int_{\Gamma_t}\phi(\rho_1+\rho_2)\,d\mathscr{H}^n \\
&\leq\int_{\Gamma_t}(\rho_1+\rho_2)
\left(\partial_t\phi-\Delta_{\Gamma_t}\phi
-\left(d\phi\cdot\frac{du}{v}\right)\frac{\partial_tu}{v}\right)\,
d\mathscr{H}^n \\
&\quad+\frac14\int_{\Gamma_t}(\rho_1+\rho_2)
\phi(\vec{f}\cdot\vec{n})^2\,d\mathscr{H}^n \\
&\quad+(\Cr{c-3.4}+\Cr{c-3.6})\mathscr{H}^n(\Gamma_t)
+2\Cr{c-3.5}(s-t)^{-\frac34}\int_{\Gamma_t}\phi\rho_2\,d\mathscr{H}^n \\
&\quad+\Cr{c-3.7}(R+R^3)\int_{\Gamma_t\cap\supp\rho_2}
\phi \,d\mathscr{H}^n
+\int_{\partial\Gamma_t}
(\rho_1+\rho_2)(D_{\Gamma_t}\phi\cdot\vec{\nu})
\,d\mathscr{H}^{n-1}.
\end{split}
\]
For $\Cr{c-3.1}=\Cr{c-3.4}+\Cr{c-3.6}$, $\Cr{c-3.2}=2\Cr{c-3.5}$,
and $\Cr{c-3.3}=\Cr{c-3.7}(R+R^3)$,
we obtain \eqref{eq:3.10}.
\end{proof}
We use the following lemma to handle the boundary integral.
\begin{lemma}
Let $u$ be a classical solution of \eqref{eq:1.1} and
$v:=\sqrt{1+|du|^2}$. If $\Omega$ is convex, then
\begin{equation}
\label{eq:3.11}
(D_{\Gamma_t}v\cdot\vec{\nu})|_{\partial(\Omega\times\mathbb{R})}\leq0
\end{equation}
for all $t>0$.
\end{lemma}
\begin{proof}
Because
\[
\begin{split}
D_{\Gamma_t}v&=Dv-(Dv\cdot\vec{n})\vec{n} \\
&=(dv,0)+\frac1{v^2}(dv\cdot du)(-du,1),
\end{split}
\]
and boundary condition of $u$,
\[
\begin{split}
(D_{\Gamma_t}v\cdot\vec{\nu})|_{\partial(\Omega\times\mathbb{R})}
&=\left((dv\cdot\nu)+\frac{1}{v^2}(dv\cdot du)(-du\cdot\nu)\right)\bigg|_{\partial\Omega} \\
&=\frac{1}{2v}d|du|^2\cdot\nu|_{\partial\Omega} \\
&=\frac{1}{v}B(du,du)|_{\partial\Omega},
\end{split}
\]
where $B$ is the second fundamental form of $\partial\Omega$. Because
of the convexity of $\Omega$, $B(du,du)\leq0$.
\end{proof}
Using \eqref{eq:3.2}, \eqref{eq:3.10}, and \eqref{eq:3.11}, monotonicity
of the metric is obtained as follows:
\begin{proposition}
\label{prop:3.6}
Let $u$ be a classical solution of \eqref{eq:1.1} and
$v:=\sqrt{1+|du|^2}$. Then
for $Y\in N_{R/4}\times\mathbb{R}$ and $0<t<s$,
\begin{equation}
\label{eq:3.12}
\begin{split}
&\quad\frac{d}{dt}\int_{\Gamma_t}v(\rho_1+\rho_2)\,d\mathscr{H}^n \\
&\leq -\int_{\Gamma_t}(\rho_1+\rho_2)
\left(|A_t|^2v
+\frac{2|D_{\Gamma_t}v|^2}{v}
-du\cdot d(\vec{f}\cdot\vec{n})\right)
\,d\mathscr{H}^n \\
&\quad
+\frac14\int_{\Gamma_t}v(\rho_1+\rho_2)(\vec{f}\cdot\vec{n})^2
\,d\mathscr{H}^n \\
&\quad
+\Cr{c-3.1}\mathscr{H}^n(\Gamma _t) +\Cr{c-3.2}(s-t)^{-\frac34}\int_{\Gamma_t}v(\rho_1+\rho_2)
\,d\mathscr{H}^n \\
&\quad
+\Cr{c-3.3}\int_{\Gamma_t\cap\supp\rho_2}v
\,d\mathscr{H}^n
\end{split}
\end{equation}
where $\Cr{c-3.1}$, $\Cr{c-3.2}$, $\Cr{c-3.3}$ are constants as in
Lemma \ref{lem:3.4}.
\end{proposition}
\section{Gradient estimates}
\label{sec:4}
We deduce the integral estimates for the transport terms.
\begin{lemma}
\label{lem:4.1}
Let $\vec{f}$ be in $ L^q_tL^p_x(0,T_0,\Gamma_t)$ with
$1-\frac{n}{p}-\frac2q>0$. Let $u$ be a classical solution of
\eqref{eq:1.1} and $v:=\sqrt{1+|du|^2}$. Let $\eta\in L^\infty(0,T_0)$
be a nonnegative function. Then there is a constant
$\Cl[const]{c-4.1}>0$
depending
only on $n$, $p$, $q$ and $T_0$ such that
\begin{equation}
\label{eq:4.1}
\begin{split}
&\qquad\int_0^\tau \eta\,dt\int_{\Gamma_t}(\rho_1+\rho_2)
du\cdot d(\vec{f}\cdot\vec{n})
\,d\mathscr{H}^n \\
&\quad+\frac14\int_0^\tau \eta\, dt\int_{\Gamma_t}v(\rho_1+\rho_2)(\vec{f}\cdot\vec{n})^2
\,d\mathscr{H}^n \\
&\leq \frac12
\int_0^\tau\eta\,dt
\int_{\Gamma_t}
(\rho_1+\rho_2)|A_t|^2v
\,d\mathscr{H}^n
+\int_0^\tau\eta\,dt\int_{\Gamma_t}
(\rho_1+\rho_2)\frac{|D_{\Gamma_t}v|^2}{v}
\,d\mathscr{H}^n \\
&\quad+\Cr{c-4.1}
\|\eta\|_{L^\infty(0,T_0)}
\|v\|_{L^\infty(\Omega\times(0,\tau))}^3
\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)}
(1+\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)})
\end{split}
\end{equation}
for $0<\tau<s$, where
\[
\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)}
:=
\left(
\int_0^\tau\left(
\int_{\Gamma_t}|\vec{f}(X,t)|^p\,d\mathscr{H}^n
\right)^\frac{q}{p}
dt
\right)^\frac1q.
\]
\end{lemma}
\begin{proof}
For simplicity, set $\bar{\rho}:=\rho_1+\rho_2$. Then
\[
\begin{split}
&\quad\int_{\Gamma_t}(\rho_1+\rho_2)
(du\cdot d(\vec{f}\cdot\vec{n}))
\,d\mathscr{H}^n \\
&=\int_{\Omega}\bar{\rho}
(du\cdot d(\vec{f}\cdot\vec{n}))
v\,dx \\
&=-\int_{\Omega}
(\bar{\rho}\Delta uv+(du\cdot d(\bar{\rho}(x,u,t)))v+\bar{\rho}(du\cdot dv))
(\vec{f}\cdot\vec{n})\,dx \\
&=-\int_{\Gamma_t}
\biggl(\bar{\rho}\Delta u+(du\cdot d(\bar{\rho}(x,u,t)))
+\bar{\rho}\Bigl(\frac{du}{v}\cdot dv\Bigr)\biggr)
(\vec{f}\cdot\vec{n})\,d\mathscr{H}^n.
\end{split}
\]
Here
\[
h=-\Div\left(\frac{du}{v}\right)=-\frac1{v}\Delta
u+\frac{1}{v^2}(du\cdot dv);
\]
hence,
\[
\begin{split}
\int_{\Gamma_t}\bar{\rho}
du\cdot d(\vec{f}\cdot\vec{n})
\,d\mathscr{H}^n
&=\int_{\Gamma_t}\bar{\rho} vh(\vec{f}\cdot\vec{n})\,d\mathscr{H}^n \\
&\quad-2\int_{\Gamma_t}\bar{\rho}
\Bigl(\frac{du}{v}\cdot dv\Bigr)
(\vec{f}\cdot\vec{n})\,d\mathscr{H}^n \\
&\quad-\int_{\Gamma_t}(du\cdot d(\bar{\rho}(x,u,t)))
(\vec{f}\cdot\vec{n})\,d\mathscr{H}^n \\
&=:I_1+I_2+I_3.
\end{split}
\]
$I_1$ is estimated by
\begin{equation}
\label{eq:4.2}
\begin{split}
|I_1|
&\leq\frac1{2n}\int_{\Gamma_t}\bar{\rho} h^2v\,d\mathscr{H}^n
+\frac{n}{2}\int_{\Gamma_t}\bar{\rho}
v(\vec{f}\cdot\vec{n})^2\,d\mathscr{H}^n \\
&\leq\frac1{2}\int_{\Gamma_t}\bar{\rho} |A_t|^2v\,d\mathscr{H}^n
+\frac{n}{2}\int_{\Gamma_t}\bar{\rho}
v(\vec{f}\cdot\vec{n})^2\,d\mathscr{H}^n
\end{split}
\end{equation}
because $h^2\leq n|A_t|^2$.
Note that $D_{\Gamma_t}v=Dv-(Dv\cdot\vec{n})\vec{n}$,
\[
\begin{split}
|D_{\Gamma_t}v|^2
&=|Dv|^2-(Dv\cdot\vec{n})^2 \\
&=|dv|^2-\frac{1}{v^2}(du\cdot dv)^2 \quad (\because Dv=(dv,0))\\
&\geq|dv|^2-\frac{1}{v^2}|du|^2|dv|^2 \\
&=\frac1{v^2}|dv|^2.
\end{split}
\]
Therefore,
\begin{equation}
\label{eq:4.3}
\begin{split}
|I_2|
&\leq\int_{\Gamma_t}\bar{\rho}
\frac{|du|^2|dv|^2}{v^5}\,d\mathscr{H}^n
+\int_{\Gamma_t}\bar{\rho} v^3(\vec{f}\cdot\vec{n})^2\,d\mathscr{H}^n \\
&\leq\int_{\Gamma_t}\bar{\rho}
\frac{|D_{\Gamma_t}v|^2}{v}\,d\mathscr{H}^n
+\int_{\Gamma_t}\bar{\rho} v^3(\vec{f}\cdot\vec{n})^2\,d\mathscr{H}^n. \\
\end{split}
\end{equation}
In the following, we derive the integral estimates for the transport terms.
Using the H\"older inequality,
\[
\begin{split}
&\quad\left|\int_0^\tau\eta\,dt\int_{\Gamma_t}
\bar{\rho}(\vec{f}\cdot\vec{n})^2v^3
\,d\mathscr{H}^n \right| \\
&\leq\|\eta\|_{L^\infty(0,T_0)}
\|v\|_{L^\infty(\Omega\times(0,\tau))}^3
\left(\int_0^s\,dt
\left(\int_{\Gamma_t}\bar{\rho}^{p'}\,d\mathscr{H}^n\right)^\frac{q'}{p'}
\right)^\frac1{q'}
\|\vec{f}\|^2_{L^q_tL^p_x(0,\tau,\Gamma_t)},
\end{split}
\]
where $\frac2p+\frac1{p'}=1$ and $\frac2q+\frac1{q'}=1$.
Using the convexity of $\Omega$,
$|\tilde{X}-Y|\geq|X-Y|$; hence,
\[
\begin{split}
\int_{\Gamma_t}\bar{\rho}^{p'}\,d\mathscr{H}^n
&\leq \frac{2^{p'-1}}{(4\pi(s-t))^{\frac{np'}{2}}}
\int_{\Gamma_t}\exp\left(-\frac{p'|X-Y|^2}{4(s-t)}\right)\,d\mathscr{H}^n \\
&\leq \Cr{c-4.2}(s-t)^{-\frac{np'}{2}+\frac{n}{2}},
\end{split}
\]
where $\Cl[const]{c-4.2}>0$ is some constant depending only on $n$ and
$p$. Therefore,
\[
\left(\int_0^s\,dt
\left(\int_{\Gamma_t}\bar{\rho}^{p'}\,d\mathscr{H}^n\right)^\frac{q'}{p'}
\right)^\frac1{q'} <\infty
\]
if $-\frac{nq'}2+\frac{nq'}{2p'}>-1$, which provides
$1-\frac{n}{p}-\frac{2}{q}>0$. Using \eqref{eq:4.2} and \eqref{eq:4.3}, we
obtain
\begin{equation}
\label{eq:4.6}
\begin{split}
\int_0^\tau\eta(|I_1|+|I_2|)\,dt
&\leq
\frac12
\int_0^\tau\eta\,dt
\int_{\Gamma_t}
\bar{\rho}|A_t|^2v
\,d\mathscr{H}^n \\
&\quad+\int_0^\tau\eta\,dt\int_{\Gamma_t}
\bar{\rho}\frac{|D_{\Gamma_t}v|^2}{v}
\,d\mathscr{H}^n \\
&\quad+
\Cr{c-4.3}\|\eta\|_{L^\infty(0,T_0)}
\|v\|_{L^\infty(\Omega\times(0,\tau))}^3
\|\vec{f}\|^2_{L^q_tL^p_x(0,\tau,\Gamma_t)}
\end{split}
\end{equation}
for a positive constant $\Cl[const]{c-4.3}>0$ depending only on $n$,
$p$, $q$ and $T_0$.
Because
\[
|du\cdot d(\bar{\rho}(x,u,t))|
=|du\cdot d\bar{\rho}+|du|^2\bar{\rho}_{x_{n+1}}|
\leq v^2|D\bar{\rho}|,
\]
we obtain
\[
|I_3|\leq\int_{\Gamma_t}v^2|D\bar{\rho}||\vec{f}\cdot\vec{n}|\,d\mathscr{H}^n.
\]
Then using the H\"older
inequality,
\begin{equation}
\label{eq:4.5}
\begin{split}
\int_0^\tau\eta|I_3|\,dt
&\leq
\|\eta\|_{L^\infty(0,T_0)}
\|v\|_{L^\infty(\Omega\times(0,\tau))}^2\\
&\quad\times
\left(\int_0^s\,dt
\left(\int_{\Gamma_t}|D\bar{\rho}|^{p'}\,d\mathscr{H}^n\right)^\frac{q'}{p'}
\right)^\frac1{q'}
\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)},
\end{split}
\end{equation}
where $\frac1p+\frac1{p'}=1$ and $\frac1q+\frac1{q'}=1$.
Using the convexity of $\Omega$,
\[
|D\bar{\rho}|\leq \Cr{c-4.4}\frac{1}{(s-t)^{\frac12+\frac{n}2}}\exp\left(-\frac{|X-Y|^2}{8(s-t)}\right),
\]
where $\Cl[const]{c-4.4}>0$ is some constant depending only on $n$ and
$p$. Therefore,
\[
\int_{\Gamma_t}|D\bar{\rho}|^{p'}\,d\mathscr{H}^n
\leq \Cr{c-4.4}(s-t)^{-\frac{p'}2-\frac{np'}{2}+\frac{n}{2}};
\]
hence,
\[
\left(\int_0^s\,dt
\left(\int_{\Gamma_t}|D\bar{\rho}|^{p'}\,d\mathscr{H}^n\right)^\frac{q'}{p'}
\right)^\frac1{q'}
<\infty
\]
if $-\frac{q'}2-\frac{nq'}{2}+\frac{nq'}{2p'}>-1$, which provides
$1-\frac{n}p-\frac2q>0$. Therefore, using \eqref{eq:4.5} we obtain
\begin{equation}
\label{eq:4.7}
\int_0^\tau\eta|I_3|\,dt
\leq \Cr{c-4.6}
\|\eta\|_{L^\infty(0,T_0)}
\|v\|_{L^\infty(\Omega\times(0,\tau))}^2
\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)}
\end{equation}
for some constant $\Cl[const]{c-4.6}>0$ depending only on $n$,
$p$, $q$ and $T_0$.
Combining \eqref{eq:4.6} and \eqref{eq:4.7}, we obtain \eqref{eq:4.1}.
\end{proof}
\begin{proof}
[Proof of Theorem \ref{thm:1}]
Let $T\in(0,T_0)$.
We denote
\[
M_T := \sup _{0<t <T} \| v(\cdot,t) \|
_{L^\infty(\Omega)}.
\]
We first consider the interior gradient estimates. By arguments similar
to those in Proposition \ref{prop:3.6} and Lemma \ref{lem:4.1} with
$\eta\equiv1$, for $Y=(y,y_{n+1})\in (\Omega\setminus N_{R/6}) \times\mathbb{R}$ and
$0<\tau <s\leq T$,
\begin{equation}
\label{eq:4.8}
\begin{split}
&\quad\frac{d}{dt}\int_{\Gamma_t}v\rho_1 \,d\mathscr{H}^n \\
&\leq -\int_{\Gamma_t} \rho_1
\left(|A_t|^2v
+\frac{2|D_{\Gamma_t}v|^2}{v}
-du\cdot d(\vec{f}\cdot\vec{n})\right)
\,d\mathscr{H}^n \\
&\quad
+\frac14\int_{\Gamma_t}v \rho_1 (\vec{f}\cdot\vec{n})^2
\,d\mathscr{H}^n
+\Cr{c-3.1}\mathscr{H}^n(\Gamma _t)
\end{split}
\end{equation}
and
\begin{equation}
\label{eq:4.9}
\begin{split}
&\ \int_0^\tau\,dt\int_{\Gamma_t}\rho_1
du\cdot d(\vec{f}\cdot\vec{n})
\,d\mathscr{H}^n
+\frac14\int_0^\tau\,dt\int_{\Gamma_t}v\rho_1(\vec{f}\cdot\vec{n})^2
\,d\mathscr{H}^n \\
&\leq \frac12
\int_0^\tau\,dt
\int_{\Gamma_t}
\rho_1|A_t|^2v
\,d\mathscr{H}^n
+\int_0^\tau\,dt\int_{\Gamma_t}
\rho_1\frac{|D_{\Gamma_t}v|^2}{v}
\,d\mathscr{H}^n \\
&\quad+\Cr{c-4.1}\|v\|_{L^\infty(\Omega\times(0,\tau))}^3
\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)}(1+\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)})
\end{split}
\end{equation}
where the positive constants $\Cr{c-3.1}$ and $\Cr{c-4.1}$ are the same as
in Lemma \ref{lem:3.4} and Lemma \ref{lem:4.1}, respectively.
Using \eqref{eq:4.8} and \eqref{eq:4.9} we have
\begin{equation*}
\begin{split}
&\quad\int_{\Gamma_\tau}v(x,\tau)\rho_1(X,\tau) \,d\mathscr{H}^n
-\int_{\Gamma_0}v(x,0)\rho_1(X,0) \,d\mathscr{H}^n \\
&\leq \Cr{c-3.1} \int _ 0 ^{\tau} \int _\Omega v \, dx dt
+\Cr{c-4.1}\|v\|_{L^\infty(\Omega\times(0,\tau))}^3\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)}(1+\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)})
\\
&\leq \Cr{c-3.1}|\Omega| M_Ts
+\Cr{c-4.1}M_T^3\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}(1+\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}).
\end{split}
\end{equation*}
Passing $\tau\to s$ we obtain
\begin{equation}
\label{eq:4.11}
\begin{split}
v(y,s)
&\leq \int _\Omega v^2 _0 \rho_1 (X,0) \, dx
+\Cr{c-3.1}|\Omega| M_Ts
\\
&\quad+\Cr{c-4.1}M_T^3\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}(1+\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)})
\\
&\leq \|v_0\|_\infty^2
+\Cr{c-3.1}|\Omega| M_Ts
+\Cr{c-4.1}M_T^3\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}
(1+\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)})
\end{split}
\end{equation}
where $v_0:=\sqrt{1+|du_0|^2}$.
Next we consider the boundary gradient estimates. By Proposition
\ref{prop:3.6}, for $Y=(y,y_{n+1})\in N_{R/4}\times\mathbb{R}$, $0<\tau<s\leq T$, and
a non-negative function $\eta=\eta(t)\in L^\infty(0,T_0)$,
\begin{equation*}
\begin{split}
&\quad\eta\frac{d}{dt}\int_{\Gamma_t}v(\rho_1+\rho_2)\,d\mathscr{H}^n \\
&\leq -\eta\int_{\Gamma_t}(\rho_1+\rho_2)
\left(|A_t|^2v
+\frac{2|D_{\Gamma_t}v|^2}{v}
-du\cdot d(\vec{f}\cdot\vec{n})\right)
\,d\mathscr{H}^n \\
&\quad
+\frac\eta4\int_{\Gamma_t}v(\rho_1+\rho_2)(\vec{f}\cdot\vec{n})^2
\,d\mathscr{H}^n \\
&\quad
+\Cr{c-3.1}\eta\mathscr{H}^n(\Gamma _t)
+\Cr{c-3.2}\eta(s-t)^{-\frac34}\int_{\Gamma_t}v(\rho_1+\rho_2)
\,d\mathscr{H}^n \\
&\quad
+\Cr{c-3.3}\eta\int_{\Gamma_t\cap\supp\rho_2}v
\,d\mathscr{H}^n.
\end{split}
\end{equation*}
Let
\[
\eta(\tau)
:=\exp\left(-\Cr{c-3.2}\int_0^\tau(s-t)^{-\frac34}\,dt\right)
=\exp\left(-\Cr{c-3.2}(s^\frac14-(s-\tau)^\frac14)\right).
\]
Note that $\|\eta\|_\infty=1$. Then by Lemma \ref{lem:4.1}, we have
\begin{equation*}
\begin{split}
&\quad
\exp\left(-\Cr{c-3.2}(s^\frac14-(s-\tau)^\frac14)\right)
\int_{\Gamma_\tau}v(x,\tau)(\rho_1+\rho_2)(X,\tau) \,d\mathscr{H}^n \\
&\qquad-\int_{\Gamma_0}v(x,0)(\rho_1+\rho_2)(X,0) \,d\mathscr{H}^n \\
&\leq \Cr{c-3.1} \int_ 0^{\tau}\eta\,dt
\int _\Omega v \, dx
+\Cr{c-3.3}\int_0^\tau\eta\,dt\int_{\Omega}v^2\,dx \\
&\quad+\Cr{c-4.1}\|v\|_{L^\infty(\Omega\times(0,\tau))}^3
\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)}
(1+\|\vec{f}\|_{L^q_tL^p_x(0,\tau,\Gamma_t)})
\\
&\leq \Cr{c-3.1}|\Omega| M_Ts
+\Cr{c-3.3}|\Omega|M_T^2s
+\Cr{c-4.1}M_T^3\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}(1+\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}).
\end{split}
\end{equation*}
Passing $\tau\to s$ we obtain
\begin{equation}
\label{eq:4.10}
\begin{split}
\exp\left(-\Cr{c-3.2}s^\frac14\right)
v(y,s)
&\leq 2\|v_0\|_\infty^2
+\Cr{c-3.1}|\Omega| M_Ts
+\Cr{c-3.3}|\Omega|M_T^2s
\\
&\quad+
\Cr{c-4.1}M_T^3\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}(1+\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)})
\end{split}
\end{equation}
where $v_0:=\sqrt{1+|du_0|^2}$.
Comparing \eqref{eq:4.11} with \eqref{eq:4.10}, we obtain for all
$y\in\overline{\Omega}$ and $0<s\leq T$
\begin{equation}
\label{eq:4.12}
\begin{split}
\exp\left(-\Cr{c-3.2}s^\frac14\right)
v(y,s)
&\leq 2\|v_0\|_\infty^2
+\Cr{c-4.5}(s)M_T^3
\end{split}
\end{equation}
where
\begin{equation*}
\Cl[const]{c-4.5}(s)
:=(\Cr{c-3.1}+\Cr{c-3.3})|\Omega|s
+\Cr{c-4.1}\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}
(1+\|\vec{f}\|_{L^q_tL^p_x(0,s,\Gamma_t)}).
\end{equation*}
Note that $\Cr{c-4.5}(s)$ is monotone increasing and
$\Cr{c-4.5}(s)\rightarrow0$ as $s\downarrow 0$.
Now, select $(y,s)$ such that $M_T=v(y,s)$ and $Y=(y,u(y,s))$. Then,
by monotonicity of $\Cr{c-4.5}(s)$
\begin{equation}
\label{eq:4.13}
\Cr{c-4.5}(T)M_T^3
-\exp\left(-\Cr{c-3.2}T^\frac14\right)M_T
+2\|v_0\|_\infty^2\geq0.
\end{equation}
Suppose that $4\|v_0\|_\infty^2\leq M_T\leq 5\|v_0\|_\infty^2$ for some
$T>0$. Then by \eqref{eq:4.13} we have
\[
\Cr{c-4.5}(T)
\geq \frac{\exp\left(-\Cr{c-3.2}T^\frac14\right)}{M_T^2}
-\frac{2\|v_0\|_\infty^2}{M_T^3} \\
\geq \frac{\Cr{c-4.7}(T)}{\|v_0\|_\infty^4}
\]
where
\[
\Cl[const]{c-4.7}(T)=\frac{\exp\left(-\Cr{c-3.2}T^\frac14\right)}{25}
-\frac{1}{32}.
\]
Thus, we have
\[
M_{T_1}\leq 4\|v_0\|_\infty^2=4(1+\|du_0\|_\infty^2)
\]
where $T_1$ is a sufficiently small constant satisfying
$\Cr{c-4.7}(T)>0$ and $ \Cr{c-4.5}(T)<\frac{\Cr{c-4.7}(T)}{\|v_0\|_\infty^4}$.
\end{proof}
\section{Existence of classical solutions}
\label{sec:5}
Finally, we prove Theorem $\ref{thm:2}$. To use the Schauder estimates, we
provide the following:
\begin{lemma}
Let $T>0$ and $u\in C^{2,1}(Q_T )$ be a solution of $(\ref{eq:1.1})$. Then
\begin{equation}
\sup _{Q_T}|u| \leq \sup_{\Omega \times \mathbb{R} \times[0,1]}|\vec{f}| \, T + \sup _{\Omega}| u_0 |.
\label{supu}
\end{equation}
\end{lemma}
\begin{proof}
We set $w(x,t)=\sup_{\Omega \times \mathbb{R} \times[0,1]}|\vec{f}| \, t + \sup _{\Omega}| u_0 |$. We note that
\[ \partial_t w \geq \sqrt{1+|dw|^2} \Div \left( \frac{dw}{\sqrt{1+|dw|^2}} \right) + \vec{f}(x,w,t)\cdot (-dw,1). \]
Using the comparison principle, we determine that
\[w\geq u, \qquad (x,t) \in Q_T. \]
Similarly to the above argument,
\[u\geq -w, \qquad (x,t) \in Q_T. \]
Hence, we obtain $(\ref{supu})$.
\end{proof}
\begin{proof}
[Proof of Theorem $\ref{thm:2}$]
Fix $\alpha \in (0,1)$. We assume that $u_0 \in C^{2,\alpha}(\Omega)$
and let $T>0$, which is given by Theorem \ref{thm:1}.
Let $\beta \in (0,\alpha]$ and we set $X:= C^{1,\beta }(Q_T )$.
We consider the following linear parabolic type equation:
\begin{equation}
\left\{
\begin{aligned}
\partial _t u &= \sum _{i,j=1} ^{n} a_{ij}(dw) \partial _{x_i x_j} u + \vec{f}(x,w,t) \cdot (-du,1)& , \qquad &\text{in} \ Q_T , \\
du\cdot \nu \Big|_{\partial \Omega} &=0,\\
u\Big|_{t=0}&=u_0 ,& \qquad &\text{on} \ \Omega,
\end{aligned}
\right.
\label{mcf3}
\end{equation}
where $w \in X$ and $\displaystyle a_{ij}(r)=\left( \delta _{ij}
-\frac{r_i r_j }{1+|r|^2} \right)$ for $r=(r_1,\dots , r_{n})$. Because
\begin{equation}
\begin{split}
\|a_{ij}(dw)\|_{C^{\alpha\beta}(Q_T)}
&\leq\|a_{ij}(dw)\|_{C^{\beta}(Q_T)} \\
&\leq\|a_{ij}\|_{C^1 (\mathbb{R}^n)} \|dw\|_{C^\beta (Q_T)}
\leq \| a_{ij}\|_{C^1 (\mathbb{R}^n)} \|w\|_{X}
\end{split}
\label{aij}
\end{equation}
for any $w\in X$, \eqref{mcf3} is uniformly parabolic in $Q_T$. Note
that $\|a_{ij}\|_{C^1 (\mathbb{R}^n)} <\infty$.
Using
\eqref{eq:2.1}, we obtain
\begin{equation}
\|\vec{f}(\cdot,w,\cdot) \|_{C^{\alpha\beta}(Q_T)}
\leq K\|w\|_{C^\beta (Q_T)}
\leq K\|w\|_{X}
\label{fv}
\end{equation}
for any $w \in X$. Hence, for any $w\in X$ there exists a unique solution $u_w \in C^{2,\alpha\beta}(Q_T) \subset X$ of $(\ref{mcf3})$ such that
\begin{equation}
\|u_w\| _{C^{2,\alpha\beta }(Q_T)}\leq \Cr{c-5.1},
\label{schauder2}
\end{equation}
where $\Cl[const]{c-5.1}>0$ depends only on
$n,\alpha, \beta, \|w\|_{X}, \|u_0\|_{C^{2,\alpha}(\Omega)}$
and $K$ (see \cite[Theorem 4.5.3]{MR0241822}).
We define $A :X\to X$ as $A w=u_w$. Note that $A$ is continuous and compact. We show that
\[ S :=\{ u \ | \ u = \sigma A u \ \text{in} \ X, \ \text{for some} \ \sigma \in [0,1] \} \]
is bounded in $X$. If $u\in S $, then
\begin{equation}
\left\{
\begin{aligned}
\partial _t u &= \sum _{i,j=1} ^{n} a_{ij}(du) \partial _{x_i x_j} u + \vec{f}(x,u,t) \cdot (-du,\sigma ),& \qquad &\text{in} \ Q_T , \\
du\cdot \nu \Big|_{\partial \Omega} &=0,&&\\
u\Big|_{t=0}&= \sigma u_0,& \qquad &\text{on} \ \Omega.
\end{aligned}
\right.
\label{mcf4}
\end{equation}
According to Theorem \ref{thm:1},
\begin{equation}
\sup_{Q_T} |du| \leq 4(1+\| du_0\|_\infty^2).
\label{estc1}
\end{equation}
Because $du\cdot\nu=0$ on $\partial \Omega$, we can use similar
arguments to the interior Schauder estimates (cf. \cite[Theorem
6.2.1]{MR0241822}); hence,
\begin{equation}
\|du\|_{C^\beta (Q_T) } \leq \Cr{c-5.3},
\label{estc2}
\end{equation}
where $\Cl[const]{c-5.3}=\Cr{c-5.3}$ is a positive constant depending
only on $n$, $\sup _{Q_T}|u|$, $\|du_0 \|_{C^\alpha(\Omega)}$,
$\sup_{\Omega \times \mathbb{R} \times [0,T]} |\vec{f}|$, and $\partial
\Omega$.
Using the same argument as \eqref{schauder2},
\begin{equation}
\|u\|_{X}\leq \|u\|_{C^{2,\alpha\beta}(Q_T)}\leq \Cr{c-5.4},
\label{estc3}
\end{equation}
where $\Cl[const]{c-5.4}=\Cr{c-5.4} (n,\alpha, \|u_0\|_{C^{2,\alpha} (\Omega)}, \Cr{c-5.3} , K)>0$ (see ~\cite{MR0241822}). According to \eqref{estc1}, \eqref{estc2}, and \eqref{estc3}, $\Cr{c-5.4}$ depends only on $n,\alpha,\|u_0 \|_{C^{2,\alpha} (\Omega)}, \sup _{\Omega} |du_0|$ and $K$. Thus, $S$ is bounded in $X$.
According to the Leray-Schauder fixed point theorem, there exists a solution $u \in C^{ 2,\alpha}(Q_T )$ of $(\ref{eq:1.1})$.
We return to the assumption that $u_0$ is a Lipschitz function with
a Lipschitz constant $L>0$. Set $\varepsilon >0$. We choose smooth
functions $u_0 ^k$ converging uniformly to $u_0$ on $\Omega$. We note
that according to Theorem $\ref{thm:1}$,
\[
\sup_{Q_T}|du ^k|\leq 4(1+L^2)
\]
for all $k\geq 1$. Using an argument similar to
\eqref{estc2}, \eqref{estc3} and the interior Schauder estimates, there
exists $\Cl[const]{c-5.5}=\Cr{c-5.5}(n,\alpha,L,\varepsilon,K)>0$ such that
\begin{equation*}
\sup _k
\| u^k \|_{C^{ 2,\alpha}(Q_T ^\varepsilon)}
\leq \Cr{c-5.5},
\end{equation*}
where $u^k$ is the solution of $(\ref{eq:1.1})$ with $u^k (x,0)=u_0
^k(x)$ in $\Omega$. Note that $\varepsilon=\dist (Q^\varepsilon
_T,\partial Q_T)$.
Hence, for any $\varepsilon >0$, passing to a subsequence if necessary,
$\{ u^k \}_{k=1} ^{\infty}$ converges to a classical solution $u$ in
$Q_T ^\varepsilon$ and we obtain \eqref{schauder}. Therefore, by
diagonal arguments, we obtain the solution $u \in C(\overline{Q_T})\cap
C^{2,1} (Q_T ^\varepsilon)$ of \eqref{eq:1.1}. The comparison principle
implies the uniqueness of the solution of \eqref{eq:1.1}. Thus, we have
proved Theorem $\ref{thm:2}$.
\end{proof}
\section{Optimality}
\label{sec:6}
We now discuss the optimality of the assumption on the transport term
for the gradient estimates. We present some transport term $\vec{f}$ such
that the gradient of some solution
of \eqref{eq:1.1} is not bounded and
$\|\vec{f}\|_{L^q_tL^p_x(0,1,\Gamma_t)}<\infty$ for some $p$, $q$
satisfying $\frac{n}{p}+\frac{2}{q}>1$ (see Figure \ref{fig:6.1}).
\begin{figure}
\caption{For
$p$ and $q$ \emph{not} satisfying $1-\frac{n}{p}-\frac{2}{q}>0$.}
\label{fig:6.1}
\end{figure}
Let $\phi=\phi(\xi)$ be a smooth function on $\mathbb{R}^n$ compactly supported
on $\Omega$. We seek the transport term such that
$u(x,t)=(1-t)^\alpha\phi(x/\sqrt{1-t})$ is a solution of \eqref{eq:1.1}
for $\alpha\in\mathbb{R}$. By direct calculation, we obtain
\[
\begin{split}
du(x,t)&=(1-t)^{\alpha-\frac12}d_\xi\phi,\quad
d^2u(x,t)=(1-t)^{\alpha-1}d^2_\xi\phi \\
\partial_tu(x,t)&=(1-t)^{\alpha-1}\left(-\alpha\phi
+\frac12 d_\xi\phi\cdot\frac{x}{\sqrt{1-t}}
\right).
\end{split}
\]
Note that if $\alpha<\frac12$, then $du$ is not bounded. If
$u(x,t)=(1-t)^\alpha\phi(x/\sqrt{1-t})$ is a solution of \eqref{eq:1.1},
then we obtain
\begin{equation}
\label{eq:6.1}
\begin{split}
\vec{f}(x,u,t)\cdot(-du,1)
&=(1-t)^{\alpha-1}\left(-\alpha\phi
+\frac12 d_\xi\phi\cdot\frac{x}{\sqrt{1-t}}
\right) \\
&\quad-(1-t)^{\alpha-1}\Delta_\xi\phi
+(1-t)^{3\alpha-2}
\frac{d_\xi^2\phi:(d_\xi\phi\otimes
d_\xi\phi)}{1+(1-t)^{2\alpha-1}|d_\xi\phi|^2}.
\end{split}
\end{equation}
Thus, for $p,q\geq1$ and $\alpha<\frac12$ if $t$ is sufficiently
near to $1$, we obtain
\begin{equation*}
\Cr{c-6.1}(\phi)(1-t)^{3\alpha-2+\frac{n}{2p}+\frac{2\alpha-1}{p}}
\leq \left(\int_{\Gamma_t}|\vec{f}|^p\,d\mathscr{H}^n\right)^\frac1p
\leq\Cr{c-6.2}(\phi)(1-t)^{3\alpha-2+\frac{n}{2p}+\frac{2\alpha-1}{p}}
\end{equation*}
for some constants $\Cl[const]{c-6.1}(\phi), \Cl[const]{c-6.2}(\phi)>0$
since $\frac1{2(1-t)^{2\alpha-1}|d\phi|^2}\leq
\frac1{1+(1-t)^{2\alpha-1}|d\phi|^2}\leq \frac1{(1-t)^{2\alpha-1}|d\phi|^2}$.
When $1+\varepsilon_0:=\frac{n}{p}+\frac{2}{q}>1$ and
$\|\vec{f}\|_{L^q_tL^p_x(0,1,\Gamma_t)}<\infty$, then
\[
\int_0^1(1-t)^{(3\alpha-2+\frac{n}{2p}+\frac{2\alpha-1}{p})q}\,dt<\infty
\]
hence we obtain $\alpha>\frac12-\frac{\varepsilon_0}{2}(3+\frac2p)^{-1}$. For
$\alpha_0:=\frac12-\frac{\varepsilon_0}{4}(3+\frac2p)^{-1}$, let $\vec{f}$ be given by
\eqref{eq:6.1} with $\alpha=\alpha_0$. Then
$u(x,t):=(1-t)^{\alpha_0}\phi(x/\sqrt{1-t})$ is a solution of
\eqref{eq:1.1}, $\|\vec{f}\|_{L^q_tL^p_x(0,1,\Gamma_t)}<\infty$ for some
$p$, $q$ satisfying $\frac{n}{p}+\frac{2}{q}>1$ and $du$ is not bounded
on $\Omega\times(0,1)$.
\section{Final remark}
By our argument, we do not obtain a time global classical solution of
\eqref{eq:1.1}. Indeed, when $\vec{f}\not\equiv0$, the maximum existence time
in Theorem \ref{thm:2} cannot, in general, be taken to be infinite. On the other hand,
Huisken~\cite[Theorem 1.1]{MR0983300} proved that there exists a time
global solution of \eqref{eq:1.1} under $\vec{f}\equiv 0$ and $u_0 \in
C^{2,\alpha}(\overline{\Omega}) $ and the solution converges to some
constant function as $t\to \infty$. In the case of $\vec{f} \not \equiv
0$, a priori time-global gradient bounds are not known; hence, we do not
show the global existence of solutions of \eqref{eq:1.1}. It is
expected that with the assumption \eqref{eq:2.1} there is a time global
solution of \eqref{eq:1.1} and that solution converges to a solution of
the prescribed mean curvature equation $\vec{H}=-(\vec{f} \cdot \vec{n})
\vec{n}$.
To apply our results for $\Gamma_t$ in \eqref{eq:1.4}, the velocity
vector $\vec{f}$ in \eqref{eq:1.4} needs to be smooth enough. On the
other hand, if $\vec{f}$ is a weak solution of \eqref{eq:1.4}, namely
$\vec{f} \in L^2_t(H^1_x)$, then $\vec{f}$ does not satisfy our
assumption. When we consider \eqref{eq:1.4} with $\vec{f} \in L^2_t(H^1_x)$, we
need to study the relationship between the velocity vector $\vec{f}$ and
the phase boundary $\Gamma_t$.
\section*{Acknowledgments}
The authors are grateful to the referee for his or her helpful comments.
This work was supported by JSPS KAKENHI Grant Numbers 25800084,
25247008, 16K17622, 17J02386. The second author is supported by JSPS
Research Fellowships for Young Scientists.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Generalization of the Nualart--Peccati criterion}
\runtitle{Generalized Nualart--Peccati criterion}
\begin{aug}
\author[A]{\fnms{Ehsan}~\snm{Azmoodeh}\corref{}\ead[label=e1]{[email protected]}\thanksref{m1}\thanksref{T1}},
\author[B]{\fnms{Dominique}~\snm{Malicet}\ead[label=e2]{[email protected]}\thanksref{m2}},
\author[C]{\fnms{Guillaume}~\snm{Mijoule}\ead[label=e3]{[email protected]}\thanksref{m3}}
\and
\author[D]{\fnms{Guillaume}~\snm{Poly}\ead[label=e4]{[email protected]}\thanksref{m4}\thanksref{T2}}
\runauthor{Azmoodeh, Malicet, Mijoule and Poly}
\affiliation{University of Luxembourg\thanksmark{m1}, PUC-Rio\thanksmark{m2}, Universit\'{e} Paris-Sud 11\thanksmark{m3} and University Rennes 1\thanksmark{m4}}
\address[A]{E. Azmoodeh\\
Mathematics Research Unit\\
Universit\'e du Luxembourg\\
Luxembourg City, 1359\\
Luxembourg\\
\printead{e1}}
\address[B]{D. Malicet\\
Departamento de Matem\'atica\\
PUC-Rio\\
Rio de Janeiro, 22451-900\\
Brazil\\
\printead{e2}\hspace*{23pt}}
\address[C]{G. Mijoule\\
D\'{e}partment de Math\'{e}matiques\\
Facult\'{e} des Sciences d'Orsay\\
Universit\'{e} Paris-Sud 11\\
F-91405 Orsay Cedex\\
France\\
\printead{e3}}
\address[D]{G. Poly\\
Institut de Recherche Math\'{e}matiques \\
Universit\'e de Rennes 1\\
35042 Rennes Cedex\\
France\\
\printead{e4}}
\end{aug}
\thankstext{T1}{Supported by research project
F1R-MTH-PUL-12PAMP.}
\thankstext{T2}{Supported by AFR Grant 4897114.}
\received{\smonth{11} \syear{2013}}
\revised{\smonth{12} \syear{2014}}
\begin{abstract}
The celebrated Nualart--Peccati criterion
[\textit{Ann. Probab.} \textbf{33} (2005) 177--193]
ensures the
convergence in distribution toward a standard Gaussian random variable
$N$ of a given sequence $\{X_n\}_{n\ge1}$ of multiple\vspace*{1pt} Wiener--It\^{o}
integrals of fixed order, if $\mathbb{E}[X_n^2]\to1$ and $\mathbb{E}[X_n^4]\to\mathbb{E}
[N^4]=3$. Since its appearance in 2005, the natural question of
ascertaining which
other moments can replace the fourth moment in the above criterion has
remained entirely open. Based on the technique recently
introduced in
[\textit{J. Funct. Anal.} \textbf{266} (2014) 2341--2359], we settle this problem and establish that
the convergence of \textit{any even moment}, greater than four, to the
corresponding moment of the standard Gaussian distribution, guarantees
the central convergence. As a by-product, we provide many new moment
inequalities for multiple Wiener--It\^{o} integrals. For instance, if
$X$ is a normalized multiple Wiener--It\^o integral of order greater than
one,
\[
\forall k \ge2, \qquad \mathbb{E}\bigl[X^{2k}\bigr] > \mathbb{E}\bigl[N^{2k}
\bigr]=(2k-1)!!.
\]
\end{abstract}
\begin{keyword}[class=AMS]
\kwd{60F05}
\kwd{47D07}
\kwd{33C45}
\kwd{60H07}
\kwd{34L05}
\end{keyword}
\begin{keyword}
\kwd{Nualart--Peccati criterion}
\kwd{Markov diffusive generators}
\kwd{moment inequalities}
\kwd{$\Gamma$-calculus}
\kwd{Hermite polynomials}
\kwd{spectral theory}
\end{keyword}
\end{frontmatter}
\section{Introduction and summary of the main results}\label{sec1}
Let $\{B_t\}_{t\ge0}$ be a standard Brownian motion and $p$ be an
integer greater than $1$. For any deterministic and symmetric function
$f\in L^2(\mathbb{R}_+^p,\lambda_p)$ ($\lambda_p$ stands for $p$-dimensional
Lebesgue measure), let $I_p(f)$ be the $p{\mathrm{th}}$ multiple
Wiener--It\^{o} integral of $f$ with respect to $\{B_t\}_{t\ge0}$ (see
\cite{Nu-book} for a precise definition). The vector space spanned by all
the multiple integrals of order $p$ is called the $p{\mathrm{th}}$
Wiener chaos. A fundamental result of stochastic calculus, customarily
called the \textit{Wiener--It\^{o} decomposition}, asserts that any
square integrable functional of $\{B_t\}_{t\ge0}$ can be uniquely
expanded as an orthogonal sum of multiple Wiener--It\^{o} integrals. As
such, the study of the properties of multiple Wiener--It\^{o} integrals
becomes a central topic of research in modern stochastic analysis and a
great part of the so-called \textit{Malliavin calculus} (see, e.g.,
\cite{Nu-book,n-p}) relies on it.
The following result, nowadays known as the \textit{fourth moment
theorem}, yields an effective criterion of central convergence for a
given sequence of multiple Wiener--It\^{o} integrals of a fixed order.
\begin{thm}[(Nualart--Peccati \cite{n-p-05})]\label{peccati}
Let $p \ge2$ and $f_n$ be a sequence of symmetric elements of $L^2(\mathbb{R}
_+^p,\lambda_p)$. Assume $X_n=I_p(f_n)$ verifies $ \mathbb{E} [X_n^2
]\to1$. Then, as $n \to\infty$,
\[
\label{CLTFM}
X_n \stackrel{\mathit{law}}{\rightarrow}N \sim
\mathcal{N}(0,1) \quad \mbox{if and only if}\quad \mathbb{E}\bigl[X_n^4
\bigr]\to\mathbb{E}\bigl[N^4\bigr]=3.
\]
\end{thm}
The main goal of this article is to show that the above theorem is a
particular case of a more general phenomenon. This is the content of
the next theorem.
\begin{thm}\label{CLTFMquibourre}
Under the assumptions of Theorem~\ref{peccati}, for any integer $k\ge
2$, as $n \to\infty$,
\[
\label{CLTFM-ultimate}
X_n \stackrel{\mathit{law}} {\rightarrow}N \sim
\mathcal{N}(0,1)\quad \mbox{if and only if}\quad \mathbb{E}\bigl[X_n^{2k}
\bigr]\to\mathbb{E}\bigl[N^{2k}\bigr]=(2k-1)!!,
\]
where the double factorial is defined by $(2k-1)!!=\prod_{i=1}^k (2i-1)$.
\end{thm}
The discovery of the fourth moment theorem by Nualart and Peccati
(see \cite{n-p-05}) is arguably a major breakthrough in the field of
Gaussian approximation in the Wiener space. It resulted in a
drastic simplification of the so-called \textit{method of moments}
(consisting in checking the convergence of \textit{all} the moments)
which was so far the only alternative to establish a central limit
theorem. We refer to Breuer and Major \cite{BM}, Chambers and Slud \cite
{CS}, Surgailis \cite{SU} and \cite{NS11,GW} for a nonexhaustive
exposition of some results provided by the method of moments. The first
proof of Theorem~\ref{peccati} relies on tools from stochastic analysis (namely the
Dambis, Dubins and Schwartz's theorem; see, e.g.,
\cite{RY}, Chapter V). Later on, in the seminal paper \cite{n-o}, Nualart and
Ortiz-Latorre discovered a fundamental link between the central
convergence of a sequence of elements of a fixed Wiener chaos and the
convergence to a constant of the norms of their Malliavin derivatives.
The role played
by the Malliavin derivative in the fourth moment theorem was later on
confirmed in the landmark paper \cite{n-p-2}. There, Nourdin and
Peccati combined Stein's method and Malliavin calculus to provide a new
proof of Theorem~\ref{peccati} culminating in sharp estimates for
various distances. As an illustrative example of such estimates, they
could prove the following quantitative version of Theorem~\ref{peccati}.
\begin{thm}[(Nourdin--Peccati \cite{n-p-2})]\label{quantificationtheo}
Let $p\ge2$. Assume\vspace*{1pt} that $X=I_p(f)$ where $f$ is a symmetric element
of $L^2(\mathbb{R}_+^p,\lambda_p)$ such that $\mathbb{E}[X^2]=1$. Then,
\begin{equation}
\label{quantification}
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr) \le
\tfrac{2}{\sqrt{3}} \sqrt{\mathbb{E} \bigl[X^4 \bigr]-3},
\end{equation}
where $d_{\mathrm{TV}}$ stands for the total variation distance.
\end{thm}
This innovative approach, combining Malliavin calculus and Stein's\break
method, gave a new impetus to the well studied field of normal
approximation within Gaussian spaces. Indeed, it resulted in
spectacular improvements of many classical results previously obtained
by the
method of moments. For an exposition of this fertile line of research,
one can consult the book \cite{n-p}, the surveys \cite{p14survey,C14}
and the following frequently updated webpage which aims at
referencing all the articles dealing with the so-called \textit{Malliavin--Stein method}:
\surl{https://sites.google.com/site/malliavinstein/home}.
Finally, we mention that Theorem~\ref{quantificationtheo} has been
generalized in various directions such as the optimality of the rate of
convergence \cite{Optimal}, multivariate settings
\cite{Tudor,Entropy}, free probability settings \cite{K12} and general
homogeneous sums \cite{npsp14}.
Unfortunately, if the quantitative aspects of the latter approach are
now quite well understood, the heavily combinatorial nature of the
proofs remains a major stumbling block in tackling the following
central questions:
\begin{longlist}[(C)]
\item[(A)] What are the target distributions for which a moment
criterion similar to~(\ref{CLTFM}) is available?
\item[(B)] What are the moment conditions ensuring the central convergence?
\item[(C)] What are the special properties of Wiener chaos playing a
role in the fourth moment phenomenon?
\end{longlist}
Indeed, most of the aforementioned proofs of the fourth moment theorem
make crucial use of the product formula for multiple Wiener--It\^{o}
integrals together with some properties of the underlying Fock space.
Such an approach becomes already inextricable when one tries to make
explicit the Wiener--It\^{o} decomposition of the $6$th power
of a multiple integral. As such, writing explicitly the Wiener--It\^{o}
decompositions of the successive powers of a given multiple
Wiener--It\^{o} integral, which is the core of the previous strategies,
seems totally hopeless for our purpose. Inspired by the remarkable intuition
that the fourth moment phenomenon could also be explained by the
spectral properties of Markov operators, Ledoux produced a new proof
of Theorem~\ref{peccati} (see \cite{le}). His approach, exclusively
based on the study of some differential operators such as the
Ornstein--Uhlenbeck generator and the iterated gradients, completely
avoids the product formula for multiple integrals.\vadjust{\goodbreak} Unfortunately,
due to
an inappropriate definition of the chaos of a Markov operator, this attempt
became rather involved and could not produce any of the expected
generalizations of the fourth moment criterion. Later on, in the same
spirit as \cite{le} (i.e., exploiting spectral theory of Markov operators
and Gamma-calculus), the authors of \cite{a-c-p} could produce a very
simple and fully transparent proof of the fourth moment theorem,
henceforth bringing both, a complete answer to question (C), and some
generalizations of the criterion for other Markov operators than
Ornstein--Uhlenbeck. Roughly speaking, the technique used in \cite{a-c-p} consists in exploiting the stability of chaoses under the
product operation to provide some suitable spectral
inequalities, which, after elementary computations, become moment
inequalities. In particular, the latter approach does not need any of
the combinatorics computations required by the product formula. The
present article fully generalizes this idea and builds a complete
methodology enabling to provide a wide range of inequalities for
polynomial functionals of a Gaussian field, hence for Wiener chaoses as well.
In particular, it leads to a partial answer to question (B). Combining
the formalism and the ideas of \cite{a-c-p} together with some fine
deterministic properties of Hermite polynomials, we could prove
Theorem~\ref{CLTFMquibourre}, which is our main achievement.
Interestingly, we could also prove the following quantitative version
which extends the celebrated estimate (\ref{quantification}) to all
even moments. Indeed, taking $k=2$ in the theorem below gives back the
bound in\break (\ref{quantification}).
\begin{thm}\label{quantificationdeouf}
Under the assumptions of Theorem~\ref{quantificationtheo}, for all
$k\ge2$, we have the following general quantitative bound:
\begin{equation}
\label{superquantification}
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr)\le C_k
\sqrt{\frac{\mathbb{E}
[X^{2k} ]}{(2k-1)!!}-1},
\end{equation}
where the constant $C_k=\frac{4}{\sqrt{ 2k(k-1) \int_0^1 (({1+t^2})/{2})^{k-2} \,dt}}$.
\end{thm}
For the sake of clarity, we stated so far our main results in the more
familiar context of the Wiener space. Nevertheless, throughout the
whole article, instead of the Wiener--It\^{o} multiple integrals, we
shall consider a more general concept of eigenfunctions of a diffusive
Markov operator. We refer the reader to Section~\ref{assumptions}
for a precise exposition of our assumptions. We also stress
that this gain of generality enables us to give central limit criteria
in situations far beyond the scope of the usual criteria holding in the
Wiener chaoses.\vadjust{\eject}
\section{The setup}\label{setup}
\subsection{The general setup and assumptions \textup{(a)}--\textup{(b)}--\textup{(c)}}\label{assumptions}
One possible way to study the properties of the elements of the Wiener
space can be through the exploration of the spectral properties of the
so-called \textit{Ornstein--Uhlenbeck operator}. In order to
situate more precisely our purpose, we will restate below its main
properties.
The Wiener space is the space $L^2(E,\mu)$ where $E=\mathbb{R}^\mathbb{N}$ and $\mu$ is
the standard Gaussian measure on $\mathbb{R}^\mathbb{N}$. The Ornstein--Uhlenbeck
operator is the unbounded, symmetric, negative operator $\mathbf{L}$ acting
on some dense domain of $L^2(E,\mu)$ and defined (on the set of smooth
enough cylindric functionals $\Phi$) by
\[
\mathbf{L} [\Phi ]=\Delta\Phi- \vec{x}\cdot\vec{\nabla}\Phi
\]
(where $\Delta$ is the usual Laplacian and $\vec{\nabla}$ is the
gradient vector).
We shall denote its domain by $\mathcal{D}(\mathbf{L})$ and for a general $X\in
\mathcal{D}(\mathbf{L})$, $\mathbf{L} [X ]$ is defined via standard closure
operations. The associated \textit{carr\'e-du-champ} operator is the
symmetric, positive, bilinear form defined by
\[
\Gamma[\Phi,\Psi]=\vec{\nabla}\Phi\cdot\vec{\nabla}\Psi.
\]
Below, we summarize the fundamental properties of the
Ornstein--Uhlenbeck operator $\mathbf{L}$.
\begin{longlist}[(a)]
\item[(a)] \textit{Diffusion}: For any $\mathcal{C}^2_b$ function $\phi
\dvtx \mathbb{R}\to\mathbb{R}$, any $X \in\mathcal{D}(\mathbf{L})$, it holds that $\phi
(X)\in\mathcal{D}(\mathbf{L})$ and
\begin{equation}
\label{diff}
\mathbf{L} \bigl[\phi(X) \bigr] = \phi'(X) \mathbf{L}[X] +
\phi''(X) \Gamma[X,X].
\end{equation}
Note that, by taking $\phi=1 \in\mathcal{C}^2_b$, we get $\mathbf{L}[1]=0$
which is the Markov property. Equivalently, $\Gamma$ is a derivation in
the sense that
\[
\Gamma \bigl[ \phi(X),X \bigr] = \phi'(X) \Gamma[X,X].
\]
\item[(b)] \textit{Spectral decomposition}: The operator $-\mathbf{L}$ is
diagonalizable on $L^2(E,\mu)$ with $\mathbf{sp}(-\mathbf{L})=\mathbb{N}$, that is to say
\[
L^2(E,\mu)=\bigoplus_{i=0}^\infty
\mathbf{Ker}(\mathbf{L}+i \mathbf{Id}).
\]
\item[(c)] \textit{Spectral stability}: For any pair of eigenfunctions
$(X,Y)$ of the operator $- \mathbf{L}$ associated with eigenvalues $(p_1,p_2)$,
\begin{equation}
\label{fundamental-assumtionbis}
X Y\in\bigoplus_{i \le p_1+p_2 } \mathbf{Ker} ( \mathbf{L}+ i \mathbf{Id}).
\end{equation}
\end{longlist}
We refer to \cite{BH} for a precise exposition as well as all the
domain and integrability assumptions. Actually, these three properties
are the only ones we will use. Thus, we naturally define the following
class of structures for which our results will hold.
\begin{defn}
A (a)--(b)--(c) structure is a triplet $(E,\mu,\mathbf{L})$, with an associated
``carr\'e-du-champ'' operator $\Gamma$, where:
\begin{itemize}
\item $(E,\mu)$ is a probability space,
\item $\mathbf{L}$ is a symmetric unbounded operator defined on some dense
domain of $L^2(E,\mu)$,
\item$\Gamma$ is
defined by
\begin{equation}
\label{Gamma}
2 \Gamma [X,Y ] = \mathbf{L} [XY ] - X \mathbf{L} [Y ] - Y \mathbf{L} [X ],
\end{equation}
\end{itemize}
such that the aforementioned properties (a), (b) and (c) hold.
In this context, we will sometimes write $\Gamma[X]$ to denote $\Gamma
[X,X]$ and $\mathbb{E}$ for the integration against $\mu$.
\end{defn}
Property (a) is important regarding functional calculus. For instance,
we will use several times the following \textit{integration by parts
formula}: for any $X,Y$ in $\mathcal{D}(\mathbf{L})$ and
$\phi\in\mathcal{C}^2_b$:
\begin{equation}
\label{by-parts}
\mathbb{E} \bigl[\phi'(X)\Gamma [X,Y ] \bigr] = - \mathbb{E} \bigl[
\phi(X) \mathbf{L} [Y ] \bigr] = -\mathbb{E} \bigl[Y \mathbf{L} \bigl[\phi(X) \bigr] \bigr].
\end{equation}
Property (b) allows us to use spectral theory. Actually, we stress that
our results extend under the weaker assumption that $\mathbf{sp}(-\mathbf{L}
)\subset\mathbb{R}_+$ is simply discrete. However, we stick to the
assumption $\mathbf{sp}(-\mathbf{L})=\mathbb{N}$ since it encompasses the most common
cases (Wiener space and Laguerre space). The reader interested in
relaxing this spectral assumption can consult \cite{a-c-p} where the
spectrum is only assumed to be discrete.
Property (c) is our main assumption, which will allow us to obtain
fundamental spectral inequalities. A simple induction on (\ref{fundamental-assumtionbis}) shows that, for any $X\in\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$ and
any polynomial $P$ of degree $m$, we have
\begin{equation}
\label{fundamental-assumtion}
P(X) \in\bigoplus_{i \le mp } \mathbf{Ker} ( \mathbf{L}+ i
\mathbf{Id} ).
\end{equation}
For further details on our setup, we refer to \cite{ba,b-g-l}. We also
refer to Section~\ref{examples-assumptions} for many other examples.
\begin{rmk}\label{Hyperconractivity}
We remark that under the assumptions (a)--(b)--(c), the eigenspaces are
hypercontractive (see \cite{ba} for sufficient conditions), that is,
for any integer $M$, we have that
\begin{equation}
\label{Hyper1}
\bigoplus_{i \le M} \mathbf{Ker} ( \mathbf{L}+ i \mathbf{Id} )
\subseteq\bigcap_{p\ge1} L^{p}(E,\mu).
\end{equation}
Next, by using the open mapping theorem, we see that the embedding (\ref{Hyper1}) is continuous, that is, there exists a constant $C(M,k)$ such
that for any $X\in\bigoplus_{i \le M} \mathbf{Ker} ( \mathbf{L}+ i \mathbf{Id} )$:
\begin{equation}
\label{Hyper2}
\mathbb{E}\bigl(X^{2k}\bigr)\leq C(M,k) \mathbb{E}\bigl(X^2
\bigr)^k.
\end{equation}
\end{rmk}
We close this subsection with a useful lemma which will be used
several times in the sequel. This lemma is proved in \cite{n-po-2}, Lemma~2.4, in the Wiener structure but can be easily adapted to our
framework by taking into
account the Remark~\ref{Hyperconractivity}.
\begin{lma}\label{Hypercontract}
Let $\{ X_n\}_{n \ge1}$ be a sequence of random variables living in a
finite sum of eigenspaces\vspace*{1pt} $(X_n \in\bigoplus_{i \le M} \mathbf{Ker} ( \mathbf{L}+
i \mathbf{Id} ), \forall n\ge1)$ of a Markov generator $\mathbf{L}$ such that
our assumptions \textup{(a)}, \textup{(b)}
and \textup{(c)} hold. Assume that the sequence $\{X_n\}_{n \ge1}$ converges in
distribution as $n$ tends to infinity. Then
\[
\sup_{n \ge1} \mathbb{E}\bigl(\vert X_n \vert^r
\bigr) < \infty \qquad \forall r \ge1.
\]
\end{lma}
\subsection{Examples of structures fulfilling assumptions
\textup{(a)}--\textup{(b)}--\textup{(c)}}\label{examples-assumptions}
We refer to the article \cite{a-c-p} for a proof of the validity of the
assumptions (a)--(b)--(c) in the cases of the Wiener and Laguerre
structures. We now show how the validity of the assumptions (a)--(b)--(c)
is preserved by the elementary operations of tensorization and
superposition of structures. This simple fact will allow us to produce
many structures in which our results hold.
\subsubsection*{Tensorization} Let $(E_1,\mu_1,\mathbf{L}_1)$ and
$(E_2,\mu_2,\mathbf{L}_2)$ be two Markov triplets fulfilling assumptions
(a)--(b)--(c). On the product space $E_1\times E_2$ with measure
$\mu_1\otimes\mu_2$, we define the following operator $\mathbf{L}_3$. For $\Psi
\dvtx E_1\times E_2\rightarrow\mathbb{R}$, we set $\Psi_x(y)=\Psi_y(x)=\Psi(x,y)$,
and we define
\begin{equation}
\label{tensor}
\mathbf{L}_3[\Psi](x,y)=\mathbf{L}_1[
\Psi_y](x)+\mathbf{L}_2[\Psi_x](y).
\end{equation}
In (\ref{tensor}), $\mathbf{L}_3$ is defined on the set of maps $\Psi$ such that:
\begin{longlist}[(2)]
\item[(1)] $\mu_2$-a.s., $\Psi_y\in\operatorname{Dom}(\mathbf{L}_1)$ and $\mu
_1$-a.s., $\Psi_x\in\operatorname{Dom}(\mathbf{L}_2)$,
\item[(2)]
\[
\int_{E_1\times E_2} \bigl(\mathbf{L}_3[\Psi](x,y)
\bigr)^2\,d\mu_1 \,d\mu_2<\infty.
\]
\end{longlist}
We claim that the triplet $(E_1\times E_2,\mu_1\otimes\mu_2,\mathbf{L}_3)$
verifies assumptions (a)--(b)--(c). First, it is well known that this
procedure preserves assumption (a); see, for instance, \cite{BH}.
Assumption (b) is also preserved by tensorization taking into account that
\begin{eqnarray}
\mathbf{Ker}(\mathbf{L}_3+k\mathbf{Id})
& =& \operatorname{Vect} \bigl\{\phi(x)
\psi(y) |\phi\in\mathbf{Ker}(\mathbf{L} _1+k_1\mathbf{Id}), \nonumber\\[-8pt]
\label{chaos-tensor}
\\[-8pt]
\nonumber
&& \hspace*{29pt}\qquad\qquad
\psi\in\mathbf{Ker}(\mathbf{L}_2+k_2\mathbf{Id}), k_1+k_2=k\bigr\}.
\end{eqnarray}
Finally, we check assumption (c) for $\mathbf{L}_3$. Let $\Psi_1=\phi_1(x)\psi
_1(y)$ with $\phi_1\in\mathbf{Ker}(\mathbf{L}+k_1\mathbf{Id})$ and $\psi_1\in\mathbf{Ker}(\mathbf{L}+k_2\mathbf{Id})$,
and let $\Psi_2=\phi_2(x)\psi_2(y)$ where $\phi_2\in\mathbf{Ker}(\mathbf{L}+k_3\mathbf{Id})$
and $\psi_2\in\mathbf{Ker}(\mathbf{L}+k_4\mathbf{Id})$. By applying (c) to $\phi_1 \phi_2$ and
$\psi_1 \psi_2$ together with equation (\ref{chaos-tensor}), we infer that
\[
\phi_1(x)\phi_2(x)\psi_1(y)
\psi_2(y)\in\bigoplus_{i\leq
k_1+k_2+k_3+k_4}\mathbf{Ker}(
\mathbf{L}_3+i\mathbf{Id}).
\]
Hence, using bilinearity, we see that assumption (c) also holds for
operator $\mathbf{L}_3$.
\subsubsection*{Superposition}
As before, we are given a Markov
triplet $(E,\mathbf{L},\mu)$ satisfying assumptions (a)--(b)--(c). The
superposition procedure consists in adding an independent noise to
$(E,\mathbf{L},\mu)$. To do so, we consider a generic probability space
$(\Omega,\mathcal{F},\mathbb{P})$, which will induce the noise on $(E,\mathbf{L},\mu
)$. We define on the set $E\times\Omega$ equipped with the product
probability measure $\mu\otimes\mathbb{P}$:
\begin{eqnarray}
\label{newdomainmixture}
\quad\qquad\operatorname{Dom}(\mathbf{L}_\Omega) &=& \biggl\{\Psi(x,\omega) \Big|
\Psi_\omega\in\operatorname{Dom}(\mathbf{L}), \int_\Omega
\mathbb{E}_\mu \bigl[ (\mathbf{L}\Psi_\omega )^2 \bigr] \,d\mathbb{P}<
\infty \biggr\},
\\
\label{newgeneratormixture}
\mathbf{L}_\Omega[\Psi](x,\omega)&:=& \mathbf{L}[\Psi_\omega](x)\qquad
\forall \Psi \in\operatorname{Dom}(\mathbf{L}_\Omega).
\end{eqnarray}
Preservation of assumption (a) is a well-known consequence of the
superposition procedure. We refer to \cite{BH} where
superposition/product/semidirect product of Markov triplets (i.e.,
Dirichlet forms)
are studied to provide ways of constructing Dirichlet forms. To check
assumption (b), we are given $\Psi(x,\omega)\in L^2(\mu\otimes\mathbb{P})$. By
assumption (b) on the space $L^2(E,\mu)$, we get
\begin{equation}
\Psi_{\omega}(x)=\sum_{k=1}^\infty
f_{k,\omega}(x), \qquad f_{k,\omega}\in\mathbf{Ker} (\mathbf{L}+k\mathbf{Id}).
\end{equation}
Besides,
\[
\int_\Omega\mathbb{E}_\mu \bigl[\Psi(x,
\omega)^2 \bigr]\,d\mathbb{P}=\sum_{k=1}^\infty
\int_\Omega\mathbb{E}_\mu \bigl[f_{k}(x,
\omega)^2 \bigr]\,d\mathbb{P}<\infty.
\]
This ensures that $\mathbb{P}$-a.s., $f_{k,\omega}\in\mathbf{Ker}(\mathbf{L}+k\mathbf{Id})$ and
that $f_{k}\in\operatorname{Dom}(\mathbf{L}_\Omega)$. Finally, one can see that
\begin{equation}
\label{newchaosmixt}
\hspace*{9pt}\quad\mathbf{Ker}(\mathbf{L}_\Omega+k\mathbf{Id})= \bigl\{\Psi(x,\omega)\in\operatorname{Dom}(
\mathbf{L}_\Omega)| \mathbb{P}\mbox{-a.s. } \Psi_\omega\in\mathbf{Ker}(\mathbf{L}+k\mathbf{Id}) \bigr\}.
\end{equation}
We infer that $f_k\in\mathbf{Ker}(\mathbf{L}_\Omega+k\mathbf{Id})$ which achieves the proof of
(b). Strictly speaking, assumption (c) is not necessarily preserved
because we need integrability on the product of two eigenfunctions of
$\mathbf{L}_\Omega$. This integrability, unlike in the tensorization procedure
is not automatically fulfilled in the superposition procedure.
Fortunately, under some slight additional assumption, (c) holds for $\mathbf{L}
_\Omega$.
More precisely, we have for all $X(x,\omega)\in\mathbf{Ker}(\mathbf{L}_\Omega
+k_1\mathbf{Id})$ and $Y(x,\omega)\in\mathbf{Ker}(\mathbf{L}_\Omega+k_2\mathbf{Id})$ such that $X Y \in
L^2(\mu\otimes\mathbb{P})$:
\begin{equation}
\label{c-mixt}
X Y\in\bigoplus_{i\leq k_1+k_2}\mathbf{Ker}(
\mathbf{L}_\Omega+i\mathbf{Id}).
\end{equation}
\begin{rmk}
One can consult the reference \cite{b-g-l}, page 515, to see that the two
aforementioned operations are a particular case of the so-called
wrapped product of symmetric diffusive operators.
\end{rmk}
\subsection{Some auxiliary results}\label{knownfacts}
To be self-contained, we restate here two well-known facts about
Stein's method applied to eigenfunctions of a diffusive Markov
operator. For more details, the reader can consult, for instance, \cite
{le} or the survey~\cite{CP14}.
\begin{thm}[(\cite{le})]\label{Gamma-bound-general}
Let $\mathbf{L}$ be a Markov diffusive operator satisfying the assumptions
\textup{(a)}--\textup{(b)} of Section~\ref{assumptions}, and $X$ be in $\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$
such that \mbox{$\mathbb{E}[X^2]=1$}. Then
\[
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr)\le\frac{2}{p}\sqrt{\operatorname{Var} \bigl(\Gamma[X] \bigr)}.
\]
As a matter of fact, for a given sequence $\{X_n\}_{n \ge1}$ in $\mathbf{Ker}(\mathbf{L}
+p\mathbf{Id})$ such that $\mathbb{E}[X_n^2]\to1$:
\[
\Gamma[X_n]\stackrel{L^2}{\rightarrow}p \quad \Rightarrow \quad
X_n \stackrel {\mathit{law}} {\rightarrow}\mathcal{N}(0,1).
\]
\end{thm}
\begin{rmk}\label{gamma-criterion}
In \cite{le}, Proposition~2, given a sequence $\{X_n\}_{n \ge1}$ in $\mathbf{Ker}
(\mathbf{L}+p\mathbf{Id})$ with $\mathbb{E}[X_n^2]\to\theta$, it is shown that
\[
\operatorname{Var} \bigl( \Gamma[X_n] - p X_n \bigr) \to0
\quad \Rightarrow\quad X_n+\theta\stackrel{\mathrm{law}} {\rightarrow}\gamma(
\theta),
\]
where $\gamma(\theta)$ stands for the gamma distribution of parameter
$\theta$. This fact will be used only in the proof of Theorem~\ref{unify-criteria}.
\end{rmk}
Furthermore, we restate below the fourth moment theorem under the
assumptions (a)--(b)--(c). Actually, it can be proved under the weaker
assumption that, for any eigenfunction $X\in\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$, we have
\[
X^2\in\bigoplus_{k\le2p} \mathbf{Ker}(\mathbf{L}+k\mathbf{Id}),
\]
which in fact is a very particular case of the assumption (c). The
stronger assumption (c) will allow us to establish analogous
statements for higher moments.
\begin{thm}[(\cite{le,CP14})]\label{Steindirichletfinal}
Let $\mathbf{L}$ be a Markov diffusive operator satisfying the assumptions
\textup{(a)}--\textup{(b)}--\textup{(c)} and $X\in\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$ with $\mathbb{E}[X^2]=1$. Then
\begin{equation}
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr)\le\frac{2}{p} \sqrt{\operatorname{Var}
\bigl(\Gamma[X] \bigr)}\le\frac{2}{\sqrt{3}} \sqrt{\mathbb{E}\bigl[X^4
\bigr]-3}.
\end{equation}
Thus, for a given sequence $\{X_n\}_{n\ge1}$ in $\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$ such
that $\mathbb{E}[X_n^2]\to1$ and $\mathbb{E}[X_n^4] \to3$, we have
\[
X_n \stackrel{\mathit{law}} {\rightarrow} \mathcal{N}(0,1).
\]
\end{thm}
\section{Algebraic framework}\label{algebraic}
The aforementioned assumptions (a)--(b)--(c) on the Markov generator $\mathbf{L}
$ can be suitably used to build an algebraic framework in order to
study properties of eigenfunctions of the generator $\mathbf{L}$. Throughout
this section, we shall use these assumptions in a natural way in order
to introduce a family of bilinear, symmetric and positive forms
$\mathcal{M}_k$. The fundamental assumption
(\ref{fundamental-assumtion}) is the crucial element yielding the
positivity of the bilinear forms $\mathcal{M}_k$.
Let $\mathbb{R}_{k}[T]$ stand for the ring of all polynomials of $T$ of degree
at most $k$ over~$\mathbb{R}$. Let $X$ be an eigenfunction of the generator $\mathbf{L}
$ with eigenvalue $-p$, that is, $- \mathbf{L} X = p X$. We consider
the following map:
\[
\mathcal{M}_k \dvtx \cases{
\displaystyle
\mathbb{R}_{k}[T] \times\mathbb{R}_{k}[T] \longrightarrow \mathbb{R},
\vspace*{3pt}\cr
\displaystyle (P,Q) \longmapsto \mathbb{E} \bigl[ Q(X) (\mathbf{L}+ k p \mathbf{Id}) P(X) \bigr].}
\]
\begin{rmk}
Notice that the mapping $\mathcal{M}_k$ strongly depends on the
eigenfunction $X$. We also remark that thanks to Remark~\ref{Hyperconractivity}, $\mathcal{M}_k$ is well defined.
\end{rmk}
The following theorem is the cornerstone of our approach.
\begin{thm}\label{thm:matrix}
The mapping $\mathcal{M}_k$ is bilinear, symmetric and nonnegative.
Moreover, its matrix representation over the canonical basis $\{ 1, T,
T^2, \break \ldots, T^k \}$ is given by $p \mathbf{M}_{k}$\vspace*{-2pt} where
\begin{equation}
\label{moment-matrix}
\mathbf{M}_{k} =
\biggl(\biggl(k - \frac{ij}{i+j-1}\biggr) \mathbb{E}\bigl[X^{i+j}\bigr]
\biggr)_{0 \le i,j \le k}
\end{equation}
with the convention that $\frac{ij}{i+j-1}=0$ for $(i,j)=(0,1)$ or $(1,0)$.
\end{thm}
\begin{pf}
Expectation is a linear operator, so the bilinearity property follows.
Symmetry proceeds from the symmetry of the diffusive generator $\mathbf{L}$.
To prove positivity of the matrix $\mathbf{M}_k$, using the
fundamental assumption (\ref{fundamental-assumtion}) we obtain that
for any polynomial $P$ of degree $\le k$,
\[
P(X)\in\bigoplus_{i \leq k p}\mathbf{Ker}(\mathbf{L}+ i \mathbf{Id}).
\]
Therefore, denoting by $J_i\dvtx L^2(E,\mu)\rightarrow\mathbf{Ker} ( \mathbf{L}+ i \mathbf{Id}
)$ the orthogonal projections,
\begin{eqnarray}
\mathbb{E}\bigl[ \bigl((\mathbf{L}+kp\mathbf{Id})P(X) \bigr)^2
\bigr]&=& \mathbb{E}\bigl[\mathbf{L} P(X) (\mathbf{L}+kp\mathbf{Id})P(X)\bigr]
\nonumber\\[-2pt]
&&{}+kp \mathbb{E}\bigl[P(X) (\mathbf{L}+kp\mathbf{Id})P(X)\bigr]
\nonumber\\[-2pt]
\label{Main-ineq}
& = &\sum_{i=0}^{kp} (- i) (kp - i) \mathbb{E}
\bigl[J_{i}^{2}\bigl(P(X)\bigr)\bigr]
\\[-2pt]
&&{}+ kp \mathbb{E}\bigl[P(X) (\mathbf{L}+kp\mathbf{Id})P(X)\bigr]
\nonumber\\[-2pt]
\nonumber
&\le& kp \mathcal{M}_k(P,P).
\end{eqnarray}
Hence, $\mathcal{M}_k$ is a positive form. To complete the proof,
notice that the $(i,j)$-component of the matrix $\mathbf{M}_k$ is given
by $\mathbb{E} [ X^j (\mathbf{L}+ kp \mathbf{Id}) X^i ]$. So, using the diffusive
property of the generator $\mathbf{L}$, we obtain
\begin{eqnarray*}
\nonumber
X^j (\mathbf{L}+ kp \mathbf{Id}) X^i
& =& i (i-1) X^{i+j-2} \Gamma(X) + p (k -i) X^{i+j}
\\
&= &\frac{i (i-1)}{i+j -1} \Gamma\bigl(X^{i+j-1}, X\bigr) + p (k -i)
X^{i+j}.
\end{eqnarray*}
Therefore,
\begin{eqnarray*}
\mathcal{M}_k\bigl(X^i,X^j
\bigr)&= &\frac{i (i-1)}{i+j -1} \mathbb{E}\bigl[\Gamma\bigl(X^{i+j-1}, X\bigr)\bigr] + p
(k -i) \mathbb{E}\bigl[X^{i+j}\bigr]
\\
&=& p\frac{ i (i-1)}{i+j-1} \mathbb{E} \bigl[X^{i+j} \bigr] + p (k-i) \mathbb{E}
\bigl[X^{i+j} \bigr]
\\
&=& p \biggl( \frac{i(i-1) + (k-i)(i+j-1)}{i+j -1} \biggr) \mathbb{E} \bigl[X^{i+j} \bigr]
\\
&=& p \biggl(k - \frac{ij}{i+j-1} \biggr) \mathbb{E} \bigl[X^{i+j} \bigr].
\end{eqnarray*}
\upqed\end{pf}
\begin{rmk}\label{mainrmk}
In Theorem~\ref{thm:matrix}, we only stated the positivity of the
family of quadratic forms $\mathcal{M}_k$. However, it is worth
mentioning that, thanks to the inequality~(\ref{Main-ineq}), each quadratic
form $\mathcal{M}_k$ dominates the nonnegative quadratic form
\[
P \longmapsto\mathbb{E}\bigl[ \bigl\{(\mathbf{L}+ kp \mathbf{Id})P(X) \bigr\}^2\bigr].
\]
\end{rmk}
\begin{cor}\label{determinant}
For any eigenfunction $X$ of the generator $\mathbf{L}$ with eigenvalue $-p$,
that is, $- \mathbf{L}(X)=pX$:
\begin{longlist}[(ii)]
\item[(i)] All the eigenvalues of matrix $\mathbf{M}_k$ are nonnegative.
\item[(ii)] All the $l$th leading principal minors of the
matrix $\mathbf{M}_k$ are nonnegative for $l\le k$.
\end{longlist}
\end{cor}
\begin{pf}
The proof follows directly from standard linear algebra (see, e.g.,
\cite{S}).
\end{pf}
The moments matrix $\mathbf{M}_k$ can help one to give nontrivial
moment inequalities, sometimes sharper than the existing estimates so
far, involving the moments of the eigenfunctions of a generator $\mathbf{L}$.
Here is an application where we sharpen the standard fourth moment
inequality $\mathbb{E}[X^4]\geq3\mathbb{E}[X^2]^2$. We mention that the next theorem
unifies the two well-known criteria of convergence in law
(i.e., Gaussian and Gamma approximation) for a sequence of random
variables inside a fixed Wiener chaos; see \cite{n-p-05,n-pe-2}.
\begin{thm}\label{unify-criteria}
If $X$ is a nonzero eigenfunction of generator $\mathbf{L}$, then
\begin{equation}
\label{4moment}
\frac{\mathbb{E}[X^4]}{3} - \mathbb{E}\bigl[X^2\bigr]^2
\ge\frac{\mathbb{E}[X^3]^2}{2 \mathbb{E}[X^2]}.
\end{equation}
Moreover, assume that $X_n \in\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$ for each $n \ge1$ and
\begin{equation}
\label{equalimit}
\frac{\mathbb{E}[X_n^4]}{3} - \mathbb{E}\bigl[X_n^2
\bigr]^2 -\frac{\mathbb{E}[X_n^3]^2}{2 \mathbb{E}[X_n^2]}\to0.
\end{equation}
Then every adherence value in distribution of the sequence $\{X_n\}
_{n\ge1}$ is either a Gaussian or a scaling of a centered Gamma random
variable.
\end{thm}
\begin{pf}
The moments matrix $\mathbf{M}_2$ associated to $X$ is given by
\begin{equation}
\mathbf{M}_2(X)=
\pmatrix{ 2 & 0 & 2 \mathbb{E}
\bigl[X^2\bigr] \vspace*{3pt}
\cr
0 & \mathbb{E}\bigl[X^2\bigr] &
\mathbb{E}\bigl[X^3\bigr]\vspace*{3pt}
\cr
2 \mathbb{E}\bigl[X^2\bigr]& \mathbb{E}
\bigl[X^3\bigr] & \displaystyle\tfrac{2}{3} \mathbb{E}\bigl[X^4\bigr] }.
\end{equation}
Using Corollary~\ref{determinant}, we infer that
\[
\operatorname{det}(\mathbf{M}_2) = 4 \mathbb{E}\bigl[X^2
\bigr] \biggl\{ \frac{\mathbb{E}[X^4]}{3} - \mathbb{E} \bigl[X^2\bigr]^2
\biggr\} - 2 \mathbb{E}\bigl[X^3\bigr]^{2} \ge0,
\]
which immediately implies (\ref{4moment}).
Up to extracting a
subsequence, we may assume that $X_n \to X_\infty$ in distribution. We
further assume that $X_\infty\neq0$. Assumption (\ref{equalimit})
entails that
\[
\det\mathbf{M}_2(X_n)\to0.
\]
Let $V_n= (\frac{2}{3}\mathbb{E}[X_n^4] \mathbb{E}[X_n^2]-\mathbb{E}[X_n^3]^2, 2 \mathbb{E}
[X_n^2] \mathbb{E}[X_n^3], -2\mathbb{E}[X_n^2]^2)$ be the first line of the
adjugate matrix of $\mathbf{M}_2(X_n)$. Since $X_n$ converges in
distribution, we have
$V_n \to V_\infty=(a,b,c)$. We set $P(X)=c X^2+bX+a$. As a result, we have:
\begin{equation}
\label{polynomialstuff}
\mathcal{M}_2(X_n) (P,P)\to0.
\end{equation}
Using Remark~\ref{mainrmk}, we see that
\[
\mathbb{E} \bigl[ \bigl\{(\mathbf{L}+2p\mathbf{Id})P(X_n) \bigr\}^2 \bigr]
\to0.
\]
Next,
\begin{eqnarray*}
(\mathbf{L}+2p\mathbf{Id})P(X_n)&=&c(\mathbf{L}+2p\mathbf{Id})X_n^2+b p
X_n+2 a p
\\
&=& 2 c \Gamma(X_n)+b p X_n+2 a p.
\end{eqnarray*}
We notice that $c\neq0$ since $X_\infty\neq0$. Now two possible
cases can happen.
\begin{longlist}[\textit{Case} (2):]
\item[\textit{Case} (1):] If $\mathbb{E}[X_n^3]\to0$, then $b=0$. Hence, we have $\mathbb{E}
[\Gamma(X_n)+\frac{a p}{c} ]^2\to0$ and therefore the
sequence $\{X_n\}_{n \ge1}$ converges toward a Gaussian random variable.
See Theorem~\ref{Gamma-bound-general}.
\item[\textit{Case} (2):] If $\mathbb{E}[X_n^3] \not\to0$, then $b\neq0$. Hence, we
have $\mathbb{E}[\Gamma(X_n)+\frac{b p}{2c} X_n+\frac{a p}{c}]^2\to0$. We set
$X_n=\lambda Y_n$ and we may choose $\lambda$ in such way that
\[
\operatorname{Var} \bigl(\Gamma(Y_n)-p Y_n \bigr)\to0.
\]
This enables us to use the content of the Remark~\ref{gamma-criterion}
and assert that $Y_n+\mathbb{E}[Y_n^2]$ converges in distribution toward a gamma
random variable. Hence, $X_n$ converges in distribution toward a
scaling of a centered gamma law.\quad\qed
\end{longlist}
\noqed\end{pf}
It is clear that Theorem~\ref{unify-criteria} also gives back the
fourth moment theorem in (a)--(b)--(c) structures from the fact that a
random variable $G$ satisfying $\mathbb{E}[G^2]=1$, $\mathbb{E}[G^3]=0$ and $\mathbb{E}[G^4]=3$
cannot have a gamma distribution.
The following proposition states a nontrivial inequality between the
second, fourth and sixth moments of eigenfunctions of $\mathbf{L}$.
\begin{prop}\label{6moment-prop}
If $X$ is an eigenfunction of $\mathbf{L}$, then
\begin{equation}
\label{6moment1}
\mathbb{E}\bigl[X^4\bigr]^2\leq\tfrac{3}{5}
\mathbb{E}\bigl[X^6\bigr] \mathbb{E}\bigl[X^2\bigr].
\end{equation}
\end{prop}
\begin{rmk}
Notice that this inequality is an equality when the distribution of $X$
is\vspace*{-1pt} Gaussian.
\end{rmk}
\begin{pf*}{Proof of Proposition~\ref{6moment-prop}}
The moments matrix $\mathbf{M}_3$ associated to $X$ has the form\vspace*{-3pt}
\begin{equation}
\mathbf{M}_3=
\pmatrix{ 3 & \star& 3 \mathbb{E}
\bigl[X^2\bigr] & \star\vspace*{1.5pt}
\cr
\star& 2 \mathbb{E}
\bigl[X^2\bigr] & \star& 2 \mathbb{E}\bigl[X^4\bigr]\vspace*{1.5pt}
\cr
3 \mathbb{E}\bigl[X^2\bigr] &\star &\displaystyle \tfrac{5}{3} \mathbb{E}
\bigl[X^4\bigr] & \star\vspace*{1.5pt}
\cr
\star& 2 \mathbb{E}
\bigl[X^4\bigr] & \star& \displaystyle\tfrac{6}{5} \mathbb{E}\bigl[X^6
\bigr] }.
\end{equation}
Since this matrix is positive, we have in particular
\[
\left\vert
\matrix{ 2\mathbb{E}\bigl[X^2\bigr] & 2\mathbb{E}
\bigl[X^4\bigr]
\vspace*{1.5pt}\cr
2\mathbb{E}\bigl[X^4\bigr] & \displaystyle\tfrac{6}{5} \mathbb{E}\bigl[X^6
\bigr] }
\right\vert \geq0,
\]
which gives the claimed inequality.\vspace*{-1pt}
\end{pf*}
Using Proposition~\ref{6moment-prop}, we can already prove the
following sixth moment theorem, that is, Theorem~\ref{CLTFMquibourre}
in the case $k=3$. Note that we will get back this result when we will
prove our main result (Section~\ref{sectioevnemoment}).\vspace*{-1pt}
\begin{cor}\label{6moment}
A sequence $\{X_n\}_{n \ge1}$ such that $X_n \in\mathbf{Ker}(\mathbf{L}+ p \mathbf{Id})$ for
each $n \ge1$, converges in distribution toward the standard Gaussian
law if and only if $\mathbb{E}[X_n^2]\to1$ and $ \mathbb{E}[X_n^6]\to15$.
\end{cor}
\begin{pf}
By Proposition~\ref{6moment-prop}, for $X \in\mathbf{Ker} ( \mathbf{L}+ p \mathbf{Id}
)$, we have
\[
\mathbb{E}\bigl[X^6\bigr]\geq\frac{5}{3}\frac{\mathbb{E}[X^4]^2}{\mathbb{E}[X^2]}\geq
\frac{5}{3}\frac
{(3\mathbb{E}[X^2]^2)^2}{\mathbb{E}[X^2]}=15\mathbb{E}\bigl[X^2
\bigr]^3.
\]
Therefore, for the sequence $\{X_n\}_{n\geq1}$ in $\mathbf{Ker} ( \mathbf{L}+ p
\mathbf{Id} )$, if $\mathbb{E}[X_n^2]\to1$ and $\mathbb{E}[X_n^6]\to15$, then from the
previous chain of inequalities, we deduce that $\mathbb{E}[X_n^4]\to3$.
Hence, the sequence $\{X_n\}_{n \ge1}$ converges in distribution toward
$\mathcal{N}(0,1)$ according to Theorem~\ref{Steindirichletfinal}.
\end{pf}
\section{New central limit theorems}\label{Central-limit-main}
In this section, we will establish our main criteria for central
convergence. In a first subsection, we will first focus on the main
theorem of the paper, the so-called \textit{even moment criterion}. In a
second subsection, we will give additional criteria of central
convergence. As before, we work under assumptions (a)--(b)--(c) stated in\vspace*{-2pt}
Section~\ref{setup}.
\subsection{The even moment criterion}\label{sectioevnemoment}
We state below our main result. Note that Theorem~\ref{CLTFMquibourre}
is a particular case of Theorem~\ref{2-2k-moment}, by simply choosing
$\mathbf{L}$ to be the Ornstein--Uhlenbeck generator.
\begin{thm}\label{2-2k-moment}
Let $\mathbf{L}$ be a Markov operator satisfying \textup{(a)}--\textup{(b)}--\textup{(c)}, $p\ge1$ be an
eigenvalue of $-\mathbf{L}$, and $\{X_n\}_{n\geq1}$ a sequence\vspace*{1pt} of elements in
$\mathbf{Ker} ( \mathbf{L}+ p \mathbf{Id} )$ for all $n \ge1$, such that
$\lim_{n \to\infty}\mathbb{E} [X_n^2 ]=1$. Then, for any integer
$k\ge2$, as $n \to\infty$, we have
\begin{equation}
\label{2-2k-criterion} X_n \stackrel{\mathit{law}} {\rightarrow}
\mathcal{N}(0,1)\quad \mbox{if and only if} \quad\mathbb{E}\bigl[X_n^{2k}
\bigr] \to\mathbb{E}\bigl[N^{2k}\bigr]=(2k-1)!!.
\end{equation}
\end{thm}
The proof of Theorem~\ref{2-2k-moment} is rather lengthy; it is thus
divided in three steps which are detailed below.
\begin{pf*}{Sketch of the proof}
\textit{Step} (1): We find a family $\mathscr{P}=\{ W_k \vert k \ge2\}$ of
real polynomials which satisfies the two following properties:
\begin{longlist}[(ii)]
\item[(i)] $\mathbb{E} \bigl[W_k(X_n) \bigr]\ge0, \forall k
\ge2, \forall n \ge1$,
\item[(ii)] $X_n \stackrel{\mathrm{law}} {\rightarrow}
\mathcal{N}(0,1) \mbox{ if and only if } \mathbb{E} \bigl[W_2(X_n)
\bigr]\to0$, as $n \to\infty$.
\end{longlist}
\textit{Step} (2): In the second step, we construct a polynomial $T_k$
such that, under the assumptions of Theorem~\ref{2-2k-moment}, we have
\[
T_k = \sum_{i=2}^k
\alpha_{i,k} W_i \qquad \mbox{such that for all }i,\alpha_{i,k}>0,
\]
and
\[
\mathbb{E} \bigl[T_k(X_n) \bigr] \to 0 \qquad \mbox{as } n \to\infty.
\]
\textit{Step} (3): In the last step, using the fact that $\alpha
_{i,k}>0$ and property (i) of step~(1), we obtain that $\mathbb{E}
[W_2(X_n) ]\to0$. Finally, using property (ii) of step (1), we
complete the proof.
\end{pf*}
\begin{pf*}{Proof of Theorem~\protect\ref{2-2k-moment}}
The ``if'' part is a
simple consequence of Lemma~\ref{Hypercontract}. For the ``only if''
part, we go into the details of the three aforementioned steps.
\begin{longlist}[\textit{Step} (1):]
\item[\textit{Step} (1):] First, we introduce the suitable family $\mathscr{P}$ of
polynomials. To this end, we denote by $\{H_k\}_{k\ge0}$ the family of
Hermite polynomials defined by the recursive relation
\begin{equation}
H_{0}(x) =1,\qquad H_1(x)=x,\qquad H_{k+1}(x) = x
H_k (x)-k H_{k-1}(x).
\end{equation}
For any $k \ge2$, we define the polynomial $W_k$ as
\begin{equation}
\label{Wk}
W_k(x) = (2k-1) \biggl( x \int_{0}^{x}
H_{k}(t)H_{k-2}(t)\, d t - H_{k}(x)H_{k-2}(x)
\biggr),
\end{equation}
and the family $\mathscr{P}$ as
\begin{equation}
\label{eq:polynomial-family}
\mathscr{P}= \Biggl\{ P \Big\vert P(x)= \sum_{k=2}^{m}
\alpha_k W_k(x); m \ge2, \alpha_k \ge0, 2\le k \le m \Biggr\}.
\end{equation}
The family $\mathscr{P}$ encodes interesting properties of central convergence
which are the content of the two next lemmas. Below, Lemma~\ref{main1}
will provide the answer to property (i) of step (1).
\end{longlist}
\begin{lma}\label{main1}
Let $\mathbf{L}$ be a general Markov generator satisfying assumptions
\textup{(a)}--\textup{(b)}--\textup{(c)} in Section~\ref{setup}, and let $P$ be a polynomial
belonging to $\mathscr{P}$. Then:
\begin{longlist}[(2)]
\item[(1)] If $N\sim\mathcal{N}(0,1)$, $\mathbb{E}[P(N)]=0$.
\item[(2)] If $X$ is an eigenfunction of $\mathbf{L}$, $\mathbb{E}[P(X)]\geq0$.
\end{longlist}
\end{lma}
\begin{pf}
It is enough to prove that $\mathbb{E}[W_k(X)]\geq0$ and $\mathbb{E}[W_k(N)]=0$. Using
the diffusive property (\ref{diff}), the fact that $-\mathbf{L} X = p X$ and
the recursive property of Hermite polynomials, we obtain that
\begin{eqnarray}
(\mathbf{L}+ k p \mathbf{Id}) H_{k}(X)& =& H_{k}^{\prime\prime}(X)
\Gamma(X) + H_{k}^{\prime}(X) \mathbf{L}(X) + k p H_{k}(X)
\nonumber\\
& = & H_{k}^{\prime\prime}(X) \Gamma(X) - p X H_{k}^{\prime}(X)
+k p H_{k}(X)
\nonumber
\\[-8pt]
\label{computation1}
\\[-8pt]
\nonumber
&= & H_{k}^{\prime\prime}(X) \bigl( \Gamma(X) - p \bigr)
\\
\nonumber
&=& k(k-1) H_{k-2}(X) \bigl( \Gamma(X) - p \bigr).
\end{eqnarray}
Therefore,
\begin{eqnarray}
\mathcal{M}_k(H_k)&=& \mathbb{E} \bigl[
H_{k}(X) ( \mathbf{L}+ k p \mathbf{Id} ) H_{k}(X) \bigr]
\nonumber
\\[-8pt]
\label{combination22}
\\[-8pt]
\nonumber
&= & k (k-1) \mathbb{E} \bigl[ H_{k}(X) H_{k-2}(X) \bigl( \Gamma(X) -
p \bigr) \bigr].
\end{eqnarray}
Next, by the integration by parts formula (\ref{by-parts}), we have
\begin{eqnarray}
&& \mathbb{E} \bigl[ H_{k}(X) H_{k-2}(X)
\bigl( \Gamma(X) - p \bigr) \bigr]
\nonumber\\
&&\qquad = \mathbb{E} \biggl[ \Gamma \biggl(\int
_{0}^{X} H_{k}(t) H_{k-2}(t)
\,d t , X \biggr) \biggr]
- p \mathbb{E} \bigl[ H_{k}(X) H_{k-2}(X) \bigr]
\nonumber
\\[-8pt]
\label{combination2}
\\[-8pt]
\nonumber
&&\qquad=p \mathbb{E} \biggl[ X \int_{0}^{X}
H_{k}(t) H_{k-2}(t)\, d t - H_{k}(X)
H_{k-2}(X) \biggr]
\\
\nonumber
&&\qquad= \frac{p}{2k-1} \mathbb{E} \bigl[ W_k(X) \bigr].
\end{eqnarray}
Hence,
\[
\mathcal{M}_k(H_k)=\frac{pk(k-1)}{2k-1} \mathbb{E} \bigl[
W_k(X) \bigr],
\]
and the inequality $\mathbb{E}[W_k(X)]\geq0$ follows from the positivity of
the bilinear form $\mathcal{M}_k$. Finally, choosing $\mathbf{L}$ to be the
Ornstein--Uhlenbeck generator and $X=N$ a standard Gaussian random
variable living in the first Wiener chaos (i.e., $p=1$) with variance
$1$, then $\Gamma(N)=p=1$ and computation (\ref{combination2}) shows
that $\mathbb{E}[W_k(N)]=0$ for every $k\geq2$. Hence,
$\mathbb{E}[P(N)]=0$ for every $P \in\mathscr{P}$.
\end{pf}
The next lemma is central in the proof of the even moment Theorem~\ref
{2-2k-moment}. In fact, the next lemma will provide answer to property
(ii) of step (1).
\begin{lma}\label{carac}
Assume that $\mathbf{L}$ is a general Markov generator satisfying assumptions
\textup{(a)}--\textup{(b)}--\textup{(c)} of Section~\ref{setup}. Let $p \ge1$ and $\{X_n\}_{n\geq
1}$ a sequence\vspace*{1pt} of elements in $\mathbf{Ker} ( \mathbf{L}+ p \mathbf{Id} )$
for all $n \ge1$. Let $P=\sum_{k=2}^m \alpha_k W_k \in\mathscr{P}$ such that
$\alpha_2\neq0$. Then, as $n \to\infty$, we have
\[
X_n \stackrel{\mathit{law}} {\rightarrow} \mathcal{N}(0,1)\quad \mbox{if
and only if}\quad \mathbb{E}\bigl[P(X_n)\bigr] \to\mathbb{E}\bigl[P(N)\bigr]=0.
\]
\end{lma}
\begin{pf}
In virtue of Lemma~\ref{main1},
\begin{eqnarray*}
\mathbb{E}\bigl[P(X_n)\bigr]&=&\sum_{k=2}^m
\alpha_k \mathbb{E}\bigl[W_k(X_n)\bigr]
\\
&\ge& \alpha_2 \mathbb{E}\bigl[W_2(X_n)\bigr]
\\
&=&\alpha_2 \bigl(\mathbb{E}\bigl[X_n^4\bigr]-6\mathbb{E}
\bigl[X_n^2\bigr]+3 \bigr).
\end{eqnarray*}
This leads to
\[
0\leq\mathbb{E}\bigl[X_n^4\bigr]-6\mathbb{E}\bigl[X_n^2
\bigr]+3 \leq\frac{1}{\alpha_2} \mathbb{E}\bigl[P(X_n)\bigr].
\]
By assumption, $\mathbb{E}[P(X_n)]\to0$, so $\mathbb{E}[X_n^4]-6\mathbb{E}[X_n^2]+3 \to0$. On
the other hand,
\[
\mathbb{E}\bigl[X_n^4\bigr]-6\mathbb{E}\bigl[X_n^2
\bigr]+3=\mathbb{E}\bigl[X_n^4\bigr]-3 \mathbb{E}\bigl[X_n^2
\bigr]^2+3\bigl(\mathbb{E}\bigl[X_n^2\bigr]-1
\bigr)^2.
\]
Thus, we obtain that $\mathbb{E}[X_n^2] \to1$ and $\mathbb{E}[X_n^4]\to3$, and we can
use Theorem~\ref{Steindirichletfinal} to conclude.
\end{pf}
\begin{longlist}[\textit{Step} (2):]
\item[\textit{Step} (2):] This step consists in finding a suitable polynomial
$T_k \in\mathscr{P}$ of the form
\begin{equation}
\label{Tk}
T_k (x) = x^{2k} - \alpha_k
x^2 + \beta_k,\qquad \alpha_k, \beta_k
\in \mathbb{R}.
\end{equation}
\end{longlist}
To find such a polynomial, notice that according to step (1), the
function $\phi_k\dvtx x\mapsto\mathbb{E}[T_k(xN)]$ must be nonnegative and vanish at
$x=1$; in other words, $\phi_k$ attains its minimum at $x=1$. Hence, we must have
$\phi_k(1)=\phi_k '(1)=0$. This leads us to the following system of equations:
\[
\cases{
(2k-1)!!-\alpha_k+
\beta_k=0,
\vspace*{2pt}\cr
2k (2k-1)!! -2\alpha_k=0.}
\]
Therefore, the coefficients $\alpha_k$ and $\beta_k$ are necessarily
given by
\[
\alpha_k = k (2k-1)!! \quad\mbox{and}\quad \beta_k= (k-1)
(2k-1)!!.
\]
It remains to check that the corresponding polynomial $T_k(x)=x^{2k} -
k (2k-1)!! x^2 + (k-1) (2k-1)!! \in\mathscr{P}$. To this end, one needs
to show that $T_k$ can be expanded over the basis
$\{W_k\}_{k \geq2}$ with positive coefficients. We answer this in
the affirmative with the next proposition, which also provides an
explicit formula for the coefficients.
\begin{prop}\label{tenduduslip}
Let $k \geq2$, and $T_k(x)=x^{2k} - k (2k-1)!! x^2+ (k-1)
(2k-1)!!$. Then
\begin{equation}
\label{eq:expx2kk}
T_k(x)=\sum_{i=2}^k
\alpha_{i,k} W_i(x),
\end{equation}
where
\[
\alpha_{i,k} = \frac{(2k-1)!!}{2^{i-1}(2i-1)(i-2)!}
\pmatrix{k
\cr
i}
\int_0^1(1-u)^{-1/2}
u^{i-2} \biggl(1-\frac{u}{2} \biggr)^{k-i} \,du.
\]
In particular, $T_k\in\mathscr{P}$ and $\alpha_{2,k}>0$ for all $k\ge2$.
\end{prop}
The proof of this proposition is rather involved and can be found in
the \hyperref[app]{Appendix}.
\begin{longlist}[\textit{Step} (3):]
\item[\textit{Step} (3):] Let $p \ge1$. Assume that $\{X_n\}_{n\geq1}$ is a
sequence\vspace*{1pt} of elements of $\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$ for all $n \geq1$ such that
$\lim_{n\to\infty} \mathbb{E} [X_n^2 ]=1$. We further assume that
$\mathbb{E} [X_n^{2k} ]\to(2k-1)!!$. Using step (2), we have
\begin{eqnarray*}
\mathbb{E} \bigl[T_k(X_n) \bigr]&=&\mathbb{E} \bigl[X_n^{2k}
\bigr]-k (2k-1)!! \mathbb{E} \bigl[X_n^2\bigr]+(k-1) (2k-1)!!
\\
&\to& 0.
\end{eqnarray*}
To finish the proof, by step (2), we know that $T_k\in\mathscr{P}$ and
$\alpha_{2,k}>0$. Thus, Lemma~\ref{carac} applies and one gets the desired
conclusion.\quad\qed
\end{longlist}
\noqed\end{pf*}
We end this section with the following result containing a quantitative
version of the Theorem~\ref{2-2k-moment}. We remark that item (1) of
Theorem~\ref{last-thm} contains Theorem~\ref{superquantification} in
the \hyperref[sec1]{Introduction} by assuming $\mathbf{L}$ to be Ornstein--Uhlenbeck operator.
\begin{thm}\label{last-thm}
Let $\mathbf{L}$ be a Markov operator satisfying assumptions \textup{(a)}--\textup{(b)}--\textup{(c)} of
Section~\ref{setup}. Let $p\ge1$ and $X$ be an eigenfunction of $\mathbf{L}$
with eigenvalue $p$ such that $\mathbb{E}[X^2]=1$. Assume that $k \ge2$. Then
\begin{longlist}[(2)]
\item[(1)] We have the following general quantitative bound:
\begin{equation}
\label{boundclt}
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr)\le
C_k\sqrt{\frac{\mathbb{E}
[X^{2k} ]}{(2k-1)!!}-1},
\end{equation}
where the constant $C_k=\frac{4}{\sqrt{ 2k(k-1) \int_0^1 (({1+t^2})/{2})^{k-2} \,dt}}$.
\item[(2)] The moment estimate $\mathbb{E}[X^{2k}] \ge\mathbb{E}[N^{2k}]=(2k-1)!!$ holds.
\end{longlist}
\end{thm}
\begin{pf}
Taking into account Remark~\ref{mainrmk}, for any polynomial $P=\break \sum_{k=2}^m \alpha_k W_k$ in family $\mathscr{P}$, we obtain that
\[
\mathbb{E}\bigl[P(X)\bigr]\geq\frac{1}{p^2}\sum_{k=2}^m
(2k-1) (k-1)\alpha_k\mathbb{E} \bigl[ H_{k-2} (X)^2
\bigl( \Gamma(X) - p \bigr)^2 \bigr].
\]
By applying the latter bound to $P=T_k$ and using Proposition~\ref
{tenduduslip}, we infer that
\begin{eqnarray*}
\mathbb{E}\bigl[T_k(X)\bigr]&\geq& \frac{1}{p^2}\sum
_{i=2}^m (2i-1) (i-1)\alpha_{i,k}\mathbb{E}
\bigl[ H_{i-2} (X)^2 \bigl( \Gamma(X) - p
\bigr)^2 \bigr]
\\
&\ge& \frac{3 \alpha_{2,k}}{p^2} \mathbb{E} \bigl[ \bigl( \Gamma(X) - p \bigr)^2
\bigr].
\end{eqnarray*}
On the other hand, Proposition~\ref{tenduduslip} shows that
\[
\alpha_{2,k}=\frac{(2k-1)!!}{6}\pmatrix{k
\cr
2}\int
_0^1 (1-u)^{-{1}/{2}} \biggl(1-
\frac{u}{2}\biggr)^{k-2}\,du.
\]
This leads us to
\begin{eqnarray}
\qquad\mathbb{E}\bigl[X^{2k}\bigr]-(2k-1)!!
&\geq & \biggl(
\frac{(2k-1)!! }{4}k (k-1) \int_0 ^1
\frac{1}{\sqrt{1-u}} \biggl(1-\frac{u} 2 \biggr)^{k-2}\,du \biggr)
\nonumber
\\[-8pt]
\\[-8pt]
\nonumber
&&{}\times
\mathbb{E} \biggl[ \biggl(\frac{\Gamma(X)}{p}-1 \biggr)^2 \biggr].
\end{eqnarray}
Now,\vspace*{-1pt} the desired inequality follows from
Theorem~\ref{Steindirichletfinal} and identity\break $\int_0 ^1 \frac{1}{\sqrt
{1-u}}(1-\frac{u} 2)^{k-2}\,du=2 \int_0^1 (\frac{1+t^2}{2})^{k-2} \,dt$. We
stress that by taking
$k=2$ in \eqref{boundclt}, we recover the well-known bound (see,
e.g., \cite{Optimal,n-p-2}):
\[
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr)\le
\frac{2}{\sqrt
{3}}\sqrt{\mathbb{E} \bigl[X^4
\bigr]-3}.
\]
The\vspace*{1pt} second item (2) easily follows from the fact that $\mathbb{E}[T_k(X)] \ge
0$. When $\mathbb{E}[X^2]\neq1$, using the normalized random variable $\tilde
{X}=\frac{X}{\sqrt{\mathbb{E}[X^2]}}$, we obtain the inequality
$\mathbb{E}[X^{2k}] \ge\mathbb{E}^k[X^2] \mathbb{E}[N^{2k}]$ for all $k \ge1$.
\end{pf}
\begin{rmk}
The statement (2) of Theorem~\ref{last-thm} does not hold for any kind
of Markov operators. Below, we present a simple counterexample.\vspace*{1pt} Let $U$
denote a uniform random variable on the interval $(-1,1)$. Set
$X=U^2- \frac{1}{3}$. Then\vspace*{1pt} $X$ belongs to the second Wiener chaos of
the Jacobi structure (see \cite{a-c-p}, Section~4) with parameters
$\alpha=\beta=1$. Besides, $\mathbb{E}[X^2]=\frac{4}{45}$. Then it is
straightforward to check that the inequality $\mathbb{E}[X^{2k}] \ge\mathbb{E}[N^{2k}]
\mathbb{E}^k[X^2]$ in the item (2) of Theorem~\ref{last-thm} does not hold even
for $k=2$.
This is mainly because the assumption (c) fails in this setup. Roughly
speaking, the spectrum of Jacobi operators has a quadratic growth
whereas our assumption suggests a linear growth.
\end{rmk}
\begin{rmk}\label{studentexample}
Here, we give a concrete application of Theorem~\ref{last-thm} in some
situation where the usual criteria in the Wiener space fail. Let $\nu
\ge1$ be an integer number. Assume that $\{Q_n\}_{n \ge1}$ is a
sequence of i.i.d. random variables having chi-squared distribution
with $\nu$ degrees of freedom. We are also given $\{N_n\}_{n\ge1}$ an
independent sequence of i.i.d. standard Gaussian random variables. As a result,
$ \{ S_n \}_{n \ge1}= \{ N_n \times\sqrt{\frac{\nu
}{Q_n}} \}_{n \ge1}$ is a sequence of i.i.d. Student random
variables with $\nu$ degrees of freedom. Now, set
\[
X=\sum_{1\le i_1<i_2<\cdots<i_p}^{\infty} \alpha(i_1,
\ldots,i_p) S_{i_1}\cdots S_{i_p},
\]
such that $\mathbb{E} [X^2 ]=1$. Relying on the superposition
procedure (see Section~\ref{examples-assumptions}) and Theorem~\ref
{last-thm}, if $\nu> 2k$, it can be shown that
\begin{equation}
\label{student-ex}
d_{\mathrm{TV}} \bigl(X,\mathcal{N}(0,1) \bigr)\le
C_k \sqrt{\frac{\mathbb{E}
[X^{2k} ]}{(2k-1)!!}-1}.
\end{equation}
In addition, since $X$ does not have moments of all orders, $X$ does
not belong to any Wiener chaos and therefore the estimate (\ref
{student-ex}) is strictly beyond existing moments-based total-variation
estimates on Wiener space.
\end{rmk}
\subsection{Other polynomial criteria for central convergence}
In the previous section, in order to prove the even moment theorem, we
use heavily the fourth moment Theorem~\ref{Steindirichletfinal}. The
reason is that in the decomposition of $T_k$ over the
basis $\{W_k\}_{k\geq2}$, the coefficient $\alpha_2$ in front of $W_2$
is strictly positive. It is then natural to consider the cases where
$\alpha_2 =0$, which turns out to be more delicate. The main result
of this section is the following.
\begin{thm}\label{thm:main1}
Let $\mathbf{L}$ be a general Markov generator satisfying assumptions
\textup{(a)}--\textup{(b)}--\textup{(c)} in Section~\ref{setup}. Assume that $\{X_n\}_{n \ge1}$ is
a sequence of eigenfunctions of~$\mathbf{L}$ with eigenvalue $-p$, that is, $-
\mathbf{L} X_n = p X_n$ for
each $n$. We suppose that $P=\sum_{k=2}^m \alpha_k W_k$ is a nonzero
polynomial belonging to the family $\mathscr{P}$, such that as $n \to\infty$,
we have
\begin{equation}
\label{poly-condi}
\mathbb{E}\bigl[P(X_n)\bigr] \to\mathbb{E}\bigl[P(N)\bigr]=0.
\end{equation}
Then, as $n \to\infty$, the two following statements hold:
\begin{longlist}[(2)]
\item[(1)] If there exist at least two indices $2 <i < j$ such that
$\alpha_i \alpha_j>0$ and $i$ or $j$ is even, then
\[
X_n \stackrel{\mathit{law}} {\rightarrow}\mathcal{N}(0,1).
\]
\item[(2)] If there exist at least two indices $2< i < j$ such that
$\alpha_i \alpha_j>0$ and both $i$ and $j$ are odd integers, then each
accumulation point of sequence $\{X_n\}_{n \ge1}$ in distribution is
in the form
\[
\alpha\mathcal{N}(0,1)+(1-\alpha)\delta_0
\]
for some $\alpha\in[0,1]$.
\end{longlist}
\end{thm}
\begin{pf}
We will consider each case separately.
\begin{longlist}[\textit{Case} (1):]
\item[\textit{Case} (1):] Let us notice that there exist $A>0$ and $B\in\mathbb{R}$
such that $\forall x\in\mathbb{R}, P(x)\ge Ax^2+B$. Then $Ax^2\le P(x)-B$. By
assumption, $\mathbb{E}[P(X_n)]\rightarrow0$, so $\mathbb{E}[P(X_n)-B]$ is bounded and
$\mathbb{E}[X_n^2]$ is bounded as
well. Hence, by Lemma~\ref{Hypercontract}, the sequence $\{X_n\}_{n\ge
1}$ is bounded in $L^p(E,\mu)$ for each $p\ge1$. Since $\Gamma
(X_n)=\frac{1}{2}(\mathbf{L}+2p\mathbf{Id})[X_n^2]$, and because of the fact that $\mathbf{L}
$ is a continuous operator when its domain is restricted to a finite
sum of eigenspaces of $\mathbf{L}$, $\Gamma(X_n)$ is also bounded in any
$L^p(E,\mu)$. Finally, up to extracting a subsequence, we may assume
that the sequence of random vectors $ \{(X_n,\Gamma(X_n)) \}
_{n\ge1}$ converges in distribution toward a random vector $(U,V)$. As
a consequence of Remark~\ref{mainrmk}, we have
\begin{eqnarray*}
\mathbb{E}\bigl[H_{i-2}(X_n)^2 \bigl(
\Gamma[X_n]-p \bigr)^2\bigr] &\to & 0,
\\
\mathbb{E}\bigl[H_{j-2}(X_n)^2 \bigl(
\Gamma[X_n]-p \bigr)^2\bigr] &\to& 0.
\end{eqnarray*}
Recalling that $\{(X_n,\Gamma(X_n))\}_{n\ge1}$ converges in
distribution toward $(U,V)$, we infer that almost surely
\begin{equation}
\label{intermediarystep}
H_{i-2}(U) (V-p )=H_{j-2}(U) (V-p )=0.
\end{equation}
Thus, on the set $\{V\neq p\}$, we have $H_{i-2}(U)=H_{j-2}(U)=0$. But
the roots of two Hermite polynomials of different orders are distinct
if at least one of the orders is even. By assumption, either
$i-2$ or $j-2$ is even, and we conclude that $\mathbb{P} (V\neq p )=0$.
This proves that any accumulation point (in distribution) of the
sequence $\{\Gamma(X_n)\}_{n\ge1}$ is $p$, and, as a consequence, the
sequence $\Gamma(X_n)$ converges to $p$ in~$L^2$. Now, we can conclude
by using Theorem~\ref{Gamma-bound-general}.
\item[\textit{Case} (2):] Following the same line of reasoning as in case (1),
we obtain:
\[
H_{i-2}(U) (V-p )=H_{j-2}(U) (V-p )=0,\qquad \mbox{a.s.}
\]
On the set $\{V\neq p\}$, we have $H_{i-2}(U)=H_{j-2}(U)=0$. But the
roots of two Hermite polynomials with odd orders only coincide at $0$.
This implies $U (V-p)=0$ almost surely. Now, let $\phi$ be any test
function. Using the integration by parts formula \eqref{by-parts} with
$Y=X_n$ and $X=\phi(X_n)$ and letting $n \rightarrow+\infty$, one
leads to
\begin{equation}
\label{intermediary2}
\mathbb{E}\bigl[\phi'(U) V\bigr]=p\mathbb{E}\bigl[U\phi(U)\bigr].
\end{equation}
Splitting the expectations in \eqref{intermediary2} into the disjoint
sets $\{V=p\}$ and $\{V\neq p\}$, we obtain
\begin{equation}
\label{intermediary3}
p\mathbb{E}\bigl[ \bigl(\phi'(U)-U\phi(U) \bigr)
\mathbh{1}_{\{V=p\}}\bigr]+\phi'(0)\mathbb{E}[V\mathbh{1}_{\{V\neq p\}}]=0.
\end{equation}
Take $\phi(x)=e^{i\xi x}$. Then \eqref{intermediary3} reads
\[
p i\xi\mathbb{E}\bigl[e^{i\xi U}\mathbh{1}_{\{V=p\}}\bigr]-p\mathbb{E}\bigl[U
e^{i\xi U}\mathbh{1}_{\{
V=p\}}\bigr]+i \xi\mathbb{E}[V\mathbh{1}_{\{V\neq p\}}]=0.
\]
Setting $f(\xi)=\mathbb{E}[e^{i\xi U}\mathbh{1}_{\{V=p\}}]$, we obtain that
\begin{eqnarray*}
&\displaystyle p \xi f(\xi)+p f'(\xi)+\xi\mathbb{E}[V\mathbh{1}_{\{V\neq p\}}]=0,&
\\
&\displaystyle f(\xi)= \biggl(\mathbb{P}(V=p)- \frac{1}{p} \mathbb{E}[V] \biggr)+ \frac{1}{p}
\mathbb{E}[V] e^{-{\xi^2}/{2}}.&
\end{eqnarray*}
It is straightforward to deduce from the above equations that the
characteristic function of the random variable $U$ is given by
\begin{eqnarray*}
\mathbb{E}\bigl[e^{i\xi U}\bigr]&=&\mathbb{P}(V\neq p)+f(\xi)
\\
&=& \biggl(1- \frac{1}{p} \mathbb{E}[V] \biggr) +
\frac{1}{p} \mathbb{E}[V] e^{-{\xi^2}/{2}}.
\end{eqnarray*}
\end{longlist}
\upqed\end{pf}
Although case (2) in Theorem~\ref{thm:main1} seems less interesting
than case (1), we point out that a Dirac mass at zero may appear
naturally under assumptions (a)--(b)--(c). Here is a simple example of this
phenomenon.
\begin{exm}
Set $E=\mathbb{R}^2$ and $\mu=\mathcal{N}(0,1)\otimes (\frac{1}{2}\delta
_0+\frac{1}{2}\delta_1 )$. Define
\begin{equation}
\label{newgene}
\mathbf{L}[\phi](x,y)=y \biggl(\frac{\partial^2\phi}{\partial x ^2} - x\frac
{\partial\phi}{\partial x}
\biggr).
\end{equation}
One can check that $\mathbf{L}$ fulfills assumptions (a)--(b)--(c) in Section~\ref{setup}. Consider the sequence
\[
X_n(x,y)=x y \in\mathbf{Ker}(\mathbf{L}+\mathbf{Id}),\qquad n\ge1.
\]
Then $X_n\sim\frac{1}{2}\mathcal{N}(0,1)+\frac{1}{2}\delta_0$ for each
$n\ge1$. Moreover, $\mathbb{E} [W_3(X_n) ]=\break \mathbb{E} [W_5(X_n)
]=0$. As a matter of fact, the conclusions of Theorem~\ref{thm:main1}
are sharp when applied to $P=W_3+W_5 \in\mathscr{P}$.
\end{exm}
However, we show that in the particular setting of the Wiener space,
that is, when $\mathbf{L}$ is the Ornstein--Uhlenbeck operator, the case (2)
of Theorem~\ref{thm:main1} cannot take place. Furthermore, condition
(\ref{poly-condi}) will be a necessary and sufficient condition for
central convergence. To this end, we need the following lemma, which
has an interest on its own.
\begin{lma}\label{product-lemma}
Let $\{ U_n\}_{n \ge1}$ and $\{V_n\}_{n \ge1}$ be two bounded
sequences such that for some integer $M>0$, we have
\[
U_n, V_n \in \bigoplus_{i=0}^M
\mathbf{Ker}(\mathbf{L}+ i \mathbf{Id})\qquad \forall n\in\mathbb{N}.
\]
If $\mathbb{E} [ U_n^2 V_n^2 ] \to0$ as $n$ tends to infinity, then
$\mathbb{E}[U_n^2]\mathbb{E}[V_n^2] \to0$ as $n$ tends to infinity.
\end{lma}
We will make use of the next theorem, due to Carbery--Wright, restated
here for convenience. More precisely, we will apply it to Gaussian
distribution, which is log-concave.
\begin{thm}[(\cite{c-w}, Carbery--Wright)]\label{cw-thm}
Assume that $\mu$ is a log-concave probability measure on $\mathbb{R}^m$. Then
there exists an absolute constant $c>0$ (\textrm{independent of $m$ and
$\mu$}) such that for any polynomial $Q\dvtx \mathbb{R}^m\to\mathbb{R}$ of degree at most
$k$ and any
$\alpha>0$, the following estimate holds:
\begin{equation}
\label{cw-ineq}
\biggl(\int Q^2\,d\mu \biggr)^{{1}/({2k})}\times\mu
\bigl\{ x\in\mathbb{R}^m\dvtx \bigl|Q(x)\bigr|\leq\alpha \bigr\} \leq c k
\alpha^{{1}/k}.
\end{equation}
\end{thm}
\begin{pf*}{Proof of Lemma~\ref{product-lemma}}
Let us denote $E=\mathbb{R}^\mathbb{N}, \mu=\mathcal{N}(0,1)^{\otimes\mathbb{N}}$ and let $\mathbf{L}
$ be the Ornstein--Uhlenbeck generator. We assume that $\mathbb{E} [U_n^2
]$ does not converge to zero. Up to extracting a subsequence, we can
suppose that
$\mathbb{E} [U_n^2 ]>\theta>0$ for each $n\ge1$. Following the method of
\cite{n-po-2}, page~659, inequality (3.21), we can approximate in
$L^2(E,\mu)$ the random variable $U_n$ by polynomials of degree $M$.
Hence, applying the Carbery--Wright inequality for the approximating
sequence, and taking the limit, we obtain
\begin{equation}
\label{ineqCW}
\mu\bigl\{x\in E\dvtx \bigl|U_n(x)\bigr|\leq\alpha\bigr\}\leq
\frac{c M \alpha^{1/M}}{\theta
^{1/2M}}\leq K \alpha^{1/M},
\end{equation}
with $K=\frac{cM}{\theta^{1/2M}}$. Next, we have the following inequalities:
\begin{eqnarray*}
\mathbb{E} \bigl[V_n^2 \bigr]&=&\mathbb{E} \biggl[V_n^2
\frac{U_n^2}{U_n^2} \textbf{1}_{\bigl\{
|U_n|>\alpha\bigr\}} \biggr]+\mathbb{E} \bigl[V_n^2
\textbf{1}_{\bigl\{|U_n|\leq\alpha\bigr\}} \bigr]
\\
&\leq&\frac{1}{\alpha^2}\mathbb{E} \bigl[U_n^2
V_n^2 \bigr]+\sqrt{\mathbb{E} \bigl[V_n^4
\bigr]}\sqrt{\mu\bigl\{x\in E\dvtx \bigl|U_n(x)\bigr|\leq\alpha\bigr\}}
\\
&\le&\frac{1}{\alpha^2}\mathbb{E} \bigl[U_n^2
V_n^2 \bigr]+C K \alpha^{1/2M},
\end{eqnarray*}
where $K$ is the constant from the Carbery--Wright inequality and $C$
is such that $\sup_{n\ge1}\mathbb{E} [V_n^4 ]\leq C^2$. Note\vspace*{1pt} that
constant $C$ exists by hypercontractivity
(see Remark~\ref{Hyperconractivity}). We immediately deduce that
\[
\limsup_{n\to\infty}\mathbb{E} \bigl[V_n^2 \bigr]
\leq C K \alpha^{1/2M},
\]
which is valid for any $\alpha>0$. Let $\alpha\to0$ to achieve the proof.
\end{pf*}
\begin{thm}\label{main}
Let $\mathbf{L}$ stand for the Ornstein--Uhlenbeck operator and let $\{X_n\}
_{n\geq1}$ be a sequence of elements of $\mathbf{Ker}(\mathbf{L}+p\mathbf{Id})$ with variance
bounded from below by some positive constant. Then, for any nonzero
polynomial $P\in\mathscr{P}$, as $n \to\infty$, we have
\[
X_n \stackrel{\mathit{law}} {\rightarrow} \mathcal{N}(0,1)\quad \mbox{if
and only if}\quad \mathbb{E}\bigl[P(X_n)\bigr]\to0.
\]
\end{thm}
\begin{pf}
Although in Theorem~\ref{main} we assume that $\mathbf{L}$ is the
Ornstein--Uhlenbeck generator, we stress that the proof works in the
Laguerre structure or any tensor products of Laguerre and Wiener structures.
The ``if'' part is straightforward by using the continuous mapping
theorem. To show the ``only if'' part, we take a nonzero polynomial
$P\in\mathscr{P}$ of the form
\[
P(x)=\sum_{k=2}^m \alpha_k
W_{k}(x),
\]
with $\alpha_m >0$. Thanks to Remark~\ref{mainrmk}, as $n \to\infty$,
we know that
\begin{equation}
\mathbb{E} \bigl[H_{m-2}(X_n)^2\bigl(
\Gamma(X_n)-p\bigr)^2 \bigr] \rightarrow0.
\end{equation}
Let $Z_{m-2}=\{ t_1, t_2, \ldots, t_{m-2}\}$ be the set of the (real)
roots of the Hermite polynomial $H_{m-2}$. Then, as $n \to\infty$, we have
\[
\mathbb{E} \Biggl[ \Biggl(\prod_{k=1}^{m-2}
(X_n - t_k)^2 \Biggr) \bigl(\Gamma
(X_n)-p\bigr)^2 \Biggr] \to0.
\]
From the fact that $\Gamma(X_n)=\frac{1}{2}(\mathbf{L}+p\mathbf{Id})(X_n^2)$ together
with fundamental assumption (\ref{fundamental-assumtion}) (which
holds in the Wiener structure), we deduce that $H_{m-2}(X_n)$ and
$\Gamma(X_n)-p$ are both finitely expanded over the eigenspaces of the
generator $\mathbf{L}$. Besides, repeating the same argument as in the proof
of Theorem~\ref{thm:main1}, we can show that the sequence
$\{X_n\}_{n\ge1}$ is bounded in $L^2(E,\mu)$, as well as $\{ \Gamma
(X_n)-p\}_{n\ge1}$. Thus, from Lemma~\ref{product-lemma}, as $n \to
\infty$, we obtain
\[
\Biggl(\prod_{k=1}^{m-2} \mathbb{E}
\bigl[(X_n - t_k)^2 \bigr] \Biggr) \mathbb{E} \bigl[
\bigl(\Gamma (X_n)-p\bigr)^2 \bigr] \to0.
\]
Since $\mathbb{E} [(X_n - t_k)^2 ]\geq\operatorname{Var}(X_n)$ is bounded from
below by assumption, we conclude that $\Gamma(X_n)\to p$ in $L^2(E)$.
Hence, using Theorem~\ref{Gamma-bound-general}, we obtain that the
sequence $\{X_n\}_{n \ge1}$ converges in distribution toward $\mathcal{N}(0,1)$.
\end{pf}
\section{Conjectures}
The main motivation of this article is to provide an answer to the
question (B) stated in the \hyperref[sec1]{Introduction}. We have shown that the
convergence of any even moment guarantees the central convergence of a
normalized sequence (i.e., $\mathbb{E}[X_n^2] \to1$) living inside $\mathbf{Ker}(\mathbf{L}+p
\mathbf{Id})$. In the latter criterion, we have dealt with normalized sequences
because it seems more natural from the probabilistic
point of view. However, one could also try to replace this assumption
by the convergence of another even moment. Indeed, our framework could
provide a wider class of polynomial conditions ensuring central
convergence, namely through the family $\mathscr{P}$. Then it is natural to
check whether the family $\mathscr{P}$ is rich enough to produce other pairs of
even moments ensuring a criterion for central convergence. To be more
precise, assume that for some pair $(k,l)$ ($k <l$) of positive
integers, we have $\mathbb{E}[X_n^{2k}]\to\mathbb{E}[N^{2k}]$ and $\mathbb{E}[X_n^{2l}]\to\mathbb{E}
[N^{2l}]$, we want to know if this implies a central convergence. Our
method would consist in deducing the
existence of a nontrivial polynomial $T_{k,l} \in\mathscr{P}$ such that
$\mathbb{E} [ T_{k,l}(X_n) ] \rightarrow0$. Natural candidates are
polynomials of the form
\[
T_{k,l}(x) = x^{2l} + \alpha x^{2k} + \beta,
\]
where $\alpha, \beta\in\mathbb{R}$. Using the same arguments as in step
(2) of the proof of Theorem~\ref{2-2k-moment}, one can show that the
condition $T_{k,l} \in\mathscr{P}$ entails necessarily that $\alpha= -\frac{l
(2l-1)!!}{k (2k-1)!!}$
and $\beta= ( \frac{l} k -1 ) (2l-1)!!$. Then the question
becomes: does the polynomial $T_{k,l}$ belong to the family $\mathscr{P}$?
We exhibit the decomposition of $T_{k,l}$ for each pair of integers in
the set $\Theta= \{(2,3); (2,4); (2,5); (3,4); (3,5) \}$:
\begin{eqnarray*}
T_{2,3}(x)&=&x^6 -
\frac{15}{2}x^4 +\frac{15}{2} = W_{3}(x) +
\mathbf{\frac{5}{2}}W_2(x),
\\
T_{2,4}(x)&=&x^8 -70 x^4 +105 =
W_4(x) + \mathbf{\frac
{84}{5}}W_3(x)+\mathbf{28}
W_2(x),
\\
T_{2,5}(x)&=&x^{10} - \frac{1575}{2}x^4 +
\frac{2835}{2} \\
&=& W_5(x) + \mathbf{\frac{180}{7}}
W_4(x) +\mathbf{234} W_3(x) + \mathbf{\frac{585}{2}}
W_2(x),
\\
T_{3,4}(x)&=&x^8 -\frac{28}{3} x^6 +35
= W_4(x) + \mathbf{\frac
{112}{5}} W_3(x)+ \mathbf{
\frac{14}{3}} W_2(x),
\\
T_{3,5}(x)&=&x^{10}-105x^6 +630 =
W_5(x) + \mathbf{\frac{180}{7}}W_4(x) +
\mathbf{129}W_3(x) + \mathbf{30}W_2(x).
\end{eqnarray*}
The coefficients of each decomposition are positive, thus, for each
pair $(k,l) \in\Theta$, the convergence of the $2k{\mathrm{th}}$ and
$2l{\mathrm{th}}$ moments entails the central convergence. Naturally, we are
tempted to formulate the following conjecture.
\begin{con}
Let $k,l \geq2$ be two different positive integers. For any sequence
$\{X_n\}_{n \ge1}$ of eigenfunctions in the same eigenspace of a
Markov generator $\mathbf{L}$ satisfying assumptions (a)--(b)--(c), as
$n \to\infty$, the following statements are equivalent:
\begin{longlist}[(i)]
\item[(i)] $X_n \stackrel{\mathit{law}}{\longrightarrow} N \sim\mathcal{N}(0,1)$.\vspace*{1pt}
\item[(ii)] $\mathbb{E}[X_n^{2k}] \to\mathbb{E}[N^{2k}]$ and $\mathbb{E}[X_n^{2l}] \to\mathbb{E}[N^{2l}]$.
\end{longlist}
\end{con}
Unfortunately, we could not prove it since $T_{4,5}$ does not belong to
family $\mathscr{P}$:
\[
T_{4,5}(x)=x^{10} -\frac{45}{4}x^8 +
\frac{945}{4}= W_5(x) + \frac
{405}{28}W_4(x) +
W_3(x)\,\mathbf{-}\, \mathbf{\frac{45}{2}}W_2(x).
\]
We insist on the fact that the above conjecture might be true
nonetheless.
Another perspective of our algebraic framework is to provide nontrivial
moments inequalities for the eigenfunctions of the Markov operator $\mathbf{L}
$ satisfying suitable assumptions. The special role of the
fourth cumulant $\kappa_4$ in normal approximation for a sequence
living inside a fixed eigenspace is now well understood and it is known
that $\kappa_4(X) \ge0$. In a recent preprint, the authors of
\cite{app14} observed the prominent role of $\kappa_6$ for studying
convergence in distribution toward $N_1 \times N_2$, where $N_1$ and
$N_2$ are two
independent $\mathcal{N}(0,1)$ random variables, of a given sequence in
a fixed Wiener chaos. The computations suggest that $\kappa_6$ could be
greater than the variance of some
differential operator (analogous to $\operatorname{Var} (\Gamma
[X,X ] )$ in the case of normal approximation). However, the
techniques presented in \cite{app14} could not provide the positivity
of the sixth cumulant. We recall that
\[
\label{kappa6} \kappa_6(X)= \mathbb{E}\bigl[X^6\bigr] -15\mathbb{E}
\bigl[X^2\bigr]\mathbb{E}\bigl[X^4\bigr]-10\mathbb{E}\bigl[X^3
\bigr]^2+30\mathbb{E}\bigl[X^2\bigr]^3.
\]
Computations show that the least eigenvalue of the moment matrix
$\textbf{M}_3(X)$ is always bigger than $\kappa_6(X)$. Therefore, our
method does not give results precise enough to ensure the positivity of
the sixth cumulant. However, we know that $\kappa_6(X) \ge0$ in the
two first Wiener chaoses. Moreover, using Proposition~\ref{6moment-prop}, we could prove the following partial criterion.
\begin{prop}
Let $X$ be a multiple Wiener--It\^o integral of odd order such that $\mathbb{E}
[X^2] =1$. If $\kappa_4(X) \ge3$, then $\kappa_6(X)\ge0$.
\end{prop}
These two facts lead us to formulate the following conjecture.
\begin{con}
For any multiple Wiener--It\^o integral $X$ of order $p \ge2$, we
have $\kappa_6(X)>0$.
\end{con}
\begin{appendix}
\section*{Appendix}\label{app}
We give here a proof of Proposition~\ref{tenduduslip}. In the
following, $w$ stands for the density of the standard Gaussian
distribution over $\mathbb{R}$. Let us begin by stating a~lemma on elementary
computations on Hermite polynomials.
\begin{lma}
\label{lem:hkhkp2}
Let $l,m,n \in\mathbb N$. Then
\begin{equation}
\label{eq:lem1} \int_\mathbb{R} x^{2m}
H_{2n}(x) w(x) \,dx =\frac{(2m)!}{2^{m-n}(m-n)!}
\end{equation}
and
\begin{eqnarray}
&& \int_\mathbb{R} H_l(x)
H_m(x) H_n(x) w(x) \,dx
\nonumber
\\[-8pt]
\label{eq:lem2}
\\[-8pt]
\nonumber
&& \qquad= \frac{l! m! n!}{ (
({-l+m+n})/{2} )! ( ({l-m+n})/{2} )! (({l+m-n})/{2})!},
\end{eqnarray}
with the convention that $\frac{1}{p!} = 0$ if $p \notin\mathbb{N}$.
\end{lma}
\begin{pf}
We first focus on \eqref{eq:lem1}. Recall that $e^{-{x^2}/{2}}
H_n(x) = (-1)^n\times\break \frac{d^n}{dx^n} ( e^{-{x^2}/{2}})$.
Performing $2n$ integrations by parts (with $n\le m$), we obtain
\begin{eqnarray*}
\int_\mathbb{R} x^{2m} H_{2n}(x) w(x) \,dx &=&
\int_\mathbb{R}\frac{d^{2n}}{dx^{2n}} \bigl(x^{2m}\bigr) w(x)
\,dx
\\
&=&\frac{(2m)!}{(2(m-n))!} \int_\mathbb{R} x^{2(m-n)} w(x) \,dx
\\
&=&\frac{(2m)! (2(m-n)-1)!!}{(2(m-n))!}
\\
&=&\frac{(2m)!}{2^{m-n} (m-n)!}.
\end{eqnarray*}
If $m<n$, the formula follows from our convention. Now, \eqref{eq:lem2}
is a mere consequence of the product formula for Hermite polynomials,
which states that (see, e.g., Theorem~6.8.1 in
\cite{andrews1999special})
\[
H_n(x) H_m(x) = \sum_{k=0}^{\min (n,m)}
\pmatrix{n
\cr
k }
\pmatrix{
m
\cr
k }
k! H_{n+m-2k}(x),
\]
for all positive integers $n,m$. Indeed, integrating the last equation
against $H_l w$, and using the orthogonality of Hermite polynomials
with respect to $w$, we obtain the desired result.
\end{pf}
Now, let us prove Proposition~\ref{tenduduslip}.
\begin{pf*}{Proof of Proposition~\protect\ref{tenduduslip}}
To make the notation less cluttered, we set $\beta_k = (k-1)
(2k-1)!!$ and $\alpha_k = k (2k-1)!!$. Since $W_p$ is an even
polynomial and $\deg(W_p) = 2p$, there exists a unique expansion of the form
\begin{equation}
\label{eq:expx2k}
x^{2k}-\alpha_k x^2 +
\beta_k=\sum_{p=2}^k
c_{p,k} W_p(x) + ax^2+b.
\end{equation}
Recall that the coefficients $\alpha_k$ and $\beta_k$ are chosen in
such a way that $\phi(t)=\mathbb{E} [t^{2k} N^{2k}-\alpha_k t^2 N^2+\beta
_k ]$ satisfies $\phi(1)=\phi'(1)=0$. Coming back to Lemma~\ref
{main1}, for each $p\ge2$ the two following conditions hold:
\[
\cases{
\displaystyle \mathbb{E} \bigl[W_p(N) \bigr]=0,\vspace*{2pt}\cr
\displaystyle\forall x\in\mathbb{R},\qquad \psi_p(x)=\mathbb{E} \bigl[W_p(x N) \bigr]\ge0.}
\]
Thus, $\psi_p$ reaches its minimum at $x=1$ and we have $\psi_p(1)=\psi
_p'(1)=0$. Setting
\[
\psi(x)=\mathbb{E} \Biggl[\sum_{p=2}^k
c_{p,k}W_p(x N) \Biggr]=\sum_{p=2}^k
c_{p,k} \psi_p(x),
\]
we must also have $\psi(1)=\psi'(1)=0$. Plugging\vspace*{1pt} the above conditions
on $\phi$ and $\psi$ into \eqref{eq:expx2k} implies that, if $\delta
(x)=\mathbb{E} [ax^2 N^2+b ]=ax^2+b$, then $\delta(1)=\delta'(1)=0$.
Hence, $a+b=0$ and $2a=0$ so $a=b=0$. Define the (even) polynomial
$Q_k(x) = \sum_{p=2}^k c_{p,k} (2p-1) H_{p}(x) H_{p-2}(x)$. Using
the definition of $W_p$ and \eqref{eq:expx2k}, we see that $Q_k$ is
solution of the polynomial equation
\begin{equation}
\label{polynomialequation}
x\int_0^x Q_k(t)
\,dt - Q_k(x) = x^{2k}-\alpha_k x^2 +
\beta_k.
\end{equation}
In the following lemma, we solve the above equation.
\begin{lma}\label{solupoly}
Equation \eqref{polynomialequation} has a unique even polynomial
solution of degree $2k-2$, which is
\begin{equation}
\label{eq:decompohkhkm2}\qquad Q_k(x) = \sum_{p=2}^k
c_{p,k} (2p-1) H_{p}(x) H_{p-2}(x) = -\beta
_k+\sum_{p=1}^{k-1}
\frac{(2k-1)!!}{(2p-1)!!} x^{2p}.
\end{equation}
\end{lma}
\begin{pf}
Let $\Phi$ be the linear
operator from $\mathbb{R}[X]$ to $\mathbb{R}[X]$ defined by $\Phi(P) (X)=X \int_0^X
P(t)\,dt-P(X)$. Assume that $\Phi(P)=\Phi(Q)$, then $\Delta(X)=\int_0^X
 (P(t)-Q(t) ) \,dt$ satisfies the differential equation $x y(x)
-y'(x)=0$. Thus, there exists a constant $C$ such that $\Delta(X)=C e^{{X^2}/{2}}$. But $\Delta$ is a polynomial function so $C=0$. This
implies that $P-Q$ is a constant polynomial. By setting $x=0$ in
equation \eqref{polynomialequation}, we get that $Q_k(0)=-\beta_k$.
Now, set
\[
R_k(X)=-\beta_k+\sum_{p=1}^{k-1}
\frac{(2k-1)!!}{(2p-1)!!} X^{2p},
\]
we also have $R_k(0)=-\beta_k$. As a result, one is left to show that
$\Phi(R_k)=\Phi(Q_k)$. Indeed,
\begin{eqnarray*}
\Phi(R_k)&=&-\beta_k \bigl(X^2-1\bigr)+\sum
_{p=1}^{k-1} \frac{(2k-1)!!}{(2p-1)!!} \biggl(
\frac{1}{2p+1} X^{2p+2}-X^{2p} \biggr)
\\
&=&-\beta_k \bigl(X^2-1\bigr)+(2k-1)!!\sum
_{p=1}^{k-1}\frac{1}{(2p+1)!!} X^{2p+2}
\\
&&{}-(2k-1)!!\sum_{p=1}^{k-1}
\frac{1}{(2p-1)!!}X^{2p}
\\
&=&-\beta_k\bigl(X^2-1\bigr)+ X^{2k}-(2k-1)!!
X^2
\\
&=& X^{2k}-\alpha_k X^2+\beta_k
\\
&=&\Phi(Q_k).
\end{eqnarray*}
\upqed\end{pf}
Integrating \eqref{eq:decompohkhkm2} against $H_{2n} w$ over $\mathbb{R}$ for
each $1\leq n \leq k-1$ and using Lemma~\ref{lem:hkhkp2} shows that $\{
c_{p,k} \}_{2\leq p \leq k}$ is the solution of the following
triangular array:\vspace*{-2pt}
\begin{eqnarray*}
&&\sum_{p=n+1}^{k} c_{p,k} (2p-1)
\frac
{(p-2)! p! (2n)!}{(n+1)! (n-1)! (p-n-1)!}
\\[-2pt]
&&\qquad =\sum_{p=n+1}^{k} \frac{(2k-1)!! (2p-2)!}{
(2p-3)!!2^{p-n-1}(p-n-1)!} \qquad \forall n \in[1,k-1],
\end{eqnarray*}
which can be equivalently stated as
\begin{equation}
\label{eq:eqnarray}
\qquad\forall n \in[1,k-1], \qquad \sum_{p=n}^{k-1}
\frac{a_{p,k}}{(p-n)!}= \frac{ 2^{n}(n+1)!(n-1)!}{(2n)!} \sum_{p=n}^{k-1}
\frac{ p!}{ (p-n)!},
\end{equation}
by denoting, for all $1\leq p \leq k-1$,
\begin{equation}
\label{eq:apcp}
a_{p,k} = \frac{(2p+1)(p-1)! (p+1)!}{(2k-1)!!} c_{p+1,k}.
\end{equation}
In order to solve \eqref{eq:eqnarray}, we introduce the polynomial functions
\[
f(x) = -k+ \sum_{p=0}^{k-1}
x^{p},
\qquad
g(x) = \sum_{p=1}^{k-1} \frac{a_{p,k} }{p!}
x^p.
\]
Remark that, in terms of the functions $f$ and $g$, \eqref{eq:eqnarray}
reads
\[
\forall n \in[1,k-1], \qquad g^{(n)}(1)= \frac{
2^{n}(n+1)!(n-1)!}{(2n)!}
f^{(n)}(1).
\]
The multiplication formula for the Gamma function and a classic
property of the beta function (see, e.g., \cite{abramowitz1972handbook}, formulas (6.1.20) and (6.2.2)) imply
\begin{eqnarray*}
\frac{ 2^{n}(n+1)!(n-1)!}{(2n)!} &=& 2^n
\frac{\Gamma(n+2) \Gamma
(n)}{\Gamma(2n+1)}
\\
& =& \frac{\Gamma(n+2)}{2^n\Gamma(n+1)} \cdot \frac{\Gamma(1/2)\Gamma
(n)}{\Gamma(n+1/2)}
\\
&= & \frac{n+1}{2^n} \int_0^1
u^{n-1}(1-u)^{-1/2}\,du.
\end{eqnarray*}
Thus, $\forall x \in(1/4,3/4)$,
\begin{eqnarray*}
g(1-2x) -g(1)&= & \sum_{n=1}^{k-1}
\frac{g^{(n)}(1)}{n!} (-1)^n 2^n x^n
\\
&= & \sum_{n=1}^{k-1} \frac{f^{(n)}(1)}{n!}
(n+1)\int_0^1 u^{n-1}(1-u)^{-1/2}\,du
(-1)^n x^n
\\
&=& \int_0^1(1-u)^{-1/2} \sum
_{n=1}^{k-1} \frac{f^{(n)}(1)}{n!} (n+1)
(-1)^n u^{n-1}x^n \,du
\\
&= & \int_0^1(1-u)^{-1/2}
u^{-1}\frac{d}{du} \bigl( u f(1-ux) \bigr) \,du.
\end{eqnarray*}
Since $f(x) = -k + \frac{1-x^k}{1-x}$,
\[
\frac{d}{du } \bigl[ u f(1-ux) \bigr] =\frac{d}{du} \biggl[ -k u+
\frac{1-(1-ux)^{k}}{x} \biggr]= k \bigl( (1-ux)^{k-1} -1 \bigr),
\]
so that, $\forall x\in(1/4,3/4)$,
\[
g(1-2x) -g(1) = k\int_0^1(1-u)^{-1/2}
u^{-1} \bigl( (1-ux)^{k-1} -1 \bigr) \,du.
\]
Derive last equation to obtain that $\forall p \in[1, k-1]$, $\forall
x \in(1/4,3/4)$,
\[
2^p g^{(p)}(1-2x) = \frac{k!}{(k-1-p)!}\int
_0^1(1-u)^{-1/2} u^{p-1}
(1-ux)^{k-1-p} \,du.
\]
Note that we used Lebesgue's derivation theorem, which applies since
\[
\sup_{x \in(1/4,3/4)} \bigl| (1-u)^{-1/2} u^{p-1}
(1-ux)^{k-p-1} \bigr| \leq(1-u)^{-1/2} u^{p-1} \biggl(1-
\frac{u}{4} \biggr)^{k-p-1},
\]
and the upper bound in the last equation is in $L^1((0,1))$ as a
function of $u$.
Finally, for all $1\leq p \leq k-1$,
\[
a_{p,k} = g^{(p)}(0) = 2^{-p} \frac{k!}{(k-p-1)!}
\int_0^1(1-u)^{-1/2} u^{p-1}
\biggl(1-\frac{u}{2} \biggr)^{k-p-1} \,du,
\]
and we can use \eqref{eq:apcp} to conclude.
\end{pf*}
\end{appendix}
\section*{Acknowledgments}
The authors thank Giovanni Peccati and Lauri Viitasaari for many useful
discussions. We are grateful to two anonymous referees for their
valuable comments that led to an improved version of the previous work.
\printaddresses
\end{document}
\begin{document}
\title{A refinement of weak order intervals into distributive lattices}
\author{Hugh Denoncourt}
\maketitle
\begin{abstract}
In this paper we consider arbitrary intervals in the left weak
order on the symmetric group $S_n$. We show that
the Lehmer codes of permutations in an interval form a distributive
lattice under the product order. Furthermore, the rank-generating function of
this distributive lattice matches that of the weak order interval. We construct a poset such that its lattice of order ideals is isomorphic to the lattice of Lehmer codes
of permutations in the given interval. We show that there are at least $\left(\lfloor\frac{n}{2}\rfloor\right)!$ permutations in $S_n$ that form a rank-symmetric
interval in the weak order.
\end{abstract}
\section{Introduction and preliminaries}
\subsection{Introduction}
Our results concern intervals in the weak order of the symmetric group $S_n$. Intervals in this fundamental order can arise in unexpected contexts. For example, Bj{\"{o}}rner and Wachs \cite[Theorem 6.8]{permstatslinext} showed that the set of linear extensions of a regularly labeled two-dimensional poset forms an interval in the weak order. The Bell classes defined by Rey in \cite{bellorder} are also weak order intervals \cite[Theorem 4.1]{bellorder}.
Stembridge \cite[Theorem 2.2]{onfc} showed that the interval $\Lambda_w = [\text{id},w]$ in the weak order is a distributive lattice if and only if $w$ is a fully commutative element. The Lehmer code \cite{lehmer} is an $n$-tuple that encodes information about the inversions of a permutation. Our main theorem, Theorem~\ref{t:distributivelattice}, states that the set of Lehmer codes for permutations in $\Lambda_w$, ordered by the product order on $\mathbb{N}^n$, is a distributive lattice. Furthermore, the rank-generating function of $\Lambda_w$ matches that of the corresponding distributive lattice. Theorem~\ref{t:distributivelattice} holds for arbitrary $w \in S_n$, so it tells us how an arbitrary weak order interval can be refined to form a distributive lattice when $w$ is not fully commutative.
\begin{center}
\begin{tikzpicture}[scale=1.0]
\draw (-0.6,1.2) -- (0.0,0.0);
\draw (1.9,1.2) -- (0.0,0.0);
\draw (1.3,2.4) -- (-0.6,1.2);
\draw (1.3,2.4) -- (1.9,1.2);
\draw (0.7,3.6) -- (1.3,2.4);
\draw (2.5,2.4) -- (1.9,1.2);
\draw (1.9,3.6) -- (2.5,2.4);
\draw (1.3,4.8) -- (0.7,3.6);
\draw (1.3,4.8) -- (1.9,3.6);
\draw (3.8,2.4) -- (1.9,1.2);
\draw (3.2,3.6) -- (1.3,2.4);
\draw (3.2,3.6) -- (3.8,2.4);
\draw (2.6,4.8) -- (0.7,3.6);
\draw (2.6,4.8) -- (3.2,3.6);
\draw (4.4,3.6) -- (2.5,2.4);
\draw (4.4,3.6) -- (3.8,2.4);
\draw (3.8,4.8) -- (1.9,3.6);
\draw (3.8,4.8) -- (4.4,3.6);
\draw (3.2,6.0) -- (1.3,4.8);
\draw (3.2,6.0) -- (2.6,4.8);
\draw (3.2,6.0) -- (3.8,4.8);
\draw (0.0,0.0) node[fill=white] {$12345$};
\draw (-0.6,1.2) node[fill=white] {$21345$};
\draw (1.9,1.2) node[fill=white] {$12435$};
\draw (1.3,2.4) node[fill=white] {$21435$};
\draw (0.7,3.6) node[fill=white] {$31425$};
\draw (2.5,2.4) node[fill=white] {$13425$};
\draw (1.9,3.6) node[fill=white] {$23415$};
\draw (1.3,4.8) node[fill=white] {$32415$};
\draw (3.8,2.4) node[fill=white] {$12534$};
\draw (3.2,3.6) node[fill=white] {$21534$};
\draw (2.6,4.8) node[fill=white] {$31524$};
\draw (4.4,3.6) node[fill=white] {$13524$};
\draw (3.8,4.8) node[fill=white] {$23514$};
\draw (3.2,6.0) node[fill=white] {$32514$};
\draw (4.9,1.2) -- (5.5,0.0);
\draw (7.4,1.2) -- (5.5,0.0);
\draw (6.8,2.4) -- (4.9,1.2);
\draw (6.8,2.4) -- (7.4,1.2);
\draw (6.2,3.6) -- (6.8,2.4);
\draw (8.0,2.4) -- (7.4,1.2);
\draw (7.4,3.6) -- (6.8,2.4);
\draw (7.4,3.6) -- (8.0,2.4);
\draw (6.8,4.8) -- (6.2,3.6);
\draw (6.8,4.8) -- (7.4,3.6);
\draw (9.3,2.4) -- (7.4,1.2);
\draw (8.7,3.6) -- (6.8,2.4);
\draw (8.7,3.6) -- (9.3,2.4);
\draw (8.1,4.8) -- (6.2,3.6);
\draw (8.1,4.8) -- (8.7,3.6);
\draw (9.9,3.6) -- (8.0,2.4);
\draw (9.9,3.6) -- (9.3,2.4);
\draw (9.3,4.8) -- (7.4,3.6);
\draw (9.3,4.8) -- (8.7,3.6);
\draw (9.3,4.8) -- (9.9,3.6);
\draw (8.7,6.0) -- (6.8,4.8);
\draw (8.7,6.0) -- (8.1,4.8);
\draw (8.7,6.0) -- (9.3,4.8);
\draw (5.5,0.0) node[fill=white] {$00000$};
\draw (4.9,1.2) node[fill=white] {$10000$};
\draw (7.4,1.2) node[fill=white] {$00100$};
\draw (6.8,2.4) node[fill=white] {$10100$};
\draw (6.2,3.6) node[fill=white] {$20100$};
\draw (8.0,2.4) node[fill=white] {$01100$};
\draw (7.4,3.6) node[fill=white] {$11100$};
\draw (6.8,4.8) node[fill=white] {$21100$};
\draw (9.3,2.4) node[fill=white] {$00200$};
\draw (8.7,3.6) node[fill=white] {$10200$};
\draw (8.1,4.8) node[fill=white] {$20200$};
\draw (9.9,3.6) node[fill=white] {$01200$};
\draw (9.3,4.8) node[fill=white] {$11200$};
\draw (8.7,6.0) node[fill=white] {$21200$};
\end{tikzpicture}
\end{center}
\begin{center}
Figure 1: The interval $\Lambda_{32514}$ and its Lehmer codes
\end{center}
The left weak order interval $\Lambda_{32514}$ shown on the left of Figure 1 is not a distributive lattice due to the subinterval $\left[12435, 32415\right]$. Restricted to the Lehmer codes of permutations in $\Lambda_{32514}$, the product order on $\mathbb{N}^5$ refines the left weak order. This is shown on the right of Figure 1. By Theorem~\ref{t:distributivelattice}, this refinement results in a distributive lattice.
Our results relating weak order intervals and distributive lattices are motivated by the existence of nice structure theorems for finite distributive lattices. For example, the fundamental theorem of finite distributive lattices states that any finite distributive lattice is isomorphic to the set $J(P)$ of down-closed subsets of a finite poset $P$, ordered by inclusion. In light of Theorem~\ref{t:distributivelattice}, we construct a finite poset $M_w$ associated to the set of Lehmer codes of permutations in $\Lambda_w$. In Section~\ref{s:baseposet}, we give a chain decomposition of $M_w$ in which the chains are determined by the Lehmer code. The relations between the chains are determined by an extension to the Lehmer code that we introduce in Section~\ref{s:extendedcodes}. The construction of $M_w$ and its properties are summarized by Theorem~\ref{t:codejoinirreducibles}.
Propp \cite{proppdistributive} gave a method for choosing elements uniformly at random from any finite distributive lattice of the form $J(P)$ that uses only the poset $P$. Thus, the description of $M_w$ given in Theorem~\ref{t:codejoinirreducibles} can be combined with this method to choose elements uniformly at random from any weak order interval in $S_n$.
Our current work is also motivated by questions given at the end of \cite{Weiweakorder} about the rank-generating function of $\Lambda_w$. One question asks which $w \in S_n$ are such that the interval $\Lambda_w$ is rank-symmetric. In Proposition~\ref{p:lowerbound}, we show that there are at least $\left(\lfloor\frac{n}{2}\rfloor\right)!$ such permutations in $S_n$.
\subsection{Preliminaries}
We use the convention that $\mathbb{N} = \{0,1,2,\ldots\}$ and $[n] = \{1,\ldots,n\}$. To specify permutations, we use $1$-line notation. That is, we say $w = w_1w_2 \cdots w_n$ to specify the permutation satisfying $w(i) = w_i$ for all $i \in [n]$.
For any poset $(P,\leq)$, we say that $P$ is ranked if there is a function $\rho:P \rightarrow \mathbb{N}$ satisfying $\rho(x) = 0$ for minimal elements $x \in P$ and $\rho(y) = \rho(x) + 1$ whenever $y$ covers $x$. Whenever $P$ is ranked and finite, the \emph{rank-generating function for $P$} is defined by
\begin{equation*}
F(P,q) = \sum_{x \in P} q^{\rho(x)}.
\end{equation*}
For any poset $(P,\leq)$, a down-closed subset $I \subseteq P$ is called an \emph{order ideal}. That is, a subset $I \subseteq P$ is an order ideal if $y \in I$ whenever $x \in I$ and $y \leq x$. We denote the weak order interval $[\text{id},w]$ by $\Lambda_w$.
For the remainder of this paper, let $n$ be a positive integer.
\begin{definition} \label{d:inversiondef}
Let $w \in S_n$ and set
\begin{equation*}
\inv{w} = \{(i,j) \in [n] \times [n] : i < j \text{ and } w(i) > w(j) \}.
\end{equation*}
The set $\inv{w}$ is called the \emph{inversion set of $w$} and each pair $(i,j) \in \inv{w}$ is called an \emph{inversion of $w$}. Regarding $w \in S_n$ as a permutation in $S_{n+1}$ satisfying $w(n+1) = n+1$, set
\begin{equation*}
\ninv{w} = \{ (i,j) \in [n] \times [n+1] : i \leq j \text{ and } w(i) \leq w(j) \}.
\end{equation*}
We call $\ninv{w}$ the \emph{set of non-inversions} of $w$ and each pair $(i,j) \in \ninv{w}$ is called a \emph{non-inversion} of $w$.
\end{definition}
\noindent
The choice to include pairs of the form $(i,i)$ or $(i,n+1)$ in the definition of non-inversion simplifies later characterizations and proofs. Note that $\ninv{w}$ is the complement of $\inv{w}$ relative to the ordered pairs $(i,j) \in [n] \times [n+1]$ satisfying $i \leq j$. In particular, when $(i,j) \in \ninv{w}$, we have $i \leq j$.
\begin{definition} \label{d:weakorderlength}
The \emph{length $\ell(w)$} of $w$ is defined by $\ell(w) = \card{\inv{w}}$. The left weak order $(S_n, \leq_L)$ is defined as the transitive closure of the relations
\begin{equation*}
v \leq_L w \text{ if } w = s_i v \text{ and } \ell(w) = \ell(v) + 1,
\end{equation*}
where $s_i = (i \;\; i + 1)$ is an adjacent transposition in $S_n$.
\end{definition}
It is known that $(S_n,\leq_L)$ is a ranked poset, where length is the rank function.
The right weak order $(S_n,\leq_R)$ has a similar definition where the condition $w = s_i v$ is replaced by $w = v s_i$. Thus $u \leq_R w$ if and only if $u^{-1} \leq_L w^{-1}$. The results of our paper can be translated to the right weak order by using the fact that
\begin{equation*}
(\Lambda_w,\leq_R) \cong (\Lambda_{w^{-1}},\leq_L).
\end{equation*}
Also, the dual of \cite[Proposition 3.1.6]{bb} states that $[\text{id},wv^{-1}] \cong [v,w]$ for intervals in the left weak order. Thus, our results for principal order ideals can be translated to arbitrary intervals in the left weak order.
For this paper, the following characterization of the left weak order will be more convenient to use than the definition.
\begin{lemma} \label{l:weaksubset}
Let $v,w \in S_n$. Then $v \leq_L w$ if and only if $\inv{v} \subseteq \inv{w}$. Consequently, we have $v \leq_L w$ if and only if $\ninv{w} \subseteq \ninv{v}$.
\end{lemma}
\begin{proof}
This is a dual version of \cite[Proposition 3.1]{permstatslinext}.
\end{proof}
\noindent
For each $i \in [n]$, let $\lehc{i}{w}$ be the number of inversions of $w$ with the first coordinate equal to $i$; that is,
\[
\lehc{i}{w} = \card{\{k : (i,k) \in \inv{w}\}}.
\]
The finite sequence
\begin{equation*}
\leh{w} = (\lehc{1}{w},\ldots,\lehc{n}{w})
\end{equation*}
is called the \emph{Lehmer code for $w$}.
\begin{example}
Let $w = 412563$. The inversions are
\begin{equation*}
(1,2), (1,3), (1,6), (4,6), \text{ and } (5,6).
\end{equation*}
The number of inversions whose first coordinate is $i$ gives the $i$-th coordinate of the Lehmer code. Thus, the Lehmer code of $w$ is $(3,0,0,1,1,0)$.
\end{example}
We view $\bm{c}$ as a function
\begin{equation*}
\bm{c}:S_n \rightarrow \prod_{i=1}^n [0,n-i],
\end{equation*}
mapping each $w \in S_n$ to an $n$-tuple that satisfies the bound $0 \leq \lehc{i}{w} \leq n - i$. It is known (see \cite[Chapter I]{schubertnotes}) that $\bm{c}$ is a bijection and that
\begin{equation*}
\sum_{i=1}^n \lehc{i}{w} = \ell(w).
\end{equation*}
Whenever we need $\lehc{{n+1}}{w}$ to be defined, we make the reasonable convention that $\lehc{{n+1}}{w} = 0$.
\section{Extended codes and the weak order} \label{s:extendedcodes}
We define an extension of the standard Lehmer code. This extended code is used to characterize the weak order in terms of codes and is central to the construction given in Section~\ref{s:baseposet}.
\begin{definition} \label{d:extendedcode}
Let $w \in S_n$. For $1 \leq i < j \leq n+1$, define $\lehm{i}{j}{w}$ to be the number of inversions $(i,k) \in \inv{w}$ satisfying $k < j$; that is,
\[
\lehm{i}{j}{w} = \card{\{k<j : (i,k) \in \inv{w}\}}.
\]
This defines a matrix of values that we call the \emph{extended Lehmer code for $w$}.
\end{definition}
\noindent
The Lehmer code of $w \in S_n$ is easily recovered from the extended Lehmer code of $w$.
\begin{lemma} \label{l:extendedextends}
Let $w \in S_n$. Then $\lehc{i}{w} = \lehm{i}{n+1}{w}$ for all $i \in [n]$.
\end{lemma}
\begin{proof}
The number of inversions $(i,k) \in \inv{w}$ satisfying $k < n + 1$ is precisely the number of inversions in $w$ of the form $(i,k)$.
\end{proof}
\begin{example}
Let $w = 31524$. The extended Lehmer code of $w$ (in matrix form) is
\begin{equation*}
\begin{bmatrix}
0 & 0 & 1 & 1 & 2 & 2\\
0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 1 & 2\\
0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0\\
\end{bmatrix}
\end{equation*}
and the Lehmer code of $w$ is $(2,0,2,0,0)$. The Lehmer code $\leh{w}$ is obtained by reading down the last column of the matrix of $\lehm{i}{j}{w}$.
\end{example}
\noindent
\begin{lemma} \label{l:codeinequality}
Let $v,w \in S_n$ and suppose $v \leq_L w$. Then, for all $i \in [n]$ and $j \in [n+1]$, we have
\begin{enumerate}[(a)]
\item $\lehm{i}{j}{v} \leq \lehm{i}{j}{w}$;
\item $\lehc{i}{v} \leq \lehc{i}{w}$.
\end{enumerate}
\end{lemma}
\begin{proof}
Suppose $v \leq_L w$. By Lemma~\ref{l:weaksubset}, we have $(i,k) \in \inv{w}$ whenever $(i,k) \in \inv{v}$. Statement (a) follows from Definition~\ref{d:extendedcode}, which, by Lemma~\ref{l:extendedextends}, proves statement (b).
\end{proof}
\begin{remark} \label{r:extendedremark}
There exist $v,w \in S_n$ satisfying the inequality $\lehc{i}{v} \leq \lehc{i}{w}$ for all $i \in [n]$, but $v \not\leq_L w$. Thus the code inequality given in Lemma~\ref{l:codeinequality}(b) is not enough to characterize the left weak order. Proposition~\ref{p:weakequivalence} gives an inequality characterization of the left weak order using the extended Lehmer code.
\end{remark}
\noindent
Whether a pair is an inversion or a non-inversion can be detected using the extended Lehmer code. The hypothesis $i \leq j$ below guarantees that either $(i,j) \in \inv{w}$ or $(i,j) \in \ninv{w}$.
\begin{lemma} \label{l:inversionequivalence}
Let $w \in S_n$, $i \in [n]$, and $j, k \in [n+1]$. Suppose $i \leq j \leq k$. Then the following are equivalent:
\begin{enumerate}[(a)]
\item $(i,j) \in \ninv{w}$;
\item $\lehm{i}{k}{w} \leq \lehm{i}{j}{w} + \lehm{j}{k}{w}$;
\item $\lehc{i}{w} \leq \lehc{j}{w} + \lehm{i}{j}{w}$.
\end{enumerate}
\end{lemma}
\begin{proof}
By Lemma~\ref{l:extendedextends}, we have $\lehc{i}{w} = \lehm{i}{n+1}{w}$ and $\lehc{j}{w} = \lehm{j}{n+1}{w}$. Thus the specialization $k = n + 1$ proves that (b) $\Rightarrow$ (c). Define the following subsets of $\inv{w}$:
\begin{align*}
A &= \{(i,l) \in \inv{w} : l < k \};\\
B &= \{(i,l) \in \inv{w} : l < j \};\\
C &= \{(i,l) \in \inv{w} : l = j \};\\
D &= \{(i,l) \in \inv{w} : j < l < k\}.
\end{align*}
It is clear that $A = B \cup C \cup D$ and that the union is pairwise disjoint. By Definition~\ref{d:extendedcode}, we have $\card{A} = \lehm{i}{k}{w}$ and $\card{B} = \lehm{i}{j}{w}$. Therefore
\begin{equation*}
\lehm{i}{k}{w} = \lehm{i}{j}{w} + \card{C} + \card{D}.
\end{equation*}
The remaining implications are proven below by comparing $\card{C} + \card{D}$ to $\lehm{j}{k}{w}$.
Suppose $(i,j) \in \ninv{w}$, so that $w(i) \leq w(j)$ and $\card{C} = 0$. If $(i,l) \in D$, then $l < k$ and $(j,l) \in \inv{w}$ since $w(j) \geq w(i) > w(l)$. Thus,
\begin{equation*}
\card{C} +\card{D} = \card{D} \leq \lehm{j}{k}{w}.
\end{equation*}
Therefore (a) $\Rightarrow$ (b).
Suppose $(i,j) \in \inv{w}$ so that $w(i) > w(j)$. Suppose that $(j,l) \in \inv{w}$ and that $j < l < k$. Then $(i,l) \in D$ since $w(i) > w(j) > w(l)$. Thus $\card{D} \geq \lehm{j}{k}{w}$. Since $(i,j) \in \inv{w}$, we have $\card{C} = 1$, which implies
\begin{equation*}
\lehm{i}{k}{w} > \lehm{i}{j}{w} + \lehm{j}{k}{w}.
\end{equation*}
Specializing to $k = n + 1$ gives the contrapositive of (c) $\Rightarrow$ (a).
\end{proof}
\noindent
The following lemma, which we frequently use in the sequel, is a simple consequence of transitivity on the usual ordering of $\mathbb{N}$.
\begin{lemma} \label{l:biconvexity}
Let $w \in S_n$ and let $i, j, k \in [n+1]$. Suppose $i\leq j \leq k$. Then
\begin{enumerate}[(a)]
\item If $(i,j) \in \inv{w}$ and $(j,k) \in \inv{w}$, then $(i,k) \in \inv{w}$;
\item If $(i,j) \in \ninv{w}$ and $(j,k) \in \ninv{w}$, then $(i,k) \in \ninv{w}$.
\item If $(i,j) \in \inv{w}$ and $(i,k) \in \ninv{w}$, then $(j,k) \in \ninv{w}$.
\item If $(i,j) \in \ninv{w}$ and $(i,k) \in \inv{w}$, then $(j,k) \in \inv{w}$.
\end{enumerate}
\end{lemma}
\begin{proof}
Each statement follows from Definition~\ref{d:inversiondef}.
\end{proof}
\noindent
The numerical characterization of the weak order given in Proposition~\ref{p:weakequivalence} below plays a central role in the theorems we obtain. For any pair $(i, j)$, we call the difference $j - i$ the \emph{height} of $(i, j)$.
\begin{proposition} \label{p:weakequivalence}
Let $v,w \in S_n$. The following statements are equivalent:
\begin{enumerate}[(a)]
\item The inequality $v \leq_L w$ holds in the left weak order;
\item For all $(i,j) \in \ninv{w}$, we have
\begin{equation*}
\lehc{i}{v} \leq \lehc{j}{v} + \lehm{i}{j}{w}.
\end{equation*}
\end{enumerate}
\end{proposition}
\begin{proof}
Suppose $v \leq_L w$ and $(i,j) \in \ninv{w}$. By Lemma~\ref{l:codeinequality}, we have
\begin{equation*}
\lehm{i}{j}{v} \leq \lehm{i}{j}{w},
\end{equation*}
and by Lemma~\ref{l:weaksubset}, we have $(i,j) \in \ninv{v}$. Thus, Lemma~\ref{l:inversionequivalence} implies
\begin{equation*}
\lehc{i}{v} \leq \lehc{j}{v} + \lehm{i}{j}{v}.
\end{equation*}
Combining these inequalities yields $\lehc{i}{v} \leq \lehc{j}{v} + \lehm{i}{j}{w}$. Thus (a) $\Rightarrow$ (b).
For the converse, suppose for a contradiction that $v\not\leq_L w$, thus $\inv{v} \not\subseteq \inv{w}$. Choose a pair $(i,k)$ of minimal height $k - i$, satisfying the property:
\begin{equation*}
\tag{P}
(i,k) \in \inv{v} \text{ and } (i,k) \in \ninv{w}.
\end{equation*}
Lemma~\ref{l:inversionequivalence} implies
\begin{equation*}
\lehc{i}{v} - \lehc{k}{v} > \lehm{i}{k}{v}.
\end{equation*}
By hypothesis, we have $\lehc{i}{v} \leq \lehc{k}{v} + \lehm{i}{k}{w}$ whenever $(i,k) \in \ninv{w}$. Thus,
\begin{equation*}
\lehm{i}{k}{w} \geq \lehc{i}{v} - \lehc{k}{v}.
\end{equation*}
Therefore $\lehm{i}{k}{w} > \lehm{i}{k}{v}$.
Definition~\ref{d:extendedcode} implies the existence of $j$ with $i < j < k$ such that $(i,j) \in \inv{w}$ and $(i,j) \in \ninv{v}$.
By Lemma~\ref{l:biconvexity} (c) and (d), we have $(j,k) \in \ninv{w}$ and $(j,k) \in \inv{v}$. Since $k - j < k - i$, this contradicts the minimality of the height of $(i,k)$ with respect to property (P).
\end{proof}
\begin{remark}
Since $\lehc{{n+1}}{v} = 0$ and $\lehm{i}{n+1}{w} = \lehc{i}{w}$, the conclusion of Lemma~\ref{l:codeinequality}(b), namely that $\lehc{i}{v} \leq \lehc{i}{w}$ for all $i \in [n]$ whenever $v \leq_L w$, is the special case $j = n+1$ of Proposition~\ref{p:weakequivalence}.
\end{remark}
\section{The distributive lattice of Lehmer codes for an interval} \label{s:distributive}
We mix partial order and lattice theoretic language in the usual way. When we say ``$(P,\leq)$ is a lattice'' we mean that the join and meet operations are given by the least upper bound and the greatest lower bound, respectively.
By \cite[Section 1.6]{birkhofflatticetheory}, the product space $\mathbb{N}^n$ is a distributive lattice, as is any sublattice of $\mathbb{N}^n$. Thus we use the symbol ``$\leq$'' for the usual order on $\mathbb{N}$, the symbol ``$\leq_S$'' for the product order on the product space $\mathbb{N}^n$, and the symbol ``$\leq_L$'' for the left weak order on $S_n$. The product order on $\mathbb{N}^n$ is given by
\begin{equation*}
(x_1,\ldots,x_n) \leq_S (y_1,\ldots,y_n) \text{ if and only if } x_i \leq y_i \text{ for all } i \in [n].
\end{equation*}
The meet and join on $\mathbb{N}^n$ are given by
\begin{align*}
(x_1,\ldots,x_n) \vee (y_1,\ldots,y_n) &= (\text{max}\{x_1,y_1\},\ldots,\text{max}\{x_n,y_n\}) \text{ and }\\
(x_1,\ldots,x_n) \wedge (y_1,\ldots,y_n) &= (\text{min}\{x_1,y_1\},\ldots,\text{min}\{x_n,y_n\}).
\end{align*}
\noindent
For an arbitrary $w \in S_n$, consider the subposet $(\leh{\Lambda_w},\leq_S)$ of $\mathbb{N}^n$. This is the set of Lehmer codes for all $v \in S_n$ satisfying $v \leq_L w$, ordered by the product order $\leq_S$. By Lemma~\ref{l:codeinequality}, we know that $v \leq_L w$ implies $\bm{c}(v) \leq_S \leh{w}$. The converse is false in general, which is shown in the example below.
\begin{example}
Let $w = 32145$ and $w' = 34125$. Then $\leh{w} = (2,1,0,0,0)$ and $\leh{w'} = (2,2,0,0,0)$. It is straightforward to check that $w \not\leq_L w'$. By comparing coordinates, we see that $\leh{w} \leq_S \leh{w'}$.
\end{example}
The above discussion shows that the set $\leh{\Lambda_w}$ contains as many elements as $\Lambda_w$, but there are more pairs of permutations related by $\leq_S$ than by $\leq_L$. We use Proposition~\ref{p:weakequivalence} to show that the subset $\leh{\Lambda_w}$ of $\mathbb{N}^n$ is a sublattice of $(\mathbb{N}^n, \leq_S)$.
\begin{lemma} \label{l:latticeopsclosed}
Let $w \in S_n$. The set $\leh{\Lambda_w}$ of Lehmer codes for the order ideal $\Lambda_w$ is closed under the join and meet of $\mathbb{N}^n$.
\end{lemma}
\begin{proof}
Let $\bm{x},\bm{y} \in \leh{\Lambda_w}$. Let $\bm{x} = (x_1,\ldots,x_n)$ and $\bm{y} = (y_1,\ldots,y_n)$. For some $u_1,u_2 \in S_n$ such that $u_1,u_2 \leq_L w$, we have $\bm{x} = \bm{c}(u_1)$ and $\bm{y} = \bm{c}(u_2)$. Let $v \in S_n$ satisfy $\bm{c}(v) = \bm{x} \wedge \bm{y}$. Suppose $(i,j) \in \ninv{w}$.
Suppose, without loss of generality, that $\text{min}\{x_j,y_j\} = x_j$. We have
\begin{equation*}
x_i \leq x_j + \lehm{i}{j}{w},
\end{equation*}
by Proposition~\ref{p:weakequivalence} applied to $u_1$. Since $\text{min}\{x_i,y_i\} \leq x_i$, we have
\begin{equation*}
\text{min}\{x_i,y_i\} \leq \text{min}\{x_j,y_j\} + \lehm{i}{j}{w}.
\end{equation*}
Since $\text{min}\{x_i,y_i\} = \lehc{i}{v}$ and $\text{min}\{x_j,y_j\} = \lehc{j}{v}$, it follows that
\begin{equation*}
\lehc{i}{v} \leq \lehc{j}{v} + \lehm{i}{j}{w}.
\end{equation*}
Proposition~\ref{p:weakequivalence} implies $v \leq_L w$. Thus $v \in \Lambda_w$. Since $\bm{x} \wedge \bm{y}$ is the Lehmer code for $v$, it follows that $\bm{x} \wedge \bm{y} \in \leh{\Lambda_w}$.
A similar argument proves that $\bm{x} \vee \bm{y} \in \leh{\Lambda_w}$.
\end{proof}
\begin{lemma} \label{l:rankeddistributive}
Every finite distributive lattice is ranked.
\end{lemma}
\begin{proof}
See \cite[Theorem 3.4.1]{ecI} and \cite[Proposition 3.4.4]{ecI}.
\end{proof}
\begin{theorem} \label{t:distributivelattice}
Let $w \in S_n$. The poset $\leh{\Lambda_w}$ is a distributive lattice. Furthermore, we have $F(\Lambda_w,q) = F(\leh{\Lambda_w},q)$.
\end{theorem}
\begin{proof}
Lemma~\ref{l:latticeopsclosed} implies that $\leh{\Lambda_w}$ is a sublattice of $\mathbb{N}^n$. Every sublattice of a distributive lattice is itself distributive, so $\leh{\Lambda_w}$ is a distributive lattice. By Lemma~\ref{l:rankeddistributive}, there is a rank function $\rho$ for $\leh{\Lambda_w}$.
Let $v \leq_L w$. Let $\text{id} = v_0 <_L \cdots <_L v_k = v$ be a maximal chain in the weak order interval $[\text{id},v]$. Since $v_{i-1} <_L v_i$, we have $\bm{c}(v_{i-1}) \leq_S \bm{c}(v_i)$ by Lemma~\ref{l:codeinequality}. Since $v_i$ covers $v_{i-1}$ in the weak order, we have
\begin{equation*}
\sum_{m=1}^n \lehc{m}{v_i} = \ell(v_i) = \ell(v_{i-1}) + 1 = \sum_{m=1}^n \lehc{m}{v_{i-1}} + 1.
\end{equation*}
This implies that $\bm{c}(v_i)$ covers $\bm{c}(v_{i-1})$ in the product order. It follows that $\rho(\bm{c}(v_i)) = \rho(\bm{c}(v_{i-1})) + 1$ for $i \in [k]$. Since $\rho(\bm{c}(\text{id})) = \ell(\text{id}) = 0$, we have $\rho(\bm{c}(v)) = \ell(v)$ for all $v \in \Lambda_w$. Thus, $\Lambda_w$ and $\bm{c}(\Lambda_w)$ have the same rank-generating function.
\end{proof}
\section{A description of the base poset for $\leh{\Lambda_w}$} \label{s:baseposet}
In this section, fix $w \in S_n$.
\subsection{Identifying the base poset $M_w$}
For any finite poset $P$, we denote the set of order ideals of $P$ by $J(P)$. The set of order ideals of a poset, ordered by inclusion, is a distributive lattice. Conversely, the fundamental theorem of finite distributive lattices states that every finite distributive lattice $L$ is isomorphic to $J(P)$ for some finite poset $P$. We call $P$ the \emph{base poset} for the distributive lattice $L$.
Recall that a \emph{join-irreducible} $z \in L$ is a nonzero lattice element that cannot be written as $x \vee y$, where $x$ and $y$ are nonzero lattice elements. It is known that the base poset $P$ of a distributive lattice $L$ is isomorphic to the set of join-irreducibles for $L$. See \cite[Theorem 3.4.1]{ecI} and \cite[Proposition 3.4.2]{ecI} for details.
In this section, we construct the base poset $M_w$ for $\bm{c}(\Lambda_w)$ by identifying its join-irreducibles.
We denote the $j$-th coordinate of $\bm{x} \in \mathbb{N}^n$ by $\pi_j(\bm{x})$.
\begin{definition} \label{d:irreduciblecoordinates}
If $i \in [n]$ and $x \in [\lehc{i}{w}]$, define $\minix{i}{x}{w}$ coordinate-wise by
\begin{equation*}
\pi_j(\minix{i}{x}{w}) = \begin{cases} 0 & \text{if $j < i$;}\\
0 & \text{if $(i,j) \in \inv{w}$;}\\
\text{max}\{0,x - \lehm{i}{j}{w}\} & \text{if $(i,j) \in \ninv{w}$.}
\end{cases}
\end{equation*}
\end{definition}
\noindent
Note that the coordinates of $\minix{i}{x}{w}$ are as small as possible while satisfying the constraints of Proposition~\ref{p:weakequivalence}. In Proposition~\ref{p:joinirreducible}, we show that the $\minix{i}{x}{w}$ defined in Definition~\ref{d:irreduciblecoordinates} are the join-irreducibles of $\leh{\Lambda_w}$.
\begin{example}
Let $w = 3412$. Then $\leh{w} = (2,2,0,0)$ and $\minix{1}{1}{w}$, $\minix{1}{2}{w}$, $\minix{2}{1}{w}$, and $\minix{2}{2}{w}$ are all defined. In general, we have $(i,i) \in \ninv{w}$ and $\lehm{i}{i}{w} = 0$. Thus, Definition~\ref{d:irreduciblecoordinates} implies that the $i$-th coordinate of $\minix{i}{x}{w}$ is always $x$. For $j \neq i$, the $j$-th coordinate is automatically zero unless $j > i$ and $(i,j) \in \ninv{w}$.
For $i = 2$, there are no such $j$, since $(2,3), (2,4) \in \inv{w}$. Thus, only the second coordinate is nonzero in $\minix{2}{1}{w}$ and $\minix{2}{2}{w}$:
\begin{equation*}
\minix{2}{1}{w} = (0,1,0,0) \text{ and } \minix{2}{2}{w} = (0,2,0,0).
\end{equation*}
For $i = 1$, we have $(1,2) \in \ninv{w}$, but $(1,3),(1,4) \in \inv{w}$. By Definition~\ref{d:irreduciblecoordinates}, we need to find $\text{max}\{0,x - \lehm{1}{2}{w}\}$ to find the second coordinate of $\minix{1}{1}{w}$ and $\minix{1}{2}{w}$. By Definition~\ref{d:extendedcode}, we have $\lehm{1}{2}{w} = 0$. Thus,
\begin{equation*}
\minix{1}{1}{w} = (1,1,0,0) \text{ and } \minix{1}{2}{w} = (2,2,0,0).
\end{equation*}
\end{example}
\begin{lemma} \label{l:ininterval}
Suppose $i \in [n]$ and $x \in [\lehc{i}{w}]$. Then $\minix{i}{x}{w} \in \bm{c}(\Lambda_w)$.
\end{lemma}
\begin{proof}
Let $v \in S_n$ be the permutation such that $\bm{c}(v) = \minix{i}{x}{w}$. We use Proposition~\ref{p:weakequivalence} to show that $v \leq_L w$. Thus suppose $(j,k) \in \ninv{w}$.
There are two cases: either $\lehc{j}{v} = 0$ or $\lehc{j}{v} > 0$.
Suppose $\lehc{j}{v} = 0$. Then $\lehc{j}{v} \leq \lehc{k}{v} + \lehm{j}{k}{w}.$
Suppose instead that $\lehc{j}{v} > 0$. By Definition~\ref{d:irreduciblecoordinates}, we have $(i, j) \in \ninv{w}$ and $\lehc{j}{v} = \lehc{i}{v} - \lehm{i}{j}{w}$. By Lemma~\ref{l:biconvexity}(b), we have $(i, k) \in \ninv{w}$. By Lemma~\ref{l:inversionequivalence}, we have
\begin{equation*}
\lehm{i}{k}{w} - \lehm{i}{j}{w} \leq \lehm{j}{k}{w},
\end{equation*}
and by Definition~\ref{d:irreduciblecoordinates}, we have
\begin{equation*}
\lehc{i}{v} - \lehm{i}{k}{w} \leq \text{max}\{0, \lehc{i}{v} - \lehm{i}{k}{w}\} = \lehc{k}{v}.
\end{equation*}
Adding the inequalities gives
\begin{equation*}
\lehc{i}{v} - \lehm{i}{j}{w} \leq \lehc{k}{v} + \lehm{j}{k}{w}.
\end{equation*}
Since $\lehc{j}{v} = \lehc{i}{v} - \lehm{i}{j}{w}$, it follows that $\lehc{j}{v} \leq \lehc{k}{v} + \lehm{j}{k}{w}$. By Proposition~\ref{p:weakequivalence}, we have $v \leq_L w$.
\end{proof}
\begin{lemma} \label{l:uniqueminimal}
Suppose $i \in [n]$ and $x \in [\lehc{i}{w}]$. Then $\minix{i}{x}{w}$ is the unique minimal element of $\bm{c}(\Lambda_w)$ with the $i$-th coordinate equal to $x$.
\end{lemma}
\begin{proof}
We have $(i,i) \in \ninv{w}$ and $\lehm{i}{i}{w} = 0$. Thus, by Definition~\ref{d:irreduciblecoordinates}, the $i$-th coordinate of $\minix{i}{x}{w}$ is $x$.
Suppose $\bm{y} \in \bm{c}(\Lambda_w)$, satisfying $\pi_i(\bm{y}) = x$. Suppose $(i, j) \in \ninv{w}$. By Proposition~\ref{p:weakequivalence}, we have
$\pi_j(\bm{y}) \geq x - \lehm{i}{j}{w}$. Since $\pi_j(\bm{y}) \geq 0$, we have $\pi_j(\bm{y}) \geq \text{max}\{0, x - \lehm{i}{j}{w}\}$. Therefore, by Definition~\ref{d:irreduciblecoordinates}, each coordinate of $\bm{y}$ is at least as large as the corresponding coordinate of $\minix{i}{x}{w}$.
Uniqueness follows from the finiteness of $\bm{c}(\Lambda_w)$ and the fact that the meet of all elements with the $i$-th coordinate equal to $x$ is an element whose $i$-th coordinate is $x$.
\end{proof}
\begin{lemma}\label{l:minixunique}
Suppose $\minix{i}{x}{w} = \minix{j}{y}{w}$, for some $i,j \in [n]$, $x \in [\lehc{i}{w}]$, and $y \in [\lehc{j}{w}]$. Then $i = j$ and $x = y$.
\end{lemma}
\begin{proof}
Let $v$ be the permutation whose Lehmer code is $\minix{i}{x}{w}$. Since $x > 0$, there is a permutation $u \in \Lambda_w$ such that $u$ is covered by $v$ in the left weak order. The codes of $u$ and $v$ differ in only one coordinate.
Suppose $i \neq j$. Then the $i$-th coordinate or the $j$-th coordinate of $\bm{c}(u)$ is the same as $\bm{c}(v)$. This either contradicts that $\bm{c}(v)$ has the property of being the unique minimal element of $\bm{c}(\Lambda_w)$ with the $i$-th coordinate equal to $x$ or that it is the unique minimal element with the $j$-th coordinate equal to $y$. Thus, $i = j$. Definition~\ref{d:irreduciblecoordinates} then implies that $x = y$.
\end{proof}
\begin{lemma} \label{l:decompose}
Let $\bm{x} = (x_1, \ldots, x_n)$ and suppose $\bm{x} \in \bm{c}(\Lambda_w)$. Then
\begin{equation*}
\bm{x} = \bigvee \minix{i}{x_i}{w},
\end{equation*}
where the join is over all $i \in [n]$ such that $x_i > 0$.
\end{lemma}
\begin{proof}
By Lemma~\ref{l:uniqueminimal}, we have $\minix{i}{x_i}{w} \leq_S \bm{x}$ for all $i \in [n]$ such that $x_i > 0$. Therefore,
\begin{equation*}
\bigvee_{i:x_i > 0} \minix{i}{x_i}{w} \leq_S \bm{x}.
\end{equation*}
Since the $i$-th coordinate of $\bm{x}$ is $x_i$, the $i$-th coordinate of $\bm{x}$ is $0$ or the same as the $i$-th coordinate of $\minix{i}{x_i}{w}$. Therefore,
\begin{equation*}
\bm{x} \leq_S \bigvee_{i:x_i > 0} \minix{i}{x_i}{w}.
\end{equation*}
Combining these inequalities proves the lemma.
\end{proof}
\begin{example}
Let $u = 3214$, $v = 2413$, and $w = 3412$. Then $u \not\in \Lambda_w$ and $v \in \Lambda_w$. We have
\begin{equation*}
\leh{u} = (2,1,0,0) \text{ and } \leh{v} = (1,2,0,0) .
\end{equation*}
By Definition~\ref{d:irreduciblecoordinates}, we have
\begin{align*}
\minix{1}{1}{w} &= (1,1,0,0); \\
\minix{1}{2}{w} &= (2,2,0,0); \\
\minix{2}{1}{w} &= (0,1,0,0); \\
\minix{2}{2}{w} &= (0,2,0,0).
\end{align*}
In each instance, the $i$-th coordinate of $\minix{i}{x}{w}$ is equal to $x$. The additional nonzero coordinates ensure that the requirements of Proposition~\ref{p:weakequivalence} are satisfied.
Note that $\leh{v} = \minix{1}{1}{w} \vee \minix{2}{2}{w}$, but $\leh{u} \neq \minix{1}{2}{w} \vee \minix{2}{1}{w}$. Thus, the hypothesis in Lemma~\ref{l:decompose} that $\bm{x} \in \leh{\Lambda_w}$ is necessary.
\end{example}
\begin{proposition} \label{p:joinirreducible}
The set
\begin{equation*}
M_w = \{ \minix{i}{x}{w} : i \in [n] \text{ and } x \in [\lehc{i}{w}]\}
\end{equation*}
is the set of join-irreducibles for $\bm{c}(\Lambda_w)$.
\end{proposition}
\begin{proof}
Suppose $\bm{y} \vee \bm{z} = \minix{i}{x}{w}$. Then either $\bm{y}$ or $\bm{z}$ has the $i$-th coordinate equal to $x$. Suppose, without loss of generality, that $\bm{y}$ has the $i$-th coordinate equal to $x$. By Lemma~\ref{l:uniqueminimal}, we have $\minix{i}{x}{w} \leq_S \bm{y}$. Since $\minix{i}{x}{w}$ is the join of $\bm{y}$ and another element, we also have $\bm{y} \leq_S \minix{i}{x}{w}$. Therefore $\minix{i}{x}{w}$ is a join-irreducible of $\bm{c}(\Lambda_w)$.
For the converse, suppose $\bm{y}$ is a join-irreducible of $\bm{c}(\Lambda_w)$. By Lemma~\ref{l:decompose},
\begin{equation*}
\bm{y} = \bigvee_{i : x_i > 0} \minix{i}{x_i}{w}.
\end{equation*}
Since $\bm{y}$ is a join-irreducible, we have $\bm{y} = \minix{i}{x_i}{w}$ for some $i \in [n]$.
\end{proof}
\subsection{A chain decomposition for $M_w$}
We can describe the set $M_w$ defined in Proposition~\ref{p:joinirreducible} more explicitly. There is a partition of $M_w$ into chains.
\begin{definition} \label{d:chaindecomposition}
Let
\begin{equation*}
\chn{i}{w} = \{ \minix{i}{x}{w} \in M_w \; : \; 1 \leq x \leq \lehc{i}{w} \},
\end{equation*}
where $\chn{i}{w}$ is possibly empty. We call the sets $\chn{1}{w},\ldots,\chn{n}{w}$ the \emph{chain decomposition} of $M_w$.
\end{definition}
\noindent
The terminology is justified by the following lemma.
\begin{lemma}\label{l:chaindecomposition}
Let $\chn{1}{w},\ldots,\chn{n}{w}$ be the chain decomposition of $M_w$. Then each $\chn{i}{w}$ is a chain of $M_w$. Furthermore, we have
\begin{equation*}
M_w = \chn{1}{w} \cup \cdots \cup \chn{n}{w},
\end{equation*}
where the union is pairwise disjoint.
\end{lemma}
\begin{proof}
By Definition~\ref{d:irreduciblecoordinates}, we have $\minix{i}{x}{w} \leq \minix{i}{y}{w}$ whenever $x \leq y$. By Lemma~\ref{l:minixunique}, the chains are pairwise disjoint as sets.
\end{proof}
\begin{lemma} \label{l:lessthan}
Suppose $i < j$ and suppose $\minix{i}{x}{w}, \minix{j}{y}{w}$ are defined. Then
\begin{equation*}
\minix{i}{x}{w} \not\leq_S \minix{j}{y}{w}.
\end{equation*}
\end{lemma}
\begin{proof}
By Definition~\ref{d:irreduciblecoordinates}, the $i$-th coordinate of $\minix{i}{x}{w}$ is $x > 0$. Since $i < j$ by hypothesis, the $i$-th coordinate of $\minix{j}{y}{w}$ is $0$. Therefore, we have $\minix{i}{x}{w} \not\leq_S \minix{j}{y}{w}$.
\end{proof}
\begin{lemma} \label{l:chains}
Suppose $(i,j) \in \inv{w}$. Then, every element of $\chn{i}{w}$ is incomparable with every element of $\chn{j}{w}$.
\end{lemma}
\begin{proof}
Let $\minix{i}{x}{w} \in \chn{i}{w}$ and let $\minix{j}{y}{w} \in \chn{j}{w}$. If $(i, j) \in \inv{w}$, then by Definition~\ref{d:irreduciblecoordinates}, the $j$-th coordinate of $\minix{i}{x}{w}$ is $0$ and the $j$-th coordinate of $\minix{j}{y}{w}$ is $y > 0$. Therefore, we have $\minix{j}{y}{w} \not\leq_S \minix{i}{x}{w}$.
By Lemma~\ref{l:lessthan}, we have $\minix{i}{x}{w} \not\leq_S \minix{j}{y}{w}$. Thus, the chains $\chn{i}{w}$ and $\chn{j}{w}$ are pairwise incomparable.
\end{proof}
\begin{lemma} \label{l:ninvrelation}
Suppose $(i,j) \in \ninv{w}$, $x \in [\lehc{i}{w}]$, and $y \in [\lehc{j}{w}]$. Then we have $\minix{j}{y}{w} \leq_S \minix{i}{x}{w}$ if and only if $y \leq x - \lehm{i}{j}{w}$.
\end{lemma}
\begin{proof}
If $\minix{j}{y}{w} \leq_S \minix{i}{x}{w}$, then by Definition~\ref{d:irreduciblecoordinates}, we have
\begin{equation*}
y \leq \text{max}\{0, x - \lehm{i}{j}{w}\}.
\end{equation*}
Since $y > 0$, we have $y \leq x - \lehm{i}{j}{w}$.
Conversely, suppose that $y \leq x - \lehm{i}{j}{w}$. Then $y \leq \pi_j(\minix{i}{x}{w})$, which implies $\minix{j}{y}{w} \leq \minix{i}{x}{w}$ by Lemma~\ref{l:uniqueminimal}.
\end{proof}
\noindent
The theorem below summarizes important properties of $M_w$. There are no relations between chains $\chn{i}{w}$ and $\chn{j}{w}$ when $(i,j) \in \inv{w}$. Otherwise, if $(i,j) \in \ninv{w}$, then the relations are determined by the extended Lehmer code entry $\lehm{i}{j}{w}$.
\begin{theorem} \label{t:codejoinirreducibles}
Let $w \in S_n$ and let
\begin{eqnarray*}
\begin{aligned}
M_w &= \{\minix{i}{x}{w} : i \in [n] \text{ and } x \in [\lehc{i}{w}]\} \text{ and }\\
\chn{i}{w} &= \{\minix{i}{x}{w} : x \in [\lehc{i}{w}]\}.
\end{aligned}
\end{eqnarray*}
\begin{enumerate}[(a)]
\item The set of join-irreducibles for $\leh{\Lambda_w}$ is $M_w$.
\item As distributive lattices, we have $(J(M_w),\subseteq) \cong (\leh{\Lambda_w},\leq_S)$.
\item If $i < j$ and $\minix{i}{x}{w}, \minix{j}{y}{w}$ are defined, then $\minix{i}{x}{w} \not\leq_S \minix{j}{y}{w}$.
\item If $(i,j) \in \inv{w}$, then every element of $\chn{i}{w}$ is incomparable with every element of $\chn{j}{w}$.
\item If $(i,j) \in \ninv{w}$, $x \in [\lehc{i}{w}]$, and $y \in [\lehc{j}{w}]$, then
\begin{equation*}
\minix{j}{y}{w} \leq_S \minix{i}{x}{w} \iff y \leq x - \lehm{i}{j}{w}.
\end{equation*}
\end{enumerate}
\end{theorem}
\begin{proof}
Part (a) is given by Proposition~\ref{p:joinirreducible}. Part (b) can be proved by using \cite[Proposition 3.4.2]{ecI}.
Part (c) is given by Lemma~\ref{l:lessthan}, part (d) is given by Lemma~\ref{l:chains}, and Part (e) is given by Lemma~\ref{l:ninvrelation}.
\end{proof}
\begin{example}
Let $w = 41528637$. Then $\leh{w} = (3,0,2,0,3,1,0,0)$. To construct $M_w$ we first form the chains $\chn{i}{w}$ whenever $\lehc{i}{w} > 0$. Then we add the inter-chain relations using the last part of Theorem~\ref{t:codejoinirreducibles}. To refine the disjoint union of the chains, we need the following values of $\lehm{i}{j}{w}$:
\begin{equation*}
\lehm{1}{3}{w} = 1, \lehm{1}{5}{w} = 2, \lehm{1}{6}{w} = 2, \lehm{3}{5}{w} = 1, \text{ and } \lehm{3}{6}{w} = 1. \end{equation*}
As $(5,6) \in \inv{w}$, the associated chains are pairwise incomparable.
\begin{center}
\begin{tikzpicture}[scale = 1.1]
\chain{3}{0}{0.0}{1}
\chain{2}{1.0}{0.0}{3}
\chain{3}{2.0}{0.0}{5}
\filldraw (3.0,1.0) circle (0.05cm) node[below=5pt] {$\chn{6}{w}$};
\connectem{2}{5.5}{1.0}{6.5}{0.0}
\connectem{1}{6.5}{1.0}{8.5}{0.0}
\connectem{1}{7.5}{0.0}{6.5}{1.0}
\chain{3}{5.5}{0.0}{1}
\chain{2}{6.5}{0.0}{3}
\chain{3}{7.5}{0.0}{5}
\filldraw (8.5,1.0) circle (0.05cm) node[below=5pt] {$\chn{6}{w}$};
\end{tikzpicture}
\end{center}
\begin{center}
Figure 2: Construction of $M_w$
\end{center}
We construct the poset $M_w$ in two steps. We begin with the chain decomposition in Definition~\ref{d:chaindecomposition}. Then we use Theorem~\ref{t:codejoinirreducibles}(e) to add relations between the chains. See Figure 2.
\end{example}
\section{Rank-symmetry of $\Lambda_w$}
\noindent
Given a polynomial $f$ with nonzero constant term, we denote by $f^R$ the polynomial
\begin{equation*}
f^R(q) = q^{\text{deg}(f)} f(1/q).
\end{equation*}
Roughly speaking, this is the polynomial whose coefficients are obtained by reversing the coefficients in $f$. Note that the constant term of $F(\Lambda_w, q)$ is always nonzero.
A polynomial is \emph{symmetric} if the coefficients, when read left to right, are the same as when read right to left. So, a polynomial with nonzero constant term is symmetric if and only if $f = f^R$.
A ranked poset $P$ is \emph{rank-symmetric} if its rank-generating function $F(P, q)$ is symmetric. By \cite[Corollary 3.11]{Weiweakorder}, if a permutation $w$ is separable, then the interval $\Lambda_w$ is rank-symmetric. We give another class of rank-symmetric weak order intervals.
Recall that the \emph{dual} $P^*$ of a poset $P$ is a poset on the same set as $P$, such that $x \leq y$ in $P^*$ if and only if $y \leq x$ in $P$. A poset is \emph{self-dual} if $P \cong P^*$. If a ranked poset $P$ is self-dual, then it is rank-symmetric. However, the converse is false. The following proposition is not a characterization of rank-symmetric intervals, but it provides a large class of weak order intervals that are rank-symmetric.
\begin{proposition} \label{p:selfdual}
Let $w \in S_n$. If $M_w$ is self-dual, then the weak order interval $(\Lambda_w,\leq_L)$ is rank-symmetric.
\end{proposition}
\begin{proof}
By Theorem~\ref{t:distributivelattice} and Theorem~\ref{t:codejoinirreducibles}(a), we have
\begin{equation*}
F(J(M_w),q) = F(\Lambda_w,q).
\end{equation*}
The result then follows from the fact that $J(P)^* \cong J(P^*)$ for any poset $P$.
\end{proof}
\noindent
There is a standard embedding of $S_m \times S_n$ into $S_{m+n}$: If $v = v_1 \cdots v_m \in S_m$ and $w = w_1 \cdots w_n \in S_n$, then
\begin{equation*}
v \oplus w = v_1 \cdots v_m (w_1 + m)(w_2 + m)\cdots(w_n + m)
\end{equation*}
defines the embedding via $(v,w) \mapsto v \oplus w$. In $S_{m+n}$, each $u \leq_L v \oplus w$ can be decomposed as $v' \oplus w'$, where $v' \leq_L v$ and $w' \leq_L w$. Therefore, we have
\begin{equation*}
F(\Lambda_{v\oplus w},q) = F(\Lambda_v,q)F(\Lambda_w,q).
\end{equation*}
By \cite[Proposition 3.1.2]{bb}, an alternative characterization of left weak order is given by
\begin{equation*}
u \leq_L w \iff \ell(u) + \ell(w u^{-1}) = \ell(w).
\end{equation*}
Using this characterization, it is straightforward to show that
\begin{equation*}
u \leq_L w \iff uw^{-1} \leq_L w^{-1} \iff \ell(uw^{-1}) = \ell(w) - \ell(u).
\end{equation*}
It follows that $F(\Lambda_{w^{-1}},q) = F^R(\Lambda_w, q)$.
\begin{proposition} \label{p:lowerbound}
For any $w \in S_n$, the interval $\Lambda_{w \oplus w^{-1}}$ is rank-symmetric. It follows that there are at least $\left(\lfloor\frac{n}{2}\rfloor\right)!$ permutations in $S_n$ such that $\Lambda_w$ is rank-symmetric.
\end{proposition}
\begin{proof}
The rank-generating function of $\Lambda_{w\oplus w^{-1}}$ in the left weak order is given by
\begin{eqnarray*}
\begin{aligned}
F(\Lambda_{w\oplus w^{-1}},q) &= F(\Lambda_w,q) F(\Lambda_{w^{-1}},q)\\
&= F(\Lambda_w,q) F^R(\Lambda_w,q).
\end{aligned}
\end{eqnarray*}
Since $(f \cdot f^R)^R = f \cdot f^R$ for any polynomial $f$ with nonzero constant term, it follows that $F(\Lambda_{w \oplus w^{-1}},q)$ is symmetric.
\end{proof}
\section{Counterexamples}
Theorem~\ref{t:distributivelattice} asserts that every weak order interval has a rank-generating function that is the same as the rank-generating function of some distributive lattice. This is not true for arbitrary ranked posets. Thus, it is natural to ask whether ranked posets similar to weak order intervals in $S_n$ possess this property. For the strong Bruhat order on $S_4$ and the weak order on the Coxeter group $D_4$, we show that there are intervals that do not have the rank-generating function of a distributive lattice. Thus, Theorem~\ref{t:distributivelattice} does not generalize to the strong Bruhat order or to arbitrary weak order intervals of arbitrary Coxeter groups.
The strong Bruhat order $(S_n, \leq_B)$ is defined similarly to the weak order. The condition $w = s_i v$ where $s_i$ is an adjacent transposition is replaced by the condition $w = t v$ where $t$ is \emph{any} transposition. Under the strong Bruhat order, the lower order ideal of the permutation $w = 3412$ has rank-generating function given by
\begin{equation*}
F((\Lambda_{3412}, \leq_B),q) = 1 + 3q + 5q^2 + 4q^3 + q^4.
\end{equation*}
If there exists a distributive lattice $L$ such that $F(L, q) = F((\Lambda_{3412},\leq_B),q)$, then the dual $L^*$ is a distributive lattice with rank-generating function
\begin{equation*}
F(L^*,q) = 1 + 4q + 5q^2 + 3q^3 + q^4.
\end{equation*}
By the fundamental theorem of finite distributive lattices, there is a finite poset $P$ such that $L^* \cong J(P)$. Such a poset $P$ would have $4$ minimal elements, which means that there would be at least $\binom{4}{2} = 6$ two-element ideals. Thus no such distributive lattice $L$ exists.
The Coxeter group of type $D_4$ has distinguished generating set
\begin{equation*}
S = \{s_1,s_2,s_3,s_4\}
\end{equation*}
subject to the relations
\begin{eqnarray*}
\begin{aligned}
s_i^2 &= 1 \text{ for all } i \in \{1,2,3,4\};\\
(s_is_j)^2 &= 1 \text{ for all } i,j \in \{1,3,4\};\\
(s_2s_i)^3 &= 1 \text{ for all } i \in \{1,3,4\}.
\end{aligned}
\end{eqnarray*}
Let $w = s_2 s_1 s_3 s_4 s_2 s_4 s_3 s_1 s_2$. This element of $D_4$ appeared in \cite{fbI} as an example of an element with a non-contractible inversion triple. The interval $(\Lambda_w, \leq_L)$ has a rank-generating function given by
\begin{equation*}
F(\Lambda_w,q) = 1 + q + 3q^2 + 3q^3 + 4q^4 + 4q^5 + 3q^6 + 3q^7 + q^8 + q^9.
\end{equation*}
This rank-generating function appears in \cite{stanleylefschetz} in a different context. As stated in that paper, it is straightforward to check that there is no distributive lattice with that rank-generating function.
\begin{center}
ACKNOWLEDGEMENTS
\end{center}
We thank Richard Green for the helpful comments and suggestions. We also thank the referees for their useful and insightful suggestions.
\begin{raggedright}
\end{raggedright}
\end{document} |
\begin{document}
\title{Quantum probabilities for time-extended alternatives }
\author{Charis Anastopoulos\footnote{[email protected]}\\
{\small Department of Physics, University of Patras, 26500 Patras,
Greece}
\\
and \\ Ntina Savvidou \footnote{[email protected]} \\
{\small Theoretical Physics Group, Imperial College, SW7 2BZ,
London, UK}}
\maketitle
\begin{abstract}
We study the probability assignment for the outcomes of
time-extended measurements. We construct the class-operator that
incorporates the information about a generic time-smeared quantity.
These class-operators are employed for the construction of
Positive-Operator-Valued-Measures for the time-averaged quantities.
The scheme highlights the distinction between velocity and momentum
in quantum theory. Propositions about velocity and momentum are
represented by different class-operators, hence they define
different probability measures. We provide some examples, we study
the classical limit and we construct probabilities for generalized
time-extended phase space variables.
\end{abstract}
\section{Introduction}
In this article we study the probability assignment for quantum
measurements of observables that take place in finite time. Usually
measurements are treated as instantaneous. One assumes that the
duration of interaction between the measured system and the
macroscopic measuring device is much smaller than any macroscopic
time scale characterising the behaviour of the measurement device.
Although this is a reasonable assumption, measurements that take
place in a macroscopically distinguishable time interval are
theoretically conceivable, too. In the latter case one expects that
the corresponding probabilities would be substantially different
from the ones predicted by the instantaneous approximation.
Moreover, the consideration of the duration of the measurement as a
determining parameter allows one to consider observables whose
definition explicitly involves a finite time interval. Such
observables may not have a natural counterpart when restricted to
single-time alternatives. In what follows, we also study physical
quantities whose definition involves time-derivatives of single-time
observables.
There are different procedures we can follow for the study of
finite-time measurements. For example, one may employ standard
models of quantum measurement and refrain from taking the limit of
almost instantaneous interaction between the measuring system and
the apparatus \cite{PeWo85}. However, there is an obvious drawback.
For example, a measurement of momentum can be implemented by
different models for the measuring device. They all give essentially
a probability that is expressed in terms of momentum spectral
projectors (more generally positive operators). However, if one
considers a measurement of finite duration, it is not obvious how to
identify the physical quantity of the measured system to which the
resulting probability measure corresponds.
This problem is especially pronounced when one considers
measurements of relatively large duration. For the reason above, we
choose a different starting point: we identify time-extended
classical quantities and then we construct corresponding operators
that act on the Hilbert space of the measured system. A special case
of such observables are quantities that are smeared in time. If an
operator $\hat{A}$ has (generalised) eigenvalues $a$, then we
identify a probability density for its time-smeared values $\langle
a \rangle_f = \int_0^T dt \, a_t f(t)$. Here $f(t)$ is a positive
function defined on the interval $[0, T]$. The special case $f(t) =
\frac{1}{T}$ corresponds to the usual notion of time-averaging.
Having identified the operators that represent the time-extended
quantities, it is easy to construct the corresponding probability
measure for such observables using, for example, simple models for
quantum measurement.
Our analysis is facilitated by a comparison with the decoherent
histories approach to quantum mechanics \cite{Gri84, Omn8894,
GeHa9093, Har93a}. The identification of operators that correspond
to time-extended observables is structurally similar to the
description of temporally extended alternatives in the decoherent
histories approach \cite{Har, scc, IL, Sav99, MiHa96, Ha02,
BoHa05}. The physical context is different, in the sense that the
decoherent histories scheme attempts the description of individual
closed systems, while the study of measurements we undertake here
involves---by necessity---the consideration of open systems.
However,
the mathematical descriptions are very closely related.
A history is defined as a sequence of propositions about the
physical system at successive moments of time. A proposition in
quantum mechanics is represented by a projection operator; hence, a
general $n$-time history $\alpha$ corresponds to a string of
projectors $\{\hat{P}_{t_1}, \hat{P}_{t_2}, \ldots, \hat{P}_{t_n}
\}$. To determine the probabilities associated to these histories we
define the class operator $\hat{C}_{\alpha}$,
\begin{equation}
\hat{C}_{\alpha} =\hat{U}^{\dagger}(t_1) \hat{P}_{t_1} \hat{U}(t_1)
\ldots \hat{U}^{\dagger}(t_n) \hat{P}_{t_n} \hat{U}(t_n),
\label{ccll}
\end{equation}
where $\hat{U}(t) = e^{-i \hat{H}t}$ is the evolution operator for
the system. For a pair of histories $\alpha$ and $\alpha'$, we
define the decoherence functional
\begin{equation}
d(\alpha, \alpha') = Tr \left( \hat{C}_{\alpha}^{\dagger}
\hat{\rho}_0 \hat{C}_{\alpha'} \right). \label{decfun}
\end{equation}
A key feature of the decoherent histories scheme is that
probabilities can be assigned to an exclusive and exhaustive set of
histories only if the decoherence condition
\begin{eqnarray}
d(\alpha, \alpha') = 0, \; \alpha \neq \alpha'
\end{eqnarray}
holds. In this case one may define a probability measure on this
space of histories
\begin{eqnarray}
p(\alpha) = Tr \left( \hat{C}_{\alpha}^{\dagger} \hat{\rho}
\hat{C}_{\alpha} \right). \label{pmeas}
\end{eqnarray}
One of the most important features of the decoherent histories
approach is its rich logical structure: logical operations between
histories can be represented in terms of algebraic relations between
the operators that represent a history. This logical structure is
clearly manifested in the History Projection Operator (HPO)
formulation of decoherent histories \cite{I94}. In this paper we
will make use of the following property. If $\{\alpha_i\}$ is a
collection of mutually exclusive histories, each represented by the
class operator $\hat{C}_{\alpha_i}$ then the coarse-grained history
that corresponds to the statement that any one of the histories $i$
has been realised is represented by the class operator $\sum_i
\hat{C}_{\alpha_i}$. This property has been employed by Bosse and
Hartle \cite{BoHa05}, who define class operators corresponding to
time-averaged position alternatives using path-integrals. A similar
construction in a slightly different context is given by Sokolovski
et al.\ \cite{So98, LS00}---see also Ref.~\cite{Caves}.
Our first step is to generalise the results of \cite{BoHa05} by
constructing such class operators for the case of a generic
self-adjoint operator $\hat{A}$ that are smeared with an arbitrary
function $f(t)$ within a time interval $[0, T]$. This we undertake
in section 2.
In section 3, we describe a toy model for a time-extended
measurement. It leads to a probability density for the measured
observable that is expressed solely in terms of the class operators
$\hat{C}_{\alpha}$. The same result can be obtained without the use
of models for the measurement device through a purely mathematical
argument. We identify generic Positive-Operator-Valued Measure
(POVM) that is bilinear with respect to the class operators
$\hat{C}_{\alpha}$ and compatible with Eq. (\ref{pmeas}).
The result above implies that $\hat{C}_{\alpha}$ can be employed in
two different roles: first, as ingredients of the decoherence
functional in the decoherent histories approach and second, as
building block of a POVM in an operational approach to quantum
theory. The same mathematical object plays two different roles: in
\cite{BoHa05} class operators corresponding to time-average
observables are constructed for use within the decoherent histories
approach, while the same objects are used in \cite{LS00} for the
determination of probabilities of time-extended position
measurements.
The approach we follow allows the definition of more general
observables. Within the context of the HPO approach, velocity and
momentum are represented by different (non-commuting) operators:
they are in principle distinguishable concepts \cite{Sav99}.
In
section 4, we show that indeed one may assign class operators to
alternatives corresponding to values of velocity that are distinct
from those corresponding to values of momentum. These operators
coincide at the limit of large coarse-graining (which often
coincides with the classical limit). In effect, two quantities that
coincide in classical physics are represented by different objects
quantum mechanically. It is quite interesting that the POVMs
corresponding to velocity are substantially different from those
corresponding to momentum. At the formal level, it seems that
quantum theory allows the existence of instruments which are able
to distinguish between the velocity and momentum of a quantum
particle. {\em A priori}, this is not surprising: in single-time
measurements, velocity cannot be defined as an independent
variable. For extended-in-time measurements, it is not inconceivable
that one type of detector responds to the rate of change of the
position variable and another to the particle's momentum. Whether
this result is a mere mathematical curiosity, or whether one can
design experiments that will demonstrate this difference completely
will be addressed in a future publication. In section 4 we also
study more general time-extended measurements, namely ones that
correspond to time-extended phase space properties of the quantum
system.
\section{Operators representing time-averaged quantities}
\subsection{The general form of the class operators}
We construct the class operators that correspond to the proposition
``the value of the observable $\hat{A}$, smeared with a function
$f(t)$ within a time interval $[0, T]$, takes values in the subset
$U$ of the real line ${\bf R}$.''
We denote by $a_t$ a possible value of the observable $\hat{A}$ at
time $t$. Then at the continuous-time limit the time-smeared value
$A_f$ of $\hat{A}$ reads $A_f := \int_0^T a_t f(t) dt$. Note that
for the special choice $f(t) = \frac{1}{T} \chi_{[0, T]}(t)$, where
$\chi_{[0, T]}$ is the characteristic function of the interval $[0,
T]$, we obtain the usual notion of the time-averaged value of a
physical quantity.
There are two benefits from the introduction of a general function
$f(t)$. First, it can be chosen to be a continuous function of
$t$, thus allowing the consideration of more general `observables';
for example observables that involve the time derivatives of $a_t$.
Second, when we consider measurements, the form of $f(t)$ may be
determined by the operations we effect on the quantum system. For
example, $f(t)$ may correspond to the shape of an electromagnetic
pulse acting upon a charged particle during measurement.
To this end, we construct the relevant class operators in a
discretised form. We partition the interval $[0, T]$ into $n$
equidistant time-steps $ t_1, t_2, \ldots, t_n $. The integral
$\int_0^T dt f(t) a_t$ is obtained as the continuous limit of
$\delta t \sum_i f(t_i) a_{t_i} = \frac{T}{n} \sum_i f(t_i)
a_{t_i}$.
For simplicity of exposition we assume that the operator $\hat{A}$
has discrete spectrum, with eigenvectors $|a_i\rangle$ and
corresponding eigenvalues $a_i$ \footnote{The generalization of our
results for continuous spectrum is straightforward.}. We write
$\hat{P}_{a_i} = | a_i \rangle \langle a_i|$. By virtue of Eq.
(\ref{ccll}) we construct the class operator
\begin{eqnarray}
\hat{C}_{\alpha} = e^{i\hat{H}T/n} |a_1 \rangle \langle
a_1|e^{i\hat{H}T/n} |a_2 \rangle \langle a_2 | \ldots \langle
a_{n-1}|e^{i \hat{H}T/n}|a_n \rangle \langle a_n| \label{ca}
\end{eqnarray}
that represents the history $\alpha = ( a_1, \ldots , a_n)$.
The
proposition ``the time-averaged value of $\hat{A}$ lies in a subset
$U$ of the real line'' can be expressed by summing over all
operators of the form of Eq. (\ref{ca}), for which $\frac{T}{n}
\sum_i f(t_i) a_i \in U $,
\begin{eqnarray}
\hat{C}_U = \sum_{a_1, a_2, \ldots, a_n} \chi_U\left(\frac{T}{n}
\sum_i f(t_i) a_i \right) \hspace{3cm} \nonumber \\
\times\, e^{i\hat{H}T/n}|a_1 \rangle \langle a_1|e^{i\hat{H}T/n}
|a_2 \rangle \langle a_2 | \ldots \langle a_{n-1}|e^{i
\hat{H}T/n}|a_n \rangle \langle a_n|. \label{class}
\end{eqnarray}
If we partition the real axis of values of the time-averaged
quantity $A_f$ into mutually exclusive and exhaustive subsets $U_i$,
the corresponding alternatives for the value of $A_f$ will also be
mutually exclusive and exhaustive.
Next, we insert the Fourier transform $\tilde{\chi}_U$ of $\chi_U$
defined by
\begin{eqnarray}
\chi_{U}(x) := \int \frac{dk}{2 \pi} e^{ikx} \tilde{\chi}_U(k)
\end{eqnarray}
into
Eq. (\ref{class}). We thus obtain
\begin{eqnarray}
\hat{C}_U = \int \frac{dk}{2 \pi} \tilde{\chi}_U(k) e^{i
\hat{H}T/n} \left( \sum_{a_1} e^{ik T f(t_1) a_1/n} |a_1 \rangle
\langle a_1| \right) e^{i\hat{H}T/n} \ldots \nonumber \\ \times e^{i
\hat{H}T/n} \left( \sum_{a_n} e^{ik T f(t_n) a_n/n} |a_n \rangle
\langle a_n| \right).
\end{eqnarray}
By virtue of the spectral theorem we have
\begin{eqnarray}
\sum_{a_i} e^{ik T f(t_i) a_i/n} |a_i \rangle \langle a_i | = e^{i k
f(t_i) \hat{A}T/n}.
\end{eqnarray}
Hence,
\begin{eqnarray}
\hat{C}_U = \int \frac{dk}{2 \pi}\, \tilde{\chi}_U(k)
\prod_{i=1}^n [
e^{i\hat{H} T/n}e^{ik f(t_i) \hat{A}T/n} ]. \label{class4}
\end{eqnarray}
From Eq. (\ref{class4}) we obtain
\begin{eqnarray}
\hat{C}_U = \int_U da \; \hat{C}(a), \label{class5}
\end{eqnarray}
where
\begin{eqnarray}
\hat{C}(a) := \int \frac{dk}{2 \pi} e^{-i ka} \hat{U}_f(T, k),
\label{class2}
\end{eqnarray}
and where
\begin{eqnarray}
\hat{U}_f(T,k) := \lim_{n \rightarrow \infty} \prod_{i=1}^n
[e^{i\hat{H} T/n}e^{ik f(t_i) \hat{A}T/n} ].
\end{eqnarray}
The operator $\hat{U}_f$ is the generator of an
one-parameter family of transformations
\begin{eqnarray}
-i\frac{\partial}{\partial s} \hat{U}_f(s,k) = [ \hat{H} + k f(s)
\hat{A}] \hat{U}_f(s,k).
\end{eqnarray}
This implies that
\begin{eqnarray}
\hat{U}_f(T,k) = {\cal T} e^{i \int_0^T dt (\hat{H} + k f(t) \hat{A})},
\label{Uf}
\end{eqnarray}
where
${\cal T}$ signifies the time-ordered expansion for the
exponential. The construction of $\hat{C}_U$ then is mathematically
identical to the determination of a propagator in presence of a
time-dependent external force proportional to $\hat{A}$.
For $f(t) = \frac{1}{T} \chi_{[0, T]}(t)$
we obtain
\begin{eqnarray}
\hat{C}_U = \int \frac{dk}{2\pi} \,\tilde{\chi}_U(k) e^{i \hat{H} T
+ i k \hat{A}},
\end{eqnarray}
that has been constructed through path-integrals for specific
choices of the operator $\hat{A}$ in \cite{So98, LS00, BoHa05}.
If $f(t)$ has support in the interval $[t, t'] \subset [0, T]$ then
\begin{eqnarray}
\hat{C}_U = e^{-i \hat{H}t} \int_U da \; \left( \int \frac{dk}{2
\pi} e^{-i ka} {\cal T} e^{i \int_{t}^{t'} ds (H + k f(s)
\hat{A})}\right) e^{i\hat{H}(T-t')}.
\end{eqnarray}
We note that outside the interval $[t, t']$ only the Hamiltonian
evolution contributes to $\hat{C}_U$.
It will be convenient to represent the proposition about the
time-averaged value of $\hat{A}$ by the operator
\begin{eqnarray}
\hat{D}(a) : = e^{- i \hat{H}T} \hat{C}(a),
\end{eqnarray}
or else
\begin{eqnarray}
\hat{D}(a) = \int \frac{dk}{2 \pi} e^{ -i ka} \;{\cal T} e^{ik
\int_0^T dt f(t) \hat{A}(t)}, \label{toe}
\end{eqnarray}
where $\hat{A}(t)$ is the Heisenberg-picture operator $e^{i
\hat{H}t} \hat{A} e^{-i \hat{H}t}$.
If $[\hat{H}, \hat{A}] = 0$, then
\begin{eqnarray}
\hat{U}_f(T,k) = e^{i \hat{H}T} e^{i k \hat{A}\int_0^T dt f(t) }.
\end{eqnarray}
Hence,
\begin{eqnarray}
\hat{D}_U := \int_U da \hat{D}(a) = \chi_U[\hat{A} \int_0^T dt
f(t)].
\end{eqnarray}
When we use $f(t)$ to represent time-smearing, it
is convenient to require that $\int_0^T dt f(t) = 1$ in order to
avoid any rescaling in the values of the observable. Then $\hat{D}_U
= \chi_U(\hat{A})$. We conclude therefore that the operator
representing time-averaged value of $\hat{A}$ coincides with the one
representing a single-time value of $\hat{A}$.
\paragraph{The limit of large coarse-graining.} If we integrate
$\hat{D}(a)$ over a relatively large sample set $U$ the integral
over $dk$ is dominated by small values of $k$. To see this, we
approximate the integration over a subset of the real line of width
$\Delta$ centered around $a = a_0$, by an integral with a smeared
characteristic function $\exp[- (a-a_0)^2/2 \Delta^2]$. This leads
to
\begin{eqnarray}
\hat{D}_U = \sqrt{2\pi} \Delta \int \frac{dk}{2
\pi} e^{-i k a_0 - \Delta^2 k^2/2} \, {\cal T} e^{ik \int_0^T
dt f(t) \hat{A}(t)}
\end{eqnarray}
that is dominated by values of $k \sim
\Delta^{-1}$.
The term $k f(t)$ in the time-ordered exponential of Eq. (\ref{toe})
is structurally similar to a coupling constant. Hence, for
sufficiently large values of $\Delta$ we write
\begin{eqnarray}
{\cal T} e^{ik \int_0^T dt f(t) \hat{A}(t)} \simeq e^{ik \int_0^T dt
f(t) \hat{A}(t)},
\end{eqnarray}
i.e., the zero-th loop order contribution to the
time-ordered exponential dominates. We therefore conclude that
\begin{eqnarray}
\hat{D}_U \simeq \chi_U \left[ \int_0^T dt f(t) \hat{A}(t) \right].
\label{111}
\end{eqnarray}
$\hat{D}_U$ is almost equal to a spectral element of the
time-averaged Heisenberg-picture operator $\int_0^T dt f(t)
\hat{A}(t)$. This generalises the result of \cite{BoHa05}, which was
obtained for configuration space variables at the limit $\hbar
\rightarrow 0$.
We estimate the leading order correction to the approximation
involved in Eq. (\ref{111}). The immediately larger contribution to
the time-ordered exponential of Eq. (\ref{toe}) is
\begin{eqnarray}
\frac{k^2}{2} \int_0^T ds \int_0^s ds' \, f(s) f(s') \, [\hat{A}(s),
\hat{A}(s')].
\end{eqnarray}
The contribution of this term must be much smaller than the first
term in the expansion of the time-ordered exponential, namely $k
\int_0^T ds f(s) \hat{A}(s)$. We write the expectation values of
these operators on a vector $| \psi \rangle$ in order to obtain the
following condition
\begin{eqnarray}
|\int_0^T ds \int_0^s ds' f(s) f(s') \langle \psi|[\hat{A}(s),
\hat{A}(s')]| \psi \rangle| \ll \Delta \; | \int_0^T ds \langle \psi|
\hat{A}(s) | \psi \rangle|. \label{condit}
\end{eqnarray}
The above condition is satisfied rather trivially for bounded
operators if $\|\hat{A}\| \ll \Delta$. In that case, the operator
$\hat{D}_U$ captures little, if anything, from the possible values
of $\hat{A}$. In the generic case however, Eq. (\ref{condit}) is to
be interpreted as a {\em condition} on the state $|\psi \rangle$.
Eq. (\ref{111}) provides a good approximation if the two-time
correlation functions of the system are relatively small.
Furthermore, if the function $f(t)$ corresponds to weighted
averaging, i.e., if $f(t) \geq 0 $, and if $f$ does not have any
sharp peaks, then the condition $ \int_0^T dt f(t) = 1$ implies that
the values of $f(t)$ are of the order $\frac{1}{T}$.
We denote by $\tau$ the correlation time of $\hat{A}(s)$, i.e. the
values of $|s-s'|$ for which $|\langle \psi|[\hat{A}(s),
\hat{A}(s')]| \psi \rangle|$ is appreciably larger than zero. Then
at the limit
$T \gg \tau$ the
left-hand side of Eq. (\ref{condit}) is of the order $O\left(
\frac{\tau^2}{T^2}\right)$. Hence, for sufficiently large values of
$T$
one expects that Eq. (\ref{111}) will be satisfied with a fair degree of accuracy.
The argument above does not hold if $f$ is allowed to take
on negative values, which is the case for the velocity
samplings that we consider in section 4.
\subsection{Examples}
We study some interesting examples of class operators corresponding
to time-smeared quantities. In particular, we consider the
time-smeared position for a particle and a simple system that is
described by a finite-dimensional Hilbert space.
\subsubsection{Two-level system}
In a two-level system described by the Hamiltonian $\hat{H} = \omega
\hat{\sigma}_z$, we consider time-averaged samplings of the values
of the operator $\hat{A} = \hat{\sigma}_x$. We compute
\begin{eqnarray}
\hat{U}(k,T) = \cos \sqrt{k^2 + \omega^2 T^2}\, \hat{1} + i \,
\frac{\sin \sqrt{k^2 + \omega^2 T^2}}{\sqrt{k^2 + \omega^2 T^2}} ( k
\hat{\sigma_x} + \omega T \hat{\sigma}_z).
\end{eqnarray}
Then the class operator $\hat{C}(a)$ is
\begin{eqnarray}
\hat{C}(a) = \frac{ \omega T}{2\sqrt{1 - a^2}} J_1( \omega T
\sqrt{1 - a^2})\, \hat{1} + \frac{a \omega T}{2 \sqrt{1-a^2}} J_1
(\omega T \sqrt{1 - a^2}) \hat{\sigma}_x \nonumber \\
+ \frac{i \omega T}{2} J_0(\omega T \sqrt{1 - a^2}) \hat{\sigma}_z,
\end{eqnarray}
where $J_n$ stands for the Bessel function of order $n$. Note that
the expression above holds for $|a| \leq 1$. For $|a|
> 1$, $\hat{C}(a) = 0$, as is expected by the fact that
$||\hat{\sigma}_x|| = 1$.
\subsubsection{Position samplings}
The case $\hat{A} = \hat{x}$ for ordinary time-averaging ($f(t) =
\frac{1}{T}$) has been studied in \cite{LS00, BoHa05} using path
integral techniques. Here we generalise these results by considering
the case of a general smearing function $f(t)$.
We consider the case of a harmonic oscillator of mass $m$ and
frequency $\omega$. The determination of the propagator
$\hat{U}_f(T,k)$ for a harmonic oscillator acted by an external
time-dependent force is well-known. It leads to the following
expression for the operator $\hat{D}(a)$
\begin{eqnarray}
\langle x|\hat{D}(a)|x' \rangle = \frac{m \omega}{2 \pi B_f \sin
\omega T} \exp \left[ \frac{ -i m \omega}{2 \sin \omega T} \left(
\cos \omega T (x'^2 - x^2) - 2 x x' \right. \right. \nonumber \\
\left. \left.
+ \, \frac{2}{B_f} (A_f x' + a)(x'-x) - \frac{ 2 \omega C_f}{B_f^2
\sin \omega T} (x - x')^2 \right) \right],
\end{eqnarray}
where
\begin{eqnarray}
A_f &:=& \frac{1}{\sin \omega T} \int_0^T ds \, \sin \omega s \, f(s)\\
B_f &:=& \frac{1}{\sin \omega T} \int_0^T ds \, \sin \omega(T-s) f(s) \\
C_f &:=& \frac{1}{ \omega \sin \omega T} \int_0^T ds \, \sin \omega
(T-s) f(s) \int_0^s ds' \, \sin \omega s'\, f(s').
\end{eqnarray}
The corresponding operator for the free particle is obtained at the
limit $\omega \rightarrow 0$
\begin{eqnarray}
\langle x|\hat{D}(a)|x' \rangle = \frac{m}{2 \pi B_f T} \exp \left[
\frac{-im}{2T} \left( (x'^2 -x^2) + \frac{2}{B_f} (A_f x' - a)(x' -
x) - \frac{2C_f}{B_f^2 T} (x' - x)^2 \right) \right], \label{freep}
\end{eqnarray}
where
\begin{eqnarray}
A_f &=& \frac{1}{T} \int_0^T ds \, s \, f(s) \\
B_f &=& \frac{1}{ T} \int_0^T ds \, (T-s) f(s) \\
C_f &=& \frac{1}{ T} \int_0^T ds \, (T-s) f(s) \int_0^s ds' \, s'
f(s').
\end{eqnarray}
\section{Probability assignment}
\subsection{The decoherence functional}
For a pair of histories $(U,
U')$ that correspond to different samplings of the time-smeared
values of $\hat{A}$ the decoherence functional $ d(U,U')$ is
\begin{eqnarray}
d(U,U') = Tr \left(\hat{D}^{\dagger}_U e^{-i \hat{H}T} \hat{\rho}_0
e^{i \hat{H}T} \hat{D}_{U'} \right).
\end{eqnarray}
From the expression above, we can read the probabilities that are
associated to any set of alternatives that satisfies the decoherence
condition. In section 2, we established that in the limit of large
coarse-graining, or for very large values of time $T$, the operators
$\hat{D}_U$ approximate projection operators. Hence, if we partition
the real line of values of $A_f$ into sufficiently large exclusive
sets $U_i$ the decoherence condition will be satisfied. A
probability measure will be therefore defined as
\begin{eqnarray}
p(U_i) = Tr \left[\chi_{U_i}\left(\int_0^T dt f(t) \hat{A}(t)\right)
e^{-i \hat{H}T }\hat{\rho}_0 e^{i \hat{H}T} \right].
\end{eqnarray}
This is the same as in the case of a single-time measurement of the
observable $\int_0^T dt f(t) \hat{A}(t)$ taking place at time $t =
T$. For further discussion, see \cite{BoHa05}.
\subsection{Probabilities for measurement outcomes}
Next, we show that the class operators $\hat{C}(a)$ can be employed
in order to define a POVM for a measurement with finite duration.
For this purpose, we consider a simple measurement scheme.
We
assume that the system interacts
with a measurement device characterised by a continuous pointer
basis $|x \rangle$. For simplicity, we assume that the self-dynamics
of the measurement device is negligible. The interaction between the
measured system and the apparatus is described by a Hamiltonian of
the form
\begin{eqnarray}
\hat{H}_{int} = f(t) \hat{A} \otimes \hat{K},
\end{eqnarray}
where $\hat{K}$ is the `conjugate momentum' of the pointer variable
$\hat{x}$
\begin{eqnarray}
\hat{K} = \int dk \,k \,|k \rangle
\langle k|,
\end{eqnarray}
where $\langle x| k \rangle = \frac{1}{\sqrt{2\pi}} e^{-ikx}$.
The initial state of the apparatus (at $t = 0$) is assumed to be
$|\Psi_0 \rangle$ and the initial state of the system corresponds to
a density matrix $\hat{\rho}_0$.
With the above assumptions, the reduced density matrix of the
apparatus at time $T$ is
\begin{eqnarray}
\hat{\rho}_{app}(T) = \int dk \int dk' \; Tr
\left(\hat{U}^{\dagger}_f(T,k) \hat{\rho}_0 \hat{U}_f(T,k') \right)
\langle k |\Psi_0 \rangle \langle \Psi_0|k' \rangle \; |k \rangle
\langle k'|,
\end{eqnarray}
where $\hat{U}_f(T,k)$ is given by Eq. (\ref{Uf}). Then, the
probability distribution over the pointer variable $x$ (after
reduction) is
\begin{eqnarray}
\langle x|\hat{\rho}_{app}(T)|x \rangle = \int \frac{dk dk'}{2 \pi}
e^{-i(k-k')x} \langle k |\Psi_0 \rangle \langle \Psi_0|k' \rangle
\; Tr \left(\hat{U}^{\dagger}_f(T,k) \hat{\rho}_0 \hat{U}_f(T,k')
\right).
\end{eqnarray}
The probability that the pointer variable takes values within a set
$U$ is
\begin{eqnarray}
p(U) = Tr \left(e^{-i \hat{H}T}\hat{\rho}_0 e^{i
\hat{H}T}\hat{\Pi}_U \right),
\end{eqnarray}
where
\begin{eqnarray}
\hat{\Pi}_U = \int_U dx \; \hat{D}(w^*_x) \hat{D}^{\dagger}(w_x) :=
\int_U dx \, \hat{\Pi}_x, \label{central}
\end{eqnarray}
where $ w_x(a):= \langle x-a|\Psi_0 \rangle$ and where we employed
the notation
\begin{eqnarray}
\hat{D}(w_x) = \int da \, w_x(a) \hat{D}(a).
\end{eqnarray}
The operators $\hat{\Pi}_U$ define a POVM for the time-extended
measurement of $\hat{A}$: they are positive by construction, they
satisfy the property $ \hat{\Pi}_{U_1 \cup U_2} = \hat{\Pi}_{U_1} +
\hat{\Pi}_{U_2}$, for $U_1 \cap U_2 = \emptyset$ and they are
normalised to unity
\begin{eqnarray}
\hat{\Pi}_{\bf R} = \int_{\bf R} dx \, \hat{\Pi}_x = 1.
\end{eqnarray}
Note that the smearing of the class-operators is due to the spread
of the wave function of the pointer variable.
In what follows we employ for convenience a Gaussian function
\begin{eqnarray}
w(a) = \frac{1}{(2 \pi \delta^2)^{1/4}} e^{- \frac{a^2}{4
\delta^2}}. \label{ww}
\end{eqnarray}
In the free-particle case, the class operators in Eq. (\ref{freep})
lead to the following POVM
\begin{eqnarray}
\langle y|\hat{\Pi}_x|y' \rangle = \frac{m}{\sqrt{2} \pi A_f T} \exp
\left[ -\left(\frac{m^2 \delta^2}{2 A_f^2 T^2} + \frac{A_f^2}{8
\delta^2}(1 - \frac{2C_f}{A_f^2T})^2\right) (y - y')^2 +
\frac{im}{A_fT} x(y' - y) \right]. \label{fff}
\end{eqnarray}
In Eq. (\ref{fff}), we chose an even time-averaging function, i.e.
$f(s) = f(T-s)$, in which case $A_f = B_f$.
The POVM in Eq. (\ref{central}) may also be constructed without
reference to a specific model for the measurement device. In
particular, we partition the space of values for $A_f$ into sets of
width $\delta$ and employ the expression Eq. (\ref{pmeas}) for the
ensuing probabilities. It is easy to show that these probabilities
are reproduced---up to terms of order $O(\delta)$---by a POVM of the
form Eq. (\ref{central}), with the
smearing function $w$ of Eq. (\ref{ww})
\footnote{ The proof follows closely an analogous one in
\cite{Ana05}.}.
If we restrict our considerations to the above measurement model,
then there is no way we can interpret the POVM of Eq.
(\ref{central}) as corresponding to values of $A_f$. This
interpretation is possible by the explicit construction and by the
identification (see Sec. 2) of the class operators $\hat{C}(a)$ as
the only mathematical objects that correspond to such time-averaged
alternatives.
\section{More general samplings}
\subsection{Velocity versus momentum}
Within the context of the History Projection Operator scheme,
Savvidou showed that histories of momentum differ in general from
histories of velocity, in the sense that they are represented by
different mathematical objects \cite{Sav99}. The corresponding
probabilities are also expected to be different. In single-time
quantum theory the notion of velocity (that involves differentiation
with respect to time) cannot be distinguished from the notion of
momentum. However, when we deal with histories, time differentiation
is defined {\em independently} of the evolution laws. One may
therefore consider alternatives corresponding to different values of
velocity.
In particular, if $x_f = \int_0^T dt x_t f(t)$ denotes the
time-smeared value of the position variable, we define the
time-smeared value of the corresponding velocity variable as
\begin{eqnarray}
\dot{x}_f := - x_{\dot{f}},
\end{eqnarray}
provided that the function $f$
satisfies $f(0) = f(T) = 0$.
Notice here that when we measure the time-averaged value of an
observable within a time-interval $[0, T]$, we employ positive
functions $f(t)$ that are $\cap$-shaped and that they satisfy
$\int_0^Tdt f(t) = 1$. Such functions correspond to the intuitive
notions of averaging the value of a quantity with a specific weight.
However, to determine the time-average velocity---weighted by a
positive and normalised function $f$---one has to smear the
corresponding position variable with the function $\dot{f}(t)$ that
in the general case is neither positive nor normalised. Therefore
{\em the form of the smearing function determines the physical
interpretation of the observable we consider} \cite{Sav05}.
Next, we compare the class operators corresponding to the average of
velocity and of momentum, with a common weight $f$. We denote the
velocity class operator as
\begin{eqnarray}
\hat{D}^{\dot{x}}(a) = \int \frac{dk}{2 \pi} e^{-ika} {\cal T} e^{i
\int_0^T dt \,\dot{f}(t) \hat{x}(t)},
\end{eqnarray}
and the momentum class operators as
\begin{eqnarray}
\hat{D}^{p}(a) = \int
\frac{dk}{2 \pi} e^{-ika} {\cal T} e^{i \int_0^T dt \, f(t)
\hat{p}(t)}.
\end{eqnarray}
At the limit of large coarse-graining, the operator
$\hat{D}^{\dot{x}}_U :=\int_U da \, \hat{D}^{\dot{x}}(a)$ is
approximately equal to
\begin{eqnarray}
\hat{D}^{\dot{x}}_U = \chi_U(\int_0^T dt \dot{f}(t) \hat{x}(t)) =
\chi_U( \frac{1}{m} \int_0^T dt \, f(t) \hat{p}(t)),
\end{eqnarray}
{\em i.e.}, the class-operator for
time-averaged momentum coincides with that for time-averaged
velocity. This result reproduces the classical notion that $p = m
\dot{x}$. However, the limit of large coarse-graining may be
completely trivial if the temporal correlations of position are
large.
For the case of a free particle, with the convenient choice $f(t) =
\frac{\pi}{T} \sin\frac{\pi t}{T}$, we obtain
\begin{eqnarray}
\hat{D}^p_U &=& \int_U dp \, |p \rangle \langle p|, \\
\hat{D}^{\dot{x}}_U &=& \int_U da \; \left(\sqrt{\frac{4 i m
T}{\pi^3}} \int dp \; e^{i\frac{4 m T}{\pi^2} (a - p/m)^2} \, |p
\rangle \langle p| \right). \label{ddx}
\end{eqnarray}
It is clear that the alternatives of time-averaged momentum are
distinct from those of time-averaged velocity. Still, at the limit
$T \rightarrow \infty$, $ \hat{D}^p_U =
m \hat{D}^{\dot{x}}_U $.
The POVM corresponding to Eq. (\ref{ddx}) is
\begin{eqnarray}
\hat{\Pi}^{\dot{x}}(v) = \frac{1}{\sqrt{2 \pi \sigma_v^2(T)}}
\int dp \; \exp
\left[ - \frac{1}{2 \sigma_v^2(T)} (v - p/m)^2 \right]\, |p \rangle
\langle p|, \label{POVMvelocity}
\end{eqnarray}
where $\sigma_v^2(T) = \delta^2 + \frac{\pi^4}{2^{8} m^2 T^2
\delta^2}$.
The POVM of Eq. (\ref{POVMvelocity}) commutes with the momentum
operator. One could therefore claim that it corresponds to an
unsharp measurement of momentum. However, the commutativity of this
POVM with momentum follows only from the special symmetry of the
Hamiltonian for a free-particle, it does not hold in general.
Moreover, at the limit of small $T$, the distribution corresponding
to Eq. (\ref{POVMvelocity}) has a very large mean deviation. Hence,
even for a wave-packet narrowly concentrated in momentum, the spread
in measured values is large. Note that at the limit $T \rightarrow
0$, the deviation $\sigma^2_v(T) \rightarrow \infty$ and the POVM
(\ref{POVMvelocity}) tends weakly to zero. For $T
\gg (m \delta^2)^{-1}$, $\sigma_v^2(T) \simeq \delta^2$ and the
velocity POVM is identical to one obtained by an instantaneous
momentum measurement.
The results of section 3.2 suggest the different measurement schemes
that are needed for the distinction of velocity and momentum. For a
momentum measurement the interaction Hamiltonian should be of the
form
\begin{eqnarray}
\hat{H}_{int}^{p} = f(t) \, \hat{p}\, \otimes \hat{K},
\end{eqnarray}
where $f(t)$ is a $\cap$-shaped positive-valued function. For a
velocity measurement the interaction Hamiltonian is
\begin{eqnarray}
\hat{H}_{int}^{\dot{x}} = - \dot{f}(t) \, \hat{x} \otimes \hat{K}.
\end{eqnarray}
The two Hamiltonians differ not only on the coupling but also on the
shape of the corresponding smearing functions: $\dot{f}(t)$ takes
both positive and negative values and by definition it satisfies
$\int_0^T \dot{f}(t)\, dt = 0$. The description above suggests that
momentum measurements can be obtained by coupling a charged particle
to a magnetic field pulse, while velocity measurements can be
obtained by a coupling to an electric field pulse of a different
shape. The possibility of designing realistic experiments that could
distinguish between the momentum and the velocity content of a
quantum state will be discussed elsewhere.
\subsection{Lagrangian action}
One may also consider samplings corresponding to the values of the
Lagrangian action of the system $\int_0^T dt L(x, \dot{x})$, where
$L$ is the Lagrangian. In this case the results can be easily
expressed in terms of Feynman path integrals: it is straightforward
to demonstrate---see Ref. \cite{So98}---that these coincide with
samplings of the Hamiltonian, and that the corresponding POVM is
that of energy measurements.
\subsection{Phase space properties}
It is possible to construct class-operators (and corresponding
POVMs) for more general alternatives that involve phase-space
variables. To see this, we consider a set of coherent states $|z
\rangle$ on the Hilbert space, where $z$ denotes points of the
corresponding classical phase space. The finest-grained histories
corresponding to an $n$-time coherent state path $z_0, t_0, z_1,
t_1, \ldots z_n, t_n$, with $t_i - t_{i-1} = \delta t$ are
represented by the class operator
\begin{eqnarray}
\hat{C}_{z_0, t_0; z_1, t_1; \ldots; z_n, t_n} = | z_0 \rangle
\langle z_0 |e^{i \hat{H} \delta t}|z_1 \rangle \langle z_1|
e^{i\hat{H} \delta t} |z_2 \rangle \cdots \langle z_{n-1}|e^{i
\hat{H} \delta t}|z_n \rangle \langle z_n|.
\end{eqnarray}
We use the standard Gaussian coherent states, which are defined
through an inner product
\begin{eqnarray}
\langle z|z' \rangle = e^{ - \frac{|z|^2}{2} - \frac{|z'|^2}{2} +
z^* z'}.
\end{eqnarray}
Then, at the limit of small $\delta t$
\begin{eqnarray}
\hat{C}_{z_0, t_0; z_1, t_1; \ldots; z_n, t_n} = |z_0 \rangle
\langle z_n |\; \exp \left(\frac{|z_n|^2}{2} - \frac{|z_0|^2}{2} +
\sum_{i=1}^n \left[ - z^*_i(z_i - z_{i-1}) + i \delta t \,h(z^*_i, z_{i-1}) \right]
\right),
\end{eqnarray}
where $ h(z^*, z) = \langle z|\hat{H}|z \rangle$. Following the
same steps as in section 2.1 we construct the class operator
corresponding to different values of an observable $A(z_0, z_1,
\ldots, z_n)$. If the observable is ultra-local, i.e., if it can be
written in the form $\sum_i f(t_i) a(z_i)$, then the results reduce
to those of section 2.1 for the time-smeared alternatives of an
operator.
However, the function in question may involve time derivatives of
phase space variables (at the continuous limit), in which case it
will be rather different from the ones we considered previously.
For a generic function $F(z_i)$ we obtain the following class
operator
that corresponds to the value $F =a$
\begin{eqnarray}
\langle z_0| \hat{C}(a)|z_f \rangle = \int \frac{dk}{2 \pi} e^{-i
ka} \; \lim_{n \rightarrow \infty} \int [dz_1]
\ldots [dz_{n-1}] \hspace{3cm} \nonumber \\
\times \exp \left[\frac{|z_n|^2}{2} - \frac{|z_0|^2}{2} + \sum_i
\left( z_i(z^*_i - z^*_{i-1}) + i \delta t \,h(z^*_i, z_{i-1}) \right) - i k
F[z_i]\right]. \label{cc}
\end{eqnarray}
The integrations over $[dz_i]$ define a coherent-state
path-integral at the continuous limit. However, if $F[z_i]$ is not
an ultra-local function, the path integral does not correspond to a
unitary operator of the form ${\cal T} e^{\int_0^T dt \hat{K}_t}$,
for some family of self-adjoint operators $\hat{K}_t$. In this
sense, the consideration of phase space paths provides alternatives
that do not reduce to those studied in Section 2. Note however, that
these alternatives cannot be defined in terms of projection
operators; nonetheless the corresponding class operators can be
employed to define a POVM using Eq. (\ref{central}).
The simplest non-trivial example of a non-ultralocal function is the
Liouville term of the phase space action (for its physical
interpretation in the histories theory see \cite{Sav99})
\begin{eqnarray}
V :=i \int_0^T dt \dot{z}^* z.
\end{eqnarray}
It is convenient to employ the discretised expression $V =
i \sum_{i=1}^n z_i(z^*_i -
z^*_{i-1})$. Its substitution in Eq. (\ref{cc}) effects a
multiplication of the Liouville term in the exponential by a factor
of $1+k$.
For a harmonic oscillator Hamiltonian $h(z^*,z) = \omega z^* z$,
the path integral can be explicitly computed, yielding the
unitary operator $e^{\frac{i}{1+k} \hat{H}T}$. Hence,
\begin{eqnarray}
\hat{C}(a) = \int \frac{dk}{2 \pi} e^{-i ka} e^{\frac{i}{1+k}
\hat{H}T} = s_a(\hat{H}),
\end{eqnarray}
where $s_a(x): = \int \frac{dk}{2 \pi} e^{-ika + i \frac{x}{1+k}}$.
The class-operator $\hat{C}(a)$ corresponding to the values of the
function $V$ is then a function of the Hamiltonian.
\section{Conclusions}
We studied the probability assignment for time-extended
measurements. We constructed the class operators $\hat{C}(a)$,
which correspond to time-extended alternatives for a quantum
system. We showed that these operators can be employed to construct
POVMs describing the probabilities for time-averaged values of a
physical quantity. In light of these results, quantum mechanics has
room for measurement schemes that distinguish between momentum and
velocity. Finally, we demonstrated that a large class of
time-extended phase space observables may be explicitly constructed.
\end{document}
\begin{document}
\title{Maximal measure and entropic continuity of Lyapunov exponents for $\mathcal C^r$ surface diffeomorphisms with large entropy}
\author{David Burguet}
\address{Sorbonne Universite, LPSM, 75005 Paris, France}
\email{[email protected]}
\subjclass[2010]{37A35, 37C40, 37D25}
\date{September 2022}
\begin{abstract}We prove a finite smooth version of the entropic continuity of Lyapunov exponents proved recently by Buzzi, Crovisier and Sarig for $\mathcal C^\infty$ surface diffeomorphisms \cite{BCS2}.
As a consequence we show that any $\mathcal C^r$, $r>1$, smooth surface diffeomorphism $f$ with $h_{top}(f)> \frac{1}{r}\limsup_n\frac{1}{n}\log^+\|df^n\|_\infty$ admits a measure of maximal entropy. We also prove the $\mathcal C^r$ continuity of the topological entropy at $f$.
\end{abstract}
\keywords{}
\maketitle
\pagestyle{myheadings} \markboth{\normalsize\sc David
Burguet}{\normalsize\sc Existence of maximal measure for $C^r$ surface diffeos}
\section*{Introduction}
The entropy of a dynamical system quantifies the dynamical complexity by counting distinct orbits.
There are topological and measure theoretical versions which are related by a variational principle: the topological entropy of a continuous map on a compact space is equal to the supremum of the entropy of the invariant (probability) measures. An invariant measure is said to be of maximal entropy (or a maximal measure) when its entropy is equal to the topological entropy, i.e. this measure realizes the supremum in the variational principle. In general a topological system may not admit a measure of maximal entropy. But such a measure exists for dynamical systems satisfying some expansiveness properties. In particular
Newhouse \cite{new} has proved their existence for $C^{\infty}$ systems by using Yomdin's theory. In the present paper we show the existence of a measure of maximal entropy for $\mathcal C^r$, $1<r<+\infty$, smooth surface diffeomorphisms with large entropy.
Other important dynamical quantities for smooth systems are given by the Lyapunov exponents which estimate the exponential growth of the derivative. For $\mathcal C^\infty$ surface diffeomorphisms, J. Buzzi, S. Crovisier and O. Sarig proved recently a property of continuity in the entropy of the Lyapunov exponents with many statistical applications \cite{BCS2}. More precisely, they showed that for a $\mathcal C^\infty$ surface diffeomorphism $f$, if $\nu_k$ is a converging sequence of ergodic measures with $\lim_k h(\nu_k)=h_{top}(f)$, then the Lyapunov exponents of $\nu_k$ are going to the (average) Lyapunov exponents of the limit (which is a measure of maximal entropy). We prove a $\mathcal C^r $ version of this fact for $1<r<+\infty$.
\section{Statements}
We define now some notations to state our main results. Fix a compact Riemannian surface $(\mathbf M, \|\cdot\|)$. For $r>1$ we let $\mathrm{Diff}^r(\mathbf M)$ be the set of $\mathcal C^r$ diffeomorphisms of $\mathbf M$. For $f\in \mathrm{Diff}^r(\mathbf M)$ we let $F:\mathbb PT\mathbf M\circlearrowleft$ be the induced map on the projective tangent bundle $\mathbb PT\mathbf M=T^1\mathbf M/{\pm 1}$ and we denote by $\phi, \psi :\mathbb PT\mathbf M\rightarrow \mathbb R$ the continuous observables on $\mathbb PT\mathbf M$ given respectively by $\phi:(x,v)\mapsto \log \|d_xf(v)\|$ and $\psi:(x,v)\mapsto \log \|d_xf(v)\|-\frac{1}{r}\log^+\|d_xf\|$ with $\|d_xf\|=\sup_{v\in T_x\mathbf M\setminus \{0\}}\frac{\|d_xf(v)\|}{\|v\|}$.
For $k\in \mathbb N^*$ we define more generally $\phi_k:(x,v)\mapsto \log \|d_xf^k(v)\|$ and $\psi_k:(x,v)\mapsto\phi_k(x,v)-\frac{1}{r}\sum_{l=0}^{k-1}\log^+\|d_{f^l x}f\|$.
Then we let $\lambda^{+}(x)$ and $\lambda^{-}(x)$ be the pointwise Lyapunov exponents given by $\lambda^{+}(x)= \limsup_{n\rightarrow +\infty}\frac{1}{n}\log \|d_xf^n\|$ and $\lambda^{-}(x)=\liminf_{n\rightarrow -\infty}\frac{1}{n}\log \|d_xf^n\|$ for any $x\in \mathbf M$ and $\lambda^+(\mu)=\int \lambda^+(x) \, d\mu(x)$, $\lambda^-(\mu)=\int \lambda^-(x) \, d\mu(x)$, for any $f$-invariant measure $\mu$.
Also we put $\lambda^+(f):=\lim_n\frac{1}{n}\log^+ \|df^n\|_\infty$ with $\|df^n\|_\infty=\sup_{x\in \mathbf M}\|d_xf^n\|$. The function $f\mapsto \lambda^+(f)$ is upper semi-continuous in the $\mathcal C^1$ topology on the set of $\mathcal C^1$ diffeomorphisms on $\mathbf M$. For an $f$-invariant measure $\mu$ with $\lambda^+(x)>0\geq \lambda^-(x)$ for $\mu$ a.e. $x$, there are by Oseledets\footnote{We refer to \cite{Pes} for background on Lyapunov exponents and Pesin theory.} theorem one-dimensional invariant vector spaces $\mathcal{E}_+(x)$ and $\mathcal{E}_-(x)$, resp. called the unstable and stable Oseledets bundle, such that $$\forall \, \mu \text{ a.e. } x\ \forall v\in \mathcal{E}_\pm(x)\setminus \{0\}, \ \lim_{n\rightarrow \pm \infty}\frac{1}{n}\log \|d_xf^n(v)\|=\lambda^{\pm}(x).$$
Then we let $\hat \mu^+$ be the $F$-invariant measure given by the lift of $\mu$ on $\mathbb PT \mathbf M$ with $\hat \mu^+(\mathcal E_+)=1$. When writing $\hat \mu^+$ we assume implicitly that the push-forward measure $\mu$ on $\mathbf M$ satisfies $\lambda^+(x)>0\geq \lambda^-(x)$ for $\mu$ a.e. $x$.
A sequence of $\mathcal C^r$, with $r>1$, surface diffeomorphisms $(f_k)_k$ on $\mathbf M$ is said to converge $\mathcal C^r$ weakly to a diffeomorphism $f$, when $f_k$ goes to $f$ in the $\mathcal C^1$ topology and the sequence $(f_k)_k$ is $\mathcal C^r$ bounded. In particular $f$ is $\mathcal C^{r-1}$.
\begin{thm}[Buzzi-Crovisier-Sarig, Theorem C \cite{BCS2}]\label{cochon}
Let $(f_k)_{k\in \mathbb N}$ be a sequence of $\mathcal C^r$, with $r>1$, surface diffeomorphisms converging $\mathcal C^r$ weakly to a diffeomorphism $f$. Let $(F_k)_{k\in \mathbb N}$ and $F$ be the lifts of $(f_k)_{k\in \mathbb N}$ and $f$ to $\mathbb P T\mathbf M$. Assume there is a sequence $(\hat \nu_k^+)_k$ of ergodic $F_k$-invariant measures converging to $\hat \mu$.\\
Then there are $\beta\in [0,1]$ and $F$-invariant measures $\hat \mu_0$ and $\hat \mu_1^+$ with $\hat \mu= (1-\beta)\hat \mu_0+\beta\hat\mu_1^+$, such that:
$$\limsup_{k\rightarrow +\infty} h(\nu_k)\leq \beta h(\mu_1)+\frac{\lambda^+(f)+\lambda^+(f^{-1})}{r-1}.$$
\end{thm}
In particular when $f$ ($=f_k$ for all $k$) is $\mathcal C^\infty$ and $h(\nu_k)$ goes to the topological entropy of $f$, then $\beta$ is equal to $1$ and therefore $\lambda^+(\nu_k)$ goes to $\lambda^+(\mu)$:
\begin{coro*}[Entropic continuity of Lyapunov exponents \cite{BCS2}]
Let $f$ be a $\mathcal C^\infty$ surface diffeomorphism with $h_{top}(f)>0$.\\
Then if $(\nu_k)_k$ is a sequence of ergodic measures converging to $\mu$ with $\lim_k h(\nu_k)=h_{top}(f)$, then \begin{itemize}
\item $h(\mu)=h_{top}(f)$ \footnote{This follows from the upper semi-continuity of the entropy function $h$ on the set of $f$-invariant probability measures for a $\mathcal C^\infty$ diffeomorphism $f$ (in any dimension), which was first proved by Newhouse in \cite{new}.},
\item $\lim_k\lambda^+(\nu_k)=\lambda^+(\mu)$.
\end{itemize}
\end{coro*}
We state an improved version of Buzzi-Crovisier-Sarig Theorem, which allows to prove the same entropy continuity of Lyapunov exponents for $\mathcal C^r$, $1<r<+\infty$, surface diffeomorphisms with large enough entropy (see Corollary \ref{fir}).
\begin{theorem*}\label{ense}
Let $(f_k)_{k\in \mathbb N}$ be a sequence of $\mathcal C^r$, with $r>1$, surface diffeomorphisms converging $\mathcal C^r$ weakly to a diffeomorphism $f$. Let $(F_k)_{k\in \mathbb N}$ and $F$ be the lifts of $(f_k)_{k\in \mathbb N}$ and $f$ to $\mathbb P T\mathbf M$. Assume there is a sequence $(\hat \nu_k^+)_k$ of ergodic $F_k$-invariant measures converging to $\hat \mu$.\\
Then for any $\alpha>\frac{\lambda^+(f)}{r}$, there are $\beta=\beta_\alpha\in [0,1]$ and $F$-invariant measures $\hat \mu_0=\hat\mu_{0,\alpha}$ and $\hat \mu_1^+=\hat \mu_{1,\alpha}^+$ with $\hat \mu= (1-\beta)\hat \mu_0+\beta\hat\mu_1^+$, such that:
$$\limsup_{k\rightarrow +\infty} h(\nu_k)\leq \beta h(\mu_1)+(1-\beta)\alpha.$$
\end{theorem*}
In the appendix we explain how the Main Theorem implies the Buzzi-Crovisier-Sarig statement. We state now some consequences of the Main Theorem.
\begin{coro}[Existence of maximal measures and entropic continuity of Lyapunov exponents]\label{fir}
Let $f$ be a $\mathcal C^r$, with $r>1$, surface diffeomorphism satisfying $h_{top}(f)> \frac{\lambda^+(f)}{r}$.\\
Then $f$ admits a measure of maximal entropy. More precisely, if $(\nu_k)_k$ is a sequence of ergodic measures converging to $\mu$ with $\lim_k h(\nu_k)=h_{top}(f)$, then \begin{itemize}
\item $h(\mu)=h_{top}(f)$,
\item $\lim_k\lambda^+(\nu_k)=\lambda^+(\mu)$.
\end{itemize}
\end{coro}
It was proved in \cite{BCS1} that any $\mathcal C^r$ surface diffeomorphism satisfying $h_{top}(f)> \frac{\lambda^+(f)}{r}$ admits at most finitely many ergodic measures of maximal entropy. On the other hand, J. Buzzi has built examples of $\mathcal C^r$ surface diffeomorphisms for any $+\infty>r>1$ with $\frac{h_{top}(f)}{\lambda^+(f)}$ arbitrarily close to $1/r$ without a measure of maximal entropy \cite{buz}. It is expected that for any $r>1$ there are $\mathcal C^r$ surface diffeomorphisms satisfying $h_{top}(f)=\frac{\lambda^+(f)}{r}>0$ without measure of maximal entropy or with infinitely many such ergodic measures, but these questions are still open. Such results were already known for interval maps \cite{bbur,buru,buzthe}.
\begin{proof}
We consider the constant sequence of diffeomorphisms equal to $f$. By taking a subsequence, we can assume that $(\hat\nu_k^+)_k$ is converging to a lift $\hat \mu$ of $\mu$.
By using the notations of the Main Theorem with $h_{top}(f)>\alpha>\frac{\lambda^+(f)}{r}$, we have
\begin{align*}
h_{top}(f)&= \lim_{k\rightarrow +\infty} h(\nu_k),\\
& \leq \beta h(\mu_1)+(1-\beta)\alpha, \\
&\leq \beta h_{top}(f)+(1-\beta)\alpha,\\
(1- \beta) h_{top}(f)&\leq (1-\beta)\alpha.
\end{align*}
But $h_{top}(f)> \alpha$, therefore $\beta=1$, i.e. $\hat \mu_1^+=\hat \mu$ and $\lim_k\lambda^+(\nu_k)=\lambda^+(\mu)$. Moreover $h_{top}(f)=\lim_{k\rightarrow +\infty} h(\nu_k)\leq \beta h(\mu_1)+(1-\beta)\alpha=h(\mu)$. Consequently $\mu$ is a measure of maximal entropy of $f$.
\end{proof}
\begin{coro}[Continuity of topological entropy and maximal measures]
Let $(f_k)_k$ be a sequence of $\mathcal C^r$, with $r>1$, surface diffeomorphisms converging $\mathcal C^r$ weakly to a diffeomorphism $f$ with
$h_{top}(f)\geq \frac{\lambda^+(f)}{r}$.\\
Then
$$h_{top}(f)=\lim_k h_{top}(f_k).$$
Moreover if $h_{top}(f)>\frac{\lambda^+(f)}{r}$ and $\nu_k$ is a maximal measure of $f_k$ for large $k$, then any limit measure of $(\nu_k)_k$ for the weak-$*$ topology is a maximal measure of $f$.
\end{coro}
\begin{proof}
By Katok's horseshoes theorem \cite{Kat}, the topological entropy is lower semi-continuous for the $\mathcal C^1$ topology on the set of $\mathcal C^r$ surface diffeomorphisms. Therefore it is enough to show the upper semi-continuity.
By the variational principle there is a sequence of probability measures $(\nu_k)_{k\in K}$, $K\subset \mathbb N$ with $\sharp K=\infty$, such that:
\begin{itemize}
\item $\nu_k$ is an ergodic $f_k$-invariant measure for each $k$,
\item $\lim_{k\in K}h(\nu_k)=\limsup_{k\in \mathbb N}h_{top}(f_k)$.
\end{itemize}
By extracting a subsequence we can assume $(\hat \nu_k^+)_k$ is converging to a $F$-invariant measure $\hat\mu$ in the weak-$*$ topology. We can then apply the Main Theorem for any $\alpha> \frac{\lambda^+(f)}{r}$ to get for some $f$-invariant measures $\mu_1, \mu_0$ and $\beta \in [0,1]$ (depending on $\alpha$) with $\mu=(1-\beta)\mu_0+\beta\mu_1$:
\begin{align}\label{grave}
\limsup_kh_{top}(f_k)&= \lim_{k} h(\nu_k), \nonumber\\
&\leq \beta h(\mu_1) +(1-\beta)\alpha,\\
&\leq \beta h_{top}(f)+(1-\beta)\alpha, \nonumber\\
&\leq \max(h_{top}(f), \alpha). \nonumber
\end{align}
By letting $\alpha$ go to $\frac{\lambda^+(f)}{r}$ we get
\begin{align*}
\limsup_kh_{top}(f_k)&\leq h_{top}(f).
\end{align*}
If $h_{top}(f)>\frac{\lambda^+(f)}{r}$, we can fix $\alpha\in \left]\frac{\lambda^+(f)}{r}, h_{top}(f)\right[$ and the inequalities (\ref{grave}) may be then rewritten as follows :
\begin{align*}\limsup_kh_{top}(f_k)&\leq \beta h(\mu_1) +(1-\beta)\alpha,\\
&\leq h_{top}(f).
\end{align*}
By the lower semi-continuity of the topological entropy, we have $h_{top}(f)\leq \limsup_kh_{top}(f_k)$ and therefore these inequalities are equalities, which implies $\beta=1$, then $\mu_1=\mu$, and $h(\mu)=h_{top}(f)$.
\end{proof}
The corresponding result was proved for interval maps in \cite{BurF} by using a different method. We also refer to \cite{BurF} for counterexamples of the upper semi-continuity property for interval maps $f$ with $h_{top}(f)<\frac{\lambda^+(f)}{r}$. Finally, in \cite{buz}, the author built, for any $r>1$, a $\mathcal C^r$ surface diffeomorphism $f$ with $\limsup_{g\xrightarrow{\mathcal C^r}f}h_{top}(g)=\frac{\lambda^+(f)}{r}>h_{top}(f)=0$. We recall also that upper semi-continuity of the topological entropy in the $\mathcal C^\infty$ topology was established in any dimension by Y. Yomdin in \cite{Yom}.
Newhouse proved that for a $\mathcal C^\infty$ system $(\mathbf M,f)$, the entropy function $h:\mathcal M(\mathbf M,f)\rightarrow \mathbb R^+$ is an upper semi-continuous function on the set
$ \mathcal M(\mathbf M,f)$ of $f$-invariant probability measures. It follows from our Main Theorem that the entropy function is upper semi-continuous at ergodic measures with entropy larger than $\frac{\lambda^+(f)}{r}$ for a $\mathcal C^r$, $r>1$, surface diffeomorphism $f$.
\begin{coro}[Upper semi-continuity of the entropy function at ergodic measures with large entropy]
Let $f:\mathbf M\circlearrowleft $ be a $\mathcal C^r$, $r>1$, surface diffeomorphism. \\
Then for any ergodic measure $\mu$ with $h(\mu)\geq\frac{\lambda^+(f)}{r}$, we have
$$\limsup_{\nu\rightarrow \mu}h(\nu)\leq h(\mu).$$
\end{coro}
\begin{proof}
By continuity of the ergodic decomposition at ergodic measures and by harmonicity of the entropy function, we have for any ergodic measure $\mu$ (see e.g. Lemma 8.2.13 in \cite{dow}):
$$\limsup_{\nu \text{ ergodic}, \, \nu\rightarrow \mu}h(\nu)=\limsup_{\nu\rightarrow \mu}h(\nu).$$
Let $(\nu_k)_{k\in \mathbb N}$ be a sequence of ergodic $f$-invariant measures with $\lim_{k}h(\nu_k)=\limsup_{\nu\rightarrow \mu}h(\nu)$.
By extracting a subsequence we can assume that the sequence $(\hat\nu_k^+)_k$ is converging to some lift $\hat \mu$ of $\mu$. Take $\alpha $ with $\alpha > \frac{\lambda^+(f)}{r}$. Then, in the decomposition $\hat \mu=(1-\beta)\hat \mu_0+\beta \hat \mu_1^+$ given by the Main Theorem, we have $\mu_1=\mu_0=\mu$ by ergodicity of $\mu$. Therefore
\begin{align*}
\lim_k h(\nu_k)&\leq \beta h(\mu)+(1-\beta)\alpha,\\
&\leq \max\left( h(\mu), \alpha\right).
\end{align*}
By letting $\alpha$ go to $\frac{\lambda^+(f)}{r}$ we get
\begin{align*}
\lim_k h(\nu_k)&\leq h(\mu).
\end{align*}
\end{proof}
\section{Main steps of the proof}
We follow the strategy of the proof of \cite{BCS2}. We point out below the main differences:
\begin{itemize}
\item \textit{Geometric and neutral empirical component.} For $\lambda^+(\nu_k)>\frac{\lambda^+(f)}{r}$ we split the orbit of a $\nu_k$-typical point $x$ into two parts. We consider the empirical measures from $x$ at times lying between two $M$-close consecutive times where the unstable manifold has a ``bounded geometry''. We take their limit in $k$, then in $M$. In this way we get an invariant component of $\hat \mu$. In \cite{BCS2} the authors consider rather such empirical measures for $\alpha$-hyperbolic times and then take the limit when $\alpha$ goes to zero.
\item \textit{Entropy computations.} To compute the asymptotic entropy of the $\nu_k$'s, we use the static entropy w.r.t. partitions and its conditional version. Instead the authors in \cite{BCS2} used Katok's like formulas.
\item \textit{$\mathcal C^r$ Reparametrizations}. Finally we use here reparametrization methods from \cite{burens} and \cite{bur} respectively rather than Yomdin's reparametrizations of the projective action $F$ as done in \cite{BCS2}. This is the principal difference with \cite{BCS2}.
\end{itemize}
\subsection{Empirical measures}
Let $(X,T)$ be an invertible topological system, i.e. $T:X\circlearrowleft$ is a homeomorphism of a compact metric space. For a fixed Borel measurable subset $G$ of $X$ we let $E(x)=E_G(x)$ be the set of times of visits in $G$ from $x\in X$: $$E(x)=\left\{n \in \mathbb Z,\ T^n x\in G\right\}.$$
When $a<b$ are two consecutive times in $E(x)$, then $[a,b[$ is called a \textit{neutral block} (by following the terminology of \cite{BCS1}).
For all $M\in \mathbb N^*$ we let then
\begin{align*}
E^M( x)&=\bigcup_{a<b\in E(x), \ |a-b|\leq M}[a,b[.
\end{align*}
By convention we let $E^\infty(x)=\mathbb Z$.
For $M\in \mathbb N^*$ the complement of $E^M( x)$ is made of disjoint neutral blocks of length larger than $M$. We consider the associated empirical measures :
$$\forall n, \ \mu_{ x,n}^{M}=\frac{1}{n}\sum_{ k\in E^{M}( x)\cap [0,n[}\delta_{T^k x}.$$
We denote by $\chi^M$ the indicator function of $\{ x, 0\in E^M(x)\}$. The following lemma follows straightforwardly from Birkhoff ergodic theorem:
\begin{lem}\label{empiri}
With the above notations, for any $T$-invariant ergodic measure $\nu$, there is a set $\mathtt G$ of full $\nu$-measure such that the empirical measures $\left(
\mu_{ x,n}^{M}\right)_n$ are converging for any $x\in \mathtt G$ and any $M\in\mathbb N^*\cup \{\infty\}$ to $
\chi^M \nu$ in the weak-$*$ topology, when $n$ goes to $+\infty$.
\end{lem}
Fix some $T$-invariant ergodic measure $\nu$. We let $ \xi^M=\chi^M \nu$ and $ \eta^{M}=\nu- \xi^{M}$.
Moreover we put $\beta_{M}=\int \chi^{M}\, d \nu$, then $\xi^{M}=\beta_{M}\cdot \underline{\xi}^{M}$ when $\beta_M\neq 0$ and $\eta^{M}=(1-\beta_{M})\cdot \underline \eta^{M}$ when $\beta_M\neq 1$ with $\underline{\xi}^{M}$, $\underline{\eta}^{M}$ being thus probability measures.
Following partially \cite{BCS2}, the measures $\xi^{M}$ and $\eta^M$ are respectively called here the \textit{geometric and neutral components} of $\nu$. In general these measures are not $T$-invariant, but
$\mathfrak d(\xi^M, T_*\xi^M)\leq 1/M$ for some standard distance $\mathfrak d$ on the set $\mathcal M(X)$ of Borel probability measures on $X$. From the definition one easily checks that $\xi^{M}\geq \xi^{N}$ for $M\geq N$. If $\nu(G)=0$, then for $\nu$-almost every $x$ we have $\mu_{x,n}^M=0$ for all $n$ and $M$. Assume $G$ has positive $\nu$-measure. Then, when $M$ goes to infinity, the function $\chi^M$ goes to $\chi^\infty=1$ almost surely with respect to $\nu$, therefore $\xi^M$ goes to $\nu$. However in general this convergence is not uniform in $\nu$. In the following we consider a sequence $(\nu_k)_k$ of ergodic $T$-invariant measures converging to $\mu$. Then, by a diagonal argument, we may assume by extracting a subsequence that $\xi_k^{M}:=\chi^M\nu_k$ is converging for any $M$, when $k$ goes to infinity, to some $\overline{\mu}^M$, which is a priori distinct from $\chi^M \mu$. We still have $\overline{\mu}^{M}\geq \overline{\mu}^N$ for $M\geq N$, but the limit $\mu_1=\lim_M\overline{\mu}^M$ is a $T$-invariant component of $\mu$, which may differ from $\mu$.
The next lemma follows from Lemma \ref{empiri} and standard arguments of measure theory:
\begin{lem}\label{neww} There is a Borel subset $\mathtt{H}$ with $\nu(\mathtt H)>\frac{1}{2}$ such that for any $M\in \mathbb N$ and for any continuous function $\varphi:X\rightarrow \mathbb R$: \begin{equation} \frac{1}{n}\sum_{k\in E^M( x)\cap [1,n[}\varphi(T^kx)\xrightarrow{n}\int \varphi\, d \xi^M \text{ uniformly in }x\in \mathtt H.\end{equation}
\end{lem}
\begin{proof} We consider a dense countable family $\mathcal F=(\varphi_k)_{k\in\mathbb N}$ in the set $\mathcal C^0(X,\mathbb R)$ of real continuous functions on $X$ endowed with the supremum norm $\|\cdot \|_\infty$. Let $\mathtt G$ be as in Lemma \ref{empiri}. Then for all $k,M$, by Egorov's theorem applied to the pointwise converging sequence $(f_n:\mathtt G\rightarrow \mathbb R)_n=\left(x\mapsto\int \varphi_k\, d\mu_{ x,n}^M\right)_n$, there is a
subset $\mathtt F_k^M$ of $\mathtt G$ with $\nu(\mathtt F_k^M)>1-\frac{1}{2^{k+M+3}}$ such that
$\int \varphi_k\, d\mu_{ x,n}^M$ converges to $\int \varphi_k\, d\xi^M$ uniformly in $x\in \mathtt F_k^M$. Let $\mathtt H=\bigcap_{k,M}\mathtt F_k^M$. We have $\nu(\mathtt H)> \frac{1}{2}$. Then, if $\varphi \in \mathcal C^0(X,\mathbb R)$, we may find for any $\epsilon>0$ a function $\varphi_k\in \mathcal F$ with $\|\varphi-\varphi_k\|_\infty<\epsilon$. Let $M\in \mathbb N$.
Take $N=N_\epsilon^{k,M}$ such that $|\int \varphi_k\, d\mu_{ x,n}^M-\int \varphi_k\, d\xi^M|<\epsilon$ for $n>N$ and for all $x\in \mathtt F_k^M$. In particular for all $x\in \mathtt H$ we have for $n>N$
\begin{align*}
\left|\int \varphi\, d\mu_{ x,n}^M-\int \varphi\, d\xi^M\right| \leq & \left| \int \varphi_k\, d\mu_{x,n}^M- \int \varphi\, d\mu_{ x,n}^M\right|+ \left|\int \varphi_k\, d\mu_{ x,n}^M-\int \varphi_k\, d\xi^M\right|\\& +\left|\int \varphi_k\, d\xi^M-\int \varphi\, d\xi^M\right|,\\ \leq & 2\|\varphi-\varphi_k\|_\infty + \left|\int \varphi_k\, d\mu_{ x,n}^M-\int \varphi_k\, d\xi^M\right|,\\
< &3\epsilon.
\end{align*}\end{proof}
\subsection{Pesin unstable manifolds}\label{pesi}We consider a smooth compact Riemannian manifold $(\mathbf M, \|\cdot\|)$.
Let $\exp_x$ be the exponential map at $x$ and let $R_{inj}$ be the radius of injectivity of $(\mathbf M, \|\cdot\|)$. We consider the distance $\mathrm d$ on $\mathbf M$ induced by the Riemannian structure.
Let $f:\mathbf M\circlearrowleft$ be a $\mathcal C^r$, $r>1$, surface diffeomorphism. We denote by $\mathcal R$ the set of Lyapunov regular points with $\lambda^+(x)>0>\lambda^-(x)$. For $x\in \mathbf M $ we let $W^u(x)$ denote the unstable manifold at $x$ :
$$W^u(x):=\left\{y\in \mathbf M, \ \lim_n\frac{1}{n}\log \mathrm d(f^nx,f^ny)<0 \right\}.$$ By Pesin unstable manifold theorem, the set $W^u(x)$ for $x\in \mathcal R$ is a $\mathcal C^r$ submanifold tangent to $\mathcal E_+(x)$ at $x$.
For $x\in \mathcal R$, we let $\hat x$ be the vector in $\mathbb PT\mathbf M$ associated to the unstable Oseledets bundle $\mathcal E_+(x)$. For $\delta>0$ the point $x$ is called \textit{$\delta$-hyperbolic} with respect to $\phi$ (resp. $\psi$) when we have $\phi_l(F^{-l}\hat x)\geq \delta l$ (resp. $\psi_l(F^{-l}\hat x)\geq \delta l$) for all $l>0$. Note that if $x$ is $\delta$-hyperbolic with respect to $\psi$ then it is $\delta$-hyperbolic with respect to $\phi$. Let $H_\delta:= \left\{\hat x\in \mathbb PT\mathbf M, \ \forall l>0 \ \psi_l(F^{-l}\hat x)\geq \delta l\right\}$ be the set of $\delta$-hyperbolic points w.r.t. $\psi$.
\begin{lem}\label{maxi}
Let $\nu$ be an ergodic measure with $ \lambda^+(\nu)-\frac{\log^+ \|df\|_\infty}{r}>\delta>0>\lambda^-(\nu)$.
\\ Then we have $$\hat \nu^+(H_\delta)>0.$$
\end{lem}
\begin{proof}
By applying the Ergodic Maximal Inequality (see e.g. Theorem 1.1 in \cite{Brown}) to the measure preserving system $(F^{-1},\hat \nu^+)$ with the observable $\psi^\delta=\delta-\psi\circ F^{-1}$, we get
with $A_\delta=\{\hat x\in \mathbb PT\mathbf M, \ \exists k\geq 0 \text{ s.t. } \sum_{l=0}^{k}\psi^\delta(F^{-l}\hat x)>0\}$:
$$\int_{A_\delta} \psi^\delta\, d\hat \nu^+\geq 0.$$
Observe that $H_\delta=\mathbb PT\mathbf M\setminus A_\delta$. Therefore
\begin{align*}
\int_{H_\delta} \psi^\delta\, d\hat \nu^+&=\int \psi^\delta \,d\hat\nu^+- \int_{A_\delta} \psi^\delta\, d\hat \nu^+,\\
&\leq \int \psi^\delta \,d\hat\nu^+,\\
& \leq \int (\delta-\psi\circ F^{-1}) \,d\hat\nu^+,\\
&\leq \delta-\lambda^+(\nu)+\int \frac{\log^+\|d_{x}f\|}{r}\, d\nu(x),\\
&<0.
\end{align*}
In particular we have $\hat \nu^+(H_\delta)>0$.
\end{proof}
A point $x\in \mathcal R$ is said to have $\kappa$-bounded geometry for $\kappa>0$ when $\exp_x^{-1}W^u(x)$ contains the graph of a \textit{$\kappa$-admissible} map at $x$, which is defined as a $1$-Lipschitz map $f:I\rightarrow \mathcal E_+(x)^{\bot}\subset T_x\mathbf M$, with $I$ being an interval of $\mathcal E_+(x)$ containing $0$ with length $\kappa$.
We let $G_\kappa$ be the subset of points in $\mathcal R$ with $\kappa$-bounded geometry.
\begin{lem}
The set $G_\kappa$ is Borel measurable.
\end{lem}
\begin{proof}
For $x\in \mathcal R$ we have $W^u(x)=\bigcup_{n\in \mathbb N}f^nW^u_{loc}(f^{-n}x)$ with $W^u_{loc}$ being the Pesin unstable local manifold at $x$. The sequence $\left(f^{n}W^u_{loc}(f^{-n}x)\right)_n$ is increasing in $n$ for the inclusion. Therefore, if we let $G_\kappa^n$ be the subset of points $x$ in $G_\kappa$,
such that $\exp_x^{-1} f^n W^u_{loc}(f^{-n}x)$ contains the graph of a $\kappa$-admissible map, then we have
$$G_\kappa=\bigcup_nG_\kappa^n.$$
There are closed subsets, $(\mathcal R_l)_{l\in \mathbb N}$, called the Pesin blocks, such that
$\mathcal R=\bigcup_l\mathcal R_l$ and $x\mapsto W^u_{loc}(x)$ is continuous on $\mathcal R_l$ for each $l$ (see e.g. \cite{Pes}). Let $(x_p)_p$ be sequence in $G_\kappa^n\cap \mathcal R_l$ which converges to $x\in \mathcal R_l$. By extracting a subsequence we can assume that the associated sequence of $\kappa$-admissible maps $f_p$ at $x_p$ is converging pointwisely to a $\kappa$-admissible map at $x$, when $p$ goes to infinity. In particular $G_\kappa^n\cap \mathcal R_l$ is a closed set and therefore $G_\kappa=\bigcup_{l,n}\left( G_\kappa^n\cap \mathcal R_l\right) $ is Borel measurable.
\end{proof}
\subsection{Entropy of conditional measures} \label{zeta} We consider an ergodic hyperbolic measure $\nu$, i.e.\ an ergodic measure with $\nu(\mathcal R)=1$. A measurable partition $\varsigma$ is \textit{subordinated} to the Pesin unstable local lamination $W^u_{loc}$ of $\nu$ if the atom $\varsigma(x)$ of $\varsigma$ containing $x$ is a neighborhood of $x$ inside the curve $W^u_{loc}(x)$ and $f^{-1}\varsigma\succ \varsigma$.
By Rokhlin's disintegration theorem, there are a measurable set $\mathtt Z$ of full $\nu$-measure and probability measures $\nu_x$ on $\varsigma(x)$ for $x\in \mathtt Z$, called the \textit{conditional measures} on unstable manifolds, satisfying $\nu =\int \nu_x\, d\nu(x)$. Moreover $\nu_y=\nu_x$ for $x,y\in \mathtt Z$ in the same atom of $\varsigma$. Ledrappier and Strelcyn \cite{LS} have proved the existence of such subordinated measurable partitions. We fix such a subordinated partition $\varsigma$ with respect to $\nu$. For $x\in \mathbf M$, $n\in \mathbb N$ and $\rho>0$, we let $B_n(x,\rho)$ be the Bowen ball $B_n(x,\rho):=\bigcap_{0\leq k< n}f^{-k}B(f^kx,\rho)$ (where $B(f^kx,\rho)$ denotes the ball for $\mathrm d$ at $f^kx $ with radius $\rho$).
\begin{lem}\cite{LY}\label{ledr}
For all $\iota>0$, there is $\rho>0$ and a measurable set $\mathtt E\subset \mathtt Z\cap \mathcal R$ with $\nu(\mathtt E)>\frac{1}{2}$ such that
\begin{align}\label{leed}\forall x\in \mathtt E, \ \liminf_n-\frac{1}{n}\log \nu_x\left(B_n(x,\rho)\right) \geq h(\nu)-\iota.\end{align}
\end{lem}
The natural projection from $\mathbb PT\mathbf M$ to $\mathbf M$ is denoted by $\pi$. We consider a distance $\hat{\mathrm{d}}$ on the projective tangent bundle $ \mathbb P T\mathbf M$, such that $\hat{\mathrm{d}}(X, Y)\geq \mathrm{d}(\pi X, \pi Y)$ for all $X, Y \in \mathbb P T\mathbf M$.
We let $\hat \eta^M $ and $\hat \xi^M$ be the neutral and geometric components of the ergodic $F$-invariant measure $\hat \nu^+$ associated to $G=H_\delta \cap \pi^{-1}G_\kappa \subset \mathbb PT\mathbf M$, where the parameters $\delta$ and $\kappa$ will be fixed later on independently of $\nu$. The importance of this choice of $G$ will appear in Proposition \ref{paraa} to bound from above the entropy of the neutral component. We also consider the projections $\eta^M$ and $\xi^M$ on $\mathbf M$ of $\hat \eta^M $ and $\hat \xi^M$ respectively.
By Lemma \ref{neww} applied to the system $(\mathbb PT\mathbf M, F)$ and to the ergodic measure $\hat \nu^+$, there is a Borel subset $\mathtt H$ of $\mathbb P T\mathbf M$ with $\hat \nu^{+}(\mathtt H)>\frac{1}{2}$ such that for any $M\in \mathbb N^*\cup \{\infty\}$ and for any continuous function $\varphi:\mathbb P T\mathbf M\rightarrow \mathbb R$
\begin{equation}\label{unif} \frac{1}{n}\sum_{k\in E^M( \hat x)\cap [1,n[}\varphi(F^k\hat x)\xrightarrow{n}\int \varphi\, d \hat \xi^M \text{ uniformly in }\hat x\in \mathtt H.\end{equation}
Fix an error term $\iota>0$ depending\footnote[4]{ In the proof of the Main Theorem we will take $\iota=\iota(\nu_k)\xrightarrow{k}0$ for the converging sequence of ergodic measures $(\nu_k)_k$. } on $\nu$ and let $\rho$ and $\mathtt E$ be as in Lemma \ref{ledr}. Let $\mathtt F= \mathtt E\cap \pi(\mathtt H)$. Note that $\nu(\mathtt F)>0$. We fix also $x_*\in \mathtt F$ with $\nu_{x_*}(\mathtt F)>0$ and we let $\zeta=\frac{\nu_{x_*}(\cdot)}{\nu_{x_*}(\mathtt F)}$ be the probability measure induced by
$\nu_{x_*}$ on $\mathtt F$. Observe that $\nu_x=\nu_{x_*}$ for $\zeta$ a.e. $x$. We let $D$ be the $\mathcal C^r$ curve given by the Pesin local unstable manifold $W^u_{loc}(x_*)$ at $x_*$. For a finite measurable partition $P$ and a Borel probability measure $\mu$ we let $H_\mu(P)$ be the static entropy, $H_\mu(P)=-\sum_{A\in P}\mu(A)\log \mu(A)$. Moreover we let $P^n=\bigvee_{k=0}^{n-1}f^{-k}P$ be the $n$-iterated partition, $n\in \mathbb N$. We also denote by $P^n_x$ the atom of $P^n$ containing the point $x\in \mathbf M$.
\begin{lem}\label{roh}
For any (finite measurable) partition $P$ with diameter less than $\rho$, we have
\begin{align}\label{rhoh}\liminf_n\frac{1}{n}H_{\zeta}(P^n)\geq h(\nu)-\iota.\end{align}
\end{lem}
\begin{proof}
\begin{align*}\liminf_n\frac{1}{n}H_{\zeta}(P^n)& =\liminf_n\int -\frac{1}{n}\log \zeta(P^n_x)\, d\zeta(x), \text{ by the definition of }H_\zeta,\\
&\geq \int\liminf_n -\frac{1}{n}\log \zeta(P^n_x)\, d\zeta(x), \text{ by Fatou's Lemma},\\
&\geq \int \liminf_n -\frac{1}{n}\log \nu_{x_*}(P^n_x)\, d\zeta(x), \text{ by the definition of } \zeta,\\
&\geq \int \liminf_n -\frac{1}{n}\log \nu_{x}(P^n_x)\, d\zeta(x), \text{ as } \nu_x=\nu_{x_*} \text{ for $\zeta$ a.e. } x,\\
&\geq \int \liminf_n -\frac{1}{n}\log \nu_{x}(B_n(x,\rho))\, d\zeta(x), \text{ as } \mathop{\mathrm{diam}}(P)<\rho,\\
&\geq h(\nu)-\iota, \text{ by the choice of }\mathtt F\mathop{\mathrm{ess sup}}ubset \mathtt E \text{ and }(\ref{leed}).
\end{align*}
\end{proof}
\subsection{Entropy splitting of the neutral and the geometric component}
In this section we split the entropy contribution of the neutral and geometric components $\hat \eta^M $ and $\hat \xi^M$ of the ergodic $F$-invariant measure $\hat \nu^+$ associated to a fixed Borel set $G$ of $\mathbb PT\mathbf M$.
Recall that $E(\hat x)$ denotes the set of integers $k$ with $F^k\hat x\in G$. Fix now $M$. For each $n\in \mathbb N$ and $x\in \mathtt F$ we let $E_n(x)=E(\hat x)\cap [0,n[$ and $E_n^M(x)=E^M( \hat x)\cap [0,n[$. We also let $\mathtt E_n^M$ be the partition of $\mathtt F$ with atoms $A_E:=\{x\in D, \, E_n^M(x)=E\}$ for $E\subset [0,n[$. Given a partition $Q$ of $\mathbb P T\mathbf M$, we also let $Q^{\mathtt E_n^M}$ be the partition of $\hat{\mathtt F}:=\left\{\hat x, \, x\in\mathtt{F}\cap D\right\}$ finer than $\pi^{-1}\mathtt E_n^M$ with atoms $\left\{\hat x\in \hat {\mathtt {F}}, \, E_n^M(x)=E \text{ and } \forall k\in E,\ F^k\hat x \in Q_k\right\}$ for $E\subset [0,n[$ and $(Q_k)_{k\in E}\in Q^E$. We let $\partial Q$ be the boundary of the partition $Q$, which is the union of the boundaries of its atoms. For a measure $\eta$ and a subset $A$ of $\mathbf M$ with $\eta(A)>0$ we denote by $\eta_A=\frac{\eta(A\cap \cdot)}{\eta(A)}$ the induced probability measure on $A$. Moreover, for two sets $A,B$ we let $A\Delta B$ denote the symmetric difference of $A$ and $B$, i.e. $A\Delta B=(A\setminus B) \cup (B\setminus A)$. Finally, let $H:]0,1[\rightarrow \mathbb R^+$ be the map $t\mapsto -t\log t-\left(1-t\right)\log\left(1-t\right)$. Recall that $\hat \zeta^+$ is the lift of $\zeta$ on $\mathbb PT\mathbf M$ to the unstable Oseledets bundle (with $\zeta$ as in Subsection \ref{zeta}).
\begin{lem}\label{dd}
For any finite partition $P$ with diameter less than $\rho$ and for any finite partition $Q$ and any $m\in\mathbb N^*$ with $ \hat \xi^M(\partial Q^m)=0$ we have
\begin{equation}\label{refer}h(\nu)\leq \beta_{M}\frac{1}{m}H_{\underline{\hat \xi}^{M}}(Q^m) +\limsup_{n}\frac{1}{n}H_{\hat\zeta^+}(\pi^{-1}P^{n}|Q^{\mathtt E_n^M})+H(2/M)+\frac{12m \log \sharp Q}{M}+\iota.\end{equation}
\end{lem}
Before the proof of Lemma \ref{dd}, we first recall a technical lemma from \cite{bur}.
\begin{lem}[Lemma 6 in \cite{bur}]\label{invent}
Let $(X,T)$ be a topological system. Let $\mu$ be a Borel probability measure on $X$ and let $E$ be a finite subset of $\mathbb N$. For any finite partition $Q$ of $X$, we have with $\mu^E:=\frac{1}{\sharp E}\sum_{k\in E}T_*^{k}\mu$ and $Q^E:=\bigvee_{k\in E}T^{-k}Q$:
$$\frac{1}{\sharp E}H_\mu(Q^E)\leq \frac{1}{m}H_{\mu^E}(Q^m)+6m\frac{\sharp (E+1)\Delta E}{\sharp E}\log \sharp Q.$$
\end{lem}
\begin{proof}[Proof of Lemma \ref{dd}] As the complement of $E_n^M(x)$ is the disjoint union of neutral blocks with length larger than $M$, there are at most $A_n^M=\sum_{k=0}^{[2n/M]+1}\binom{n}{k}$ possible values for $E_n^M(x)$ so that
\begin{align*}
\frac{1}{n}H_{\zeta}(P^n)&\leq\frac{1}{n}H_{ \zeta}(P^{n}|\mathtt E_n^M)+\frac{1}{n}H_\zeta(\mathtt E_n^M),\\
& \leq \frac{1}{n}H_{\zeta}(P^{n}|\mathtt E_n^M)+\frac{1}{n}\log A_n^M,\\
\liminf_n\frac{1}{n}H_{\zeta}(P^n)& \leq \limsup_n\frac{1}{n}H_{\zeta}(P^{n}|\mathtt E_n^M)+H(2/M) \text{ by using Stirling's formula}.
\end{align*}
Moreover
\begin{align*}
\frac{1}{n}H_{\zeta}(P^{n}| \mathtt E_n^M)&=\frac{1}{n}H_{\hat \zeta^+}(\pi^{-1}P^{n}| \pi^{-1}\mathtt E_n^M),\\
&\leq \frac{1}{n}H_{\hat \zeta^+}(Q^{ \mathtt E_n^M}| \pi^{-1}\mathtt E_n^M)+ \frac{1}{n}H_{\hat \zeta^+}(\pi^{-1}P^{n}|Q^{ \mathtt E_n^M}).
\end{align*}
For $E\subset [0,n[$ we let $ \hat\zeta^+_{E,n}=\frac{n}{\sharp E}\int \mu_{ \hat x,n}^{M}\,d \zeta_{A_E}(x) $, which may be also written as $ \left(\hat \zeta^+_{\pi^{-1}A_E}\right)^E $ by using the notations of Lemma \ref{invent}.
By Lemma \ref{invent} applied to the system $(\mathbb PT\mathbf M, F)$ and the measures $\mu:= \hat \zeta^+_{\pi^{-1}A_E}$ for $A_E\in \mathtt E_n^M$ we have for all $n>m\in \mathbb N^*$:
\begin{align*}
H_{\hat \zeta^+}\left(Q^{ \mathtt E_n^M}|\pi^{-1} \mathtt E_n^M\right)&=
\sum_E \zeta(A_E)H_{\hat \zeta^+_{\pi^{-1}A_E}}(Q^E),\\
&\leq\sum_E \zeta(A_E)\sharp E\left(\frac{1}{m}H_{ \hat\zeta^+_{E,n}}(Q^m)+6m\frac{\sharp (E+1)\Delta E}{\sharp E}\log \sharp Q\right).
\end{align*}
Recall again that if $E=E_n^M(x)$ for some $x$ then the complement set of $E$ in $[1,n[$ is made of neutral blocks of length larger than $M$, therefore $\sharp (E+1)\Delta E\leq \frac{2n}{M}$. Moreover it follows from $\hat \xi^M(\partial Q^m)=0$ and (\ref{unif}), that $\mu_{ \hat x,n}^{M}(A^m)$ for $A^m\in Q^m$ and $\sharp E_n^M(x)/n$ are converging
to $\underline{\hat \xi}^{M}(A^m)$ and $\beta_M$ respectively uniformly in $x\in \mathtt F$ when $n
$ goes to infinity. Then we get by taking the limit in
$n$:
\begin{align*}
\limsup_{n}\frac{1}{n}H_{\hat \zeta^+}\left(Q^{ \mathtt E_n^M}|\pi^{-1} \mathtt E_n^M\right)\leq & \beta_{M}\frac{1}{m}H_{\underline{\hat \xi}^{M}}(Q^m)+\frac{12m \log \sharp Q}{M},\\
h(\nu)-\iota\leq \liminf_n\frac{1}{n}H_{\zeta}(P^n)\leq & \beta_{M}\frac{1}{m}H_{\underline{\hat \xi}^{M}}(Q^m) +\limsup_{n}\frac{1}{n}H_{\hat\zeta^+}(\pi^{-1}P^{n}|Q^{\mathtt E_n^M})\\&+H(2/M)+\frac{12m \log \sharp Q}{M}.
\end{align*}
\end{proof}
\subsection{Bounding the entropy of the neutral component}
For a $\mathcal C^1$ diffeomorphism $f$ on $\mathbf M$ we put $C(f):=2A_fH(A_f^{-1})+\frac{\log^+ \|df\|_\infty}{r}+B_r$ with $A_f=\log^+ \|df\|_\infty+\log^+\|df^{-1}\|_\infty+1$ and a universal constant $B_r$ depending only on $r$ specified later on. Clearly $f\mapsto C(f)$ is continuous in the $\mathcal C^1$ topology and $\frac{\lambda^+(f)}{r}=\lim_{\mathbb N \ni p\rightarrow +\infty}\frac{C(f^p)}{p}$ whenever $\lambda^+(f)>0$ (indeed $A_{f^p}\xrightarrow{p}+\infty$, therefore $H(A_{f^p}^{-1})\xrightarrow{p}0$). In particular, if $\frac{\lambda^+(f)}{r}<\alpha$ and $f_k\xrightarrow{k}f$ in the $\mathcal C^1$ topology, then there is $p$ with $\lim_k\frac{C(f_k^p)}{p}<\alpha$.
In this section we consider the empirical measures associated to an ergodic hyperbolic measure $\nu$ with $\lambda^+(\nu)>\frac{\log^+ \|df\|_\infty}{r}+\delta$, $\delta>0$. Without loss of generality we can assume $\delta<\frac{r-1}{r}\log 2$. Then by Lemma \ref{maxi} we have $\hat\nu^+(H_\delta)>0$. For $ x\in \mathcal R$ we let $m_n( x)=\max\{k < n, \ F^k\hat x\in H_\delta\}$. By a standard application of Birkhoff ergodic theorem we have
$$\frac{m_n( x)}{n}\xrightarrow{n}1 \text{ for $ \nu$ a.e. $x$.}$$
By taking a smaller subset $\mathtt F$, we can assume the above convergence of $m_n$ is uniform on $\mathtt F$ and that $\sup_{x\in \mathtt F}\min\{k \leq n, \ F^k\hat x\in H_\delta\}\leq N$ for some positive integer $N$.
We bound the term $\limsup_{n}\frac{1}{n}H_{\hat \zeta^+}(\pi^{-1}P^{n}|Q^{\mathtt E_n^M})$ in the right hand side of (\ref{refer}) Lemma \ref{dd}, which corresponds to the local entropy contribution plus the entropy in the neutral part.
\begin{lem}\label{fee}
There is $\kappa>0$ depending only on $\|d^kf\|_\infty$, $2\leq k\leq r$, \footnote{Here $$\|d^kf\|_\infty=\sup_{\alpha\in \mathbb N^{2}, \, |\alpha|=k}\sup_{x,y}\left\|\partial_y^\alpha\left(\exp_{f(x)}^{-1}\circ f\circ \exp_x\right)(\cdot)\right\|_\infty$$ } such that the empirical measures associated to $G:=\pi^{-1}G_\kappa\cap H_\delta$ satisfy the following properties. For all $q, M\in \mathbb N^*$, there are $\epsilon_q>0$ depending only on $\|d^k(f^q)\|_\infty$, $2\leq k\leq r$ and $\gamma_{q,M}(f)>0$ such that for any partition $Q$ of $\mathbb PT\mathbf M$ with diameter less than $\epsilon_{q}$, we have:
\begin{align*}\limsup_n\frac{1}{n}H_{\hat \zeta^+}(\pi^{-1}P^{n}|Q^{ \mathtt E_n^M})\leq & (1-\beta_{M})C(f)\\& +\left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|df^q\|}{q}d\xi^{M}-\int \phi\, d\hat \xi^{M}\right)\\& +\gamma_{q,M}(f),
\end{align*}
where the error term $\gamma_{q,M}(f)$ satisfies \begin{equation}\label{todd}\forall K>0 \ \limsup_{q}\limsup_M\left(\sup_{f\in \mathrm{Diff}^r(\mathbf M)} \left\{\gamma_{q,M}(f) \ | \ \|df\|_\infty \vee \|df^{-1}\|_\infty<K\right\}\right)=0.\end{equation}
\end{lem}
The proof of Lemma \ref{fee} appears after the statement of Proposition \ref{paraa}, which is a \textit{semi-local Reparametrization Lemma}.
\begin{prop}\label{paraa}There is $\kappa>0$ depending only on $\|d^kf\|_\infty$, $2\leq k\leq r$, such that the empirical measures associated to $G:=\pi^{-1}G_\kappa\cap H_\delta$ satisfy the following properties. For all $q,M\in \mathbb N^*$ there are $\epsilon_q>0$ depending only on $\|d^k(f^q)\|_\infty$, $2\leq k\leq r$ and $\gamma_{q,M}(f)>0$ satisfying (\ref{todd}) such that for any partition $Q$ with diameter less than $\epsilon<\epsilon_q$, we have for $n$ large enough : \\
Any atom $F_n$ of the partition $ Q^{\mathtt E_n^M}$ may be covered by a family $\Psi_{F_n}$ of $\mathcal C^r$ curves $\psi:[-1,1]\rightarrow \mathbf M$ satisfying $\|d(f^k\circ \psi)\|_\infty\leq 1$ for any $k=0,\cdots, n-1$, such that
\begin{align*}\frac{1}{n}\log \sharp \Psi_{F_n}\leq &\left(1-\frac{\sharp E_n^M}{n}\right)C(f)\\ &+\left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|d_xf^q\|_{\epsilon}}{q}\, d\zeta_{F_n}^{M}(x)-\int \phi\, d\hat{\zeta}_{F_n}^{M}\right)\\&+\gamma_{q,M}(f)+\tau_n,
\end{align*}
where $\lim_n\tau_n=0$, $E_n^M=E_n^M(x)$ for $x\in F_n$, $\hat{\zeta}_{F_n}^{M}=\int \mu_{\hat x,n}^M\, d\zeta_{F_n}(x)$ and $\zeta_{F_n}^{M}=\pi_*\hat{\zeta}_{F_n}^{M}$ its push-forward on $\mathbf M$.
\end{prop}
The proof of Proposition \ref{paraa} is given in the last section.
Proposition \ref{paraa} is very similar to the Reparametrization Lemma in \cite{burens}. Here we reparametrize an atom $F_n$ of $Q^{\mathtt E_n^M}$ instead of $Q^n$ in \cite{burens}.
\begin{proof}[Proof of Lemma \ref{fee} assuming Proposition \ref{paraa}] We take $\kappa>0$ and $\epsilon_q>0$ as in Proposition \ref{paraa}. Observe that
$$H_{\hat \zeta^+}(\pi^{-1}P^{n}|Q^{\mathtt E_n^M})\leq \sum_{F_n\in Q^{\mathtt E_n^M}}\hat\zeta^+(F_n)\log \sharp\{ A^n\in P^n, \ \pi^{-1}(A^n )\cap \hat{\mathtt F}\cap F_n\neq \emptyset\}.$$
As $\nu(\partial P)=0$, for all $\gamma>0$, there is $\chi>0$ and a continuous function $\vartheta:\mathbf M\rightarrow \mathbb R^+$
equal to $1$ on the $\chi$-neighborhood $\partial P^\chi$ of $\partial P$ satisfying $\int \vartheta\,d\nu<\gamma$. Then, by applying (\ref{unif}) with $\varphi: \hat x\mapsto \vartheta(x)$ and $M=\infty$, we have uniformly in $x\in\mathtt F\subset \pi(\mathtt H)$: \begin{equation}\label{drd}\limsup_n\frac{1}{n}\sharp\{0\leq k<n, \ f^kx\in \partial P^\chi\}\leq \lim_n\frac{1}{n}\sum_{k=0}^{n-1}\vartheta(f^kx)=\int \vartheta\, d\nu<\gamma.
\end{equation}
Assume that for arbitrarily large $n$ there is $F_n\in Q^{\mathtt E_n^M}$ and $\psi\in \Psi_{F_n}$ with $\sharp \{A^n\in P^n, \ A^n\cap \psi([-1,1])\cap \mathtt F\neq \emptyset\}>([\chi^{-1}]+1)\sharp P^{\gamma n}$. As $\|d(f^k\circ \psi)\|_\infty\leq 1$ for $0\leq k<n$ we may reparametrize $\psi$ on $\mathtt F$ by $[\chi^{-1}]+1$ affine contractions $\theta$ so that the length of $f^k\circ\psi\circ \theta $ is less than $\chi$ for all $0\leq k< n$ and $(\psi\circ \theta)([-1,1])\cap \mathtt F\neq \emptyset$. Then we have $\sharp \{0\leq k<n, \ \partial P\cap (f^k\circ\psi\circ \theta)([-1,1])\neq \emptyset\}>\gamma n$ for some $\theta$. In particular we get $\sharp \{0\leq k<n, \ f^k x \in \partial P^\chi\}>\gamma n$ for any $x\in \psi\circ \theta([-1,1])$, which contradicts (\ref{drd}). Therefore we have $$\limsup_{n}\sup_{F_n,\, \psi\in \Psi_{F_n}}\frac{1}{n}\log \sharp\left\{A^n\in P^n, \ A^n\cap \psi([-1,1]) \cap \mathtt F\neq \emptyset\right\}=0.$$
Together with Proposition \ref{paraa} and Lemma \ref{neww} we get
\begin{align*}
\limsup_n\frac{1}{n} H_{\hat\zeta^+}(\pi^{-1}P^{n}|Q^{\mathtt E_n^M})&\leq \limsup_n \sum_{F_n\in Q^{\mathtt E_n^M}}\hat\zeta^+(F_n)\frac{1}{n}\log \sharp \Psi_{F_n},\\
&\leq \limsup_n \sum_{F_n\in Q^{\mathtt E_n^M}}\hat\zeta^+(F_n)\left(1-\frac{\sharp E_n^M}{n}\right)C(f)+\\
&+\limsup_n \sum_{F_n\in Q^{\mathtt E_n^M}}\hat\zeta^+(F_n) \left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|df^q\|}{q}\, d\zeta_{F_n}^{M}-\int \phi\, d\hat{\zeta}_{F_n}^{M}\right) \\
& +\gamma_{q,M}(f),\\
&\leq (1-\beta_M)C(f)+\left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|df^q\|}{q}d\xi^{M}-\int \phi\, d\hat{\xi}^{M}\right)+\gamma_{q,M}(f).
\end{align*}
This concludes the proof of Lemma \ref{fee}.
\end{proof}
By combining Lemma \ref{fee} and Lemma \ref{dd} we get:
\begin{prop}\label{revi}Let $\kappa$, $\epsilon_q$ and $\gamma_{q,M}(f)$ as in Proposition \ref{paraa}. Then for any $q,M\in \mathbb N^*$ and for any finite partition $Q$ with diameter less than $\epsilon_q$ and with $ \hat \xi^M(\partial Q^m)=0$ we have
with $\gamma_{q,Q,M}(f)=\gamma_{q,M}(f)+H\left(\frac{2}{M}\right)+\frac{12 \log \sharp Q}{M}$ :
\begin{align*}h(\nu)\leq &\beta_{M}\frac{1}{m}H_{\underline{\hat{\xi}}^{M}}(Q^m) +(1-\beta_{M}) C(f)\\ & +\left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|df^q\|}{q}d\xi^{M}-\int \phi\, d\hat{\xi}^{M}\right)\\ & +\gamma_{q,Q,M}(f)+\iota.
\end{align*}
\end{prop}
\subsection{Proof of the Main Theorem}
We first reduce the Main Theorem to the following statement.
\begin{prop}\label{reduc}
Let $(f_k)_{k\in \mathbb N}$ be a sequence of $\mathcal C^r$, with $r>1$, surface diffeomorphisms converging $\mathcal C^r$ weakly to a diffeomorphism $f$. Assume there is a sequence $(\hat \nu_k^+)_k$ of ergodic $F_k$-invariant measures converging to $\hat \mu$ with $\lim_k\lambda^+(\nu_k)>\frac{\log^+ \|df\|_{\infty}}{r}$.\\
Then, there are $F$-invariant measures $\hat \mu_0$ and $\hat \mu_1^+$ with $\hat \mu= (1-\beta)\hat \mu_0+\beta\hat\mu_1^+$, $\beta\in [0,1]$, such that:
$$\limsup_{k\rightarrow +\infty} h(\nu_k)\leq \beta h(\mu_1)+(1-\beta)C(f).$$
\end{prop}
\begin{proof}[Proof of the Main Theorem assuming Proposition \ref{reduc}]
Let $(\hat\nu_k^+)_k$ be a sequence of ergodic $F_k$-invariant measures converging to $\hat \mu$.
As previously mentioned, for any $\alpha>\lambda^+(f)/r$ there is $p\in \mathbb N^*$ with $\alpha>\frac{C(f^p)}{p}$. We can also assume $\frac{\log\|df^p\|_\infty }{pr}<\alpha$. Let $\hat\nu_k^{+,p}$ be an ergodic component of $\hat\nu_k^+$ for $F_k^p$ and let us denote by $\nu_k^p$ its push forward on $\mathbf M$. We have $ h_{f_k^p}(\nu_k^p)=ph_{f_k}(\nu_k)$ for all $k$. By taking a subsequence we can assume that $(\hat\nu_k^{+,p})_k$ is converging. Its limit $\hat \mu^p$ satisfies $\frac{1}{p}\sum_{0\leq l<p}F_*^l\hat \mu^p=\hat \mu$. If $\lim_k\lambda^+(\nu_k^p)\leq \frac{\log^+ \|df^p\|_{\infty}}{r}<p\alpha$, then by Ruelle's inequality we get \begin{align*}
\limsup_{k\rightarrow +\infty} h_{f_k}(\nu_k)&=\limsup_{k\rightarrow +\infty} \frac{1}{p}h_{f_k^p}(\nu_k^p),\\
&\leq \lim_{k\rightarrow +\infty} \frac{1}{p}\lambda^+(\nu_k^p),\\
&< \alpha.
\end{align*}
This proves the Main Theorem with $\beta=1$.
We consider then the case $\lim_k\lambda^+(\nu_k^p)>\frac{\log^+ \|df^p\|_{\infty}}{r}$.
By applying Proposition \ref{reduc} to the $p$-power system, we get $F^p$-invariant measure $\hat \mu_0^p$ and $\hat \mu_1^{+,p}$ with $\hat \mu^p= (1-\beta)\hat \mu_0^p+\beta\hat\mu_1^{+,p}$, $\beta\in [0,1]$, such that we have with $\mu_1^p=\pi_*\hat\mu_1^{+,p}$ :
$$\limsup_{k\rightarrow +\infty} h_{f_k^p}(\nu_k^p)\leq \beta h_{f^p}(\mu_1^p)+(1-\beta)C(f^p).$$
But $ h_{f^p}(\mu_1^p)=ph_{f}(\mu_1)$
with $\mu_1=\frac{1}{p}\sum_{0\leq l<p}f^l \mu_1^p$. One easily checks that $\hat \mu_1^+=\frac{1}{p}\sum_{0\leq l<p}F^l \hat\mu_1^{+,p}$. Then we have :
\begin{align*}
\limsup_{k\rightarrow +\infty} h_{f_k}(\nu_k)&=\limsup_{k\rightarrow +\infty} \frac{1}{p}h_{f_k^p}(\nu_k^p),\\
&\leq \beta \frac{1}{p}h_{f^p}(\mu_1^p)+(1-\beta)\frac{C(f^p)}{p},\\
&\leq \beta h_{f}(\mu_1)+(1-\beta)\alpha.
\end{align*}
This concludes the proof of the Main Theorem.
\end{proof}
We show now Proposition \ref{reduc} by using Lemma \ref{fee}.
\begin{proof}[Proof of Proposition \ref{reduc}:] Without loss of generality we can assume $\liminf_kh(\nu_k)>0$. For $\mu$ a.e. $x$, we have $\lambda^-(x)\leq 0$. If not, some ergodic component $\tilde{\mu}$ of $\mu$ would have two positive Lyapunov exponents and therefore should be the periodic measure at a source $S$ (see e.g. Proposition 4.4 in \cite{pol}). But then for large $k$ the probability $\nu_k$ would give positive measure to the basin of attraction of the sink $S$ for $f^{-1}$ and therefore $\nu_k$ would be equal to $\tilde{\mu}$ contradicting $\liminf_k h(\nu_k)>0$.
Let $\delta>0$ with $\lim_k\lambda^+(\nu_k)> \frac{\log \|df\|_\infty}{r}+\delta$. Then take $\kappa$ as in Lemma \ref{fee}. We consider the empirical measures associated to $G=\pi^{-1}G_\kappa\cap H_{\delta}$.
By a diagonal argument, there is a subsequence in $k$ such that the geometric component $\hat \xi_{k}^{M} $ of $\hat \nu_k^+$ is converging to some $\hat\xi_\infty^{M}$ for all $M\in \mathbb N$. Let us also denote by $\beta_M^\infty$ the limit in $k$ of $\beta_M^k$. Then consider a subsequence in $M$ such that $\hat \xi_\infty^{M}$ is converging to $\beta\hat \mu_1$ with $\beta=\lim_M\beta_M^\infty$. We also let $(1-\beta)\hat \mu_0=\hat \mu-\beta\hat\mu_1$. In this way, $\hat\mu_0$ and $\hat\mu_1$ are both probability measures.
\begin{lem}The measures $\hat \mu_0$ and $\hat \mu_1$ satisfy the following properties:
\begin{itemize}
\item $\hat \mu_1$ and $\hat \mu_0$ are $F$-invariant,
\item $\lambda^+(x)\geq \delta$ for $\mu_1$-a.e. $x$ and $\hat \mu_1=\hat \mu_1^+$.
\end{itemize}
\end{lem}
\begin{proof}The neutral blocks in the complement set of $E^M(x)$ have length larger than $M$. Therefore for any continuous function $\varphi:\mathbb PT\mathbf M\rightarrow \mathbb R$ and for any $k$, we have
$$\left|\int \varphi\, d \hat\xi_k^{M}-\int \varphi\circ F\, d\hat\xi_k^{M}\right|\leq \frac{2\sup_{\hat x} |\varphi(\hat x)|}{M}.$$
Letting $k$, then $M$ go to infinity, we get $\int \varphi\, d\hat\mu_1=\int \varphi\circ F \, d\hat\mu_1$, i.e. $\hat \mu_1$ is $F$-invariant.
We let $K_M$ be the compact subset of $\mathbb PT\mathbf M$ given by $K_M=\{\hat x \in \mathbb PT\mathbf M, \ \exists 1\leq m\leq M \ \phi_m(\hat x)\geq m\delta\}$. Let $\hat x\in \mathtt G_k$, where $\mathtt G_k$ is the set where the empirical measures are converging to $\hat \xi_k^{M}$ (see Lemma \ref{empiri}). Observe that \begin{equation}\label{wed}\lim_n\mu_{\hat x, n}^M(K_M)= \hat \xi_k^{M}(K_M)=\hat \xi_k^{M}(\mathbb PT\mathbf M).\end{equation} Indeed for any $k\in E^M(\hat x)$ there is $1\leq m\leq M$ with $F^m(F^k \hat x)\in G\subset H_\delta$. Moreover, as already mentioned, $\delta$-hyperbolic points w.r.t. $\psi$ are $\delta$-hyperbolic w.r.t. $\phi$. Therefore $\phi_m(F^k\hat x)\geq m\delta$. Consequently we have $\lim_n\mu_{\hat x, n}^M(K_M)=\lim_n\mu_{\hat x, n}^M(\mathbb PT\mathbf M)=\hat\xi_k^{M}(\mathbb PT\mathbf M)$. The set $K_M$ being compact in $\mathbb PT\mathbf M$, we get $\hat\xi_k^{M}(K_M)\geq \lim_n\mu_{\hat x, n}^M(K_M)$ and (\ref{wed}) follows.
Also we have $\hat \xi_\infty^{M}(K_M)\geq \limsup_k \hat \xi_k^{M}(K_M)=\limsup_k \hat \xi_k^{M}(\mathbb PT\mathbf M)=\beta^\infty_M$. Therefore we have $\hat \mu_1(\bigcup_M K_M)=1$ as $\hat \xi_\infty^{M}$ goes increasingly in $M$ to $\beta\hat \mu_1$.
The $F$-invariant set $\bigcap_{k\in \mathbb Z}F^{-k}\left(\bigcup_M K_M\right)$ has also full $\hat \mu_1$-measure and for all $\hat x=(x,v)$ in this set we have $\limsup_n\frac{1}{n}\log \|d_xf^n(v)\|\geq \delta$.
Consequently the measure $\hat \mu_1$ is supported on the unstable bundle $\mathcal E_+(x)$ and $\lambda^+(x)\geq \delta$ for $\mu_1$-a.e. $x$.
\end{proof}
\begin{rem}\label{mieux}
In Theorem C of \cite{BCS2}, the measure $\beta\hat \mu_1^+$ is obtained as the limit when $\delta$ goes to zero of the component associated to the set $G^\delta:=\{x, \ \forall l>0 \ \phi_l(\hat x)\geq \delta l \} \supset \pi^{-1}G_{\kappa}\cap H_\delta$. Therefore our measure $\beta_\alpha\hat \mu_{1,\alpha}^+$ is just a component of their measure $\beta\hat \mu_1^+$.
\end{rem}
We pursue now the proof of Proposition \ref{reduc}. Let $q, M\in \mathbb N^*$. Fix a sequence $(\iota_k)_k
$ of positive numbers with $\iota_k\xrightarrow{k}0$. We consider a partition $Q$ satisfying $\mathop{\mathrm{diam}}(Q)<\epsilon_q$ with $\epsilon_q$ as in Lemma \ref{fee}.
The sequence $(f_k)_k$ being $\mathcal C^r$ bounded, one can choose $\epsilon_q$ independently of $f_k$, $k\in \mathbb N$.
By a standard argument of countability we may assume that for all $m\in \mathbb N^*$ the boundary of $Q^m$ has zero-measure for $\hat \mu_1^+$ and all the measures $\hat \xi_k^M$, $M\in \mathbb N^*$ and $k \in \mathbb{N}\cup \{\infty\}$.
By applying Proposition \ref{revi} to $f_k$ and $\nu_k$ we get:
\begin{align*}h(\nu_k)\leq &\beta^k_{M}\frac{1}{m}H_{\underline{\hat{\xi}_k}^{M}}(Q^m) +(1-\beta^k_{M}) C(f_k)\\ & +\left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|df_k^q\|}{q}d\xi_k^{M}-\int \phi\, d\hat{\xi_k}^{M}\right)\\ & +\gamma_{q,Q,M}(f_k)+\iota_k.
\end{align*}
By letting $k$, then $M$ go to infinity, we obtain for all $m$:
\begin{align*} \limsup_k h(\nu_k)\leq & \beta\frac{1}{m}H_{\hat\mu_1^+}(Q^m)+(1-\beta)C(f)\\ & + \left(\log 2+\frac{1}{r-1}\right)\left( \int \frac{\log ^+\|df^q\|}{q}d\mu_1-\int \phi\, d \hat \mu_1^+ \right)\\& +\limsup_{M}\sup_k\gamma_{q,Q,M}(f_k).
\end{align*}
By letting $m$ go to infinity, we get:
\begin{align*} \limsup_k h(\nu_k)\leq &\beta h(\hat\mu_1^+)+(1-\beta)C(f) \\
& +\left(\log 2+\frac{1}{r-1}\right)\left( \int \frac{\log ^+\|df^q\|}{q}d\mu_1-\int \phi\, d \hat \mu_1^+ \right)\\ & +\limsup_{M}\sup_k\gamma_{q,M}(f_k).\end{align*}
But $h(\hat \mu_1^+)=h(\mu_1)$ as the measure preserving systems associated to $\mu_1$ and $\hat \mu_1^+$ are isomorphic. Moreover we have
$\int \phi\, d\hat\mu_1^+=\lambda^+(\mu_1)=\lim_{q} \int \frac{\log ^+\|df^q\|}{q}d\mu_1$. Therefore by letting $q$ go to infinity we finally obtain with the asymptotic property (\ref{todd}) of $\gamma_{q,M}$:
$$\limsup_k h(\nu_k)\leq \beta h(\mu_1)+(1-\beta)C(f). $$
This concludes the proof of Proposition \ref{reduc}.
\end{proof}
\section{Semi-local Reparametrization Lemma }
In this section we prove the semi-local \textit{Reparametrization Lemma} stated above in Proposition \ref{paraa}.
\subsection{Strongly bounded curves}
To simplify the exposition (by avoiding irrelevant technical details involving the exponential map) we assume that $\mathbf M$ is the two-torus $\mathbb T^2$ with the usual Riemannian structure inherited from $\mathbb R^2$. Borrowing from \cite{bur} we first make the following definitions.\\
A $\mathcal C^r$ embedded curve $\sigma:[-1,1]\rightarrow \mathbf M$ is said \textit{bounded} when
$\max_{k=2,\cdots,r}\|d^k\sigma\|_\infty\leq \frac{\|d\sigma\|_\infty}{6}$.\\
\begin{lem}\label{nonam}
Assume $\sigma $ is a bounded curve. Then for
any $x\in \sigma([-1,1])$, the curve $\sigma$ contains the graph of a $\kappa$-admissible map at $x$ with $\kappa= \frac{\|d\sigma\|_\infty}{6}$.
\end{lem}
\begin{proof}Let $x=\sigma(s)$, $s\in [-1,1]$.
One checks easily (see Lemma 7 in \cite{burens} for further details) that for all $t\in [-1,1]$ the angle $\angle\sigma'(s), \sigma'(t)<\frac{\pi}{6}\leq 1$
and therefore $\int_{0}^1\sigma'(t)\cdot \frac{\sigma'(s)}{\| \sigma'(s)\|} \, dt\geq \frac{\|d\sigma\|_\infty}{6}$. Therefore, as $\sigma'(s)\in \mathcal E_+(x)$, the image of $\sigma$ contains the graph of an $\frac{\|d\sigma\|_\infty}{6}$-admissible map at $x$.
\end{proof}
A $\mathcal C^r$ bounded curve $\sigma:[-1,1]\rightarrow \mathbf M$ is said \textit{strongly $\epsilon$-bounded }for $\epsilon>0$ if $\|d\sigma\|_\infty\leq \epsilon$.
For $n\in \mathbb N^*$ and $\epsilon>0$ a curve is said \textit{strongly $(n,\epsilon)$-bounded} when $f^k\circ \sigma$ is strongly $\epsilon$-bounded for all $k=0,\cdots, n-1$.\\
We consider a $\mathcal C^r$ smooth diffeomorphism $g:\mathbf M\circlearrowleft$ with $\mathbb N \ni r\geq 2$. For $\hat x=(x,v)\in \mathbb PT\mathbf M $ with $\pi(\hat x)=x$, we let $k_g(x)\geq k'_g(\hat x)$ be the following integers:
$$k_g(x):=\left[\log\|d_{x}g \|\right], $$
$$k'_g( \hat x):=\left[\log \|d_xg(v)\|\right]=[\phi_g(\hat x)].$$
In the next lemma, we reparametrize the image by $g$ of a bounded curve. The proof of this lemma is mostly contained in the proof of the Reparametrization Lemma \cite{bur}, but we reproduce it for the sake of completeness.
\begin{lem}\label{nondyn}
Let $\frac{R_{inj}}{2}>\epsilon=\epsilon_g>0$
satisfying $\|d^sg_{2\epsilon}^x\|_\infty\leq 3\epsilon \|d_xg\|$ for all $s=1,\cdots,r$ and all $x\in \mathbf M$, where $g^x_{2\epsilon}= g\circ \exp_x(2\epsilon\cdot)=g(x+2\epsilon\cdot): \{w_x\in T_x\mathbf M, \ \|w_x\|\leq 1\}\rightarrow \mathbf M$. We assume $\sigma:[-1,1]\rightarrow \mathbf M$ is a strongly $\epsilon$-bounded $\mathcal C^r$ curve and we let $\hat \sigma:[-1,1]\rightarrow \mathbb P T \mathbf M$ be the associated induced map. \\
Then for some universal constant $C_r>0$ depending only on $r$ and for any pair of integers $(k,k')$ there is a family $\Theta$ of affine maps from $[-1,1]$ to itself satisfying:
\begin{itemize}
\item $\hat\sigma^{-1}\left(\left\{\hat x\in \mathbb P T \mathbf M, \ k_g(x)=k \text{ and }k'_g(\hat x)=k'\right\}\right)\subset \bigcup_{\theta\in \Theta} \theta([-1,1])$,
\item $\forall \theta \in \Theta$, the curve $g\circ \sigma \circ \theta$ is bounded,
\item $\forall \theta \in \Theta, \ |\theta'|\leq e^{\frac{k'-k-1}{r-1}}/4$,
\item $\sharp \Theta \leq C_re^{\frac{k-k'}{r-1}}$.
\end{itemize}
\end{lem}
\begin{proof}
\underline{\textit{First step :}} \textbf{Taylor polynomial approximation.} One computes for an affine map $\theta:[-1,1]\circlearrowleft$ with contraction rate $b$ specified later and with $y= \sigma(t)$, $k_g(y)=k$, $k'_g(y)=k' $, $t\in \theta([-1,1])$:
\begin{align*}\|d^r(g\circ \sigma\circ \theta)\|_\infty &\leq b^r \left\|d^{r}\left(g_{2\epsilon}^y \circ \sigma_{2\epsilon}^y\right)\right\|_\infty , \textrm{with $\sigma_{2\epsilon}^y:=(2\epsilon)^{-1}\exp_y^{-1}\circ \sigma=(2\epsilon)^{-1}\left(\sigma(\cdot )-y\right)$,}\\
&\leq b^r\left\|d^{r-1}\left( d_{\sigma_{2\epsilon}^y}g_{2\epsilon}^y\circ d\sigma_{2\epsilon}^y \right) \right\|_\infty,\\
&\leq b^r 2^r \max_{s=0,\cdots,r-1}\left\|d^s\left(d_{\sigma_{2\epsilon}^y}g_{2\epsilon}^y\right)\right\|_{\infty}\max_{k=1,\cdots ,r}\|d^k\sigma_{2\epsilon}^y \|_\infty.
\end{align*}
By assumption on $\epsilon$, we have $\|d^s g_{2\epsilon}^y\|_{\infty}\leq 3\epsilon\|d_y g\|$ for any $r\geq s\geq 1$.
Moreover $\max_{k=1,\cdots ,r}\|d^k\sigma_{2\epsilon}^y \|_\infty\leq 1$ as $\sigma$ is strongly $\epsilon$-bounded.
Therefore by Fa\'a di Bruno's formula, we get for some\footnote[4]{Although these constants may differ at each step, they are all denoted by $C_r$.} constants $C_r>0$ depending only on $r$:
\begin{align*}\max_{s=0,\cdots,r-1}\|d^s\left(d_{\sigma_{2\epsilon}^y}g_{2\epsilon}^y\right)\|_{\infty} &\leq \epsilon C_r\|d_y g\|,\\
\text{then }&,\\
\|d^r(g\circ \sigma\circ \theta)\|_\infty &\leq \epsilon C_rb^r \|d_y g\|\max_{k=1,\cdots ,r}\|d^k\sigma_{2\epsilon}^y \|_\infty,\\
&\leq C_rb^r \|d_y g\|\|d\sigma \|_\infty, \\
&\leq ( C_r b^{r-1}\|d_yg\|) \|d(\sigma \circ \theta)\|_{\infty}, \\
&\leq (C_r b^{r-1}e^{k}) \|d(\sigma \circ \theta)\|_{\infty}, \textrm{ because $k_g(y)=k$ }, \\
& \leq e^{k'-4}\|d(\sigma\circ \theta)\|_\infty, \textrm{ by taking $b=\left(C_re^{k-k'+4 }\right)^{-\frac{1}{r-1}}$.}
\end{align*}
Therefore the Taylor polynomial $P$ at $0$ of degree $r-1$ of $d(g\circ \sigma\circ \theta)$ satisfies on $[-1,1]$:
\begin{align*}
\|P-d(g\circ \sigma\circ \theta)\|_{\infty}&\leq e^{k'-4}\|d(\sigma\circ \theta)\|_\infty.
\end{align*}
We may cover $[-1,1]$ by at most $b^{-1}+1$ such affine maps $\theta$. \\
\underline{\textit{Second step :}} \textbf{Bezout theorem.}
Let $a=e^{k'}\|d(\sigma\circ \theta)\|_\infty$. Note that for $s\in [-1,1]$ with $k(\sigma \circ \theta(s))=k $ and $k'(\sigma \circ \theta(s))=k'$
we have $\|d(g\circ \sigma\circ \theta)(s)\|\in [ae^{-2},ae^{2}]$, therefore $\|P(s)\|\in [ae^{-3},ae^3]$. Moreover if we have now $\|P(s)\|\in [ae^{-3},ae^3]$ for some $s\in [-1,1]$ we get also $\|d(g\circ \sigma\circ \theta)(s)\|\in [ae^{-4},ae^{4}]$.
By Bezout theorem the semi-algebraic set $\{ s\in [-1,1],\ \|P(s)\|\in [e^{-3}a, e^{3}a]\}$ is the disjoint union of closed intervals $(J_i)_{i\in I}$
with $\sharp I$ depending only on $r$. Let $\theta_i$ be the composition of $\theta$ with an affine reparametrization from $[-1,1]$ onto $J_i$. \\
\underline{\textit{Third step :}} \textbf{ Landau-Kolmogorov inequality.}
By the Landau-Kolmogorov inequality on the interval (see Lemma 6 in \cite{bur}), we have for some constants $C_r\in \mathbb N^*$ and for all $1\leq s\leq r$:
\begin{align*}
\|d^s(g\circ \sigma\circ \theta_i)\|_\infty & \leq C_r\left(\|d^r(g\circ \sigma\circ \theta_i)\|_\infty +\|d(g\circ \sigma\circ \theta_i)\|_\infty\right),\\
&\leq C_r\frac{|J_i|}{2}\left( \|d^r(g\circ \sigma\circ \theta)\|_\infty+ \sup_{t\in J_i}\|d(g\circ \sigma\circ \theta)(t)\| \right),\\
&\leq C_r a\frac{|J_i|}{2}.
\end{align*}
We cut again each $J_i$ into $1000C_r$ intervals $\tilde{J_i}$ of the same length with $$ \theta(\tilde{J}_i)\cap \sigma^{-1}\left\{x, \ k_g(x)=k \text{ and }k'_g(x)=k'\right\}\neq \emptyset.$$ Let $\tilde{\theta_i}$ be the affine reparametrization from $[-1,1]$ onto $\theta(\tilde{J_i})$. We check that $g\circ \sigma\circ \tilde{\theta_i}$ is bounded:
\begin{align*}
\forall s=2,\cdots, r, \ \|d^s(g\circ \sigma\circ \tilde{\theta_i})\|_\infty & \leq (1000C_r)^{-2} \|d^s(g\circ \sigma\circ \theta_i)\|_\infty,\\
&\leq \frac{1}{6}(1000C_r)^{-1}\frac{|J_i|}{2}ae^{-4},\\
&\leq \frac{1}{6}(1000C_r)^{-1}\frac{|J_i|}{2}\min_{s\in J_i}\|d(g\circ \sigma\circ \theta)(s)\|,\\
&\leq \frac{1}{6}(1000C_r)^{-1}\frac{|J_i|}{2}\min_{s\in \tilde{J}_i}\|d(g\circ \sigma\circ \theta)(s)\|,\\
&\leq \frac{1}{6} \|d(g\circ \sigma\circ \tilde{\theta_i})\|_\infty.
\end{align*}
This concludes the proof with $\Theta$ being the family of all $\tilde{\theta_i}$'s.
\end{proof}
We recall now a useful property of bounded curves (see Lemma 7 in \cite{burens} for a proof).
\begin{lem}\label{inter}
Let $\sigma:[-1,1]\rightarrow \mathbf M$ be a $\mathcal C^r$ bounded curve and let $B$ be a ball of radius less than $\epsilon$. Then there exists an affine map $\theta:[-1,1]\circlearrowleft$ such that :
\begin{itemize}
\item $\sigma\circ \theta$ is strongly $3\epsilon$-bounded,
\item $\theta([-1,1])\supset \sigma^{-1}B$.
\end{itemize}
\end{lem}
\subsection{Choice of the parameters $\kappa$ and $\epsilon_q$}
For a diffeomorphism $f:\mathbf M \circlearrowleft$ the scale $\epsilon_f$ in Lemma \ref{nondyn} may be chosen such that $\epsilon_{f^k}\leq \epsilon_{f^l}\leq \max(1, \|df\|_\infty)^{-k}$ for any $q\geq k\geq l\geq 1$.
We take $\kappa =\frac{\epsilon_f}{36}$ and we choose $ \epsilon_q<\frac{\epsilon_{f^q}}{3}$ such that for any $\hat x, \hat y\in \mathbb PT\mathbf M$ which are $\epsilon_q$-close and for any $0\leq l\leq q$:
\begin{align}\label{eq}
\left| k_{f^l}( x ) - k_{f^l}(y )\right|&\leq 1,\\
\left| k'_{f^l}( \hat x ) - k'_{f^l}(\hat y)\right|&\leq 1.\nonumber
\end{align} Without loss of generality we can assume the local unstable curve $D$ (defined in Subsection \ref{zeta}) is reparametrized by a $\mathcal C^r$ strongly $\epsilon_q$-bounded map $\sigma:[-1,1]\rightarrow D$.
Let $F_n$ be an atom of the partition $ Q^{\mathtt E_n^M}$ and let $E_n^M=E_n^M(x)$ for any $\hat x\in F_n$. Recall that the diameter of $Q$ is less than $\epsilon_q$. It follows from (\ref{eq}) that for any $\hat x\in F_n$ we have with $\hat{\zeta}_{F_n}^{M}=\int \mu_{\hat x,n}^M\, d\zeta_{F_n}(x)$:
\begin{align*}
\sum_{l\in E_n^M } \left| k_{f^q}(f^l x)-k'_{f^q}(F^l\hat x)\right| \leq 10\sharp E_n^M+ \int \log ^+\|d_yf^q\|\, d\zeta_{F_n}^{M}(y)-\int \phi_q\, d\hat{\zeta}_{F_n}^{M}.
\end{align*}
Therefore we may fix some $0\leq c<q$, such that for any $x\in F_n$
\begin{align*}\sum_{l\in (c+q\mathbb N)\cap E_n^M } \left| k_{f^q}(f^l x)-k'_{f^q}(F^l\hat x)\right|& \leq 10\frac{n}{q}+ \frac{1}{q}\left(\int \log ^+\|d_yf^q\|\, d\zeta_{F_n}^{M}(y)-\int \phi_q\, d\hat{\zeta}_{F_n}^{M}\right), \\
&\leq 10\frac{n}{q}+2A_f\frac{qn}{M}+\frac{1}{q}\int \log ^+\|d_yf^q\|\, d\zeta_{F_n}^{M}(y)-\int \phi\, d\hat{\zeta}_{F_n}^{M}.
\end{align*}
\subsection{Combinatorial aspects}
We put $\partial_lE_n^M:=\{ a\in E_n^M \text{ with }a-1\notin E_n^M\}.$ Then we let $\mathcal A_n:=\{0=a_1<a_2<\cdots a_m\}$
be the union of $\partial_l E_n^M$, $[0,n[\setminus E_n^M$ and $(c+q\mathbb N)\cap [0,n[ $.
We also let $b_i=a_{i+1}-a_i$ for $i=1,\cdots , m-1$ and $b_m=n-a_m$.
For a sequence $\mathbf k= (k_l,k'_l)_{ l\in \mathcal A_n}$ of integers, a positive integer $m_n$ and a subset $\overline{E}$ of $[0,n[$, we let $F_n^{\mathbf k , \overline{E},m_n}$ be the subset of points $\hat x\in F_n$ satisfying:
\begin{itemize}
\item $\overline{E}=E_n(x)\setminus E_n^M(x)$,
\item $k_{a_i}=k_{f^{b_i}}(f^{a_i} x)$ and $k'_{a_i}=k'_{f^{b_i}}(F^{a_i}\hat x)$ for $i=1,\cdots, m$,
\item $m_n(x)=m_n$.
\end{itemize}
\begin{lem}\label{comb}
$$\sharp\left\{(\mathbf k, \overline E,m_n), \ F_n^{\mathbf k , \overline{E},m_n}\neq \emptyset\right\}\leq ne^{2nA_fH(A_f^{-1})} 3^{n(1/q+1/M)}e^{nH(1/M)}.$$
\end{lem}
\begin{proof}
Firstly observe that if $a_{i}\notin E_n^M$ then $b_i=1$. In particular
$\sum_{i, \ a_i\notin E_n^M}k_{a_i}\leq (n-\sharp E_n^M)\log^+\|df\|_\infty \leq (n-\sharp E_n^M)(A_f-1) $. The number of such sequences $(k_{a_i})_{i, \ a_i\notin E_n^M}$ is therefore bounded above by $\binom{r_nA_f}{r_n}$ with $r_n=n-\sharp E_n^M$ and its logarithm is dominated by $r_nA_fH(A_f^{-1})+1\leq nA_fH(A_f^{-1})+1 $. Similarly the number of sequences $(k'_{a_i})_{i, \ a_i\notin E_n^M}$ is less than $nA_fH(A_f^{-1 })+1$.
Then from the choice of $\epsilon_q$ in (\ref{eq}) there are at most three possible values of $k_{a_i} (x)$ for $a_i\in E_n^M$ and $x\in F_n$.
Finally as $\sharp \overline{E}\leq n/M$, the number of admissible sets $\overline{E}$ is less than $\binom{n}{[n/M]}$ and thus its logarithm is bounded above by $nH(1/M)+1$. Clearly we can also fix the value of $m_n$ up to a factor $n$.
\end{proof}
\subsection{The induction}
We fix $\mathbf k$, $m_n$ and $\overline{E}$ and we reparametrize appropriately the set $F_n^{\mathbf k, \overline{E},m_n}$.
\begin{lem}\label{induc}
With the above notations there are families $(\Theta_i)_{i\leq m}$ of affine maps from $[-1,1]$ into itself such that :
\begin{itemize}
\item $\forall \theta\in \Theta_i \ \forall j\leq i$ the curve $f^{a_j}\circ \sigma\circ \theta$ is strongly $\epsilon_{f^{b_j}}$-bounded,
\item $\hat{\sigma}^{-1}\left(F_n^{\mathbf k, \overline E,m_n}\right)\subset \bigcup_{\theta\in \Theta_i} \theta([-1,1])$,
\item $\forall \theta_i\in \Theta_i \ \forall j< i, \exists \theta^i_j\in \Theta_j, \ \frac{|\theta'_i|}{|(\theta_j^i)'|}\leq \prod_{j\leq l< i}e^{\frac{k'_{a_l}-k_{a_l}-1}{r-1}}/4$,
\item $\sharp \Theta_i\leq C\max\left(1,\|df\|_\infty\right)^{\sharp \overline E\cap [1,a_i] } \prod_{j< i}C_re^{\frac{k_{a_j}-k'_{a_j}}{r-1}}$.
\end{itemize}
\end{lem}
\begin{proof}
We argue by induction on $i\leq m$. By changing the constant $C$, it is enough to consider $i$ with $a_i>N$. Recall that the integer $N$ was chosen in such a way that for any $x\in \mathtt F $ there is $0\leq k\leq N$ with $F^k\hat x\in H_\delta$. We assume the family $\Theta_i$ for $i<m$ already built and we will define $\Theta_{i+1}$. Let $\theta_i \in \Theta_i$. We apply Lemma \ref{nondyn} to the strongly $\epsilon_{f^{b_i}}$-bounded curve $f^{a_i}\circ \sigma\circ \theta_i$ with $g=f^{b_i}$. Let $\Theta$ be the family of affine reparametrizations of $[-1,1]$ satisfying the conclusions of Lemma \ref{nondyn}, in particular $f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta$ is bounded, $|\theta'|\leq e^{\frac{k'_{a_i}-k_{a_i}-1}{r-1}}/4 $ for all $\theta \in \Theta$ and $\sharp \Theta\leq C_re^{\frac{k_{a_i}-k'_{a_i}}{r-1}}$. We distinguish three cases:
\begin{itemize}
\item \underline{$a_{i+1}\in E_n^M$.} The diameter of $F^{a_{i+1}}F_n$ is less than $\epsilon_q\leq \frac{\epsilon_{f^{b_{i+1}}}}{3}$. By Lemma \ref{inter} there is an affine map $\psi:[-1,1]\circlearrowleft$ such that $f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta\circ \psi$ is strongly $\epsilon_{f^{b_{i+1}}}$-bounded and its image contains the intersection of the bounded curve $f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta$ with $f^{a_{i+1}}F_n$. We let
then $\theta_{i+1}=\theta_i\circ \theta \circ \psi\in \Theta_{i+1}$.
\item \underline{$a_{i+1}\in E\setminus E_n^M$}. Observe that $b_{i+1}=1$, therefore $\epsilon_{f^{b_i}}\leq \epsilon_{f^{b_{i+1}}}$. Then the length of the curve $f^{a_{i+1}}\circ \sigma\circ \theta_i \circ \theta$ is less than $3\|df\|_\infty \epsilon_{f^{b_i}}$, thus may be covered by $[3\|df\|_\infty]+1$ balls of radius less than $\epsilon_{f^{b_{i+1}}}$. We then use Lemma \ref{inter} as in the previous case to reparametrize the intersection of this curve with each ball by
a strongly $\epsilon_{f^{b_{i+1}}}$-bounded curve. We define in this way the associated parametrizations of $\Theta_{i+1}$.
\item \underline{$a_{i+1}\notin E$ and $a_{i+1}\notin E_n^M$}. We claim that $\|d(f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta)\|\leq \epsilon_f/6$. Take $\hat x \in F_n^{\mathbf k, \overline E, m_n}$ with $x=\pi(\hat x)=\sigma \circ \theta_i\circ \theta(s)$. Let $K_x=\max\{ k < a_{i+1}, \ F^k\hat x\in H_\delta\}\geq N$.
Observe that $[K_x, a_{i+1}]\cap E_n^M=\emptyset$, therefore for $K_x\leq a_l< a_{i+1}$, we have $b_l=1$, then $a_l=a_{i+1}-i-1+l$. We argue by contradiction by assuming : \begin{align}\label{mord}\|d(f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta)\|\geq \epsilon_f/6=6\kappa
\end{align} By Lemma \ref{nonam}, the point $f^{a_{i+1}} x$ belongs to $G_\kappa$. We will show $F^{a_{i+1}}\hat x\in H_\delta$. Therefore we will get $F^{a_{i+1}}\hat x\in G=\pi^{-1}G_\kappa\cap H_\delta$ contradicting $a_{i+1}\notin E$. To prove $F^{a_{i+1}}\hat x\in H_\delta$ it is enough to show $\sum_{j\leq l<a_{i+1}} \psi(F^l\hat x)\geq (a_{i+1}-j)\delta$ for any $K_x\leq j<a_{i+1}$ because $F^{K_x}(\hat x)$ belongs to $H_\delta$.
For any $K_x\leq j< a_{i+1}$ we have :
\begin{align}\label{frousse}
\|d(f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta)\|_\infty&\leq 2
\|d_s(f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta)\|,\text{ because $f^{a_{i+1}}\circ \sigma\circ \theta_i\circ \theta$ is bounded,}\nonumber\\
&\leq 2\| d_{f^jx}f^{a_{i+1}-j}(\hat x) \|\times \|d_s(f^{a_{\overline j}}\circ \sigma \circ \theta_{\overline j}^i)\|\times \frac{|\theta'_i|\times|\theta'|}{|(\theta_{\overline j}^i)'|}, \text{ with $a_{\overline j}=j$,} \nonumber \\
& \leq \frac{\epsilon_f}{3} \| d_{f^jx}f^{a_{i+1}-j}(\hat x) \| \prod_{\overline{j}\leq l\leq i}e^{\frac{k'_{a_l}-k_{a_l}-1}{r-1}}/4\text{ by induction hypothesis}, \nonumber \\
\frac{1}{2}&\leq \| d_{f^jx}f^{a_{i+1}-j}(\hat x) \| \prod_{\overline{j}\leq l\leq i}e^{\frac{k'_{a_l}-k_{a_l}-1}{r-1}}/4 \text{ by assumption (\ref{mord})}.
\end{align}
Recall again that for $\overline{j}\leq l \leq i$, we have $b_l=1$, thus
$$\left| k_{a_l}-\log \|d_{f^{a_l}x }f\| \right|\leq 1$$ and
$$k'_{a_l}\leq \phi(F^{a_l}\hat x). $$
Therefore we get for any $K_x\leq j< a_{i+1}$ from (\ref{frousse}):
\begin{align*}2^{a_{i+1}-j} &\leq e^{\frac{r}{r-1} \sum_{j\leq l<a_{i+1}} \phi(F^l\hat x)} e^{-\frac{1}{r-1} \sum_{j\leq l<a_{i+1}} \log^+ \|d_{f^{l}x }f\|},\\
(a_{i+1}-j)\log 2&\leq \frac{r}{r-1} \sum_{j\leq l<a_{i+1}} \psi(F^l\hat x), \text{ by definition of $\psi$,}\\
(a_{i+1}-j)\delta&\leq \sum_{j\leq l<a_{i+1}} \psi(F^l\hat x) \text{, as $\delta$ was chosen less than $\frac{r-1}{r}\log 2$}.
\end{align*}
\end{itemize}
\end{proof}
\begin{lem}\label{lastly}
$$\sum_{i, \ m_n>a_i\notin E_n^M}\frac{k_{a_i} -k'_{a_i}}{r-1}\leq \left(n-\sharp E_n^M\right)\left(\frac{\log^+ \|df\|_\infty}{r}+\frac{1}{r-1}\right).$$
\end{lem}
\begin{proof}
The intersection of $[0,m_n[$ with the complement set of $E_n^M$ is the disjoint union of neutral blocks and possibly an interval of integers of the form $[l,m_n[$. In any case $F^{\mathtt j}\hat x$ belongs to $H_\delta$ for such an interval $[\mathtt i, \mathtt j [$ for any $x\in F_n^{\mathbf k , \overline{E},m_n}$. In particular, we have
\begin{align*}\sum_{l, a_l\in [\mathtt i, \mathtt j [ }k'_{a_l}-\frac{k_{a_l}}{r} & \geq (\delta-1)(\mathtt j -\mathtt i )
\end{align*} therefore
\begin{align*}
\sum_{i, \ m_n>a_i\notin E_n^M}k'_{a_i}-\frac{k_{a_i}}{r}& \geq -(n-\sharp E_n^M),\\
\sum_{i, \ m_n>a_i\notin E_n^M} \frac{k_{a_i}-k'_{a_i}}{r-1} & \leq \frac{n-\sharp E_n^M}{r-1}+ \frac{\sum_{i, \ m_n>a_i\notin E_n^M} k_{a_i}}{r},\\
&\leq \left(n-\sharp E_n^M\right)\left(\frac{\log^+ \|df\|_\infty}{r}+\frac{1}{r-1}\right).
\end{align*}
\end{proof}
\subsection{Conclusion}
We let $\Psi_n$ be the family of $\mathcal C^r$ curves $\sigma\circ \theta$ for $\theta\in \Theta_m=\Theta_m(\mathbf k, \overline E, m_n)$ with $\Theta_m$ as in Lemma \ref{induc} over all admissible parameters $\mathbf k, \overline E, m_n$. For $\theta \in \Theta_m$ the curve $f^{a_i}\circ \sigma\circ \theta$ is strongly $\epsilon_{f^{b_i}}$-bounded for any $i=1,\cdots, m$, in particular
$$\forall i=1,\cdots, m, \ \|d( f^{a_i}\circ \sigma\circ \theta)\|_\infty\leq \epsilon_{f^{b_i}}\leq \max(1,\|df\|_\infty)^{-b_i},$$
therefore
$$\forall j=0,\cdots, n, \ \|d( f^{j}\circ \sigma\circ \theta)\|_\infty\leq 1.$$
By combining the previous estimates, we get moreover:
\begin{align*}
\sharp \Psi_n& \leq \sharp\left\{(\mathbf k, \overline E,m_n), \ F_n^{\mathbf k , \overline{E},m_n}\neq \emptyset\right\} \times \sup_{\mathbf k, \overline{E},m_n}\sharp \Theta_n(\mathbf k, \overline E, m_n), \\
&\leq ne^{2(n-\sharp E_n^M)A_fH(A_f)} 3^{n(1/q+1/M)}e^{nH(1/M)}\sup_{\mathbf k, \overline{E},m_n}\sharp \Theta_n(\mathbf k, \overline E, m_n), \text{ by Lemma \ref{comb}, }\\
&\leq ne^{2(n-\sharp E_n^M)A_fH(A_f)} 3^{n(1/q+1/M)}e^{nH(1/M)} \max(1,\|df\|_\infty)^{\sharp \overline E } \prod_{j\leq m}C_re^{\frac{k_{a_j}-k'_{a_j}}{r-1}}, \text{ by Lemma \ref{induc}.}\\
\end{align*}
Then we decompose the product into four terms :
\begin{itemize}
\item $\sum_{i, \ m_n>a_i\notin E_n^M}\frac{k_{a_i} -k'_{a_i}} {r-1}\leq (n-\sharp E_n^M)\left( \frac{\log^+ \|df\|_\infty}{r}+\frac{1}{r-1}\right) $ by Lemma \ref{lastly},
\item $\sum_{i, \ m_n\leq a_i}\frac{k_{a_i} -k'_{a_i}}{r-1}\leq (n-m_n)\frac{A_f}{r-1}$,
\item $\sum_{i, a_i\in E_n^M\cap (c+q\mathbb N ) }\frac{k_{a_i} -k'_{a_i}}{r-1}\leq 10\frac{n}{q}+2A_f\frac{qn}{M}+ \frac{1}{r-1}\left(\int \frac{\log ^+\|d_yf^q\|}{q}\, d\zeta_{F_n}^{M}(y)-\int \phi\, d\hat{\zeta}_{F_n}^{M}\right),$\\
\item $\sum_{i, a_i\in E_n^M\setminus (c+q\mathbb N ) }\frac{k_{a_i} -k'_{a_i}}{r-1} \leq 2A_f\frac{qn}{M}$.\\
\end{itemize}
By letting
$$B_r=\frac{1}{r-1}+\log C_r,$$
$$\gamma_{q,M}(f):=2\left(\frac{1}{q}+\frac{1}{M}\right)\log C_r+H(1/M)+\frac{10+\log 3}{q}+\frac{4qA_f+\log 3}{M},$$
$$\tau_n=\sup_{x\in \mathtt F}\left(1-\frac{m_n(x)}{n}\right)\frac{A_f}{r-1}+\frac{\log (nC)}{n},$$
we get with $C(f):=2A_fH(A_f^{-1})+\frac{\log^+ \|df\|_\infty}{r}+B_r$:
\begin{align*}\frac{1}{n}\log \sharp \Psi_{F_n}\leq &\left(1-\frac{\sharp E_n^M}{n}\right)C(f)\\ &+\left(\log 2+\frac{1}{r-1}\right)\left(\int \frac{\log ^+\|d_xf^q\|}{q}\, d\zeta_{F_n}^{M}(x)-\int \phi\, d\hat{\zeta}_{F_n}^{M}\right)\\&+\gamma_{q,M}(f)+\tau_n,
\end{align*}
This concludes the proof of Proposition \ref{paraa}.
\appendix
\section*{Appendix}
We explain in this appendix how our Main Theorem implies Buzzi-Crovisier-Sarig statement.\\
Let $(f_k)_k$, $(\nu_k^+)_k$ and $\hat \mu$ be as in the setting of Theorem \ref{cochon}.
Then, either $\lim_k\lambda^+(\nu_k)=\int \phi\, d\hat \mu\leq \frac{\lambda^+(f)}{r}$ and we get by Ruelle inequality, $\limsup_kh(\nu_k)\leq \frac{\lambda^+(f)}{r}$ or there exists $\alpha\in \left] \frac{\lambda^+(f)}{r}, \min\left(\int \phi\, d\hat \mu,\frac{\lambda^+(f)}{r-1} \right)\right[$. By applying our Main Theorem with respect to $\alpha$, there is a decomposition $\hat \mu=(1-\beta_\alpha)\hat \mu_{0,\alpha}+\beta_\alpha \hat \mu_{1,\alpha}^+$ satisfying
$\limsup_{k\rightarrow +\infty} h(\nu_k)\leq \beta_\alpha h(\mu_{1,\alpha})+(1-\beta_\alpha)\alpha$. But it follows from the proofs that $\beta_\alpha \mu_{1,\alpha}$ is a component of $\beta \mu_1$ with $\beta $ and $\mu_1$ being as in Buzzi-Crovisier-Sarig's statement as they consider empirical measure associated to a larger set $G$ (see Remark \ref{mieux}). In particular
$\beta_\alpha h(\mu_{1,\alpha})\leq \beta h(\mu_1)$, therefore $\limsup_{k\rightarrow +\infty} h(\nu_k)\leq\beta h(\mu_1)+\frac{\lambda^+(f)+\lambda^+(f^{-1})}{r-1}$. \\
In Theorem C \cite{BCS2}, the authors also proved $\int \phi \,d\hat \mu_0=0$ whenever $\beta \neq 1$. Therefore we get here $(1-\beta_\alpha)\int \phi\, d\hat \mu_{0,\alpha}\geq (1-\beta)\int \phi \,d\hat \mu_0=0$, then $\int \phi\, d\hat\mu_{0,\alpha}\geq 0$. But maybe we could have $\int \phi\, d\hat\mu_{0,\alpha}> 0$.
\end{document} |
\begin{document}
\begin{abstract}
Let $S$ be a minimal surface of general type with $p_{g}(S)=0$ and $K^{2}_{S}=4$.\ Assume the bicanonical map $\varphi$ of $S$ is a morphism of degree $4$ such that the image of $\varphi$ is smooth.\ Then we prove that the surface $S$ is a Burniat surface with $K^{2}=4$ and of non nodal type.
\end{abstract}
\maketitle
\section{
Introduction}
When we consider the bicanonical map $\varphi$ of a minimal surface $S$ of general type with $p_{g}(S)=0$ over the field of complex numbers, Xiao \cite{FABSTG} gave that the image of $\varphi$ is a surface if $K^{2}_{S}\ge 2$, and Bombieri \cite{CMSGT} and Reider \cite{VBR2L} proved that $\varphi$ is a morphism for $K^{2}_{S}\ge 5$. In \cite{SG06, BS07, DBS0} Mendes Lopes \cite{DGCSG0} and Pardini obtained that the degree of $\varphi$ is $1$ for $K^{2}_{S}=9$; $1$ or $2$ for $K^{2}_{S}=7,8$; $1,2$ or $4$ for $K^{2}_{S}=5,6$ or for $K^{2}_{S}=3,4$ with a morphism $\varphi$. Moreover, there are further studies for the surface $S$ with non birational map $\varphi$ in \cite{CCMSS0, NS03, ESEN, SG06, BS07II, CDPG80}.
Mendes Lopes and Pardini \cite{CCMSS0} gave a characterization of a Burniat surface with $K^{2}=6$ as a minimal surface of general type with $p_{g}=0,\ K^{2}=6$ and the bicanonical map of degree $4$. Zhang \cite{CCS05BM} proved that a surface $S$ is a Burniat surface with $K^{2}=5$ if the image of the bicanonical map $\varphi$ of $S$ is smooth, where a surface $S$ is a minimal surface of general type with $p_{g}(S)=0,\ K^{2}_{S}=5$ and the bicanonical map $\varphi$ of degree $4$. In this paper we extend their characterizations of Burniat surfaces with $K^{2}=6$ \cite{CCMSS0}, and with $K^{2}=5$ \cite{CCS05BM} to one for the case $K^{2}=4$ as the following.
\begin{thm}\label{mainthm}
Let $S$ be a minimal surface of general type with $p_{g}(S)=0$ and $K_{S}^{2}=4$. Assume the bicanonical map $\varphi\colon S\longrightarrow \Sigma\subset \mathbb{P}^{4}$ is a morphism of degree $4$ such that the image $\Sigma$ of $\varphi$ is smooth. Then the surface $S$ is a Burniat surface with $K^{2}=4$ and of non nodal type.
\end{thm}
As we mentioned before Bombieri \cite{CMSGT} and Reider \cite{VBR2L} gave that the bicanonical map of a minimal surface of general type with $p_{g}=0$ is a morphism for $K^{2}\ge 5$. On the other hand, Mendes Lopes and Pardini \cite{NCF9} found that there is a family of numerical Campedelli surfaces, minimal surfaces of general type with $p_{g}=0$ and $K^{2}=2$, with $\pi_{1}^{alg}=\mathbb{Z}_{3}^{2}$ such that the base locus of the bicanonical system consists of two points. However, we do not know whether the bicanonical system of a minimal surface of general type with $p_{g}=0$ and $K^{2}=3$ or $4$ has a base point or not. Thus we need to assume that the bicanonical map is a morphism in Theorem \ref{mainthm}.
Bauer and Catanese \cite{BSSBII, BSSBIII, BSSBEII} studied Burniat surfaces with $K^{2}=4$. Let $S$ be a Burniat surface with $K^{2}=4$. When $S$ is of non nodal type it has the ample canonical divisor, but when $S$ is of nodal type it has one ($-2$)-curve. For the case of nodal type we will discuss to characterize Burniat surfaces with $K^{2}=4$ and of nodal type in the future article.
We follow and use the strategies of Mendes Lopes and Pardini \cite{CCMSS0}, and of Zhang \cite{CCS05BM} as main tools of this article. The paper is organized as follows: in Section \ref{DCBC} we recall some useful formulas and Propositions for a double cover from \cite{CCMSS0}, and we give a description of a Burniat surface with $K^{2}=4$ and of non nodal type; in Section \ref{analyze} we analyze branch divisors of the bicanonical morphism $\varphi$ of degree $4$ of a minimal surface of general type with $p_{g}=0$ and $K^{2}=4$ when the image of $\varphi$ is smooth; in Section \ref{proofmainthm} we give a proof of Theorem \ref{mainthm}.
\section{Notation and conventions}\label{NC}
In this section we fix the notation which will be used in the paper. We work over the field of complex numbers.
Let $X$ be a smooth projective surface. Let $\Gamma$ be a curve in
$X$ and $\tilde{\Gamma}$ be the normalization of $\Gamma$. We set:\\\\
$K_X$: the canonical divisor of $X$;\\
$q(X)$: the irregularity of $X$, that is, $h^{1}(X,\mathcal{O}_{X})$;\\
$p_{g}(X)$: the geometric genus of $X$, that is, $h^{0}(X,\mathcal{O}_{X}(K_{X}))$;\\
$p_{g}(\Gamma)$: the geometric genus of $\Gamma$, that is,
$h^{0}(\tilde{\Gamma},\mathcal{O}_{\tilde{\Gamma}}(K_{\tilde{\Gamma}}))$;\\
$\chi_{top}(X)$: the topological Euler characteristic of $X$;\\
$\chi(\mathcal{F})$: the Euler characteristic of a sheaf $\mathcal{F}$ on $X$, that is, $\sum_{i=0}^{2}(-1)^{i}h^{i}(X,\mathcal{F})$;\\
$\equiv$: the linear equivalence of divisors on a surface;\\
$(-n)$-curve: a smooth irreducible rational curve with the self-intersection number $-n$,
in particular we call that a $(-1)$-curve is exceptional and a $(-2)$-curve is nodal;\\
We usually omit the sign $\cdot$ of the intersection product of two
divisors on a surface. And we do not distinguish between line bundles and divisors on a smooth variety.
\section{Preliminaries}\label{DCBC}
\subsection{Double covers}\label{DC}
Let $S$ be a smooth surface and $B\subset S$ be a smooth curve (possibly empty) such that $2L\equiv B$ for a line bundle $L$ on $S$. Then there exists a double cover $\pi\colon Y\longrightarrow S$ branched over $B$. We get \[\pi_{*}\mathcal{O}_{Y}=\mathcal{O}_{S}\oplus L^{-1},\] and the invariants of $Y$ from ones of $S$ as follows:
\[K^{2}_{Y}=2(K_{S}+L)^{2},\ \chi(\mathcal{O}_{Y})=2\chi(\mathcal{O}_{S})+\frac{1}{2}L(K_{S}+L),\]
\[p_{g}(Y)=p_{g}(S)+h^{0}(S,\mathcal{O}_{S}(K_{S}+L)),\]
\[q(Y)=q(S)+h^{1}(S,\mathcal{O}_{S}(K_{S}+L)).\]
We begin with the following Proposition in \cite{CCMSS0}.
\begin{prop}[Proposition 2.1 in \cite{CCMSS0}]\label{albenese}
Let $S$ be a smooth surface with $p_{g}(S)=q(S)=0$, and let $\pi\colon Y\longrightarrow S$ be a smooth double cover. Suppose that $q(Y)>0$. Denote the Albanese map of $Y$ by $\alpha\colon Y\longrightarrow$ A. Then
$(i)$ the Albanese image of $Y$ is a curve $C$$;$
$(ii)$ there exist a fibration $g\colon S \longrightarrow \mathbb{P}^{1}$ and a degree $2$ map $p\colon C\longrightarrow \mathbb{P}^{1}$ such that $p\circ\alpha=g\circ\pi$.
\end{prop}
\begin{prop}[Corollary 2.2 in \cite{CCMSS0}]\label{inequiKq}
Let $S$ be a smooth surface of general type with $p_{g}(S)=q(S)=0,\ K_{S}^{2}\ge3$, and let $\pi\colon Y\longrightarrow S$ be a smooth double cover. Then $K_{Y}^{2}\ge16(q(Y)-1)$.
\end{prop}
\subsection{Bidouble covers} \label{bico} Let $Y$ be a smooth surface and $D_{i}\subset Y,\ i=1,2,3$ be smooth divisors such that $D:=D_{1}+D_{2}+D_{3}$ is a normal crossing divisor, $2L_{1}\equiv D_{2}+D_{3}$ and $2L_{2}\equiv D_{1}+D_{3}$ for line bundles $L_{1},\ L_{2}$ on $Y$. By \cite{ACAV} there exists a bidouble cover $\psi\colon \bar{Y}\longrightarrow Y$ branched over $D$. We obtain \[\psi_{*}\mathcal{O}_{\bar{Y}}=\mathcal{O}_{Y}\oplus L_{1}^{-1}\oplus L_{2}^{-1}\oplus L_{3}^{-1},\] where $L_{3}=L_{1}+L_{2}-D_{3}$.
We describe a Burniat surface with $K^{2}=4$ and of non nodal type \cite{BSSBII}.
\begin{nota} \label{notdel}
{\rm{ Let $\rho\colon \Sigma\longrightarrow \mathbb{P}^{2}$ be the blow-up of $\mathbb{P}^{2}$ at $5$ points $p_{1},\ p_{2},\ p_{3},\ p_{4},\ p_{5}$ in general position. We denote that $l$ is the pull-back of a line in $\mathbb{P}^{2}$, $e_{i}$ is the exceptional curve over $p_{i},\ i=1,2,3,4,5$, and $e'_{i}$ is the strict transform of the line joining $p_{j}$ and $p_{k},\ \{i,j,k\}=\{1,2,3\}$. Also, $g_{i}$ $(resp.\ h_{i})$ denotes the strict transform of the line joining $p_{4}$ $(resp.\ p_{5})$ and $p_{i},\ i=1,2,3$. Then the picard group of $\Sigma$ is generated by $l,\ e_{1},\ e_{2},\ e_{3},\ e_{4}$ and $e_{5}$. We get that $-K_{\Sigma}\equiv3l-\sum_{i=1}^{5}e_{i}$ is very ample. The surface $\Sigma$ is embedded by the linear system $|-K_{\Sigma}|$ as a smooth surface of degree $4$ in $\mathbb{P}^{4}$, called a del Pezzo surface of degree $4$.
}}
\end{nota}
We consider smooth divisors
\[B_{1}=e_{1}+e'_{1}+g_{2}+h_{2}\equiv3l+e_{1}-3e_{2}-e_{3}-e_{4}-e_{5}, \textrm{ }\textrm{ }\textrm{ }\textrm{ }\textrm{ }\]
\[B_{2}=e_{2}+e'_{2}+g_{3}+h_{3}\equiv3l-e_{1}+e_{2}-3e_{3}-e_{4}-e_{5}, \textrm{and}\]
\[B_{3}=e_{3}+e'_{3}+g_{1}+h_{1}\equiv3l-3e_{1}-e_{2}+e_{3}-e_{4}-e_{5}.\textrm{ }\textrm{ }\textrm{ }\textrm{ }\textrm{ }\]
Then $B:=B_{1}+B_{2}+B_{3}$ is a normal crossing divisor, $2L'_{1}\equiv B_{2}+B_{3}$ and $2L'_{2}\equiv B_{1}+B_{3}$ for line bundles $L'_{1},\ L'_{2}$ on $\Sigma$. We obtain a bidouble cover $\varphi\colon S\longrightarrow \Sigma\subset \mathbb{P}^{4}$. We remark that the example is a minimal surface $S$ of general type with $p_{g}(S)=0,\ K_{S}^{2}=4$ and the bicanonical morphism $\varphi$ of degree $4$ having the ample $K_{S}$.
\section{Branch divisors of the bicanonical map}\lambdabel{analyze}
\begin{nota}\label{BDBM1}
{\rm{
Let $S$ be a minimal surface of general type with $p_{g}(S)=0$ and $K_{S}^{2}=4$. Assume that the bicanonical map $\varphi$ of $S$ is a morphism of degree $4$ and the image $\Sigma$ of $\varphi$ is smooth in $\mathbb{P}^{4}$. By \cite{ORS} $\Sigma$ is a del Pezzo surface of degree $4$ in Notation \ref{notdel}. We denote $\rho,l,e_{i},e'_{j},g_{j},h_{j},\ i=1,2,3,4,5,\ j=1,2,3$ as the notations in Notation \ref{notdel}. Denote $\gamma\equiv l-e_{4}-e_{5},\ \delta\equiv 2l-\sum_{i=1}^{5}e_{i},\ f_{i}\equiv l-e_{i}$ and $F_{i}\equiv\varphi^{*}(f_{i})$ for $i=1,2,3,4,5$.
}}
\end{nota}
We follow the strategies of \cite{CCMSS0, CCS05BM}. We start with the following proposition similar to one in Section $4$ of \cite{CCS05BM}.
\begin{prop}[Note Proposition 4.2 in \cite{CCS05BM}]\label{cong3}
For $i=1,2,3,4,5$ if $f_{i}\in |f_{i}|$ is general, then $\varphi^{*}(f_{i})$ is connected, hence $|F_{i}|$ induces a genus $3$ fibration $u_{i}\colon S\longrightarrow \mathbb{P}^{1}$.
\end{prop}
\begin{proof}
We get a similar proof from Proposition 4.2 in \cite{CCS05BM}.
\end{proof}
\begin{prop}[Note Proposition 4.4 in \cite{CCMSS0} and Proposition 4.3 in \cite{CCS05BM}]\label{finampirr}
The bicanonical morphism $\varphi$ is finite, the canonical divisor $K_{S}$ is ample, and for $i=1,2,3,4,5$, the pull-back of an irreducible curve in $|f_{i}|$ is also irreducible (possibly non-reduced).
\end{prop}
\begin{proof}
Noether's formula gives $\chi_{top}(S)=8$ by $\chi(\mathcal{O}_{S})=1$ and $K_{S}^{2}=4$. Then we get $h^{2}(S,\mathbb{Q})=h^{2}(\Sigma,\mathbb{Q})=6$ by $p_{g}(S)=q(S)=0$. So $\varphi^{*}\colon H^{2}(\Sigma,\mathbb{Q})\longrightarrow H^{2}(S,\mathbb{Q})$ is an isomorphism preserving the intersection form up to multiplication by $4$.\ Therefore $\varphi$ is finite and $K_{S}$ is ample.
For an irreducible curve $f_{1}\in|f_{1}|$, if $\varphi^{*}(f_{1})$ is reducible, then it contains an irreducible component $C$ with $C^{2}<0$. Put $D=C-\frac{C\varphi^{*}(e_{1})}{4}\varphi^{*}(f_{1})$.\ Then $D^{2}=C^{2}<0$, and $D\varphi^{*}(e_{1})=0$. And $\left(C-\frac{C\varphi^{*}(e_{1})}{4}\varphi^{*}(f_{1})\right)\varphi^{*}(e_{i})=0$ for $i=2,3,4,5$ since $e_{i}$ is contained in one fiber of the pencil $|f_{1}|$.\ We obtain that the intersection matrix of $\varphi^{*}(l),\ C-\frac{C\varphi^{*}(e_{1})}{4}\varphi^{*}(f_{1}),\ \varphi^{*}(e_{i}),\ i=1,2,3,4,5$ has rank $7$.\ But it is a contradiction because $h^{2}(S,\mathbb{Q})=6$. Thus $\varphi^{*}(f_{1})$ is irreducible. We can similarly prove the other cases.
\end{proof}
\begin{lemma}[Lemma 4.4 in \cite{CCS05BM}]\label{Mm}
Let $\phi\colon T'\longrightarrow T$ be a finite morphism between two smooth surfaces. Let $h$ be a divisor on $T$ such that $|\phi^{*}(h)|=\phi^{*}(|h|)$. Let $M$ be a divisor on $T'$ such that the linear system $|M|$ has no fixed part. Suppose that $\phi^{*}(h)-M$ is effective. Then there exists a divisor $m\subset T$ such that $|M|=\phi^{*}(|m|)$. Furthermore the line bundle $h-m$ is effective.
\end{lemma}
\begin{lemma}[Note Lemma 4.5 in \cite{CCS05BM}]\label{ne1e}
There does not exist a divisor $d$ on $\Sigma$ such that $h^{0}(\Sigma,\mathcal{O}_{\Sigma}(d))>1$ and that the line bundle $-K_{\Sigma}-2d$ is effective.
\end{lemma}
\begin{proof}
Suppose that there exists such a divisor $d$. Assume $d\equiv al-\sum_{i=1}^{5}b_{i}e_{i}$ for some integers $a,\ b_{i},\ i=1,2,3,4,5$. Then $a\le1$ because $-K_{\Sigma}-2d$ is effective. On the other hand, $a\ge 1$ by the condition $h^{0}(\Sigma,\mathcal{O}_{\Sigma}(d))>1$. Thus $a=1$, and at most one of $b_{1},\cdots,b_{5}$ is positive. Then the line bundle $-K_{\Sigma}-2d\equiv l-\sum_{i=1}^{5}(1-2b_{i})e_{i}$ cannot be effective since there is no line on $\mathbb{P}^{2}$ passing through $3$ points in general position.
\end{proof}
We prove the following lemma as one of Lemma 4.6 in \cite{CCS05BM} since we have Lemmas \ref{Mm} and \ref{ne1e}.
\begin{lemma}[Note Lemma 4.6 in \cite{CCS05BM}]\label{2Deff1}
Let $D\subset S$ be a divisor. If there exists a divisor $d$ on $\Sigma$ such that
$(i)$ $\varphi^{*}(d)\equiv 2D;$
$(ii)$ the line bundle $-K_{\Sigma}-d$ is effective,\\
then $h^{0}(S,\mathcal{O}_{S}(D))\le 1$.
\end{lemma}
\begin{proof}
Suppose $h^{0}(S,\mathcal{O}_{S}(D))>1$. We may write $|D|=|M|+F$ where $|M|$ is the moving part and $F$ is the fixed part. Since $|2K_{S}|=|\varphi^{*}(-K_{\Sigma})|=\varphi^{*}(|-K_{\Sigma}|)$ and $\varphi^{*}(-K_{\Sigma})-M>\varphi^{*}(-K_{\Sigma}-d)$ is effective, there is a divisor $m$ on $\Sigma$ such that $\varphi^{*}(|m|)=|M|$ by Lemma \ref{Mm}. Choose an element $M_{1}\in |M|$ and an effective divisor $N$ on $S$ such that $2M_{1}+2F+N\equiv \varphi^{*}(-K_{\Sigma})$. We find $h\in|-K_{\Sigma}|$ and $m_{1}\in|m|$ such that $2M_{1}+2F+N=\varphi^{*}(h)$ and $2M_{1}=\varphi^{*}(2m_{1})$. Thus we conclude that $h-2m_{1}$ is effective. It is a contradiction by Lemma \ref{ne1e}.
\end{proof}
Now we investigate the pull-backs of ($-1$)-curves on the surface $\Sigma$ via the bicanonical morphism $\varphi\colon S\longrightarrow \Sigma \subset \mathbb{P}^{4}$. There are sixteen $(-1)$-curves on $\Sigma$ which are $e_{i},\ e'_{j},\ g_{j},\ h_{j},$ $\gamma$ and $\delta$ for $i=1,2,3,4,5$ and $j=1,2,3$.
\begin{lemma}[Lemma 5.1 in \cite{CCMSS0}]\label{twocases}
Let $C\subset\Sigma$ be a $(-1)$-curve. Then we have either
$(i)$ $\varphi^{*}(C)$ is a reduced smooth rational $(-4)$-curve$;$ or
$(ii)$ $\varphi^{*}(C)=2E$ where $E$ is an irreducible curve with $E^{2}=-1,\ K_{S}E=1$.
\end{lemma}
\begin{lemma}\label{three-4}
There are at most three disjoint $(-4)$-curves on $S$.
\end{lemma}
\begin{proof}
Let $r$ be the cardinality of a set of smooth disjoint rational curves with self-intersection number $-4$ on $S$. Then
\[\frac{25}{12}r\le c_{2}(S)-\frac{1}{3}K_{S}^{2}=\frac{20}{3}\]
by \cite{mnqs}
which is $r\le 3$.
\end{proof}
\begin{rmk}\label{cremona}
\rm{We consider an exceptional curve $e$ on $\Sigma$ which is different from $\delta$ and is not an $\rho$-exceptional curve (i.e.\ $\rho(e)$ is not a point in $\mathbb{P}^{2}$). Then we can find an automorphism $\tau$ on $\Sigma$ induced by a Cremona transformation with respect to 3 points among 5 points $p_{1},p_{2},p_{3},p_{4},p_{5}$ in general position on $\mathbb{P}^{2}$ such that an exceptional curve $\tau(e)$ on $\Sigma$ is different from $\delta$ and is an $\rho$-exceptional curve.}
\end{rmk}
\begin{prop}\label{theretwo}
There exist at least two disjoint $(-1)$-curves different from $\delta$ on $\Sigma$ such that those pull-backs by the bicanonical morphism $\varphi$ are $(-4)$-curves.
\end{prop}
\begin{proof}
Let $R$ be the ramification divisor of the bicanonical morphism $\varphi\colon S\longrightarrow \Sigma \subset \mathbb{P}^{4}$. By Hurwitz formula $K_{S}\equiv \varphi^{*}(K_{\Sigma})+R$, we get $R\equiv K_{S}+\varphi^{*}(-K_{\Sigma})\equiv3K_{S}$. Because $\varphi^{*}(-K_{\Sigma})\equiv2K_{S}$ since the image $\Sigma$ of $\varphi$ is a del Pezzo surface of degree $4$ in $\mathbb{P}^{4}$ (Note Notations \ref{notdel} and \ref{BDBM1}).
We assume $\varphi^{*}(e_{i})=2E_{i}$, $\varphi^{*}(e'_{j})=2E'_{j}$, $\varphi^{*}(g_{j})=2G_{j}$, $\varphi^{*}(h_{j})=2H_{j}$ for $i=1,2,3,4,5$ and $j=1,2,3$, and $\varphi^{*}(\gamma)=2\Gamma$. Put \[R_{1}:= R-\left(\sum_{i=1}^{3}(E_{i}+E'_{i}+G_{i}+H_{i})+E_{4}+E_{5}+\Gamma\right).\]
It implies $2R_{1}\equiv \varphi^{*}(-l)$. By the assumption, $\varphi$ is ramified along reduced curves $E_{i},\ E'_{j},\ G_{j},\ H_{j}$ for $i=1,2,3,4,5$ and $j=1,2,3$, and $\Gamma$. So $R_{1}$ is a nonzero effective divisor. But it is a contradiction because $0< (2R_{1})(2K_{S})=\varphi^{*}(-l)\varphi^{*}(-K_{\Sigma})<0$ since $\varphi$ is finite and $K_{S}$ is ample by Proposition \ref{finampirr}.\ Thus by Lemma \ref{twocases} and Remark \ref{cremona} we may consider $\varphi^{*}(e_{5})=E_{5}$, where $E_{5}$ is a reduced smooth rational $(-4)$-curve.
Again, we assume $\varphi^{*}(e_{i})=2E_{i}$, $\varphi^{*}(e'_{j})=2E'_{j}$, $\varphi^{*}(g_{j})=2G_{j}$, $\varphi^{*}(h_{j})=2H_{j}$ for $i=1,2,3,4$ and $j=1,2,3$, and $\varphi^{*}(\gamma)=2\Gamma$. Put \[R_{2}:=R-\left(\sum_{i=1}^{3}(E_{i}+E'_{i}+G_{i}+H_{i})+E_{4}+\Gamma\right).\]
It induces $2R_{2}\equiv \varphi^{*}(-l+e_{5})$. Then the nonzero divisor $R_{2}$ is effective. Because $\varphi$ is ramified along reduced curves $E_{i},\ E'_{j},\ G_{j},\ H_{j}$ for $i=1,2,3,4$ and $j=1,2,3$, and $\Gamma$ from the assumption. It gives a contradiction because $0< (2R_{2})(2K_{S})=\varphi^{*}(-l+e_{5})\varphi^{*}(-K_{\Sigma})<0$ since $\varphi$ is finite and $K_{S}$ is ample by Proposition \ref{finampirr}. By Lemma \ref{twocases} we get an $(-1)$-curve $e$ with an $(-4)$-curve $\varphi^{*}(e)$ among $e_{i}$, $e'_{j},\ g_{j},\ h_{j}$ for $i=1,2,3,4$ and $j=1,2,3$, and $\gamma$.
We have two $(-1)$-curves $e$ and $e_{5}$ different from $\delta$ on $\Sigma$ such that $\varphi^{*}(e)$ and $\varphi^{*}(e_{5})$ are $(-4)$-curves on $S$. We verify that $e$ and $e_{5}$ are disjoint. By Remark \ref{cremona} we consider that the $(-1)$-curve $e$ is $\gamma$. It is enough to assume $\varphi^{*}(e_{i})=2E_{i}$, $\varphi^{*}(e'_{j})=2E'_{j}$, $\varphi^{*}(g_{j})=2G_{j}$, $\varphi^{*}(h_{j})=2H_{j}$ for $i=1,2,3,4$ and $j=1,2,3$. Then put
\[R_{3}:=R-\left(\sum_{i=1}^{3}(E_{i}+E'_{i}+G_{i}+H_{i})+E_{4}\right).\]
We get $2R_{3}\equiv \varphi^{*}(-e_{4})$.\ The nonzero divisor $R_{3}$ is effective.\ Because $\varphi$ is ramified along reduced curves $E_{i},\ E'_{j},\ G_{j},\ H_{j}$ for $i=1,2,3,4$ and $j=1,2,3$ from the assumption. It contradicts because $0< (2R_{3})(2K_{S})=\varphi^{*}(-e_{4})\varphi^{*}(-K_{\Sigma})<0$ since $\varphi$ is finite and $K_{S}$ is ample by Proposition \ref{finampirr}.
\end{proof}
\begin{prop}\label{c1c2c3}
There do not exist three $(-1)$-curves $C_{1},C_{2}$ and $C_{3}$ different from $\delta$ on $\Sigma$ satisfying
$(i)$ $C_{i}\cap C_{j}=\varnothing$ for distinct $i,j\in\{1,2,3\};$
$(ii)$ $\varphi^{*}(C_{i})$ for $i=1,2,3$ are $(-4)$-curves.
\end{prop}
\begin{proof}
Assume that the proposition is not true.\ We may consider $C_{1}=e_{2},\ C_{2}=e_{4}$ and $C_{3}=e_{5}$ by Remark \ref{cremona}. Then $E_{2}=\varphi^{*}(e_{2})$, $E_{4}=\varphi^{*}(e_{4})$ and $E_{5}=\varphi^{*}(e_{5})$ are reduced smooth rational $(-4)$-curves. And $\varphi^{*}(e'_{2})=2E'_{2}$ with ${E'_{2}}^{2}=-1,\ K_{S}E'_{2}=1$ by Lemmas \ref{twocases} and \ref{three-4}. Then
\begin{align*}
2K_{S} &\equiv\varphi^{*}\left(3l-\sum_{i=1}^{5}e_{i}\right) \equiv \varphi^{*}(e'_{2}+2f_{2}+e_{2}-e_{4}-e_{5})\\
&\equiv 2E'_{2}+2F_{2}+E_{2}-E_{4}-E_{5}.
\end{align*}
We get $2(K_{S}-E'_{2}-F_{2}+E_{4}+E_{5})\equiv E_{2}+E_{4}+E_{5}$. We consider a double cover $\pi\colon Y\longrightarrow S$ branched over $E_{2},\ E_{4}$ and $E_{5}$. By the formula in Subsection \ref{DC} we obtain
\[K_{Y}^{2}=2(2K_{S}-E'_{2}-F_{2}+E_{4}+E_{5})^{2}=14,\]
\[\chi(\mathcal{O}_{Y})=2+\frac{(K_{S}-E'_{2}-F_{2}+E_{4}+E_{5})\cdot(2K_{S}-E'_{2}-F_{2}+E_{4}+E_{5})}{2}=2,\]
\begin{align*}
p_{g}(Y) & =h^{0}(S,\mathcal{O}_{S}(2K_{S}-E'_{2}-F_{2}+E_{4}+E_{5}))\\
& =h^{0}(S,\mathcal{O}_{S}(\varphi^{*}(-K_{\Sigma}-e'_{2}-f_{2}+e_{4}+e_{5})+E'_{2}))\\
& =h^{0}(S,\mathcal{O}_{S}(\varphi^{*}(l)+E'_{2}))\ge3.
\end{align*}
Thus we have $q(Y)\ge 2$, and so $K_{Y}^{2}<16(q(Y)-1)$.\ It is a contradiction by Proposition \ref{inequiKq}.
\end{proof}
\begin{assum}\label{ass}
\em{From Lemma \ref{twocases}, Propositions \ref{theretwo} and \ref{c1c2c3} we may assume that $\varphi^{*}(e_{4})=E_{4}$ and $\varphi^{*}(e_{5})=E_{5}$ by Remark \ref{cremona}, where $E_{4}$ and $E_{5}$ are $(-4)$-curves, $\varphi^{*}(e_{i})=2E_{i},\ \varphi^{*}(e'_{i})=2E'_{i},\ \varphi^{*}(g_{j})=2G_{j}$ and $\varphi^{*}(h_{j})=2H_{j}$ for $i=1,2,3$ and $j=1,2$.}
\end{assum}
\begin{nota}\label{etE}
{\rm{$2(E_{j}+E'_{k})$ and $2(E'_{j}+E_{k})$ are two double fibers of $u_{i}\colon S\longrightarrow \mathbb{P}^{1}$ induced by $|F_{i}|$ where $\{i,j,k\}=\{1,2,3\}$. Set $\eta_{i}\equiv (E_{j}+E'_{k})-(E'_{j}+E_{k})$ where $\{i,j,k\}=\{1,2,3\}$, and set $\eta\equiv K_{S}-\sum_{i=1}^{3}(E_{i}+E'_{i})$. Then $2\eta\equiv -E_{4}-E_{5}$, and by Lemma 8.3, Chap. III in \cite{CCS} $\eta_{i}\not\equiv0$ for $i=1,2,3$. It implies that $\eta_{i},\ i=1,2,3$ are torsions of order $2$.
}}
\end{nota}
\begin{prop}[Note Proposition 5.9 ($resp.\ 4.13$) in \cite{CCMSS0} ($resp.\ $\cite{CCS05BM})]\label{FijKi}
For a general curve $F_{i}\in |F_{i}|,\ i=1,2,3,$ \[F_{j}|_{F_{i}}\equiv K_{F_{i}}\ \textrm{if}\ i\neq j.\]
\end{prop}
\begin{proof}
We verify that $F_{2}|_{F_{1}}\equiv K_{F_{1}}$. Since $2K_{S}\equiv F_{1}+2(2E_{1}+E'_{3}+E'_{2})-E_{4}-E_{5}$, we get \[2(K_{S}-(2E_{1}+E'_{3}+E'_{2})+E_{4}+E_{5})\equiv F_{1}+E_{4}+E_{5}.\]
It gives a double cover $\pi\colon Y\longrightarrow S$ branched over $F_{1},\ E_{4}$ and $E_{5}$. We have
\begin{align*}
\chi(\mathcal{O}_{Y})=3
\end{align*}
and
\begin{align*}
p_{g}(Y) & =h^{0}(S,\mathcal{O}_{S}(F_{1}+2E_{1}+E'_{3}+E'_{2}))\\
& =h^{0}(S, \mathcal{O}_{S}(\varphi^{*}(f_{1}+e_{1})+E'_{3}+E'_{2}))\\
& =h^{0}(S, \mathcal{O}_{S}(\varphi^{*}(l)+E'_{3}+E'_{2}))\ge 3,
\end{align*}
thus $q(Y)\ge 1$. By Proposition \ref{albenese} the Albanese pencil of $Y$ is the pull-back of a pencil $|F|$ of $S$ such that $\pi^{*}(F)$ is disconnected for a general element $F$ in $|F|$. Thus $FF_{1}=0$ because $\pi$ is branched over $F_{1}$. It means $|F|=|F_{1}|$. For a general element $F_{1}\in |F_{1}|$, $\pi^{*}(F_{1})$ is an unramified double cover of $F_{1}$ given by the relation $2(K_{S}-(2E_{1}+E'_{3}+E'_{2})+E_{4}+E_{5})|_{F_{1}}$. Since $\pi^{*}(F_{1})$ is disconnected, we get
\begin{align*}
(K_{S}-(2E_{1}+E'_{3}+E'_{2})+E_{4}+E_{5})|_{F_{1}} & \equiv (K_{S}-2E_{1})|_{F_{1}}\\
& \equiv (K_{S}-2E_{1}-2E'_{3})|_{F_{1}}\\
& \equiv (K_{S}-F_{2})|_{F_{1}}
\end{align*}
is trivial. Thus $F_{2}|_{F_{1}}\equiv K_{F_{1}}$.
\end{proof}
\begin{lemma}\label{invariants} We have:\\
$(i)$ $\chi(\mathcal{O}_{S}(K_{S}+\eta+\eta_{i}))=-1,\ h^{2}(S,\mathcal{O}_{S}(K_{S}+\eta+\eta_{i}))=0;$\\
$(ii)$ $h^{0}(F_{i},\mathcal{O}_{F_{i}}(K_{F_{i}}+\eta|_{F_{i}}))\le 2;$\\
$(iii)$ $h^{1}(S,\mathcal{O}_{S}(\eta-\eta_{i}))=1.$
\end{lemma}
\betagin{proof}
$(i)$ By Riemann-Roch theorem, $\chi(S,\mathcal{O}_{S}(K_{S}+\eta+\eta_{i}))=-1$ since $2\eta\equiv -E_{4}-E_{5}$. Moreover, $h^{0}(S,\mathcal{O}_{S}(-\eta+\eta_{i}))=0$ because $2(-\eta+\eta_{i})\equiv E_{4}+E_{5}$ and $E_{4},\ E_{5}$ are reduced $(-4)$-curves. It implies $h^{2}(S,\mathcal{O}_{S}(K_{S}+\eta+\eta_{i}))=0$ by Serre duality.
$(ii)$ We may assume $i=1$. By $\eta_{1}|_{F_{1}}\equiv \mathcal{O}_{F_{1}}$ we have an exact sequence
\[0\longrightarrow \mathcal{O}_{S}(K_{S}+\eta+\eta_{1})\longrightarrow \mathcal{O}_{S}(K_{S}+\eta+\eta_{1}+F_{1})\longrightarrow \mathcal{O}_{F_{1}}(K_{F_{1}}+\eta|_{F_{1}})\longrightarrow 0.\]
Then we get
\begin{align*}
h^{0}(F_{1},\mathcal{O}_{F_{1}}(K_{F_{1}}+\eta|_{F_{1}})) \le &\ h^{0}(S,K_{S}+\eta+\eta_{1}+F_{1})-h^{0}(S,K_{S}+\eta+\eta_{1})\\
& +h^{1}(S,K_{S}+\eta+\eta_{1})\\
= &\ h^{0}(S,K_{S}+\eta+\eta_{1}+F_{1})-\chi(\mathcal{O}_{S}(K_{S}+\eta+\eta_{1}))\\
& +h^{2}(S,K_{S}+\eta+\eta_{1})\\
= &\ h^{0}(S,\mathcal{O}_{S}(K_{S}+\eta+\eta_{1}+F_{1}))+1.
\end{align*}
Note $K_{S}+\eta+\eta_{1}+F_{1}\equiv 2K_{S}-(E_{1}+E'_{1})$. Since the linear system $|2K_{S}|$ embeds $E_{1}+E'_{1}$ as a pair of skew lines in $\mathbb{P}^{4}$, we have $h^{0}(S,\mathcal{O}_{S}(2K_{S}-(E_{1}+E'_{1})))=1$. Hence $h^{0}(F_{1},\mathcal{O}_{F_{1}}(K_{F_{1}}+\eta))\le 2$.
$(iii)$ We have $2(\eta-\eta_{i})\equiv -E_{4}-E_{5}$. It implies $h^{0}(S,\mathcal{O}_{S}(\eta-\eta_{i}))=0$. Thus $-h^{1}(S,\mathcal{O}_{S}(\eta-\eta_{i}))+h^{2}(S,\mathcal{O}_{S}(\eta-\eta_{i}))=1$ by Riemann-Roch theorem. We show $h^{0}(S,\mathcal{O}_{S}(K_{S}-\eta+\eta_{1}))=2$ by Serre duality. Indeed, since $E_{4},\ E_{5}$ are rational $(-4)$-curves and $(2K_{S}+E_{4}+E_{5})(E_{4}+E_{5})=0$, we obtain an exact sequence
\[ 0\longrightarrow \mathcal{O}_{S}(2K_{S}) \longrightarrow \mathcal{O}_{S}(2K_{S}+E_{4}+E_{5})\longrightarrow \mathcal{O}_{E_{4}\cup E_{5}} \longrightarrow 0.\]
The canonical divisor $K_{S}$ is ample in Proposition \ref{finampirr}. It follows $h^{0}(S,\mathcal{O}_{S}(2K_{S}))=5$ and $h^{1}(S,\mathcal{O}_{S}(2K_{S}))=0$ by Kodaira vanishing theorem and Riemann-Roch theorem. Thus the long cohomology sequence induces $h^{0}(S,\mathcal{O}_{S}(2K_{S}+E_{4}+E_{5}))=7$. Moreover, since $h^{0}(\Sigma,\mathcal{O}_{\Sigma}(3l-e_{1}-e_{2}-e_{3}))=7$ and $2(K_{S}-\eta+\eta_{1})\equiv 2K_{S}+E_{4}+E_{5}\equiv \varphi^{*}(3l-e_{1}-e_{2}-e_{3})$, we get $|2(K_{S}-\eta+\eta_{1})|=\varphi^{*}(|3l-e_{1}-e_{2}-e_{3}|)$. Also, $h^{0}(S,\mathcal{O}_{S}(K_{S}-\eta+\eta_{1}))=h^{0}(S,\mathcal{O}_{S}(F_{1}+E'_{1}+E_{1}))\ge2$ because $K_{S}-\eta+\eta_{1}\equiv F_{1}+E'_{1}+E_{1}$.\ We consider $|K_{S}-\eta+\eta_{1}|=|M|+F$ where $|M|$ is the moving part and $F$ is the fixed part.\ By Lemma \ref{Mm} there is a divisor $m$ on $\Sigma$ such that $|M|=\varphi^{*}(|m|)$. Then $3l-e_{1}-e_{2}-e_{3}-2m$ is effective by arguing as in the proof of Lemma \ref{2Deff1}. So $m\equiv f_{i}$ for some $i\in\{1, 2, 3\}$. Hence $h^{0}(S,\mathcal{O}_{S}(K_{S}-\eta+\eta_{1}))=h^{0}(S,\mathcal{O}_{S}(M))=h^{0}(\Sigma, \mathcal{O}_{\Sigma}(f_{i}))=2$.
\end{proof}
\begin{cor}[Corollary 4.15 in \cite{CCS05BM}]\label{etaij}
For a general curve $F_{i}\in |F_{i}|,\ i=1,2,3$ we have
\[(-\eta+\eta_{j})|_{F_{i}}\equiv \mathcal{O}_{F_{i}}\ \textrm{if}\ i\neq j;\]
\[ \eta_{i}|_{F_{i}}\equiv \mathcal{O}_{F_{i}};\ (-\eta+\eta_{i})|_{F_{i}}\not\equiv\mathcal{O}_{F_{i}}.\]
\end{cor}
\begin{proof}
By Lemma \ref{FijKi}
\begin{align*}
\eta|_{F_{1}}\equiv (K_{S}-(E_{1}+E'_{1}))|_{F_{1}} & \equiv K_{F_{1}}-(E_{1}+E'_{1})|_{F_{1}}\equiv (F_{2}-(E_{1}+E'_{1}))|_{F_{1}}\\
& \equiv (2(E_{1}+E'_{3})-(E_{1}+E'_{1}))|_{F_{1}}\equiv (E_{1}-E'_{1})|_{F_{1}}.
\end{align*}
Since $\eta_{2}|_{F_{1}}\equiv \eta_{3}|_{F_{1}}\equiv(E_{1}-E'_{1})|_{F_{1}}$ we get $(-\eta+\eta_{j})|_{F_{i}}\equiv\mathcal{O}_{F_{i}}$ for $i\neq j$. The definitions of $\eta_{i}$ and $F_{i}$ imply $\eta_{i}|_{F_{i}}\equiv \mathcal{O}_{F_{i}}$. Moreover, if we assume $\eta|_{F_{i}}\equiv \mathcal{O}_{F_{i}}$ then $h^{0}(F_{i},\mathcal{O}_{F_{i}}(K_{F_{i}}+\eta|_{F_{i}}))=h^{0}(F_{i},\mathcal{O}_{F_{i}}(K_{F_{i}}))=3$ because the curve $F_{i}$ has genus $3$ by Proposition \ref{cong3}.\ It induces a contradiction by Lemma \ref{invariants} $(ii)$.
\end{proof}
\section{Proof of Theorem \ref{mainthm}}\label{proofmainthm}
We provide the characterization of Burniat surfaces with $K^{2}=4$ and of non nodal type.\ We use the notations in Notations \ref{BDBM1} and \ref{etE}, and we work with Assumption \ref{ass}. We follow the approaches in \cite{CCMSS0, CCS05BM}.
\begin{lemma}[Note Lemma 5.1 in \cite{CCS05BM}]\label{threefibers}
Let $u\colon S\longrightarrow \mathbb{P}^{1}$ be a fibration such that $E_{4}$ and $E_{5}$ are contained in fibers.\ Then $u$ is induced by one of the pencils $|F_{i}|,\ i=1,2,3$.
\end{lemma}
\begin{proof}
We argue as in the proof of Lemma 5.7 in \cite{CCMSS0}.
\end{proof}
\begin{rmk}
\em{In Lemma \ref{threefibers} $E_{4}$ and $E_{5}$ are not contained in the same fiber of $u$ because $u$ is induced by one of $|F_{i}|,\ i=1,2,3$.}
\end{rmk}
\textit{Proof of Theorem \ref{mainthm}.}
Let $\pi_{i}\colon Y_{i}\longrightarrow S$ be the double cover branched over $E_{4}$ and $E_{5}$ given by the relation $2(-\eta+\eta_{i})\equiv E_{4}+E_{5}$. By Corollary \ref{etaij} $\eta_{i}\not\equiv \eta_{j}$ for $i\neq j$. So $\pi_{i}$ is different from $\pi_{j}$. Serre duality and the formula for $q(Y)$ in Subsection \ref{DC} imply $q(Y_{i})=h^{1}(S,\mathcal{O}_{S}(\eta-\eta_{i}))=1$ from Lemma \ref{invariants} $(iii)$. Let $\alpha_{i}\colon Y_{i}\longrightarrow C_{i}$ be the Albanese pencil where $C_{i}$ is an elliptic curve. By Proposition \ref{albenese} there exists a fibration $h_{i}\colon S\longrightarrow \mathbb{P}^{1}$ and a double cover $\pi'_{i}\colon C\longrightarrow \mathbb{P}^{1}$ such that $\pi'_{i}\circ \alpha_{i}=h_{i}\circ\pi_{i}$. Since $\pi_{i}^{-1}(E_{4})$ and $\pi_{i}^{-1}(E_{5})$ are rational curves they are contained in fibers of $\alpha_{i}$. So $E_{4}$ and $E_{5}$ are contained in fibers of $h_{i}$. Thus $h_{i}=u_{s_{i}}$ for some $s_{i}\in\{1,2,3\}$ by Lemma \ref{threefibers}. We obtain the following commutative diagram:
\begin{equation*}\label{isqd}
\xymatrix{
Y_{i} \ar[r]^{\pi_{i}} \ar[d]_{\alpha_{i}} & S \ar[d]^{u_{s_{i}}}\\
C_{i} \ar[r]^{\pi'_{i}} & \mathbb{P}^{1} }
\end{equation*}
By Corollary \ref{etaij} $(-\eta+\eta_{i})|_{F_{i}}\not\equiv \mathcal{O}_{F_{i}}$. It implies that a general curve in $\pi^{*}_{i}(|F_{i}|)$ is connected. Hence $s_{i}\neq i$.
We divide the proof into six steps.
\paragraph{\textbf{Step $1\colon$} \it{The fibration $u_{i}\colon S\longrightarrow \mathbb{P}^{1},\ i=1,2,3$ has exactly two double fibers.}}
\textrm{ }
It is enough to show that $u_{3}\colon S\longrightarrow \mathbb{P}^{1}$ has at most two double fibers because $u_{3}$ already has two different double fibers, $2(E_{1}+E'_{2})$ and $2(E_{2}+E'_{1})$. Since $s_{3}\neq 3$ we may consider $u_{s_{3}}=u_{1}$.\ Assume that $u_{3}$ has one additional double fiber $2M$ aside from $2(E_{1}+E'_{2})$ and $2(E_{2}+E'_{1})$.\ Then $M$ is reduced and irreducible by Proposition \ref{finampirr} because $ME_{3}=1$ and $\varphi(M)$ is irreducible. So $\varphi$ is ramified along $M$ because the curve in the pencil $|f_{3}|$ supported on $\varphi(M)$ is reduced.
Let $R$ be the ramification divisor of the bicanonical morphism $\varphi\colon S\longrightarrow \Sigma \subset \mathbb{P}^{4}$. We have $\varphi^{*}(-K_{\Sigma})\equiv 2K_{S}$ since the image $\Sigma$ of $\varphi$ is a del Pezzo surface of degree $4$ in $\mathbb{P}^{4}$ (See Notations \ref{notdel} and \ref{BDBM1}). It implies $R\equiv K_{S}+\varphi^{*}(-K_{\Sigma})\equiv 3K_{S}$ by Hurwitz formula $K_{S}\equiv \varphi^{*}(K_{\Sigma})+R$. Put $R_{0}:=\sum_{i=1}^{3}(E_{i}+E'_{i})+G_{1}+G_{2}+H_{1}+H_{2}+M$. By Assumption \ref{ass} $\varphi$ is ramified along $E_{i},\ E'_{i},\ G_{j}$ and $H_{j}$ for $i=1,2,3$ and $j=1,2$. It follows $R_{0}\le R$. So we get a nonzero effective divisor $E:=2(R-R_{0})\equiv F_{3}-E_{4}-E_{5}$. However, it induces a contradiction because $0<EK_{S}=(F_{3}-E_{4}-E_{5})K_{S}=0$ since $K_{S}F_{3}=4$ by Proposition \ref{cong3}, $E_{4}$ and $E_{5}$ are $(-4)$-curves and $K_{S}$ is ample by Proposition \ref{finampirr}.
Similarly we get that $u_{1},\ u_{2}$ each has exactly two double fibers.
\paragraph{\textbf{Step $2\colon$} \it{$(s_{1}\ s_{2}\ s_{3})$ is a cyclic permutation.}}
\textrm{ }
Since $s_{i}\neq i$ we need $s_{i}\neq s_{j}$ if $i\neq j$.\ We verify $s_{1}\neq s_{2}$. Otherwise, it is $s_{1}=s_{2}=3$, and $\alpha_{1}\colon Y_{1}\longrightarrow C_{1}$ ($resp.\ \alpha_{2}\colon Y_{2}\longrightarrow C_{2}$) arises in the Stein factorization of $u_{3}\circ \pi_{1}$ ($resp.\ u_{3}\circ\pi_{2}$). We have the following commutative diagram:
\begin{equation*}
\xymatrix{
Y_{1} \ar[r]^{\pi_{1}} \ar[d]_{\alpha_{1}} & S \ar[d]^{u_{3}} & \ar[l]_{\pi_{2}} Y_{2} \ar[d]^{\alpha_{2}}\\
C_{1} \ar[r]^{\pi'_{1}} & \mathbb{P}^{1} & \ar[l]_{\pi'_{2}} C_{2}
}
\end{equation*}
For $i=1,2$ $Y_{i}$ coincides with the normalization of the fiber product $C_{i}\times_{\mathbb{P}^{1}} S$ since $\pi_{i}$ factors through the natural projection $C_{i}\times_{\mathbb{P}^{1}} S \longrightarrow S$ which is also of degree $2$. Thus $\pi'_{1}$ is different from $\pi'_{2}$.\ We denote $q_{1},\ q_{2},\ q_{3}=u_{3}(E_{4}),\ q_{4}=u_{3}(E_{5})$ as the branch points of $\pi'_{1}$. Then we find a branch point $q_{5}$ of $\pi'_{2}$ which is not branched over by $\pi'_{1}$. We have the fibers over the points $q_{i},\ i=1,2,5$ of $u_{3}$ are double fibers. It is a contradiction by \textbf{Step $1$}.
From now on we assume $s_{1}=2,\ s_{2}=3,\ s_{3}=1$, and for each $i\in\{1,2,3\}$ the fibration $u_{i}$ has exactly two double fibers.
\paragraph{\textbf{Step $3\colon$} \it{$\varphi^{*}(g_{3})$ and $\varphi^{*}(h_{3})$ are not reduced.}}
\textrm{ }
We have the following commutative diagram:
\begin{equation*}
\xymatrix{
Y_{2} \ar[r]^{\pi_{2}} \ar[d]_{\alpha_{2}} & S \ar[d]^{u_{3}}\\
C_{2} \ar[r]^{\pi'_{2}} & \mathbb{P}^{1} }
\end{equation*}
Let $W$ be $C_{2}\times_{\mathbb{P}^{1}} S$, and let $p\colon W\longrightarrow S$ be the natural projection which is a double cover. Assume that $G_{3}:=\varphi^{*}(g_{3})$ ($resp.\ H_{3}:=\varphi^{*}(h_{3})$) is reduced.\ Since $\pi'_{2}\colon C_{2}\longrightarrow \mathbb{P}^{1}$ is branched over the point $u_{3}(G_{3})=u_{3}(E_{4})$ ($resp.\ u_{3}(H_{3})=u_{3}(E_{5})$), the map $p$ is branched over $G_{3}$ ($resp.\ H_{3}$).\ Thus $W$ is normal along $p^{-1}(G_{3})$ ($resp.\ p^{-1}(H_{3})$). The map $\pi_{2}\colon Y_{2}\longrightarrow S$ is also branched over $G_{3}$ ($resp.\ H_{3}$) because $Y_{2}$ is the normalization of $W$. It is a contradiction because the branch locus of $\pi_{2}$ is $E_{4}\cup E_{5}$.
\paragraph{\textbf{Step $4\colon$}\textit{A general element $F_{i}\in|F_{i}|$ is hyperelliptic for each $i\in\{1,2,3\}$.}}
\textrm{ }
We verify that a general fiber $F_{2}\in|F_{2}|$ is hyperelliptic.\ Since the pull-back $\pi_{1}^{*}(F_{2})$ ($resp.\ \pi_{1}^{*}(F_{3})$) is disconnected, we may consider $\pi_{1}^{*}(F_{2})=\hat{F_{2}}+{\hat{F_{2}}}'$ ($resp.\ \pi_{1}^{*}(F_{3})=\hat{F_{3}}+{\hat{F_{3}}}'$) where the two components are disjoint.\ Then we get $\hat{F_{2}}\hat{F_{3}}=2$ by $F_{2}F_{3}=4$. Let $p\circ h\colon Y_{1}\longrightarrow C\longrightarrow \mathbb{P}^{1}$ be the Stein factorization of $u_{3}\circ \pi_{1}\colon Y_{1}\longrightarrow \mathbb{P}^{1}$. Since $\hat{F_{3}}$ is a fiber of $h\colon Y_{1}\longrightarrow C$, the restriction map $h|_{\hat{F_{2}}}\colon \hat{F_{2}}\longrightarrow C$ is a $2$-to-$1$ map by $\hat{F_{2}}\hat{F_{3}}=2$. Moreover, $C$ is rational because $h\colon Y_{1}\longrightarrow C$ is not Albanese map and $q(Y_{1})=1$. Thus $\hat{F_{2}}$ is hyperelliptic, and so is $F_{2}$.
\paragraph{\textbf{Step $5\colon$}\textit{$\varphi\colon S\longrightarrow \Sigma$ is a Galois cover with the Galois group $G\cong\mathbb{Z}_{2}\times\mathbb{Z}_{2}$.}}
\textrm{ }
For each $i\in\{1,2,3\}$, let $\gamma_{i}$ be the involution on $S$ induced by the involution on the general fiber $F_{i}$. Since $S$ is minimal, the maps $\gamma_{i}$ are regular maps. So the maps $\gamma_{i}$ belong to $G$ by Proposition \ref{FijKi}. Now it suffices to show $\gamma_{i}\neq \gamma_{j}$ if $i\neq j$. We show $\gamma_{2}\neq \gamma_{3}$. Consider the lifted involution $\hat{\gamma_{2}}\colon Y_{1}\longrightarrow Y_{1}$. The restriction of $\alpha_{1}$ identifies $\hat{F_{3}}/ \hat{\gamma_{2}}$ with $C_{1}$ by the construction in Step $4$. Thus we obtain $p_{g}(\hat{F_{3}}/\hat{\gamma_{2}})=1$, but $\hat{F_{3}}/ \hat{\gamma_{3}}\cong \mathbb{P}^{1}$. It means that $\gamma_{2}\neq \gamma_{3}$.
\paragraph{\textbf{Step $6\colon$}\ \textit{$S$ is a Burniat surface.}}
\textrm{ }
Denote by $B$ the branch divisor of $\varphi$. Then we get
\[-3K_{\Sigma}\equiv B \ge \sum_{i=1}^{3}(e_{i}+e'_{i}+g_{i}+h_{i})\equiv -3K_{\Sigma},\]
thus $B=\sum_{i=1}^{3}(e_{i}+e'_{i}+g_{i}+h_{i})$. Denote $B_{i}$ as the image of the divisorial part of the fixed locus of $\gamma_{i}$. We have $B=B_{1}+B_{2}+B_{3}$. By \textbf{Step $4$} we obtain $B_{i}=e_{i}+e'_{i}+g_{i+1}+h_{i+1}$ for each $i\in\{1,2,3\}$, where $g_{4}$ ($resp.\ h_{4}$) denotes $g_{1}$ ($resp.\ h_{1}$).
The theorem is proved with all steps.
$\Box$ \par
{\em Acknowledgements}. This work was supported by Shanghai Center for Mathematical Sciences. The author is very grateful to the referee for valuable suggestions and comments.
\begin{small}
\begin{thebibliography}{PPS}
\bibitem{CCS}
Barth W, Peters C, Van de Ven A. Compact complex surfaces. New York: Springer-Verlag, 1984
\bibitem{BSSBII}
Bauer I, Catanese F. Burniat surfaces. II. Secondary Burniat surfaces form three connected components of the moduli space. Invent Math, 2010, 180: 559--588
\bibitem{BSSBIII}
Bauer I, Catanese F. Burniat surfaces III: deformations of automorphisms and extended Burniat surfaces. Doc Math, 2013, 18: 1089--1136
\bibitem{BSSBEII}
Bauer I, Catanese F. Erratum to: Burniat surfaces II: secondary Burniat surfaces form three connected components of the moduli space. Invent Math, 2014, 197: 237--240
\bibitem{CMSGT}
Bombieri E. Canonical models of surfaces of general type. Inst Hautes \'Etudes Sci Publ Math, 1973, 42: 171--219
\bibitem{DGCSG0}
Mendes Lopes M. The degree of the generators of the canonical ring of surfaces of general type with $p_{g}=0$. Arch Math (Basel), 1997, 69: 435--440
\bibitem{CCMSS0}
Mendes Lopes M, Pardini R. A connected component of the moduli space of surfaces with $p_{g}=0$. Topology, 2001, 40: 977--991
\bibitem{NS03}
Mendes Lopes M, Pardini R. A new family of surfaces with $p_{g}=0$ and $K^{2}=3$. Ann Sci \'{E}cole Norm Sup IV, 2004, 37: 507--531
\bibitem{ESEN}
Mendes Lopes M, Pardini R. Enriques surfaces with eight nodes. Math Z, 2002, 241: 673--683
\bibitem{NCF9}
Mendes Lopes M, Pardini R. Numerical Campedelli surfaces with fundamental group of order 9. J Eur Math Soc (JEMS), 2008, 10: 457--476
\bibitem{SG06}
Mendes Lopes M, Pardini R. Surfaces of general type with $p_{g}=0,\ K^{2}=6$ and non birational bicanonical map. Math Ann, 2004, 329: 535--552
\bibitem{BS07}
Mendes Lopes M, Pardini R. The bicanonical map of surfaces with $p_{g}=0$ and $K^{2}\ge7$. Bull London Math Soc, 2001, 33: 265--274
\bibitem{BS07II}
Mendes Lopes M, Pardini R. The bicanonical map of surfaces with $p_{g}=0$ and $K^{2}\ge7.$ II. Bull London Math Soc, 2003, 35: 337--343
\bibitem{DBS0}
Mendes Lopes M, Pardini R. The degree of the bicanonical map of a surface with $p_{g}=0$. Proc Amer Math Soc, 2007, 135: 1279--1282
\bibitem{CDPG80}
Pardini R. The classification of double planes of general type with $K^{2}=8$ and $p_{g}=0$. J Algebra, 2003, 259: 95--118
\bibitem{mnqs}
Miyaoka Y. The maximal number of quotient singularities on surfaces with given numerical invariants.
Math Ann, 1984, 268: 159--171
\bibitem{ORS}
Nagata M. On rational surfaces. I. Irreducible curves of arithmetic genus 0 or 1. Mem Coll Sci Univ Kyoto Ser A Math, 1960, 32: 351--370
\bibitem{ACAV}
Pardini R. Abelian covers of algebraic varieties. J Reine Angew Math, 1991, 417: 191--213
\bibitem{VBR2L}
Reider I. Vector bundles of rank $2$ and linear systems on algebraic surfaces. Ann of Math (2), 1988, 127: 309--316
\bibitem{FABSTG}
Xiao G. Finitude de l'application bicanonique des surfaces de type g\'en\'eral. Bull Soc Math France, 1985, 113: 23--51
\bibitem{CCS05BM}
Zhang L. Characterization of a class of surfaces with $p_{g}=0$ and $K^{2}=5$ by their bicanonical maps. Manuscripta Math, 2011, 135: 165--181
\end{thebibliography}
\end{small}
\end{document} |
\begin{document}
\title [Attractors of $(a,b)$-continued fractions]{Structure of attractors for $(a,b)$-continued fraction transformations}
\author{Svetlana Katok}
\address{Department of Mathematics, The Pennsylvania State University,
University Park, PA 16802} \email{katok\[email protected]}
\author{Ilie Ugarcovici}
\address{Department of Mathematical Sciences,
DePaul University, Chicago, IL 60614} \email{[email protected]}
\subjclass[2000]{37E05, 11A55, 11K50}
\keywords{Continued fractions, attractor, natural extension, invariant measure}
\thanks{We are grateful to Don Zagier for helpful discussions and the Max Planck Institute for Mathematics in Bonn for its hospitality and support. The second author is partially supported by the NSF grant DMS-0703421}
\date{\today}
\begin{abstract} We study a two-parameter family of one-dimensional maps and related $(a,b)$-continued fractions suggested for consideration by Don Zagier. We prove
that the associated natural extension maps
have attractors with finite rectangular structure for the entire parameter set except for a Cantor-like set of one-dimensional Lebesgue zero measure that we completely describe.
We show that the structure of these attractors can be ``computed" from the data $(a,b)$, and that for a dense open set of parameters the Reduction theory conjecture holds, i.e. every point is mapped to the attractor after finitely many iterations. We also show how this theory can be applied to the study of invariant measures and ergodic properties of the associated Gauss-like maps.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}\label{s:1}
The standard generators $T(x)=x+1$, $S(x)=-1/x$ of the modular group $SL(2,\Z)$ were used classically to define piecewise continuous maps acting on the extended real line $\bar \mathbb R=\mathbb R\cup \{\infty\}$ that led to well-known continued fraction algorithms.
In this paper we present a general method of constructing such maps suggested by Don Zagier, and study their dynamical properties and associated generalized continued fraction transformations.
Let $\mathcal P$ be the two-dimensional parameter set
\[
\mathcal P=\{(a,b)\in \mathbb R^2\, |\, a\leq 0\leq b,\,b-a\geq 1,\,-ab\leq 1\}
\]
and consider the map $f_{a,b}:\bar\mathbb R\rightarrow\bar\mathbb R$ defined as
\begin{equation}\label{fab}
f_{a,b}(x)=\begin{cases}
x+1 &\text{ if } x< a\\
-\displaystyle\frac{1}{x} &\text{ if } a\le x<b\\
x-1 &\text{ if } x\ge b\,.
\end{cases}
\end{equation}
Using the first return map of $f_{a,b}$ to the interval $[a,b)$, denoted by $\hat f_{a,b}$, we introduce a two-dimensional family of
continued fraction algorithms and study their properties. We mention here three classical examples: the case $a=-1/2$, $b=1/2$ gives the ``nearest-integer" continued fractions considered first by Hurwitz in \cite{Hurwitz1}, the case $a=-1$, $b=0$ described in \cite{Z, K3} gives the ``minus" (backward) continued fractions, while the situation $a=-1$, $b=1$ was presented in \cite{S1, KU2} in connection with a method of symbolically coding the geodesic flow on the modular surface following Artin's pioneering work \cite{Artin}. Also, in the case $b-a=1$, the class of one-parameter maps $f_{b-1,b}$ with $b\in [0,1]$ is conceptually similar to the ``$\alpha$-transformations" introduced by Nakada in \cite{N1} and studied subsequently in \cite{LM, MCM, NN1,NN2, Sw}.
The main object of our study is a two-dimensional realization of the natural extension map of $f_{a,b}$, $F_{a,b}:\bar\mathbb R^2\setminus\Delta\rightarrow \bar\mathbb R^2\setminus\Delta$, $\Delta=\{(x,y)\in \bar\mathbb R^2| x=y\}$,
defined by
\begin{equation}\label{Fab}
F_{a,b}(x,y)=\begin{cases}
(x+1,y+1) &\text{ if } y<a\\
\left(-\displaystyle\frac{1}{x},-\displaystyle\frac{1}{y}\right) &\text{ if } a\le y<b\\
(x-1,y-1) &\text{ if } y\ge b\,.
\end{cases}
\end{equation}
The map $F_{a,b}$ is also called the \emph{reduction map}.
Numerical experiments led Don Zagier to conjecture that such a map $F_{a,b}$ has several interesting properties for all parameter pairs $(a,b)\in \mathcal P$ that we list under the
{\bf Reduction theory conjecture.}
\begin{enumerate}
\item The map $F_{a,b}$ possesses a global attractor set $D_{a,b}=\cap_{n=0}^\infty F^n(\bar\mathbb R^2\setminus\Delta)$
on which $F_{a,b}$ is essentially bijective.
\item The set $D_{a,b}$ consists of two (or one, in degenerate cases) connected components each having {\em finite rectangular structure}, i.e. bounded by non-decreasing step-functions with a finite
number of steps.
\item Every point $(x,y)$ of the plane ($x\ne y$) is mapped to $D_{a,b}$ after finitely many iterations of $F_{a,b}$.
\end{enumerate}
\begin{figure}
\caption{Attracting domain for Zagier's example: $a=-\frac{4}{5}$, $b=\frac{2}{5}$}
\label{don-a}
\end{figure}
Figure \ref{don-a} shows the computer picture of such a set $D_{a,b}$ with $a=-4/5$, $b=2/5$.
It is worth mentioning that the complexity of the domain $D_{a,b}$
increases as $(a,b)$ approach the line segment $b-a=1$ in $\mathcal P$, a situation fully analyzed in what follows.
The main result of this paper is the following theorem.
\begin{result} There exists an explicit one-dimensional Lebesgue measure zero, uncountable set $\mathcal E$ that lies on the diagonal boundary $b= a+ 1$ of $\mathcal P$ such that:
\begin{itemize}
\item[(a)] for all $(a,b)\in\mathcal P\setminus\mathcal E$ the map $F_{a,b}$ has an attractor $D_{a,b}$ satisfying properties (1) and (2) above;
\item[(b)] for an open and dense set in $\mathcal P\setminus\mathcal E$ property (3), and hence the Reduction theory conjecture, holds. For the rest of $\mathcal P\setminus\mathcal E$ property (3) holds for almost every point of $\bar\mathbb R^2\setminus \Delta$.
\end{itemize}
\end{result}
We point out that this approach gives explicit conditions for the set $D_{a,b}$ to have finite rectangular structure that are satisfied, in particular, for all pairs $(a,b)$ in the interior of the maximal parameter set $\mathcal P$.
At the same time, it provides an effective algorithm for finding $D_{a,b}$, independent of the complexity of its boundary (i.e., number of horizontal segments). The simultaneous properties satisfied by $D_{a,b}$, attracting set and bijectivity domain for $F_{a,b}$, is an essential feature that has not been exploited in earlier works. This approach makes the notions of reduced geodesic and dual expansion natural and transparent, with a potential for generalization to other Fuchsian groups. We remark that
for ``$\alpha$-transformations" \cite{N1,LM}, explicit descriptions of the domain of the natural extension maps have been obtained only for a subset of the parameter interval $[0,1]$ (where the boundary has low complexity).
The paper is organized as follows. In Section \ref{s:2} we develop the theory of $(a,b)$-continued fractions associated to the map $f_{a,b}$. In Section \ref{s:3} we prove that the natural extension map $F_{a,b}$ possesses a {\em trapping region}; it will be used in Section \ref{s:6} to study the attractor set for $F_{a,b}$.
In Section \ref{s:4} we further
study the map $f_{a,b}$. Although it
is discontinuous at $x=a$, $b$, one can look at two orbits of each of the discontinuity points. For generic $(a,b)$, these orbits meet after finitely many steps, forming a {\em cycle} that can be {\em strong} or {\em weak}, depending on whether or not the product over the cycle is equal to the identity transformation.
The values appearing in these cycles play a crucial role in the theory. Theorems \ref{b-cycle} and \ref{a-cycle} give necessary and sufficient conditions for $b$ and $a$ to have the {\em cycle property}. In Section \ref{s:5} we introduce the {\em finiteness condition} using the notion of {\em truncated orbits} and prove that under this condition the map $F_{a,b}$ has a bijectivity domain $A_{a,b}$ with a finite rectangular structure that can be ``computed" from the data $(a,b)$ (Theorem \ref{thm:recstructure}). In Section \ref{s:6} we define the attractor for the map $F_{a,b}$ by iterating the trapping region, and
identify it with the earlier constructed set $A_{a,b}$ assuming the finiteness condition (Theorem \ref{attractor}). In Section \ref{s:7}
we prove that the Reduction theory conjecture holds under the assumption that both $a$ and $b$ have the strong cycle property, and that under the finiteness condition property, (3) holds for almost every point of $\bar\mathbb R^2\setminus\Delta$.
In Section \ref{s:8} we prove that the finiteness condition holds for all $(a,b)\in\mathcal{P}$ except for
an uncountable set of one-dimensional Lebesgue measure zero that lies on the boundary $b=a+1$ of $\mathcal{P}$, and we present a complete description of this exceptional set. We conclude by showing that the set of $(a,b)\in\mathcal P$ where $a$ and $b$ have the strong cycle property is open and dense in $\mathcal P$.
And, finally, in Section \ref{s:9} we show how these results can be applied to the study of invariant measures and ergodic properties of the associated Gauss-like maps.
\section{Theory of $(a,b)$-continued fractions}\label{s:2}
Consider $(a,b)\in \mathcal{P}$. The map $f_{a,b}$ defines what we call \emph{$(a, b)$-continued fractions} using
a generalized integral part function $\lfloor x\rceil_{a,b}$ :
for any real $x$, let
\begin{equation}
\lfloor x\rceil _{a,b}=\begin{cases}
\lfloor x-a \rfloor &\text{if } x< a\\
0 & \text{if } a\le x<b\\
\lceil x-b \rceil & \text{ if } x\ge b\,,
\end{cases}
\end{equation}
where $\lfloor x\rfloor$ denotes the integer part of $x$ and $\lceil x\rceil=\lfloor x\rfloor+1$.
Let us remark that the first return map of $f_{a,b}$ to the interval $[a,b)$, $\hat f_{a,b}$, is given by the function
\[
\hat f_{a,b}(x)=-\frac{1}{x}-\left\lfloor-\frac{1}{x}\right\rceil_{a,b}=T^{-\lfloor-1/x\rceil_{a,b}}S(x) \text{ if } x\ne 0, f(0)=0.
\]
We prove that any irrational number $x$ can be expressed in a unique way as an infinite $(a,b)$-continued fraction
\[
x=n_0-\cfrac{1}{n_1 -\cfrac{1}{n_2-\cfrac{1}{\ddots}}}
\]
which we will denote by $\lfloor n_0,n_1,\dots \rceil_{a,b}$ for short.
The
``digits" $n_i$, $i\ge 1$, are non-zero integers determined recursively by
\begin{equation}\label{a-b}
n_0=\lfloor x\rceil_{a,b},\,x_1=-\frac1{x-n_0},\text{ and }
n_{i}=\lfloor x_i \rceil_{a,b},\,x_{i+1}=- \frac1{x_i-n_i}.
\end{equation}
In what follows, the notation $(\alpha_0,\alpha_1,\dots,\alpha_k)$ is used to write formally a ``minus" continued fraction expression, where $\alpha_i$ are real numbers.
\begin{thm}\label{convergence}
Let $x$ be an irrational number, $\{n_i\}$ the associated sequence of integers defined by (\ref{a-b})
and
\[
r_k=( n_0,n_1,\dots, n_k)\,.
\]
Then the sequence $r_k$ converges to $x$.
\end{thm}
\begin{proof}\footnote{The authors proved initially the convergence statement assuming $-1\le a \le 0 \le b\le 1$, and two Penn State REU students, Tra Ho and Jesse Barbour, worked on the proof for $a,b$ outside of this compact triangular region. The unified proof presented here uses some of their ideas.} We start by proving that none of the pairs of type $(p,1)$, $(-p,-1)$, with $p\ge 1$
are allowed to appear as consecutive entries of the sequence $\{n_i\}$. Indeed, if $n_{i+1}=1$, then
$$b\le x_{i+1}=-\frac{1}{x_i-n_i}<b+1\,,$$
therefore $-\displaystyle\frac{1}{b}\le x_i-n_i<-\frac{1}{b+1}\le (b-1)$, and $n_i<0$. If $n_{i+1}=-1$, then
$$a-1\le x_{i+1}=-\frac{1}{x_i-n_i}<a\,,$$
so $-\displaystyle\frac{1}{a-1}\le x_i-n_i<-\frac{1}{a}$. But $a+1\le -\frac{1}{a-1}$, thus $n_i>0$.
With these two restrictions, the argument follows the lines of the proof for the classical case
of minus (backward) continued fractions \cite{K3}, where $n_i\ge 2$, for all
$i\ge 1$. We define inductively two sequences of integers
$\{p_k\}$ and $\{q_k\}$ for $k\ge -2$:
\begin{equation}\label{pkqk}
\begin{split}
&p_{-2}=0\;,\;p_{-1}=1\;;\; p_{k}=n_{k}p_{k-1}-p_{k-2}\; \text{ for } k\ge 0\\
&q_{-2}=-1\;,\;q_{-1}=0\;;\;q_{k}=n_{k}q_{k-1}-q_{k-2}\; \text{ for } k\ge 0\;.
\end{split}
\end{equation}
We have the following properties:
\begin{itemize}
\item[(i)] there exists $l\ge 1$ so that $|q_l|<|q_{l+1}|<\dots<|q_k|<\dots$;
\item[(ii)] $(n_0,n_1,\dots,n_k,\alpha)=\displaystyle\frac{\alpha p_k-p_{k-1}}{\alpha q_k-q_{k-1}}$, for any real number $\alpha$;
\item [(iii)] $p_kq_{k+1}-p_{k+1}q_k=1$;
\end{itemize}
Let us prove property (i). Obviously $1=q_0\le |q_1|=|n_1|$, $q_2=n_2q_1-q_0=n_2n_1-1$. Notice that $|q_2|>|q_1|$ unless
$n_1=1, n_2=2$ or $n_1=-1$, $n_2=-2$. We analyze the situation $n_1=1$, $n_2=2$. This implies that $q_3=n_3(n_2n_1-1)-n_1=n_3-n_1$, so $|q_3|>|q_2|$, unless $n_3=2$. Notice that it is impossible to have $n_i=2$ for all $i\ge 2$, because $x$ is irrational and the minus continued fraction expression consisting only of two's, $(2,2,\dots)$, has numerical value $1$. Therefore, there exists $l\ge 1$ so that
$n_{l+1}\ne 1,2$. This implies that $|q_{l+1}|>|q_l|$. We continue to proceed by induction. Assume that property (i) is satisfied up to $k$-th term, $k>l$. If $|n_{k+1}|\ge 2$, then
$$
|q_{k+1}|\ge |n_{k+1}|\cdot |q_k|-|q_{k-1}|\ge 2|q_k|-|q_{k-1}|>|q_{k}|\,.
$$
If $n_{k+1}=1$, then
$q_{k+1}=q_k-q_{k-1}$. Since $q_k=n_{k}q_{k-1}-q_{k-2}$ with $n_k<0$, one gets
$$q_{k-1}=\frac{q_k+q_{k-2}}{n_{k}}\,.$$
We analyze the two possible situations
\begin{itemize}
\item If $q_k>0$ then $|q_{k-2}|<q_k$, so $q_k+q_{k-2}>0$ and $q_{k-1}<0$. This implies that
$q_{k+1}=q_k-q_{k-1}\ge q_{k}>0\,.$
\item If $q_k<0$, then $|q_{k-2}|<-q_k$, so $q_k+q_{k-2}<0$ and $q_{k-1}>0$. This implies that
$q_{k+1}=q_k-q_{k-1}<q_k<0\,.$
\end{itemize}
Thus $|q_{k}|<|q_{k+1}|$. A similar argument shows that the inequality remains true if $n_{k+1}=-1$.
Properties (i)--(iii) show that $r_k=p_k/q_k$ for $k\ge 0$.
Moreover, the sequence $r_k$ is a Cauchy sequence because
$$|r_{k+1}-r_k|=\frac{1}{|q_kq_{k+1}|}\le \frac{1}{(k-l)^2}\, \text{ for } k>l.$$
Hence $r_k$ is convergent.
In order to prove that $r_k$ converges to $x$, we write $x=(n_0,n_1,\dots,n_k,x_{k+1})$, and
look only at those terms $(n_0,n_1, \dots,n_k,x_{k+1})$ with $|x_{k+1}|\ge 1$.
There are infinitely many such terms: indeed, if $-1\le a<b\le 1$, then $|x_{k+1}|\ge 1$ for all $k\ge 1$; if $a<-1$, and $|x_{k+1}|<1$, then $b\le x_{k+1}<1$, so $x_{k+2}=-1/(x_{k+1}-1)\ge 1$; if $b>1$, and $|x_{k+1}|<1$, then $-1<x_{k+1}<a$, so $x_{k+2}=-1/(x_{k+1}+1)\ge 1$.
Therefore, the corresponding subsequence $r_k=p_k/q_k$ satisfies
\begin{equation*}
\begin{split}\left|\frac{p_k}{q_k}-x\right|=&\left|\frac{p_k}{q_k}-\frac{p_k x_{k+1}-p_{k-1}}{q_k x_{k+1}-q_{k-1}}\right|
=\frac{1}{|q_k(q_k x_{k+1}-q_{k-1})|}\\ \le & \frac{1}{|q_k|(|q_k||x_{k+1}|-|q_{k-1}|)}\le \frac{1}{|q_k|}\rightarrow 0.
\end{split}
\end{equation*}
We showed that the convergent sequence $r_k=p_k/q_k$ has a subsequence convergent to $x$, therefore the whole sequence converges to $x$.
\end{proof}
\begin{rem}
One can construct $(a,b)$-continued fraction expansions for rational numbers, too. However, such expansions will terminate after finitely many steps if $b\ne 0$. If $b=0$, the expansions of rational numbers will end with a tail of $2$'s, since $0=(1,2,2,\dots)$.
\end{rem}
\begin{rem} \label{quadratic}It is easy to see that if the $(a,b)$-continued fraction expansion of a real number is eventually periodic, then the number is a quadratic irrationality.
\end{rem}
It is not our intention to present in this paper some of the typical number theoretical results that can be derived for the class of $(a,b)$-continued fractions. However, we state and prove a simple version about $(a,b)$-continued fractions with ``bounded digits". For the regular continued fractions, this is a classical result due to Borel and Bernstein (see \cite[Theorem 196]{HW} for an elementary treatment). We are only concerned with
$(a,b)$-expansions that are written with two consecutive digits, a result explicitly needed in Sections \ref{s:7} and \ref{s:8}.
\begin{prop}\label{bdigits1}
The set $\Gamma^{(m)}_{a,b}=\{x=\lfloor 0, n_1,n_2,\dots\rceil_{a,b} \:|\: n_k\in\{m,m+1\}\}$
has zero Lebesgue measure for every $m\ge 1$.
\end{prop}
\begin{proof}
First, notice that if $m=1$, then the set $\Gamma^{(1)}_{a,b}$ has obviously zero measure, since the pairs $(2,1)$ and $(-2,-1)$ are not allowed in the $(a,b)$-expansions.
Assume $m\ge 2$. Notice that $\Gamma^{(m)}_{a,b}\subset \Gamma^{(m)}_{0,-1}$ since a formal continued fraction $x=(0, n_1,n_2,\dots)$ with $n_k\in\{m,m+1\}$ coincides with its ``minus" (backward) continued fraction expansion ($a=-1, b=0$), $x=\lfloor 0, n_1,n_2,\dots\rceil_{-1,0}$. The reason is that any sequence of digits $n_i\ge 2$ gives a valid ``minus" continued fraction expansion.
In what follows, we study the set $\Gamma^{(m)}_{0,-1}$. For practical reasons we will drop the subscript $(0,-1)$.
It is worth noticing that the result for $\Gamma^{(m)}_{0,-1}$ does not follow automatically from the result about regular continued fractions, since there are numbers for which the $(0,-1)$-expansion has only digits $2$ and $3$, while the regular continued fractions expansion has unbounded digits.
We follow the approach of \cite[Theorem 196]{HW} and estimate the size of the set $\Gamma^{(m)}_{n_1,n_2,\dots,n_k}\subset \Gamma^{(m)}$ with the digits $n_1,n_2,\dots,n_k\in \{m,m+1\}$ being fixed. In this particular case, the recursive relation \eqref{pkqk} implies that $1=q_1<q_2<\dots<q_k$.
If $x\in \Gamma^{(m)}_{n_1,n_2,\dots,n_k}$, then
$$(0,n_1,n_2,\dots ,n_k-1)\le x<(0, n_1,n_2,\dots, n_k)\,.$$
Using property (iii), the endpoints of such an interval $I^{(m)}_{n_1,\dots,n_k}$ are given by
$$
\frac{(n_k-1)p_{k-1}-p_{k-2}}{(n_k-1)q_{k-1}-q_{k-2}}\text{ , }
\frac{n_kp_{k-1}-p_{k-2}}{n_kq_{k-1}-q_{k-2}}
$$
and the length of this interval is
\[
l(I^{(m)}_{n_1,\dots,n_k})=\frac{1}{(n_kq_{k-1}-q_{k-2})((n_k-1)q_{k-1}-q_{k-2})}=\frac{1}{q_{k}(q_k-q_{k-1})}
\]
by using that $p_{k-2}q_{k-1}-p_{k-1}q_{k-2}=1$ and $q_k=n_kq_{k-1}-q_{k-2}$.
Denote by $\Gamma^{(m)}_k$ the set of numbers in $[-1,0)$ with $(-1,0)$-continued fraction digits $n_1, n_2, \dots$, $n_k\in\{m,m+1\}$. The set $\Gamma^{(m)}_k$ is part of the set
$$I^{(m)}_k=\bigcup_{n_1,\dots, n_k\in\{m,m+1\}}I^{(m)}_{n_1,\dots,n_k}\,.$$ We have the following relation:
$$
I^{(m)}_{k+1}=\bigcup_{n_1,\dots, n_k\in\{m,m+1\}}I^{(m)}_{n_1,\dots,n_k,m}\cup I^{(m)}_{n_1,\dots,n_k,m+1}
$$
If $x$ lies in $I^{(m)}_{n_1,\dots,n_k,m}\cup I^{(m)}_{n_1,\dots,n_k,m+1}$, then
$$(0,n_1,n_2,\dots ,n_k,m-1)\le x<(0, n_1,n_2,\dots, n_k,m+1)\,.$$
The length of this interval is
$$l(I^{(m)}_{n_1,\dots,n_k,m}\cup I^{(m)}_{n_1,\dots,n_k,m+1})=\frac{2}{((m+1)q_{k}-q_{k-1})((m-1)q_{k}-q_{k-1})}
$$
Now we estimate the ratio
\begin{equation*}
\begin{split}
\frac{l(I^{(m)}_{n_1,\dots,n_k,m}\cup I^{(m)}_{n_1,\dots,n_k,m+1})} {l(I^{(m)}_{n_1,n_2,\dots,n_k})} &= \frac{2q_{k}(q_k-q_{k-1})}{((m+1)q_{k}-q_{k-1})((m-1)q_{k}-q_{k-1})}\\
&\le \frac{2q_{k}}{(m+1)q_{k}-q_{k-1}}\\
&\le \frac{2q_{k}}{3q_{k}-q_{k-1}}=\frac{2}{3-q_{k-1}/q_k}\\
&\le\frac{2k}{2k+1}
\end{split}
\end{equation*}
since $\displaystyle\frac{q_{k-1}}{q_{k}}\le \frac{k-1}{k}$. Indeed, if $n_1=\dots=n_k=2$, then $q_{k-1}/q_k=(k-1)/k$; if some $n_j>2$, then $q_{k-1}/q_k\le 1/2$ from \eqref{pkqk}. This proves that for every $k\ge 1$
$$
l(I^{(m)}_{k+1})\le \frac{2k}{2k+1}l(I^{(m)}_k)
$$
so
$$l(I^{(m)}_{k})\le \frac{2\cdot 4 \cdots (2k-2)}{3\cdot 5\cdots(2k-1)} \cdot l(I^{(m)}_1)\longrightarrow 0 \text { as } k\rightarrow \infty.$$
Therefore, in all cases, $l(I^{(m)}_{k})\rightarrow 0$ as $k\rightarrow \infty$. Since $\Gamma^{(m)}\subset I^{(m)}_{k}$ for every $k\ge 1$, the proposition follows.
\end{proof}
\begin{rem}\label{bdigits2}
By a similar argument, the set $\Gamma^{(-m)}_{a,b}=\{x=\lfloor 0, n_1,n_2,\dots\rceil_{a,b}\:|\: n_k\in\{-m,-m-1\}\}$
has zero Lebesgue measure for every $m\ge 1$.
\end{rem}
\section{Attractor set for $F_{a,b}$}\label{s:3}
The reduction map $F_{a,b}$ defined by \eqref{Fab} has a trapping domain, i.e. a closed
set $\Theta_{a,b}\subset \bar{\mathbb{R}}^2\setminus \Delta$ with the following properties:
\begin{itemize}
\item[(i)] for every pair $(x,y)\in \bar{\mathbb{R}}^2\setminus \Delta$, there exists a positive integer $N$
such that $F_{a,b}^N(x,y)\in \Theta_{a,b}$;
\item[(ii)] $F_{a,b}(\Theta_{a,b})\subset \Theta_{a,b}$.
\end{itemize}
\begin{thm}\label{Delta-trapping} The region $\Theta_{a,b}$ consisting of two connected components (or one if $a=0$ or $b=0$)
defined as
\begin{equation*}
\Theta^u_{a,b}=
\begin{cases}
[-\infty,-1]\times[b-1,\infty]\cup [-1,0]\times[-\frac{1}{a},\infty] & \text{ if } b\ge 1, a\ne 0\\
\emptyset & \text{ if } a=0\\
\begin{split}
[-\infty,-1]\times[b-1,\infty]&\cup [-1,0]\times[\min (-\frac{b}{b-1}, -\frac1 a),\infty]\\
&\cup[0,1]\times[-\frac{1}{b-1},\infty]
\end{split} & \text{ if } 0<b<1
\end{cases}
\end{equation*}
\begin{equation*}
\Theta^l_{a,b}=
\begin{cases}
[0,1]\times[-\infty,-\frac{1}{b}]\cup [1,\infty]\times[-\infty,a+1] & \text{ if } a\le -1, b\ne 0\\
\emptyset & \text{ if } b=0\\
\begin{split}
[-1,0]\times[-\infty,-\frac{1}{a+1}]&\cup [0,1]\times[-\infty,\max (\frac{a}{a+1}, -\frac{1}{b})]\\
&\cup[1,\infty]\times[-\infty,a+1]
\end{split} &\text{ if } a>-1
\end{cases}
\end{equation*}
is the trapping region for the reduction map $F_{a,b}$.
\end{thm}
\begin{figure}
\caption{Typical trapping regions: case $a<-1, 0<b<1$ (left); case $-1<a<0<b<1$ (right)}
\label{trap}
\end{figure}
\begin{proof}
The fact that the region $\Theta_{a,b}$ is $F_{a,b}$-invariant is verified by a direct calculation.
We focus our attention on the attracting property of $\Theta_{a,b}$. Let $(x,y)\in \mathbb R^2\setminus\Delta$, write
$y=\lfloor n_0,n_1,\dots\rceil_{a,b}$, and construct the following sequence of
real pairs $\{(x_k,y_k)\}$ ($k\ge 0$) defined by $x_0=x$, $y_0=y$
and:
$$y_{k+1}=ST^{-n_k}\dots ST^{-n_1}ST^{-n_0}y\,,\quad x_{k+1}=ST^{-n_k}\dots ST^{-n_1}ST^{-n_0}x\,.$$
If $y$ is rational and its $(a,b)$-expansion terminates $y=\lfloor n_0,n_1,\dots,n_l\rceil_{a,b}$, then $y_{l+1}=\pm \infty$,
so $(x,y)$ lands in $\Theta_{a,b}$ after finitely many iterations. If $y$ has an infinite $(a,b)$-expansion, then
$y_{k+1}=\lfloor n_{k+1},n_{k+2},\dots\rceil_{a,b}$, and $y_{k+1}\ge -1/a$ or $y_{k+1}\le -1/b$ for $k\geq 0$. Also,
\begin{equation*}
\begin{split}
y&=T^{n_0}ST^{n_1}S\dots T^{n_k}S(y_{k+1})=\frac{p_{k}y_{k+1}-p_{k-1}}{q_{k}y_{k+1}-q_{k-1}}\\
x&=T^{n_0}ST^{n_1}S\dots T^{n_k}S(x_{k+1})=\frac{p_{k}x_{k+1}-p_{k-1}}{q_{k}x_{k+1}-q_{k-1}}\,,
\end{split}
\end{equation*}
hence
\begin{equation*}\label{eq:uk}
x_{k+1}=\frac{q_{k-1}x-p_{k-1}}{q_kx-p_k}=\frac{q_{k-1}}{q_k}+\frac{1}{q_k^2(p_k/q_k-x)}=\frac{q_{k-1}}{q_k}+\varepsilon_{k}
\end{equation*}
where $\varepsilon_{k}\rightarrow 0$. This shows that for $k$ large enough $x_{k+1}\in [-1,1]$. We proved that there exists $N>0$, such that
$$F_{a,b}^{N}(x,y)=ST^{-n_k}\dots ST^{-n_1}ST^{-n_0}(x,y)\in [-1,1]\times ([-1/a,\infty]\cup [-\infty,-1/b])\,.$$
The point $F^N_{a,b}(x,y)=:(\tilde x,\tilde y)$ belongs to $\Theta_{a,b}$, unless $b<1$ and $(\tilde x,\tilde y)\in [0,1]\times [-1/a,-1/(b-1)]$ or
$a>-1$ and $(\tilde x,\tilde y)\in [-1,0]\times [-1/b,-1/(a+1)]$.
Let us study the next iterates of $(\tilde x,\tilde y)\in [0,1]\times [-1/a,-1/(b-1)]$. If $\tilde y\ge b+1$ then
$$F^2_{a,b}(\tilde x,\tilde y)=(\tilde x-2,\tilde y-2)\in [-1,1]\times [b-1,\infty]\,,$$ so $F^2_{a,b}(\tilde x,\tilde y)\in \Theta_{a,b}$. If it so happens that $-1/a\le \tilde y<b+1$, then
$$F_{a,b}(\tilde x,\tilde y)=(\tilde x-1,\tilde y-1)\in [-1,0]\times [0,b]$$ and
$$F^2_{a,b}(\tilde x,\tilde y)=ST^{-1}(\tilde x,\tilde y)\in [0,\infty]\times [-1/b,\infty] \subset \Theta_{a,b}\,.$$
Similarly, if $(x,y)\in [-1,0]\times [-1/b,-1/(a+1)]$, then $F^2_{a,b}(x,y)\in \Theta_{a,b}$.
Notice that if $a=0$, then $y_{k+1}\le -1/b$ for all $k\ge 0$ (so $\Theta_{a,b}^u=\emptyset$) and if $b=0$, then $y_{k+1}\ge -1/a$ for all $k\ge 0$ (so $\Theta_{a,b}^l=\emptyset$).
\end{proof}
Using the trapping region described in Theorem \ref{Delta-trapping} we define the associated \emph{attractor set}
\begin{equation}\label{def-atrac}
D_{a,b}=\bigcap_{n=0}^\infty D_n,
\end{equation}
where $D_n=\bigcap_{i=0}^n F_{a,b}^i(\Theta_{a,b})$.
\begin{rem}
In the particular cases when $a=0$ and $b\ge 1$, or $b=0$ and $a\le -1$ or $(a,b)=(-1,1)$ the trapping regions
\begin{eqnarray*}
\Theta_{0,b}&=&[-1,0]\times [-\infty,-1]\cup [0,1]\times[-\infty,0]\cup[1,\infty]\times [-\infty,1]\\
\Theta_{a,0}&=&[-\infty,-1]\times[-1,\infty]\cup [-1,0]\times [0,\infty]\cup [0,1]\times [1,\infty]\\
\Theta_{-1,1}&=&
[-\infty,-1]\times[-1,\infty]\cup [-1,0]\times [1,\infty]\\
& &\!\!\!\cup \,\,[0,1]\times [-\infty,-1]\cup [1,\infty]\times [-\infty,0]
\end{eqnarray*}
are also bijectivity domains for the corresponding maps $F_{a,b}$. Therefore, in these cases the attractor $D_{a,b}$ coincides with the trapping region $\Theta_{a,b}$, so the properties mentioned in the introduction are obviously satisfied. In what follows, all our considerations will exclude these degenerate cases.
\end{rem}
\section{Cycle property}\label{s:4}
In what follows, we simplify the notations for $f_{a,b}$, $\lfloor,\cdot\rceil_{a,b}$, $\hat f_{a,b}$ and $F_{a,b}$ to $f$, $\lfloor,\cdot\,\rceil$, $\hat f$ and $F$, respectively, assuming implicitly their dependence on parameters $a,b$. We will use the notation $f^n$ (or $\hat f^n$) for the $n$-times composition operation of $f$ (or $\hat f$). Also,
for a given point $x\in (a,b)$ the notation $\hat f^{(k)}$ means the transformation of type $T^iS$ ($i$ is an integer) such that
\[
\hat f^k(x)=\hat f^{(k)}\hat f^{(k-1)}\cdots \hat f^{(2)}\hat f^{(1)}(x),
\]
where $\hat f^{(1)}(x)=\hat f(x)$.
The map $f$ is discontinuous at $x=a$, $b$, however, we can associate to each $a$ and $b$ two forward orbits: to $a$ we associate the {\em upper orbit} ${\mathcal O}_u(a)=\{f^{n}(Sa)\}$, and the {\em lower orbit} ${\mathcal O}_\ell(a)=\{f^n(Ta)\}$, and to $b$ --- the {\em lower orbit} ${\mathcal O}_\ell(b)=\{f^n(Sb)\}$ and the {\em upper orbit} ${\mathcal O}_u(b)=\{f^n(T^{-1}b)\}$. We use the convention
that if an orbit hits one of the discontinuity points $a$ or $b$, then the next iterate is computed according to the lower or upper location: for example,
if the lower orbit of $b$ hits $a$, then the next iterate is $Ta$,
if the upper orbit of $b$ hits $a$ then the next iterate is $Sa$.
Now we explore the patterns in the above orbits. The following property plays an essential role in studying the map $f$.
\begin{defn}\label{def:cycles}
We say that the point $a$ has the {\em cycle property} if for some non-negative integers $m_1,k_1$
\[
f^{m_1}(Sa)=f^{k_1}(Ta)=c_a.
\]
We will refer to
the set
\[
\{Ta, fTa,\dots ,f^{k_1-1}Ta\}
\]
as the {\em lower side of the $a$-cycle}, to the set
\[
\{Sa, fSa,\dots ,f^{m_1-1}Sa\}
\]
as the {\em upper side of the $a$-cycle}, and to $c_a$ as the {\em end of the $a$-cycle}.
If the product over the $a$-cycle equals the identity transformation, i.e.
\[
T^{-1}f^{-k_1}f^{m_1}S=\mathrm{Id},
\]
we say that $a$ has {\em strong cycle property}, otherwise, we say that $a$ has {\em weak cycle property}.
Similarly, we say that $b$ has {\em cycle property} if for some non-negative integers $m_2,k_2$
\[
f^{k_2}(Sb)=f^{m_2}(T^{-1}b)=c_b.
\]
We will refer to
the set
\[
\{Sb, fSb,\dots ,f^{k_2-1}Sb\}
\]
as the {\em lower side of the $b$-cycle}, to the set
\[
\{T^{-1}b, fT^{-1}b,\dots ,f^{m_2-1}T^{-1}b\}
\]
as the {\em upper side of the $b$-cycle}, and to $c_b$ as the {\em end of the $b$-cycle}.
If the product over the $b$-cycle equals the identity transformation, i.e.
\[
Tf^{-m_2}f^{k_2}S=\mathrm{Id},
\]
we say that $b$ has {\em strong cycle property}, and otherwise we say that $b$ has {\em weak cycle property}.
\end{defn}
It turns out that the cycle property is the prevalent pattern. It can be analyzed and described explicitly by partitioning the parameter set $\mathcal P$ based on the first digits of $Sb$, $STa$, and $Sa$, $ST^{-1}b$, respectively. Figure \ref{fig-P} shows a part of the countable partitions, with $B_{-1},B_{-2},\dots$ denoting the regions where $Sb$ has the first digit $-1, -2,\dots$, and $A_1,A_2,\dots$, denoting the regions where $Sa$ has the first digit $1,2,\dots$.
For most of the parameter region, the cycles are short: everywhere except for the very narrow triangular regions shown in Figure \ref{fig-P} the cycles for both $a$ and $b$ end after the first return to $[a,b)$.
However, there are Cantor-like recursive sets where the lengths of the cycles can be arbitrarily long. Part of this more complex structure, studied in details in Section \ref{s:8}, can be seen as very narrow triangular regions close to the boundary segment $b-a=1$.
\begin{figure}
\caption{The parameter set $\mathcal{P}$}
\label{fig-P}
\end{figure}
By symmetry of the parameter set $\mathcal{P}$ with respect to the line $b=-a$, $(a,b)\mapsto (-b,-a)$, we may assume that $b\leq -a$ and concentrate our attention to this subset of $\mathcal{P}$.
The structure of the set where the cycle property holds for $b$ is described next for the part of the parameter region with $0<b\le -a<1$. We make use extensively of the first return map $\hat f$.
\begin{thm}\label{b-cycle}
Let $(a,b)\in\mathcal{P}$, $0<b\leq -a<1$ and $m\ge 1$ such that $a\le T^mSb<a+1$.
\begin{itemize}
\item[\bf{(I)}] Suppose that there exists $n\geq 0$ such that
\[
\quad\quad\quad \hat f^{k}T^mSb\in \Big(\frac{b}{b+1},a+1\Big) \text{ for } k<n, \text{ and } \hat f^{n}T^mSb\in \Big[a,\frac{b}{b+1}\Big].
\]
\begin{itemize}
\item[(i)] If $\hat f^{n}T^mSb\in (a,\frac{b}{b+1})$, then $b$
has the cycle property; the cycle property is strong if and only if $\hat f^{n}T^mSb\neq 0$.
\item[(ii)] If $\hat f^{n}T^mSb=a$, then $b$ has the cycle property if and only if $a$ has the cycle property.
\item[(iii)] If $\hat f^{n}T^mSb=b/(b+1)$, then $b$ does not have the cycle property, but the orbits of $Sb$ and $T^{-1}b$ are periodic.
\end{itemize}
\item[\bf{(II)}] If $\hat f^{k}T^mSb\in (\frac{b}{b+1},a+1)$ for all $k\geq 0$, then $b$ does not have the cycle property.
\end{itemize}
\end{thm}
\begin{proof} {\bf{(I)}}
In the case $m=1$, and assuming $a<TSb<a+1$ we have
\begin{equation}\label{m=1}
a<1-\frac1{b}< \frac{b}{b+1},
\end{equation}
and the cycle relation for $b$ can be explicitly described as
\begin{equation}\label{m1}
\begin{diagram}
& & \boxed{b-1} &\rTo{S} & \boxed{-\frac 1{b-1}}\\
&\ruTo{T^{-1}}& & & &\rdTo{T^{-1}} \\
\boxed{b}& & & & & &\quad\boxed{c_b=\frac{b}{1-b}}\\
&\rdTo{S} & & & &\ruTo{S} \\
& &\boxed{-\frac 1{b}}&\rTo{T}& \boxed{\frac{b-1}{b}}
\end{diagram}
\end{equation}
In the particular situation that $TSb=a$,
the lower orbit of $b$ hits $a$ and continues to $a+1$, while the upper orbit hits $\frac{b}{1-b}=-1/a$. This means that the iterates will follow the lower and upper orbits of $a$, respectively, thus statement (ii) holds.
Since the second inequality (\ref{m=1}) is strict, the case (iii) cannot occur.
For the case $m=2$ (and assuming $T^2Sb\ne a$) we analyze the following situations: if $b<\frac12$, then
$
2-\frac1{b}<0,
$
and the cycle relation is
\begin{equation}\label{m=2}
\begin{diagram}
& & \boxed{b-1} &\rTo{S} & \boxed{-\frac 1{b-1}} &\rTo{ST^{-2}}& \boxed{1+\frac{b}{1-2b}}\\
&\ruTo{T^{-1}}& & & & & &\rdTo{\;\;T^{-1}}\\
\boxed{b}& & & & & & & & \boxed{c_b=\frac{b}{1-2b}}\\
&\rdTo{S}& & & & & \ruTo(3,2){S} & & &\\
& &\boxed{-\frac 1{b}}&\rTo{T^{2}}& \boxed{-\frac{1-2b}{b}}
\end{diagram}
\end{equation}
If $b>\frac12$ we have
\[
0<2-\frac1{b}\leq \frac{b}{b+1},
\]
since we must also have $2-\frac1{b}<a+1$, i.e. $b\leq \frac1{1-a}$,
and
the cycle relation is
\begin{equation}
\begin{diagram}
& & \boxed{b-1} &\rTo{S} & \boxed{-\frac 1{b-1}} & & & &\\
&\ruTo{T^{-1}}& & & & \rdTo(4,2){ST^{-2}} & \\
\boxed{b}& & & & & & & & \boxed{c_b=1+\frac{b}{1-2b}}\\
&\rdTo{S}& & & & & & \ruTo_{T} \\
& &\boxed{-\frac 1{b}}&\rTo{T^{2}}& \boxed{-\frac{1-2b}{b}} & \rTo^S & \boxed{\frac{b}{1-2b}}&
\end{diagram}
\end{equation}
The above cycles are strong.
If $b=\frac12$ the cycle relation is
\begin{equation}
\begin{diagram}
& & \boxed{b-1} &\rTo{S} & \boxed{-\frac 1{b-1}}\\
&\ruTo{T^{-1}}& & & &\rdTo(2,2){T^{-2}} \\
\boxed{b}& & & & & &\boxed{c_b=-\frac{1-2b}{b}=0}\\
&\rdTo{S}& & & \ruTo(3,2){T^2} &\\
& &\boxed{-\frac 1{b}}& & &
\end{diagram}
\end{equation}
It is easy to check that this cycle is weak. In the particular situation when
$T^2Sb=a$, the lower orbit of $b$ hits $a$, and continues with $a+1$, while the upper orbit still hits $\frac{b}{1-2b}=-1/a$. This means that the iterates will follow the lower and upper orbits of $a$, respectively, and statement (ii) holds.
The relation $2-\frac1{b}=\frac{b}{b+1}$ implies $b=\frac{-1+\sqrt{5}}2$ that does not have the cycle property and the orbits
of $Sb$ and $T^{-1}b$ are periodic; this is the only possibility for (iii) to hold.
The situation for $m\geq 3$ is more intricate. First we will need the following lemmas.
\begin{lem} \label{xy} Suppose $STSx=y$. The following are true:
\begin{itemize}
\item[(a)] if $TSb\leq x<a$, then $b-1\leq y<\frac{a}{1-a}$;
\item[(b)] if $a\le x<\frac{b}{b+1}$, then $\frac{a}{1-a}\le y< b$;
\item[(c)] if $\frac{b}{b+1}\le x< a+1$, then $b\le y<\frac{a}{1-a}+1$;
\item[(d)] if $x=0$, then $y=0$.
\end{itemize}
\end{lem}
\begin{proof} Applying $STS$ to the corresponding inequalities
we obtain
(a)
\(\qquad
b-1=STSTSb\leq y<STSa=\displaystyle\frac{a}{1-a}.
\)
(b)
\(\qquad
\displaystyle \frac{a}{1-a}=STSa\le y<STSTSTb=b
\)
(c)
\(\qquad
b=STSTSTb\le y<STSTa=T^{-1}Sa\le \displaystyle\frac1{1-a}=\frac{a}{1-a}+1,
\)
\noindent where the last inequality is valid for $a\le \frac{1-\sqrt{5}}{2}$,
which is true in the considered region $b\le\frac1{2-a}$.
Relation (d) is obvious.
\end{proof}
\begin{lem}\label{main-argument}
Suppose that for all $k<n$
\begin{equation}\label{continuation}
\frac{b}{b+1}<\hat f^{k}T^mSb<a+1.
\end{equation}
Then
\begin{enumerate}
\item for $0\leq k\leq n$, in the lower orbit of $b$, $\hat f^{(k)}=T^mS$ or $T^{m+1}S$; in the upper orbit of $b$, $\hat f^{(k)}=T^{-i}S$ with $i=2$ or $3$;
\item
there exists $p>1$ such that
\begin{equation}\label{STS}
(STS)\hat f^{n}T^mS=(T^{-2}S)\hat f^{p}T^{-1}.
\end{equation}
\end{enumerate}
\end{lem}
\begin{proof}
(1) Applying $T^mS$ to the inequality (\ref{continuation}), we obtain
\[
a-1\leq T^{m-1}Sb=T^mSTSTb<T^mS\hat f^k T^mSb\leq T^mSTa\leq T^mSb< a+1,
\]
therefore $\hat f^{(k+1)}=T^mS$ or $T^{m+1}S$. Since $\hat f^{(0)}=T^mS$, we conclude that $\hat f^{(k)}=T^mS$ or $T^{m+1}S$ for $0\leq k\leq n$.
(2) In order to determine the upper side of the $b$-cycle,
we will use the following relation in the group $SL(2,\Z)$ obtained by concatenation of the ``standard" relations (from right to left)
\begin{equation}\label{stand}
(STS)T^iS=(T^{-2}S)^{i-1}T^{-1}\quad (i\geq 1),
\end{equation}
and Lemma \ref{xy} repeatedly.
The proof is by induction on $n$. For the base case $n=1$ we have
\[
\frac{b}{b+1}< T^mSb< a+1.
\]
Then for $1\leq i\leq m-1$ $T^iSb$ satisfies condition (a) of Lemma \ref{xy}, hence
\[
b-1<(T^{-2}S)^{i-2}T^{-1}b<\frac{a}{1-a},
\]
which means that on the upper side of the $b$-cycle $\hat f^{(1)}=T^{-1}$ and $\hat f^{(i)}=T^{-2}S$ for $1<i\leq m-1$.
Using (\ref{stand}) for $i=m$ we obtain
\[
(STS)T^mS=(T^{-2}S)^{m-1}T^{-1}=(T^{-2}S)\hat f^{m-2}T^{-1},
\]
i.e. (\ref{STS}) holds with $p=m-2$.
Now suppose the statement holds for $n=n_0$, and for all $k<n_0+1$ we have
\[
\frac{b}{b+1}<\hat f^{k}T^mSb< a+1.
\]
By the induction hypothesis,
there exists $p_0>1$ such that
\begin{equation}\label{cont}
(STS)\hat f^{n_0}T^mS=(T^{-2}S)\hat f^{p_0}T^{-1}.
\end{equation}
But since
\[
\frac{b}{b+1}<\hat f^{n_0}T^mSb< a+1,
\]
condition (c) of Lemma \ref{xy} is satisfied, and hence
\[
b<(T^{-2}S)\hat f^{p_0}T^{-1}b<\frac{a}{1-a}+1,
\]
which is equivalent to
\[
b-1<(T^{-3}S)\hat f^{p_0}T^{-1}b<\frac{a}{1-a},
\]
i.e. $\hat f^{p_0+1}=T^{-3}S$. Using the relation
\(
(STS)T^2S=T^{-1}(STS),
\)
we can rewrite (\ref{cont}) as
\begin{equation}\label{ind}
(STS)T^2S\hat f^{n_0}T^mS=(T^{-3}S)\hat f^{p_0}T^{-1}=\hat f^{p_0+1}T^{-1}.
\end{equation}
Let $\hat f^{(p_0+1)}=T^qS$. We have proved in (1) that $q=m$ or $m+1$, hence $q\geq 3$.
Let
\[
b_0=T^2S\hat f^{n_0}T^mSb \text{ and } c_0=(T^{-3}S)\hat f^{p_0}T^{-1}b.
\]
Then by (\ref{ind})
$(STS)b_0=c_0$.
Using the relation $(STS)T=T^{-2}S(STS)$, we obtain
\[
(STS)T^i=(T^{-2}S)^i(STS),
\]
and therefore,
\begin{equation}\label{STSi}
(STS)T^ib_0=(T^{-2}S)^i(STS)b_0=(T^{-2}S)^ic_0
\end{equation}
Since for $0\leq i<q-2$ $T^ib_0$ satisfies condition (a) of Lemma \ref{xy},
we conclude that
\[b-1< (T^{-2}S)^ic_0<\frac{a}{1-a}.
\]
Therefore $\hat f^{(i)}=T^{-2}S$ for $p_0+1<i\leq p_0+q$,
and (\ref{STSi}) for $i=q-2$ gives us the desired relation
\[
(STS)\hat f^{n_0+1}T^mS=(T^{-2}S)\hat f^{p_0+q}T^{-1}
\]
with $p=p_0+q$.
\end{proof}
Now we complete the proof of the theorem. In what follows we introduce the notations
\[
I_\ell=\Bigl( a,\frac{b}{b+1}\Bigr)\,,\; I_u=\Bigl(\frac{a}{1-a},b\Bigr)
\]
and write $\overline I_\ell$, $\overline I_u$ for the corresponding closed intervals.
(I) If
\(
\hat f^nT^mSb\in I_\ell
\), then
condition (b) of Lemma \ref{xy} is satisfied, and
\[
(T^{-2}S)\hat f^{p}T^{-1}b\in I_u.
\]
It follows that $\hat f^{(p+1)}=T^{-2}S$,
therefore (\ref{STS}) can be rewritten as
\[
(STS)\hat f^{n}T^mS=\hat f^{p+1}T^{-1},
\]
which means that we reached the end of the cycle. More precisely,
(i) if $\hat f^{n}T^mSb\in (0,\frac{b}{b+1})$, then
\[
TS\hat f^{n}T^mSb=S\hat f^{p}T^{-1}b=c_b;
\]
$b-1<\hat f^{j}T^{-1}b<\frac{a}{1-a}$ for $j<p$, and $\hat f^{p}T^{-1}b\in (0,b)$. In this case $c_b<Sb$.
If $\hat f^{n}T^mSb\in (a,0)$, then
\[
S\hat f^{n}T^mSb=T^{-1}S\hat f^{p}T^{-1}b=c_b;
\]
$b-1<\hat f^{j}T^{-1}b<\frac{a}{1-a}$ for $j<p$, and $\hat f^{p}T^{-1}b\in (\frac{a}{1-a},0)$.
In this case $c_b>Sa$.
Since the cycle relation in both cases is equivalent to the identity (\ref{STS}), the cycle property is strong, and (i) is proved.
If $\hat f^{n}T^mSb=0$, then
\[\hat f^{n}T^mSb=\hat f^{p}T^{-1}b=0
\]
is the end of the cycle; for $j<p$, $b-1<\hat f^{j}T^{-1}b<\frac{a}{1-a}$. In this case the cycle ends ``before" the identity (\ref{STS}) is complete, therefore the product over the cycle is not equal to identity, and the cycle is weak.
(ii) If $\hat f^{n}T^mSb=a$, then following the argument in (i) and using relation \eqref{STS} we obtain that the upper orbit of $b$ hits $T^{-1}S\hat f^{p}T^{-1}b=S\hat f^{n}T^mSb=Sa=-1/a$, while the lower orbit hits the value $a+1$, hence $b$ satisfies the cycle property if and only if $a$ does.
(iii) If $\hat f^{n}T^mSb=\frac{b}{b+1}$, then following the argument in (i) we obtain
\[
(T^{-2}S)\hat f^{p}T^{-1}b=b.
\]
However, one needs to apply one more $T^{-1}$ to follow the definition of the map $f$, hence $\hat f^{(p+1)}=T^{-3}S$, not $T^{-2}S$, and the cycle will not close.
One also observes that in this case the $(a,b)$-expansions of $Sb$ and $T^{-1}b$ will be periodic, and therefore the cycle will never close.
(II) If
\[
\hat f^{k}T^mSb\notin \overline {I_\ell}
\]
for all $k\geq 0$, by the argument in the part (I) of the proof, on the
lower orbit of $b$
each $\hat f^{(k)}=T^q S$, where $q=m$ or $m+1$, and on the
upper orbit of $b$
each $\hat f^{(p)}=T^{-r}S$, where $r=2$ or $3$, and
for all $p\geq 1$
\[
\hat f^pT^{-1}b\notin\overline{I_u}.
\]
This means that for all images under the original map $f$ on the
lower orbit of $b$
we have
\[
f^kSb\in\left(-1-\frac1{b},a\right)\cup \left(\frac{b}{b+1},a+1\right)
\]
while for the images on the
upper orbit of $b$
\[
f^kT^{-1}b\in\left(b-1,\frac{a}{1-a}\right)\cup\left(b,1-\frac1{a}\right).
\]
Since these ranges do not overlap, the cycle cannot close, and $b$ has no cycle property.
\end{proof}
A similar result holds for the $a$-cycles. First, if $Sa$ has the first digit $1$, i.e. $b\le Sa<b+1$, then one can easily write the $a$-cycle, similarly to \eqref{m=1}. For the rest of the parameter region we have:
\begin{thm}\label{a-cycle}
Let $(a,b)\in\mathcal{P}$, $0<b\leq -a<1$ with $Sa\ge b+1$ and $m\ge 1$ such that $a\le T^mSTa<a+1$.
\begin{itemize}
\item[\bf{(I)}] Suppose that there exists $n\geq 0$ such that
\[
\quad\quad\quad \hat f^{k}T^mSTa\in \Big(\frac{b}{b+1},a+1\Big) \text{ for } k<n, \text{ and } \hat f^{n}T^mSTa\in \Big[a,\frac{b}{b+1}\Big].
\]
\begin{itemize}
\item[(i)] If $\hat f^{n}T^mSTa\in (a,\frac{b}{b+1})$, then $a$
has the cycle property; the cycle property is strong if and only if $\hat f^{n}T^mSTa\neq 0$.
\item[(ii)] If $\hat f^{n}T^mSTa=a$, then $a$ does not have the cycle property, but the $(a,b)$-expansions of $Sa$ and $Ta$ are eventually periodic.
\item[(iii)] If $\hat f^{n}T^mSTa=b/(b+1)$, then $a$ has the cycle property if and only if $b$ has the cycle property.
\end{itemize}
\item[\bf{(II)}] If $\hat f^{k}T^mSTa\in (\frac{b}{b+1},a+1)$ for all $k\geq 0$, then $a$ does not have the cycle property.
\end{itemize}
\end{thm}
\begin{proof} The proof follows the proof of Theorem \ref{b-cycle} with minimal modifications. In particular, the relation
(\ref{STS}) should be replaced by relation
\begin{equation}\label{STS*}
(STS)\hat f^nT^mST=(T^{-2}S)\hat f^p.
\end{equation}
For (iii), since $\hat f^nT^mSTa=\frac{b}{b+1}$, on the lower side we have $TS f^nT^mSTa=Sb$, and on the upper side, using (\ref{STS*}), $(T^{-2}S)\hat f^pb=b$. As in the proof of Theorem \ref{b-cycle}, $\hat f^{p+1}=T^{-3}S$, so $(T^{-3}S)\hat f^pb=T^{-1}b$. Therefore $a$ has (strong or weak) cycle property if and only if $b$ does.
\end{proof}
Let us now
describe the situation when $a\le -1$.
\begin{thm}\label{ab-cycle}
Let $(a,b)\in\mathcal{P}$ with $0<b\leq -a$ and $a\le -1$. Then $a$ and $b$ satisfy the cycle property.
\end{thm}
\begin{proof}
It is easy to see that $a=-1$ has the degenerate weak cycle:
\begin{equation}\label{a=-1}
\begin{diagram}
& & \boxed{1} \\
&\ruTo{S}& &\rdTo{T^{-1}} \\
\boxed{a=-1} & & \rTo{T} & & \boxed{0} \\
\end{diagram}
\end{equation}
while $a<-1$ satisfies the following strong cycle relation:
\begin{equation}\label{acycle}
\begin{diagram}
& & \boxed{-\frac{1}{a}} &\rTo{T^{-1}} & \boxed{-\frac{1}{a} - 1} & \rTo{S} &\boxed{\frac{a}{a+1}}\\
&\ruTo{S} & & & & & &\rdTo{T^{-1}} \\
\boxed{a}& & & & & & & & \boxed{c_a=-\frac{1}{a+1}} \\
&\rdTo{T} & & & & & & \ruTo(6,2){S} \\
& &\boxed{a+1}
\end{diagram}
\end{equation}
In order to study the orbits of $b$, let $m\ge 0$ such that $a\le T^mSb<a+1$. If $m=0$, then $Sb=a$ (since $Sb\le a$), and the cycle of $b$ is identical to the one described by \eqref{a=-1}. If $m\ge 1$, then one can use relation \eqref{stand} to construct the $b$-cycle. More precisely, if $a<T^mSb<a+1$, then we have:
\begin{equation}
\begin{diagram}
& & \boxed{b-1} &\rTo{S} & \boxed{-\frac 1{b-1}} &\rTo{(ST^{-2})^{m-1}}& \boxed{1+\frac{b}{1-mb}}\\
&\ruTo{T^{-1}}& & & & & &\rdTo{\;\;T^{-1}}\\
\boxed{b}& & & & & & & & \boxed{c_b=\frac{b}{1-mb}}\\
&\rdTo{S}& & & & & \ruTo(3,2){S} & & &\\
& &\boxed{-\frac 1{b}}&\rTo{T^{m}}& \boxed{-\frac{1-mb}{b}}
\end{diagram}
\end{equation}
If $T^mSb=a$, then it happens again that the lower orbit of $b$ hits $a$, and then $Ta$, while the upper orbit hits $Sa$. Following now the cycle of $a$ described by \eqref{acycle}, we conclude that $b$ satisfies the strong cycle property.
If $T^mSb=0$, i.e. $b=1/m$, then a minor modification of the above $b$-cycle gives us the following weak cycle relation:
\begin{equation}
\begin{diagram}
& & \boxed{b-1} &\rTo{S\;\;} & \boxed{-\frac 1{b-1}} &\rTo{T^{-1}(ST^{-2})^{m-2}}& \boxed{\frac{b}{1-mb+b}=1}\\
& \ruTo_{T^{-1}} & & & & & &\rdTo_{\;\;T^{-1}}\\
\boxed{b}& & & & & & & & \boxed{c_b=0}\\
&\rdTo{S\;\;}& & & & & \ruTo(3,2){T} & & &\\
& &\boxed{-\frac 1{b}=-m}&\rTo{T^{m-1}}& \boxed{-1}
\end{diagram}
\end{equation}
\end{proof}
The following corollaries are immediate from the proof of Theorems \ref{b-cycle}, \ref{a-cycle}, \ref{ab-cycle}.
\begin{cor} \label{b-norepeats}If $b$ has the cycle property, then the upper side of the $b$-cycle
\[
\{T^{-1}b, fT^{-1}b,\dots ,f^{m_2-1}T^{-1}b\}
\]
and the lower side of the $b$-cycle
\[
\{Sb, fSb,\dots ,f^{k_2-1}Sb\}
\]
do not have repeating values.
\end{cor}
\begin{cor} \label{a-norepeats}If $a$ has the cycle property, then the upper side of the $a$-cycle
\[
\{Sa, fSa,\dots ,f^{m_1-1}Sa\}
\]
and the lower side of the $a$-cycle
\[
\{Ta, fTa,\dots ,f^{k_1-1}Ta\}
\]
do not have repeating values.
\end{cor}
\section{Finiteness condition implies finite rectangular structure}\label{s:5}
In order to state the condition under which the natural extension map $F_{a,b}$ has an attractor with finite rectangular structure mentioned in the Introduction, we follow the split orbits of $a$ and $b$
\[
\mathcal{L}_a=\begin{cases} {\mathcal O}_\ell(Ta)&\text{ if $a$ has no cycle property}\\
\text{lower part of $a$-cycle}&\text{ if $a$ has strong cycle property}\\
\text{lower part of $a$-cycle $\cup\{0\}$}&\text{ if $a$ has weak cycle property},
\end{cases}
\]
\[
\mathcal{U}_a=\begin{cases} {\mathcal O}_u(Sa)&\text{ if $a$ has no cycle property}\\
\text{upper part of $a$-cycle}&\text{ if $a$ has strong cycle property}\\
\text{upper part of $a$-cycle $\cup\{0\}$}&\text{ if $a$ has weak cycle property},
\end{cases}
\]
and, similarly, $\mathcal{L}_b$ and $\mathcal{U}_b$ by
\[
\mathcal{L}_b=\begin{cases} {\mathcal O}_\ell(Sb)&\text{ if $b$ has no cycle property}\\
\text{lower part of $b$-cycle}&\text{ if $b$ has strong cycle property}\\
\text{lower part of $b$-cycle $\cup\{0\}$}&\text{ if $b$ has weak cycle property},
\end{cases}
\]
\[
\mathcal{U}_b=\begin{cases} {\mathcal O}_u(T^{-1}b)&\text{ if $b$ has no cycle property}\\
\text{upper part of $b$-cycle}&\text{ if $b$ has strong cycle property}\\
\text{upper part of $b$-cycle $\cup\{0\}$}&\text{ if $b$ has weak cycle property},
\end{cases}
\]
We find it useful to introduce the map $\rho_{a,b}:\bar\mathbb R\to\{T,S,T^{-1}\}$
\begin{equation}
\rho_{a,b}(x)=\begin{cases}
T &\text{ if } x < a\\
S &\text{ if } a\le x<b\\
T^{-1} &\text{ if } x\ge b
\end{cases}
\end{equation}
in order to write $f_{a,b}(x)=\rho_{a,b}(x)x$ and $F_{a,b}(x,y)=(\rho(y)x,\rho(y)y)$.
\begin{rem}It follows from the above definitions that $\rho(y)=S$ or $T$ if $y\in\mathcal{L}_a\cup \mathcal{L}_b$, and $\rho(y)=S$ or $T^{-1}$ if $y\in\mathcal{U}_a \cup\mathcal{U}_b$.
\end{rem}
\begin{defn} We say that the map $f_{a,b}$ satisfies the {\em finiteness condition} if the sets of values in all four truncated orbits $\mathcal{L}_a,\mathcal{L}_b,\,\,\mathcal{U}_a,\,\,\mathcal{U}_b$ are finite.
\end{defn}
\begin{prop} \label{shift} Suppose that the set $\mathcal{L}_b$ is finite. Then
\begin{enumerate}
\item either
$b$ has the cycle property or
the upper and lower orbits of $b$ are eventually periodic.
\item The finiteness of $\mathcal{L}_b$ implies the finiteness of $\mathcal{U}_b$.
\end{enumerate}
Similar statements hold for the sets $\mathcal{L}_a$, $\mathcal{U}_a$ and $\mathcal{U}_b$ as well.
\end{prop}
\begin{proof} The two properties follow from Theorem \ref{b-cycle} and its proof. If $b$ does not have the cycle property, but its lower orbit is eventually periodic, then one uses Lemma \ref{main-argument} to conclude that the upper orbit of $b$ has to be eventually periodic.
\end{proof}
\begin{rem}
If $b$ has the strong cycle property, then the set $\mathcal{L}_b$ coincides with the lower side of the $b$-cycle and $\mathcal{U}_b$ coincides with the upper side of the $b$-cycle. If $b$ does not have the cycle property, but the lower and upper orbits of $b$ are eventually periodic then $\mathcal{L}_b$ and $\mathcal{U}_b$ are identified with these orbits accordingly, until the first repeat.
\end{rem}
\begin{thm} \label{thm:recstructure} Let $(a,b)\in \mathcal P$, $a\ne 0$, $b\ne 0$, and assume that the map $f_{a,b}$
satisfies the finiteness condition. Then there exists a set $A_{a,b}\underset{\neq}\subset\bar{\mathbb{R}}^2$ with the following properties:
\begin{enumerate}
\item[(A1)] The set $A_{a,b}$ consists of two connected components each having {\em finite rectangular structure},
i.e. bounded by non-decreasing step-functions with a finite number of steps.
\item[(A2)] $F_{a,b}: A_{a,b}\to A_{a,b}$ is a bijection except for some images of the boundary of $A_{a,b}$.
\end{enumerate}
\end{thm}
\begin{proof}
(A1)
We will construct a set $A_{a,b}$ whose upper connected component is bounded by a step-function with values in the set $\mathcal{U}_{a,b}=\mathcal{U}_a\cup\mathcal{U}_b$ (that we refer to as {\em upper levels}), and whose lower connected component is bounded by a step-function with values in the set $\mathcal{L}_{a,b}=\mathcal{L}_a\cup\mathcal{L}_b$ (that we refer to as {\em lower levels}). Notice that each level in $\mathcal{U}_a$ and $\mathcal{U}_b$ appears exactly once, but if the same level appears in both sets, we have to count it twice in $\mathcal{U}_{a,b}$. The same remark applies to the lower levels.
Now let $y_\ell\in\mathcal{L}_{a,b}$ be the closest $y$-level to $Sb$ with $y_\ell\geq Sb$, and $y_u\in\mathcal{U}_{a,b}$ be the closest $y$-level to $Sa$ with $y_u\leq Sa$. Since each level in $\mathcal{U}_a$ and in $\mathcal{L}_b$ appears only once,
if $y_u=Sa$, $y_u$ can only belong to $\mathcal{U}_b$, and if $y_\ell= Sb$, $y_\ell$ can only belong to $\mathcal{L}_a$.
We consider the rays $[-\infty,x_b]\times \{b\}$ and $[x_a,\infty]\times \{a\}$, where $x_a$ and $x_b$ are unknown, and
``transport" them (using the special form of the natural extension map $F_{a,b}$) along the
sets $\mathcal{L}_b,\,\,\mathcal{U}_b,\,\,\mathcal{L}_a$ and $\mathcal{U}_a$ respectively until we reach the levels $y_u$ and $y_\ell$ (see Figure \ref{fig-levels}).
Now we set-up a system of two fractional linear equations by equating the right end of the segment at the level $Sb$ with the left end of the segment at the level $y_\ell$, and, similarly, the left end of the segment at the level $Sa$ and the right end of the level $y_u$.
\begin{figure}
\caption{Construction of the domain $A_{a,b}$}
\label{fig-levels}
\end{figure}
\begin{lem}\label{2-sys} The system of two equations at the consecutive levels $y_u$ and $Sa$, and $y_\ell$ and $Sb$,
has a unique solution with $x_a\geq 1$ and $x_b\leq -1$.
\end{lem}
\begin{proof}
In what follows, we present the proof assuming that $0<b\le -a<1$. The situation $a\le -1$ is less complex due to the explicit cycle expressions described in Theorem \ref{ab-cycle} and will be discussed at the end.
Let $m_a,m_b$ be positive integers such that $a\le T^{m_a}STa<a+1$ and $a\le T^{m_b}Sb<a+1$. For the general argument we assume that $m_a,m_b\geq 3$, the cases $m_a$ or $m_b\in \{1,2\}$ being considered separately.
The level $y_u$ may belong to $\mathcal{U}_a$ or $\mathcal{U}_b$, and the level $y_\ell$ may belong to $\mathcal{L}_a$ or $\mathcal{L}_b$, therefore we need to consider $4$ possibilities.
\noindent{\bf Case 1: $y_u\in\mathcal{U}_a,\,\,y_\ell\in\mathcal{L}_a$.} Then we have
\[
Sx_a= T^{-1}S\hat f_-^{n_1}(\infty)\,,\quad
Sx_b= TS \hat f_+^{n_2}Tx_a,
\]
where $\hat f_-^{n_1}$ is a product of factors $T^{-i}S$ (that appear on the upper orbit of $a$) with $i=2$ or $3$, and $\hat f_+^{n_2}$ is a product of factors $T^{i}S$ (that appear on the lower orbit of $a$) with $i=m$ or $m+1$. Using (\ref{STS*}) we rewrite the first equation as
\[
x_a=ST^{-1}S\hat f_-^{n_1}(\infty)=ST^{-1}SST^2STS\hat f_+^{k_1}T^mST(\infty)
=T^{-1}\hat f_+^{k_1}T^mST(\infty)\,.
\]
Since $\hat f_+^{k_1}$ is a product of factors $T^{i}S$ with $i=m$ or $m+1$, $m\geq 3$, we conclude that $Tx_a$ has a finite formal continued fraction expansion starting with $m'\geq 3$, i.e. $Tx_a>2$, and $x_a>1$.
Furthermore, from the second equation
\[
x_b= STS \hat f_+^{n_2}Tx_a,
\]
hence $ \hat f_+^{n_2}Tx_a$ has a finite formal continued fraction expansion starting with $m'\geq 3$, i.e. $ \hat f_+^{n_2}Tx_a>2$, and $x_b<-2$.
\noindent{\bf Case 2: $y_u\in\mathcal{U}_a,\,\,y_\ell\in\mathcal{L}_b$.} Then
\[
Sx_a= T^{-1}S\hat f_-^{n_1}(\infty)\,,\quad
Sx_b= TS \hat f_+^{n_2}(-\infty)\,.
\]
Like in Case 1 we see that $x_a>1$, and
\[
x_b= STS \hat f_+^{n_2}(-\infty)<-2,
\]
since $ \hat f_+^{n_2}(-\infty)$ has a formal continued fraction expansion starting with $m'\geq 3$, and therefore is $>2$.
\noindent{\bf Case 3: $y_u\in\mathcal{U}_b,\,\,y_\ell\in\mathcal{L}_a$.} Then
\[
Sx_a= T^{-1}S\hat f_-^{n_1}T^{-1}x_b\,,\quad
Sx_b= TS \hat f_+^{n_2}Tx_a\,.
\]
Using (\ref{STS})
\[
x_a= ST^{-1}S\hat f_-^{n_1}T^{-1}x_b=ST^{-1}SST^2STS\hat f^{k_2}T^mSx_b,
\]
and using the second equation and simplifying, we obtain
\[
Tx_a=\hat f^{k_2}T^mSSTS\hat f_+^{n_2}(Tx_a)=\hat f^{k_2}T^{m+1}S\hat f_+^{n_2}(Tx_a).
\]
Since all its factors are of the form $T^iS$ with $i\geq 3$, the matrix $\hat f^{k_2}T^{m+1}S\hat f_+^{n_2}$ is hyperbolic and its attracting fixed point $Tx_a$ has periodic formal continued fraction expansion starting with $m'\geq 3$ (see Theorem 3.1 of \cite{KU1}), hence $x_a>1$. Finally, as in Case 1,
\[
x_b=STS\hat f_+^{n_2}Tx_a<-2
\]
since $\hat f_+^{n_2}Tx_a$ has formal continued fraction expansion with $m'\geq 3$, hence $>2$.
\noindent{\bf Case 4: $y_u\in\mathcal{U}_b,\,\,y_\ell\in\mathcal{L}_b$.} Then
\[
Sx_a= T^{-1}S\hat f_-^{n_1}T^{-1}x_b\,,\quad
Sx_b= TS \hat f_+^{n_2}S(-\infty)\,.
\]
From the second equation we obtain
\[
x_b=STS\hat f_+^{n_2}S(-\infty)<-2
\]
since $\hat f_+^{n_2}S(-\infty)$ has formal continued fraction expansion with $m'\geq 3$, hence $>2$.
Finally,
\[
x_a= ST^{-1}S\hat f_-^{n_1}T^{-1}x_b=T^{-1}\hat f^{k_2}T^{m+1}S\hat f_+^{n_2}S(-\infty),
\]
hence
\[
Tx_a=\hat f^{k_2}T^{m+1}S\hat f_+^{n_2}S(-\infty)>2
\]
since it has formal continued fraction expansion with $m'\geq 3$, therefore $x_a>1$.
Now we analyze the particular situations when $m_a$ or $m_b\in\{1,2\}$, using the explicit cycle descriptions that exist for these situations as described by Theorems \ref{b-cycle} and \ref{a-cycle}.
(i) If $m_a=m_b=1$, then relation \eqref{m1} for the $b$-cycle and a similar one for the $a$-cycle shows that $y_\ell=-\frac1{b}+1$ and $y_u=-\frac1{a}-1$, therefore $x_a=1$ and $x_b=-1$.
(ii) If $m_a=1$, $m_b=2$, following the explicit cycles given by \eqref{m=2} we obtain $y_\ell=-1/b +1$, and $y_u=-1/(b-1)-1$, therefore $x_a=2$, $x_b=-1$.
(iii) If $m_a=1$, $m_b\ge 3$, using the cycle structure in Theorem \ref{b-cycle} we obtain $y_\ell=-1/b+1$ and $y_u=T^{-1}(ST^{-2})^{m_b-2}ST^{-1}b$, therefore, $x_a=m_b$, and $x_b=-1$.
(iv) If $m_a=2$, $m_b=2$, using the cycle structure in Theorems \ref{b-cycle} and \ref{a-cycle} we obtain $y_\ell=-\frac1{a+1}+1$ and $y_u=-\frac1{b-1}-1$, and a calculation in this particular case, like in Lemma \ref{2-sys}, Case 3 implies that $x_a>1$ and $x_b<-1$.
(v) If $m_a=2$, $m_b>2$, an analysis of the four cases above for this particular situation (with an explicit cycle relation for $a$) yields $x_a\ge 1$ and $x_b\le -1$. Indeed, in Case 1, we have $y_u=-1/a-1$, hence $x_a=1$ and $x_b=-2$. In Case 2, we get $x_a=1$ and $x_b<-2$. Cases 3 and 4 are treated similarly.
\end{proof}
Now, since $x_a$ and $x_b$ are uniquely determined, by ``transporting" the rays $[-\infty, x_b]\times\{b\}$ and $[x_a,\infty]\times\{a\}$ along the sets $\mathcal{L}_b,\,\,\mathcal{U}_b,\,\,\mathcal{L}_a$ and $\mathcal{U}_a$ we obtain the $x$-coordinates of the right and left end of the segments on each level.
\begin{defn} We say that two consecutive levels $y_1\leq y_2$ of $\mathcal{L}_{a,b}$, respectively, $\mathcal{U}_{a,b}$, are called {\em connected by a vertical segment} (we will refer to this as {\em connected}) if the $x$-coordinate of the right end point of the horizontal segment on the level $y_1$ is equal to the $x$-coordinate of the left end point of the horizontal segment on the level $y_2$.
\end{defn}
We will prove that all levels of $\mathcal{L}_{a,b}$ and all levels of $\mathcal{U}_{a,b}$ are connected.
We first look at the levels in $\mathcal{L}_{a,b}$. By Lemma \ref{2-sys} the levels $y_u$ and $Sa$, and the levels $Sb$ and $y_\ell$ are connected.
\begin{lem} \label{next} The levels $Sb\in\mathcal{L}_b$ and $STa\in\mathcal{L}_a$ are two consecutive levels of $\mathcal{L}_{a,b}$ connected by a vertical segment at $x=0$.
The levels $Sa\in\mathcal{U}_a$ and $ST^{-1}b\in\mathcal{U}_b$ are two consecutive levels of $\mathcal{U}_{a,b}$ connected by a vertical segment at $x=0$.
\end{lem}
\begin{proof} Suppose there is $y\in \mathcal{L}_{a,b}$ such that $STa\leq y\leq Sb$. Then $y\in \mathcal{L}_a$ or $\mathcal{L}_b$. In either case,
since by Lemmas \ref{a-norepeats} and \ref{b-norepeats} the truncated orbits $\mathcal{L}_a,\mathcal{L}_b$ do not have repeated values, neither $STa=y$ nor $y=Sb$ is possible. Thus the only case we need to consider is
\[
STa<y<Sb.
\]
Then, either $y=Sy'$ for some $y'\in\mathcal{L}_{a,b}$ ($0<y'\le a+1$) or $y=Ty''$ for some $y''\in\mathcal{L}_{a,b}$. These would imply that either $y'>Ta$, which is impossible, or $Ty''<Sb$, i.e. $y''<T^{-1}Sb$, which is also impossible (if $y''<T^{-1}Sb$ then $y=Ty''$ must be the end of the $a$-cycle, by Theorem \ref{a-cycle}). The $x$-coordinate of the right end point of the segment at the level $STa$ and of the left end point of the segment at the level $Sb$ is equal to $0$.
The second part of the proof is similar.
\end{proof}
The following proposition will be used later in the proof.
\begin{prop} \label{tech} Suppose that the set $\mathcal{L}_{a,b}$ is finite and $y\in \mathcal{L}_{a,b}$ with $y>STa$.
\begin{enumerate}
\item If $y\in\mathcal{L}_a$, then
there exists $n_0>0$ such that
$\rho(f^{n}y)=\rho(f^n STa)$
for all $0<n<n_0$ and $\rho(f^{n_0}y)\neq \rho(f^{n_0}STa)$, or $f^{n_0}y=0$;
\item If $y\in \mathcal{L}_b$, then $y>Sb$, and there exists $n_0>0$ such that
$\rho(f^{n}y)=\rho(f^n Sb)$
for all $n<n_0$ and $\rho(f^{n_0}y)\neq \rho(f^{n_0}Sb)$, or $f^{n_0}y=0$.
\end{enumerate}
\end{prop}
\begin{proof}
Suppose that $y\in\mathcal{L}_a$ and $a$ satisfies the cycle property. It follows that such an $n_0$ exists or $f^{n_0}y$
is the end of the $a$-cycle. We will show that the latter is possible only if $f^{n_0}y=0$, i.e. it is the end of a weak cycle.
Suppose $f^{n_0}y$ is the end of the $a$-cycle.
Then if
\[
\rho(f^{n_0-1}y)=\rho(f^{n_0-1}STa)=S,
\]
we must have
$f^{n_0-1}y<0$ since otherwise the cycle would not stop at $S$, but $f^{n_0-1}(STa)>0$ since for $STa$ we have not reached the end of the cycle. This contradicts the monotonicity of $f^{n_0-1}$ and the original assumption $y>STa$, thus is impossible. The other possibility is
\[
\rho(f^{n_0-1}y)=\rho(f^{n_0-1}STa)=T.
\]
But this either implies that $f^{n_0-1}y<T^{-1}Sb$, and by monotonicity of $f^{n_0-1}$, $f^{n_0-1}(STa)<f^{n_0-1}y<T^{-1}Sb$, which implies that we have reached the end of the cycle of $STa$ as well, a contradiction, or,
$f^{n_0}y=0$, i.e. it is the end of a weak cycle.
Now suppose $y\in\mathcal{L}_b$. Then by Lemma \ref{next} $y\geq Sb$, but
since each level in $\mathcal{L}_b$ appears only once, we must have $y>Sb$. Now the argument that $f^{n_0}y$ cannot be the end of the $b$-cycle is exactly the same as for the $a$-cycle.
In the periodic case, let us assume that no such $n_0$ exists. Then, in case (1) the $(a,b)$-expansions of $STa$ and $y$, which is the lower part of the former, are the same, i.e. $(a,b)$-expansions of $STa$ is invariant by a left shift.
In case (2), we have seen already that we must have $y>Sb$.
Then the $(a,b)$-expansions of $Sb$ and $y$, which is the lower part of the former, are the same, i.e. $(a,b)$-expansions of $Sb$ is invariant by a left shift.
The proof that this is impossible
is based on the following simple observation: if $\sigma= (a_1,a_2,\dots , a_k, \overline{a_{k+1},a_{k+2},\dots, a_{k+n}})$ is an eventually periodic symbolic sequence with the minimal period $n$ and invariant under a left shift by $m$, then $\sigma$ is purely periodic and $m$ is a multiple of $n$.
By the uniqueness property of $(a,b)$-expansions, this would imply that
$y=STa$ or $y=Sb$, a contradiction.
\end{proof}
Let $y_b^-,y_b^+\in\mathcal{U}_{a,b}$ be two consecutive levels with $y_b^-\leq b< y_b^+$, and $y_a^-,y_a^+\in\mathcal{L}_{a,b}$ be two consecutive levels with $y_a^-< a\leq y_a^+$.
\begin{lem}\label{a-connected}
There is always one level connected with level $a+1$, and
the levels $y_a^-$ and $y_a^+$ are connected by the vertical segment at $x_a$.
\end{lem}
\begin{proof}
By Lemmas \ref{2-sys} and \ref{next}, we know that three consecutive levels $STa\leq Sb\leq y_\ell$ are connected. Moreover, their images remain connected under the same transformations in $SL(2,\Z)$.
Since each level in $\mathcal{U}_a$ and in $\mathcal{L}_a$ appears only once, at least one of the two inequalities must be strict, i.e. if $STa= Sb$, then $STa=Sb< y_\ell$, and if $Sb= y_\ell$, then $STa< Sb= y_\ell$.
First we prove that
$y_\ell<TSb$. Suppose $y_\ell\geq TSb$. Its pre-image must be $y'_\ell=T^{-1}y_\ell$ since for any $y,\,0<y<Ta$,
$Sy<STa\leq Sb<TSb$, and
we would have
$Sb\leq y'_\ell<y_\ell$ that contradicts the assumption that $y_\ell$ is {the next level above} $Sb$. Therefore, if the first digit in the $(a,b)$-expansion of $Sb$ is $-m$,
then the first digit of $y_\ell$ is $-(m-1)$ or $-m$. In the first case, the three levels
\[
T^{m-1}Sb< a\le T^{m-1}y_{\ell}
\]
are connected and satisfy $T^{m-1}Sb=y^-_a, T^{m-1}y_{\ell}=y^+_a$.
Therefore, the levels $T^{m}Sb$ and $a+1$ are connected.
For the second case, we know that $Sb\leq y_\ell$ and
\[
a\le T^{m}Sb\leq T^{m}y_{\ell}< a+1.
\]
If $Sb=y_\ell$, then $y_\ell\in \mathcal{L}_a$, and $STa<y_\ell$.
If $Sb<y_\ell$, then $y_\ell\in \mathcal{L}_b$, or $y_\ell\in \mathcal{L}_a$ and $STa<y_\ell$.
Let us assume that $y_\ell$ belongs to $\mathcal{L}_a$.
Since $STa<y_\ell$, by Proposition \ref{tech}, there are two possibilities:
\begin{enumerate}
\item $f^{n_0}y_\ell$ is the end of a weak cycle.
\item There exists
$n_0$ such that
$\rho(f^{n}y_\ell)=\rho(f^n STa)$
for all $n<n_0$, and $\rho(f^{n_0}y_\ell)\neq \rho(f^{n_0}STa)$.
\end{enumerate}
In the first case, we have $f^{n_0}STa=y_a^-$ and $f^{n_0}Sb=y_a^+$, or $f^{n_0}Sb=y_a^-$ and $ f^{n_0}y_{\ell}=y_a^+$. Therefore, either
$f^{n_0+1}STa$ or $f^{n_0+1}Sb$ is connected with level $a+1$.
In the second case,
we notice that
\[
\rho(f^{n_0-1}y_\ell)=\rho(f^{n_0-1}STa)=T
\]
otherwise, $\rho(f^{n_0-1}y_\ell)=\rho(f^{n_0-1}STa)=S$
would imply
\[
\rho(f^{n_0}y_\ell)=\rho(f^{n_0}STa)=T
\]
in contradiction with the choice of $n_0$.
Further,
there are two possibilities:
\[
\text{(i) } \rho(f^{n_0}STa)=S,\,\,\rho(f^{n_0}y_\ell)=T\,,\quad \text{(ii) } \rho(f^{n_0}STa)=T,\,\,\rho(f^{n_0}y_\ell)=S.
\]
In case (i) we obtain
\[
f^{n_0}y_\ell<a\le f^{n_0}STa
\]
which contradicts the monotonicity of $f$ and the original assumption $y_\ell>STa$. Thus the only possibility is
\[
f^{n_0}y_\ell \ge a>f^{n_0}STa.
\]
By using the monotonicity of $f^{n_0}$ we have
\[
f^{n_0}y_\ell>f^{n_0}Sb>f^{n_0}STa
\]
and conclude that $f^{n_0}STa=y_a^-$ and $f^{n_0}Sb=y_a^+$, or $f^{n_0}Sb=y_a^-$ and $ f^{n_0}y_{\ell}=y_a^+$. Therefore, either
$f^{n_0+1}STa$ or $f^{n_0+1}Sb$ is connected with level $a+1$.
The case when $y_\ell$ belongs to $\mathcal{L}_b$ is very similar, and in this case $f^{n_0}Sb=y_a^-$, $ f^{n_0}y_{\ell}=y_a^+$, and $f^{n_0+1}Sb$ is connected with $a+1$. By construction, in both cases the common $x$-coordinate of the end points is equal to $x_a$.
\end{proof}
After an application of $S$ the level connected with $a+1$ will be connected with $STa$, and now, instead of $3$ connected levels $STa\leq Sb\leq y_\ell$ (with at least one strict inequality) we have at least $4$ connected levels $y'\leq STa\leq Sb\leq y_\ell$ (with no more than two equalities in a row).
The process continues with a growing number of connected levels, the highest being $a+1$. Since on each step we cannot have more than two equalities in a row, the number of distinct levels in this sequence will also increase. Therefore, we obtain a sequence of connected levels
\begin{equation}\label{lowersnake}
a+1\geq y_1\geq \dots\geq y_{s}>\frac{b}{b+1}\geq y_{s+1}.
\end{equation}
It is evident from the construction that
there are no unaccounted levels $y\in \mathcal{L}_{a,b}$, $a+1\geq y\geq y_{s+1}$.
Now we prove a similar result for $\mathcal{U}_{a,b}$.
\begin{lem}\label{b-connected}
There is always one level connected with level $b-1$, and
the levels $y_b^-$ and $y_b^+$ are connected by a vertical segment at $x_b$.
\end{lem}
\begin{proof}
By Lemmas \ref{2-sys} and \ref{next}
we know that the three consecutive levels $y_u\leq Sa\leq ST^{-1}b$ are connected.
It is easy to see that the first digit in $(a,b)$-expansion of $ST^{-1}b$ is $2$,
and the first digit in $(a,b)$-expansion of $Sa$ is either $1$ or $2$. Therefore, the first digit in $(a,b)$-expansion of $y_u$ is either $1$ or $2$.
In the first case either
\[
T^{-1}Sa< b\leq T^{-1}ST^{-1}b
\]
or
\[
T^{-1}y_u<b\leq T^{-1}Sa
\]
are the connected levels. Therefore either $T^{-1}Sa=y_b^-$ and $T^{-1}ST^{-1}b=y_b^+$, or $T^{-1}y_u=y_b^-$ and $T^{-1}Sa=y_b^+$ are connected. So either $T^{-2}ST^{-1}b$ or $T^{-2}Sa$ is connected with level $b-1$.
In the second case, we know that $y_u\leq Sa$ and
\[
b-1\leq T^{-2}y_u\leq T^{-2}Sa<b.
\]
If $y_u=Sa$, $y_u$ must belong to $\mathcal{U}_b$, in which case $y_u<ST^{-1}b$. If $y_u<Sa$, then $y_u\in \mathcal{U}_a$, or $y_u\in \mathcal{U}_b$ and $y_u<ST^{-1}b$.
Let us assume that $y_u$ belongs to $\mathcal{U}_b$. Since $y_u<ST^{-1}b$, by
Proposition \ref{tech} there are two possibilities:
\begin{enumerate}
\item $f^{n_0}y_u$ is the end of a weak cycle,
\item there
exists $n_0$ such that $\rho(f^{n}y_u)=\rho(f^nST^{-1}b)$
for all $n<n_0$, and $\rho(f^{n_0}y_u)\neq \rho(f^{n_0}ST^{-1}b)$.
\end{enumerate}
In the first case, either $f^{n_0}ST^{-1}b=y_b^+$ and $f^{n_0}Sa=y_b^-$, or $f^{n_0}Sa=y_b^+$ and $ f^{n_0}y_u=y_b^-$, so either
$f^{n_0+1}ST^{-1}b$ or $f^{n_0+1}Sa$ is connected with level $b-1$.
In the second case, we first notice that
\[
\rho(f^{n_0-1}y_u)=\rho(f^{n_0-1}ST^{-1}b)=T^{-1}
\]
since if we had $\rho(f^{n_0-1}y_u)=\rho(f^{n_0-1}ST^{-1}b)=S$, then
we would have
\[
\rho(f^{n_0}y_u)=\rho(f^{n_0}ST^{-1}b)=T^{-1}
\]
in contradiction with the choice of $n_0$.
Further,
there are two possibilities:
\[
\text{(i) } \rho(f^{n_0}ST^{-1}b)=S,\,\,\rho(f^{n_0}y_u)=T^{-1},\;
\text{(ii) } \rho(f^{n_0}ST^{-1}b)=T^{-1},\,\,\rho(f^{n_0}y_u)=S.
\]
In the first case we obtain
\[
f^{n_0}y_u>b>f^{n_0}ST^{-1}b
\]
which contradicts the monotonicity of $f^{n_0}$ and the original assumption $y_u<ST^{-1}b$. Thus the only possibility is
\[
f^{n_0}y_u<b<f^{n_0}ST^{-1}b.
\]
By monotonicity of $f^{n_0}$ we have
\[
f^{n_0}y_u<f^{n_0}Sa<f^{n_0}ST^{-1}b.
\]
Therefore either $f^{n_0}y_u=y_b^-$ and $f^{n_0}Sa=y_b^+$,
or $f^{n_0}Sa=y_b^-$ and $f^{n_0}ST^{-1}b=y_b^+$ are connected. So either $T^{-1}f^{n_0}ST^{-1}b$ or $T^{-1}f^{n_0}Sa$ is connected with level $b-1$. The case when $y_u$ belongs to the $a$-cycle is very similar, and in this case $f^{n_0}y_u=y_b^-$ and $f^{n_0}Sa=y_b^+$ and $T^{-1}f^{n_0}Sa$ is connected with level $b-1$. By construction, in both cases the common $x$-coordinate of the end points of the segments at the levels $y_b^-$ and $y_b^+$ is $x_b$.
\end{proof}
After an application of $S$ the levels (2) will be connected with $ST^{-1}b$, and now, instead of $3$ connected levels $y_u\leq Sa\leq ST^{-1}b$ we have at least $4$ connected levels $y_u\leq Sa\leq ST^{-1}b\leq y''$.
The process continues with a growing number of connected levels, the lowest being $b-1$. Also the number of distinct levels will increase, and we obtain a sequence of connected levels
\begin{equation}\label{uppersnake}
b-1\leq \bar y_1\leq \dots\leq \bar y_t<\frac{a}{1-a}\leq \bar y_{t+1}.
\end{equation}
It is evident from the construction that
there are no unaccounted levels $y\in \mathcal{U}_{a,b}$, $b-1\leq y\leq \bar y_{t+1}$.
Now we complete the proof that all levels of $\mathcal{L}_{a,b}$ are connected. For that it is sufficient to find a sequence of connected levels with the distance between the highest and the lowest level $\geq 1$ and the lowest level $\geq T^{-1}Sb$. This is because the set of levels in $y\in\mathcal{L}_{a,b}$ satisfying $T^{-1}Sb\leq y\leq a+1$ is periodic with period $1$, and each $y\in \mathcal{L}_{a,b}$ uniquely determines a horizontal segment on level $y$, as was explained just before Lemma \ref{next}.
If $y_{s+1} \leq a$, then
all levels in $\mathcal{L}_{a,b}$ are connected.
Suppose now that $y_{s+1}> a$. If
$y_{s+1}=y_a^+$, then, since $y_a^+$ is already connected with $y_a^-$, all levels of $\mathcal{L}_{a,b}$ are connected.
Now assume that $y_{s+1}>y_a^+$. Then either
\[ y_{s+1}=\frac{b}{b+1} \quad \text{ or } \quad y_{s+1}<\frac{b}{b+1}.
\]
In the first case either $TSy_{s+1}=y_\ell=Sb$ (this can only happen if $y_{s+1}\in\mathcal{L}_a$), or $TSy_{s}>Sb$ is the next level above $Sb$, and hence $TSy_{s}=y_\ell$. In either case
$Sy_{s+1}\leq Sy_{s}\leq \cdots \leq STa\leq Sb=TSy_{s+1}$ are the connected levels with the distance between the lowest and the highest equal to $1$,
thus we conclude that all levels of $\mathcal{L}_{a,b}$ are connected.
In the second case,
the two levels $y_a^+<y_{s+1}$ will produce the ends of the cycles (one of them can be weak if one of $y_a^+$ or $y_{s+1}$ is equal to $0$).
By the cycle property (Proposition \ref{main-argument}(ii)), there exists a level $z\in\mathcal{U}_{a,b}$, $\frac{a}{1-a}< z<b$ such that $z=(STS)y_{s+1}$. We claim that $z=y_b^-$. Suppose not, and $z<y_b^-$. Then $y_b^-$ gives rise to the second cycle, and again by the cycle property, there exists $y\in\mathcal{L}_{a,b}$, $y<\frac{b}{b+1}$, such that $y_b^-=STSy$. Since $STS(z)=-\frac{z}{z-1}$
is monotone increasing for $z<1$, we conclude that
$y>y_{s+1}$ in contradiction with (\ref{lowersnake}). Thus $y_b^-=(STS)y_{s+1}$.
Then $TSy_{s+1}=Sy_b^-$ which implies that the right end of the segment at the level $Sy_b^-$, which is equal to the right end of the segment at the level $Sb$, is equal to the right end of the segment at the level $TSy_{s+1}$ (notice that this level may belong to $\mathcal{L}_{a,b}$, $\mathcal{U}_{a,b}$ or be at infinity if $y_{s+1}=0$). Since $y_{s}$ and $y_{s+1}$ were connected, the left end of the segment at the level $TSy_{s}$ is equal to the right end of the segment at the level $TSy_{s+1}$ even though they may belong to the boundaries of different connected components. Since $TSy_{s}\in \mathcal{L}_{a,b}$, we conclude that the segment at the level $TSy_{s}$ is adjacent to the segment at the level $Sb$, i.e. $TSy_{s}=y_\ell$. Thus $Sy_{s}\leq Sy_{s-1}\leq \cdots \leq STa\leq Sb\leq TSy_{s}$ are the connected levels with the distance between the lowest and the highest equal to $1$, and therefore
all levels in $\mathcal{L}_{a,b}$ are also connected. The proof for $\mathcal{U}_{a,b}$ follows exactly the same lines.
(A2) In order to prove the bijectivity of the map $F$ on $A_{a,b}$ we write it as a union of the upper and lower connected components, $A_{a,b}=A_{a,b}^u\cup A_{a,b}^\ell$, and subdivide each component into $3$ pieces: $A_{a,b}^u=\cup_{i=1}^3U_i$, and $A_{a,b}^\ell=\cup_{i=1}^3L_i$, where
\[\begin{aligned}
U_1=&\{(x,y)\in A_{a,b}^u\,:\, y\ge b\}\\
U_2=&\{(x,y)\in A_{a,b}^u\,:\, b-1\le y\le 0\}\\
U_3=&\{(x,y)\in A_{a,b}^u\,:\, 0\le y\le b\}\\
L_1=&\{(x,y)\in A_{a,b}^\ell\,:\, y\le a\}\\
L_2=&\{(x,y)\in A_{a,b}^\ell\,:\, 0\le y\le a+1\}\\
L_3=&\{(x,y)\in A_{a,b}^\ell\,:\, a\le y\le 0\},
\end{aligned}
\]
Now let
\[
U'_1=T^{-1}(U_1),\,\,
U'_2=S(U_2),\,\,
U'_3=S(U_3),\,\,
L'_1=T(L_1),\,\,
L'_2=S(L_2),\,\,
L'_3=S(L_3)
\]
be their images under the transformation $F$ (see Figure \ref{fig-bij}).
\begin{figure}
\caption{Bijectivity of the map $F_{a,b}$}
\label{fig-bij}
\end{figure}
Since the set $A_{a,b}$ is bounded by step-functions with finitely many steps, each of the pieces $U_i,L_i$ have the same property, and so do their images under $F$.
By the construction of the set $A_{a,b}$ we know that the levels corresponding to the ends of the cycles $c_a$ and $c_b$, if the cycles are strong, do not appear as horizontal boundary levels; the corresponding horizontal segments, let us call them the {\em locking segments} lie in the interior of
the set $A_{a,b}$. Furthermore, the images of all levels except for the levels next to the ends of the cycles, $f^{k_1-1}Ta$, $f^{m_1-1}Sa$, $f^{m_2-1}Sb$, and $f^{k_2-1}T^{-1}b$, also belong to $\mathcal{U}_{a,b}\cup\mathcal{L}_{a,b}$.
The exceptional levels are exactly those between $0$ and $b$ and above $TSa$ in $\mathcal{U}_{a,b}$, and between $a$ and $0$ and below $T^{-1}Sb$ in $\mathcal{L}_{a,b}$. The images of the horizontal segments belonging to these levels are the locking segments. Notice that the exceptional levels between $0$ and $b$ and between $a$ and $0$ constitute the horizontal boundary of the regions $U_3$ and $L_3$.
Transporting the rays $[-\infty,x_b]$ and $[x_a,\infty]$ (with $x_a$ and $x_b$ uniquely determined by Lemma \ref{2-sys}),
along the corresponding cycles, and using the strong cycle property, we see that the ``locking segment" in the horizontal boundary of $U_1'$ coincides with the locking segment of the horizontal boundary of $L_3'$, and the locking segment in the horizontal boundary of $L_1'$ coincides with the locking segment of the horizontal boundary of $U_3'$. It can happen that both ``locking segments" belong to $A_{a,b}^u$ or $A_{a,b}^\ell$. If only one of the numbers $a$ or $b$ has the strong cycle property, then there will be only one locking segment.
If the cycle property is weak or the $(a,b)$-continued fraction expansion of one or both $a$ and $b$ is periodic, then
all levels of $\mathcal{L}_a,\,\,\mathcal{L}_b,\,\,\mathcal{U}_a$ and $\mathcal{U}_b$ will belong to the boundary of $A_{a,b}$,
and there will be no locking segments. In these cases $L_3=[x_1,\infty]\times[a,0]$, and $L'_3=[-1/x_1,0]\times[-1/a,\infty]$, where $x_1=x_a$. Let $x_2$ be the $x$-coordinate of the right vertical boundary segment of $U_2$. Then the $x$-coordinate of the right vertical boundary segment of $U_1$ is $-1/x_2$. Let us denote the highest level in $\mathcal{U}_{a,b}$ by $y_2$.
Since $y_2\leq -1/a+1$, $y_2-1\leq -1/a$ is the next level after $-1/a$ in $\mathcal{U}_{a,b}$. This is since if we had $y\in\mathcal{U}_{a,b}$ such that $y_2-1<y<-1/a$, its preimage $y'=Ty$ would satisfy $y_2<y'<-1/a+1$, a contradiction. By construction of the region $A_{a,b}$ the segments at the levels $y_2-1$ and $-1/a$ are connected, therefore $Sx_1=T^{-1}Sx_2$.
This calculation shows that $L'_3$ and $U'_1$ do not overlap and fit together by this vertical ray.
Thus in all cases
the images $U'_i,\,L'_i$ do not overlap, and
$A_{a,b}=(\cup_{i=1}^3U'_i)\cup(\cup_{i=1}^3L'_i)$. This proves the bijectivity of the map $F$ on $A_{a,b}$ except for some images of its boundary. This completes the proof in the case $0<b\le -a<1$.
Now we return to the case $a\le -1$ dropped from consideration before Lemma \ref{2-sys}.
The explicit cycle relations for this case have been described in Theorem \ref{ab-cycle}. Notice that
all lower levels are connected, and $T^mSb$ is connected with $a+1$. Therefore $y_\ell=TSb$, and this implies that $x_a=m$.
The upper levels in the positive part are
$$ST^{-1}b<ST^{-2}ST^{-1}b< ... <(ST^{-2})^{m-1}ST^{-1}b<a/(a+1)$$
and $y_u=T^{-1}(ST^{-2})^{m-2}ST^{-1}b$.
Lemma \ref{2-sys} in this case holds with $x_a=m$ and $x_b=-1$ since
the equation for adjacency of the levels $y_u$ and $Sa$ is
$$T^{-1}(ST^{-2})^{m-2}ST^{-1}x_b=ST^{m-1}Sx_b=-1/m\, ,$$ which implies $x_b=-1$. Lemma \ref{a-connected} also holds with $y_a^-=ST^{m-1}b$ and $y_a^+=ST^{m}b$. Lemma \ref{b-connected} holds with $y_b^-=T^{-1}Sa$ and $y_b^+=T^{-1}ST^{-1}b$ and all upper levels will be connected by an argument similar to the one described above. To prove the bijectivity of $F$ on $A_{a,b}$ one proceeds the same way as above, the only modification being that level $L_2$ does not exist, and
$L_3=\{(x,y)\in A_{a,b}^\ell, a\le y\le a+1\}$.
\end{proof}
The following corollary is evident from the proof of part (ii) of the above theorem.
\begin{cor} \label{cor:bry} If both $a$ and $b$ have the strong cycle property, then for any boundary component
$h$ of $A_{a,b}$ (vertical or horizontal) there exists $N>0$ such that $F^N(h)$ is in the interior of $A_{a,b}$.
\end{cor}
\section{Finite rectangular structure of the attracting set}\label{s:6}
Recall that the attracting set $D_{a,b}$ was defined by \eqref{def-atrac}: starting with the trapping region $\Theta_{a,b}$ described in Theorem \ref{Delta-trapping}, one has
\[ D_{a,b}=\bigcap_{n=0}^\infty D_n, \text{ with } D_n=\bigcap_{i=0}^n F^i(\Theta_{a,b})\,.
\]
\begin{lem} \label{Dn} Suppose that the map $f$ satisfies the finiteness condition. Then,
for each $n\geq 0$, $D_n$ is a region consisting of two connected components, the upper one, $D_n^u$, and the lower one, $D_n^\ell$, bounded by non-decreasing step-functions.
\end{lem}
\begin{proof}
The proof is by induction on $n$. The base of induction holds by the definition of the trapping region $\Theta_{a,b}$. For the induction step, let us assume that the region $D_n$ consists of two connected components, the upper one
$D_n^u$ and the lower one $D_n^\ell$, bounded by non-decreasing step-functions.
We will show that
the region $D_{n+1}$ consists of two connected components,
$D_{n+1}^u$ and $D_{n+1}^\ell$, bounded by non-decreasing step-functions.
In what follows, we present the proof assuming that $0<b\le -a<1$. The situation $a\le -1$ is less complex due to the explicit cycle expressions described in Theorem \ref{ab-cycle} and can be treated similarly with some minor modifications.
We decompose the regions $D_n^u$ and $D_n^\ell$ as follows
\[
\begin{split}
U_n^{11}&=\{(x,y)\in D_n^u\,:\, y\ge TSa\}\\
U_n^{12}&=\{(x,y)\in D_n^u\,:\, b\le y\le TSa\}\\
U_n^3&=\{(x,y)\in D_n^u\,:\, 0\le y\le b\}\\
U_n^{21}&=\{(x,y)\in D_n^u\,:\, \frac{a}{1-a}\le y\le 0\}\\
U_n^{22}&=\{(x,y)\in D_n^u\,:\, b-1\le y\le \frac{a}{1-a}\}\\
L_n^{11}&=\{(x,y)\in D_n^\ell\,:\, y\le T^{-1}Sb\}\\
L_n^{12}&=\{(x,y)\in D_n^\ell\,:\, T^{-1}Sb\le y\le a\}\\
L_n^3&=\{(x,y)\in D_n^\ell\,:\, a\le y\le 0\}\\
L_n^{21}&=\{(x,y)\in D_n^\ell\,:\, 0\le y\le \frac{b}{b+1}\}\\
L_n^{22}&=\{(x,y)\in D_n^\ell\,:\, \frac{b}{b+1}\le y\le a+1\}.\\
\end{split}
\]
By induction hypothesis, the regions $U_n^{12},\,U_n^3,\,U_n^{21}$ and $U_n^{22}$ are bounded below and above, and $U_n^{11}$ only below, by a ray and on the right by a non-decreasing step-function. Similarly, the regions $L_n^{12},\,L_n^3,\,L_n^{21}$ and $L_n^{22}$ are bounded above and below, and $L_n^{11}$ only above, by a ray and on the left by a non-decreasing step-function.
If $B\subset D_n^u$
is one of the upper subregions, let $\partial B$ be the union of the boundary components of $B$ that belong to the boundary of $D_n^u$, and, similarly, if
$B\subset D_n^\ell$
is one of the lower subregions, let $\partial B$ be the union of the boundary components of $B$ that belong to the boundary of $D_n^\ell$.
Since $\Theta_{a,b}$ is a trapping region, $F(\Theta_{a,b})\subset \Theta_{a,b}$, $D_{n+1}=F(D_n)\subset D_n$, and hence $D_{n+1}^u\subset D_n^u$ and $D_{n+1}^\ell\subset D_n^\ell$.
The natural extension map $F$ is piecewise fractional-linear, hence it maps regions bounded by non-decreasing step-functions to regions bounded by non-decreasing step-functions. More precisely, we have
\[
\begin{split}
U_{n+1}^u&=S(U_n^{22}\cup U_n^{21})\cup T^{-1}(U_n^{11}\cup U_n^{12})\cup S(L_n^3)\\
U_{n+1}^\ell&=S(L_n^{22}\cup L_n^{21})\cup T(L_n^{11}\cup L_n^{12})\cup S(U_n^3)\,.
\end{split}
\]
In order to show that the region $D^u_{n+1}$, is connected, we notice
that the region $T^{-1}(U_n^{11}\cup U_n^{12})$ is inside the ``quadrant" $[-\infty,0]\times[b-1,\infty]$
while $S(U_n^{22}\cup U_n^{21})$ is inside the strip $[0,1]\times[ST^{-1}b,\infty]$. Therefore, they either intersect by a ray of the $y$-axis, or are disjoint. In the first case, either $T^{-1}ST^{-1}b<Sa$, which implies that $S(L_n^3)$ is inside the connected region
$S(U_n^{22}\cup U_n^{21})\cup T^{-1}(U_n^{11}\cup U_n^{12})$, or $Sa\leq T^{-1}ST^{-1}b$ which implies that the level $Sa$ belongs to the boundary of the trapping region, and again $S(L_n^3)$ is inside the connected region
$S(U_n^{22}\cup U_n^{21})\cup T^{-1}(U_n^{11}\cup U_n^{12})$.
Now suppose that the regions $T^{-1}(U_n^{11}\cup U_n^{12})$ and $S(U_n^{22}\cup U_n^{21})$ are disconnected. Notice that the right vertical boundary of the region $S(L_n^3)$ is a ray of the $y$-axis, thus $S(L_n^3)\cup S(U_n^{22}\cup U_n^{21})$ is a connected region bounded by a non-decreasing step-function. Since $T^{-1}(U_n^{12})\cap S(L_n^3)=\emptyset$, the non-connectedness situation may only appear from the intersection of $T^{-1}(U_n^{11})$ and $S(L_n^3)$,
i.e. inside the strip $[-1,0]\times[-1/a,\infty]$.
Since $f$ satisfies the finiteness condition,
Theorem \ref{thm:recstructure} is applicable, and the set $A_{a,b}$ constructed there belongs to each $D_n$. This is because $A_{a,b}\subset \Theta_{a,b}$, and if $A_{a,b}\subset D_n$, we have $A_{a,b}=F(A_{a,b})\subset F(D_n)=D_{n+1}$. The set
$A_{a,b}$ has finite rectangular structure
and contains the strip $[-1,0]\times[-1/a,\infty]$. Thus the connectedness
of the region $D^u_{n+1}$ is proved. Moreover, this argument shows that $\partial T^{-1}(U_n^{11})$ is inside $D^u_{n+1}$ and therefore does not contribute to its boundary, and
\[
\partial U_{n+1}^u=\partial (T^{-1}( U_n^{12}))\cup\partial (S(U_n^{22}\cup U_n^{21})\cup S(L_n^3)).
\]
Since $\partial (T^{-1}( U_n^{12}))$ and $\partial (S(U_n^{22}\cup U_n^{21})\cup S(L_n^3))$ are given by non-decreasing step-functions, one $<Sa$, and the other $\ge Sa$, it follows that $\partial U_{n+1}^u$ is also given by a non-decreasing step-function. A similar argument proves that $D_{n+1}^\ell$ is connected and bounded by a non-decreasing step-function.
\end{proof}
\begin{lem}\label{l:all-levels} Suppose that, for each $n$, $D_n$ consists of two connected components as in Lemma \ref{Dn}.
Then
\begin{enumerate}
\item all horizontal levels of the boundary of $D_n^u$ belong to $\mathcal{U}_{a,b}$ (resp., $D_n^\ell$ belong to $\mathcal{L}_{a,b}$) and remain as horizontal levels of $D_{n+1}^u$ (resp., $D_{n+1}^\ell$);
\item all levels of $\mathcal{U}_{a,b}$ appear in the boundary of some $D_n^u$, and all levels of $\mathcal{L}_{a,b}$ appear in the boundary of some $D_n^\ell$;
\item
the attractor $D_{a,b}$ consists of two connected components bounded by non-decreasing step-functions; the upper boundary function takes all values from the set $\mathcal{U}_{a,b}$, and the lower boundary function takes all values from the set $\mathcal{L}_{a,b}$.
\item The map $F: D_{a,b}\to D_{a,b}$ is surjective.
\end{enumerate}
\end{lem}
\begin{proof} (1) We prove this by induction. For the base case, $D_0^u$ contains the horizontal levels $T^{-1}b$, $ST^{-1}b$ and $\min(T^{-1}ST^{-1}b,Sa)$. The levels $T^{-1}b$, $ST^{-1}b$ belong to the boundary of $D_1^u$. If $Sa<T^{-1}ST^{-1}b$, then $ST^{-1}b>TSa$ and therefore is the end of the cycle and does not belong to $\mathcal{U}_{a,b}$. If
$Sa>T^{-1}ST^{-1}b$, then $T^{-1}ST^{-1}b$ appears as a boundary segment of $D_1^u$.
A similar argument applies to $D_0^\ell$ that contains the horizontal levels $Ta$, $STa$, and either $TSTa$ or $Sb$.
For the induction step we assume that (1) holds for $k=n-1$, and prove that it holds for $k=n$.
Let $y\in\partial D_n$ be a horizontal segment of the boundary, $y\geq ST^{-1}b$, and $y\in \mathcal{U}_{a,b}$. Then $y=Sy'$, where $y'\in\partial D_{n-1},\,b-1\leq y'<0$.
By inductive hypothesis, $y'\in\partial D_n$, hence $y=Sy'\in\partial D_{n+1}$. Now let $y\in\partial D_n$ be a horizontal segment of the boundary, $b-1\leq y<Sa$. Then $y=T^{-1}y'$, where $y'\in\partial D_{n-1},\,0< y'<TSa$. By inductive hypothesis, $y'\in\partial D_n$, hence $y=T^{-1}y'\in\partial D_{n+1}$.
The level $y=Sa$ appears as a boundary segment of $D_n^u$ since $T^{-1}(\partial(U_{n-1}^{11})\cup \partial(U_{n-1}^{12}))$ and $S(\partial(L_{n-1}^3))$ do not overlap. Then $y=Sy'$, where $y'=a$ is the $y$-coordinate of the horizontal lower boundary of $L_{n-1}^3$. Since $L_{n}^3\subset L_{n-1}^3$ and $U_{n}^{11}\cup U_{n}^{12}\subset U_{n-1}^{11}\cup U_{n-1}^{12}$, we get that $T^{-1}(\partial(U_{n}^{11})\cup \partial(U_{n}^{12}))$ and $S(\partial(L_{n}^3))$ do not overlap, and $y=Sa$ will appear as a boundary segment of $D_{n+1}^u$.
On the other hand, assume $y\in\partial D_{n+1}$ was not a horizontal level of $\partial D_n$. Then $y=Sy'$ for some $y'\in\partial (U_n^{22}\cup U_n^{21})$, $y=T^{-1}y'$ for some $y'\in\partial (U_n^{12})$, or $y=Sa$. In all cases $y\in \mathcal{U}_{a,b}$
by the structure of the sets $\mathcal{U}_{a}$ and $\mathcal{U}_b$ established in Theorems \ref{a-cycle} and \ref{b-cycle}.
(2)
We start with level $-\frac1{b-1}$ which belongs to the boundary of the trapping region $\Theta_{a,b}$ by definition. We have seen that if $T^{-1}ST^{-1}b\in \mathcal{U}_b$,
then the level appears in the boundary of $D_1^u$. Now, if $b-1<T^{-k}ST^{-1}b<\frac{a}{1-a}$ (for the smallest $k=2$ or $3$), then the expansion continues, each $T^{-i}ST^{-1}b,\,i\leq k$ appears for the first time in the boundary of $D_i^u$ for $i\leq k$, and
the next element in the cycle, $ST^{-k}ST^{-1}b$, appears in the boundary of $D_{k+1}^u$. Using the structure of the set $\mathcal{U}_b$
established in Theorem \ref{b-cycle} we see that all levels of the set $\mathcal{U}_b$ appear as boundary levels of some $D_n^u$. We use the same argument for level $-\frac1{a}$ which appears for the first time in the boundary of some $D_{n_0}^u$, to see that all elements of the set $\mathcal{U}_a$ appear as boundary levels of all successive sets $D_n^u$. The same argument works for the lower boundary.
(3) Thus starting with some $n$, all sets $D_n$ have two connected components bounded by non-decreasing step-functions whose $y$ levels coincide with the sets $\mathcal{U}_{a,b}$ and $\mathcal{L}_{a,b}$.
Therefore, the attractor $D_{a,b}=\cap_{n=0}^\infty D_n$ has the same property.
(4) The surjectivity of the map $F$ on $D_{a,b}$ follows from the nesting property of the sets $D_n$.
\end{proof}
A priori the map $F$ on $D_{a,b}$ does not have to be injective, but in our case it will be since we will identify $D_{a,b}$ with an earlier constructed set $A_{a,b}$.
\begin{cor} \label{FRS}If the map $f$ satisfies the finiteness condition, then the attractor $D_{a,b}$ has finite rectangular structure, i.e. bounded by non-decreasing step-functions with a finite number of steps.
\end{cor}
\begin{thm} \label{attractor} If the map $f$ satisfies the finiteness condition, then the set $A_{a,b}$ constructed in Theorem \ref{thm:recstructure} is the attractor for the map $F$.
\end{thm}
\begin{proof} We proved in Theorem \ref{thm:recstructure} that the set $A_{a,b}$ constructed there is uniquely determined by the prescribed set of $y$-levels $\mathcal{U}_{a,b}\cup\mathcal{L}_{a,b}$. By Corollary \ref{FRS}, the set $D_{a,b}$ has finite rectangular structure with the same set of $y$-levels. Now we look at the $x$-levels of the jumps of its boundary step-functions.
Take the vertex $(x, b-1)$ of $D_{a,b}$. From the surjectivity of $F$ on $D_{a,b}$, there is a point $z\in D_{a,b}$ s.t. $F(z)=(x, b-1)$.
Then $z$ must be the intersection of the ray at the level $b$ with the boundary of $D_{a,b}$, i.e. $z=(\tilde x_b, b)$, hence $x=\tilde x_b-1$. Continue the same argument: look at the vertex at the level $-1/(b-1)$. It must be $F(\tilde x_b-1, b-1)$, etc. Since each $y$-level of the boundary has a unique ``predecessor" in its orbit, all $x$-levels of the jumps obtained by ``transporting" the rays $[-\infty,\tilde x_b]$ and $[\tilde x_a,\infty]$ over the corresponding cycles, satisfy the same
equations that defined the boundary of the set $A_{a,b}$ of Theorem \ref{thm:recstructure}. Therefore $\tilde x_a=x_a,\,\,\tilde x_b=x_b$,
the step-functions that define the boundaries are the same, and $D_{a,b}=A_{a,b}$.
\end{proof}
\section{Reduction theory conjecture}\label{s:naturalextension}\label{s:7}
Don Zagier conjectured that the Reduction Theory properties, stated in the Introduction, hold for every $(a,b)\in\mathcal P$.
He was motivated by the classical cases and computer experimentations with random parameter values $(a,b)\in\mathcal P$ (see Figures \ref{don-a} and \ref{fig:A} for attractors obtained by iterating
random points using Mathematica program).
The following theorem gives a sufficient condition for the Reduction Theory conjecture to hold:
\begin{thm} \label{RTC}If both $a$ and $b$ have the strong cycle property, then for every point $(x,y)\in \bar{\mathbb R}^2\setminus \Delta$ there exists $N>0$ such that $F^N(x,y)\in D_{a,b}$.
\end{thm}
\begin{proof} Every point $(x,y)\in\bar{\mathbb R}^2\setminus \Delta$ is mapped to the trapping region by some iterate $F^{N_1}$. Since the sets $D_n$ are nested and contain $D_{a,b}$, for large $N$, $F^N(x,y)$ will be close to the boundary of $D_{a,b}$. By Corollary \ref{cor:bry}, for any boundary component $h$ of $D_{a,b}$ there exists $N_2>0$ such that $F^{N_2}(h)$ is inside $D_{a,b}$. Therefore, there exists a large enough $N>0$ such that $F^N(x,y)$ will be in the interior of $D_{a,b}$.
\end{proof}
The strong cycle property is not necessary for the Reduction theory conjecture to hold. For example, it holds for the two classical expansions $(-1,0)$ and $(-1,1)$ that satisfy only a weak cycle property. In the third classical expansion $(-1/2,1/2)$ that also satisfies a weak cycle property, property (3) does not hold for some points $(x,y)$ with $y$ equivalent to $r=(3-\sqrt{5})/2$.
\noindent\begin{figure}
\caption{Attractors for the classical cases}
\label{fig:A}
\end{figure}
The next result shows that, under the finiteness condition, almost every point $(x,y)\in \bar{\mathbb R}^2\setminus \Delta$ lands in the attractor $D_{a,b}$ after finitely many iterations.
\begin{prop}
If the map $f_{a,b}$ satisfies the finiteness condition, then for almost every point $(x,y)\in \bar{\mathbb R}^2\setminus \Delta$, there exists $N>0$ such that
$F_{a,b}^N(x,y)\in D_{a,b}$.
\end{prop}
\begin{proof}
Let $(x,y)\in \mathbb R^2$ with $y$ irrational and $y=\lfloor n_0,n_1,n_2,\dots\rceil_{a,b}$. In the proof of Theorem \ref{Delta-trapping}, we showed that there exists $k>0$ such that
$$(x_{j+1},y_{j+1})=ST^{-n_j}\dots ST^{-n_1}ST^{-n_0}(x,y)\in [-1,1]\times ([-1/a,\infty]\cup [-\infty,-1/b])$$
for all $j\ge k$. The point $F^{N}_{a,b}(x,y)=(x_{k+1}, y_{k+1})$ is in $A_{a,b}$, if $(x_{k+1},y_{k+1})\in [-1,0]\times [-1/a,\infty]$ or
$(x_{k+1},y_{k+1})\in [0,1]\times [-\infty,-1/b]$. Also, $F^{N+1}(x,y)=F(x_{k+1},y_{k+1})$ is in $A_{a,b}$ if $(x_{k+1},y_{k+1})\in [0,1]\times [-1/a+1,\infty]$
or $(x_{k+1},y_{k+1})\in [-1,0]\times [-\infty,-1/b-1]$. Thus we are left with analyzing the situation when the sequence of iterates
$$(x_{j+1},y_{j+1})=ST^{-n_j}\dots ST^{-n_1}ST^{-n_0}(x,y)$$ belongs to $[0,1]\times [-1/a,-1/a+1]$ for all $j\ge k$ (or $[-1,0]\times [-1/b,-1/b-1]$ for all $j\ge k$). Assume that we are in the first situation: $y_{j+1}\in [-1/a,-1/a+1]$ for all $j\ge k$. This implies that all digits $n_{j+1}$, $j\ge k$ are either $\lfloor -1/a\rceil$ or $\lfloor -1/a\rceil+1$. In the second situation, the digits $n_{j+1}$, $j\ge k$ are either $\lfloor -1/b\rceil$ or $\lfloor -1/b\rceil-1$. Therefore the continued fraction expansion of $y$ is written with only two consecutive digits (starting from a certain position). By using Proposition \ref{bdigits1} and Remark \ref{bdigits2} we obtain that the set of all such points has zero Lebesgue measure. This proves our result.
\end{proof}
\begin{rem}
In the next section we show that there is a non-empty Cantor-like set ${\mathcal E}\subset \Delta$ belonging to the boundary segment $b=a+1$ of $\mathcal P$ such that for $(a,b)\in{\mathcal E}$ the set $\mathcal{U}_{a,b}\cup\mathcal{L}_{a,b}$ is infinite. Therefore, for $(a,b)\in{\mathcal E}$ either the set $D_n^u$ or $D_n^\ell$ is disconnected for some $n>0$, or,
by Lemma \ref{l:all-levels}(3), the attractor $D_{a,b}$ consists of two connected components whose boundary functions are not step-functions with finitely many steps.
\end{rem}
\section{Set of exceptions to the finiteness condition}\label{s:8}
In this section we study the structure of the set ${\mathcal E}\subset\mathcal P$ of exceptions to the finiteness condition.
We write ${\mathcal E}={\mathcal E}_{\mathfrak{b}}\cup{\mathcal E}_{\mathfrak{a}}$
where ${\mathcal E}_{\mathfrak{b}}$ (resp., ${\mathcal E}_{\mathfrak{a}}$) consists of all points $(a,b)\in \mathcal{P}$ for which $b$ (resp., $a$) does not satisfy the finiteness condition, i.e. either the truncated orbit $\mathcal{U}_b$ or $\mathcal{L}_b$ is infinite (resp., $\mathcal{U}_a$ or $\mathcal{L}_a$).
We analyze the set ${\mathcal E}_{\mathfrak{b}}$. Recall that, by Proposition \ref{shift}(2), the set $\mathcal{U}_b$ is infinite if and only if $\mathcal{L}_b$ is infinite, therefore it is sufficient to analyze the condition that the orbit $\mathcal{U}_b$ is not eventually periodic and its values belong to the interval $(\frac{b}{b+1},a+1)$. As before, we restrict our analysis (due to the
symmetry considerations) to the parameter subset of $\mathcal{P}$ given by $b\le -a$ and write
${\mathcal E}_{\mathfrak{b}}=\cup_{m=3}^\infty{\mathcal E}^m_{\mathfrak{b}}$ where $b\in{\mathcal E}^m_{\mathfrak{b}}$ if $b\in {\mathcal E}_{\mathfrak{b}}$ and $T^mSb\in (\frac{b}{b+1},a+1)$.
By Theorem \ref{b-cycle} and its proof, it follows that if $b\in {\mathcal E}^m_{\mathfrak{b}}$, then the first digit of the $(a,b)$-continued fraction expansion of $Sb$ is $-m$ and all the other digits are either $-m$ or $-(m+1)$.
We describe a recursive construction of the exceptional set ${\mathcal E}^m_{\mathfrak{b}}$. One starts with the `triangular' set
$${\mathcal T}^m_{\mathfrak{b}}=\{(a,b)\in \mathcal{P} : \frac{b}{b+1}\le T^mSb\le a+1\}.$$
The range of possible values of $b$ in ${\mathcal T}^m_{\mathfrak{b}}$ is given by the interval $[\underline b, \bar b]$ where $T^mS\bar b=\bar b$ and $T^mS\underline b=\underline b/(\underline b+1)$. Since
\[
\frac{b}{b+1}\leq b \text{ for all } b\ge 0,
\]
and the function $T^mSb$ is monotone increasing, we obtain that $\underline b<\bar b$, and $\underline b$ is the horizontal boundary of ${\mathcal T}^m_{\mathfrak{b}}$, while $\bar b$ is the $b$-coordinate of its `vertex'.
At the next stage we obtain the following regions:
$${\mathcal T}^{m,m}_{\mathfrak{b}}=\{(a,b)\in {\mathcal T}^m_{\mathfrak{b}}: \frac{b}{b+1}\le T^{m}ST^mSb\le a+1\}$$
$${\mathcal T}^{m,m+1}_{\mathfrak{b}}=\{(a,b)\in {\mathcal T}^m_{\mathfrak{b}}: \frac{b}{b+1}\le T^{m+1}ST^mSb\le a+1\}\,.$$
By the same argument as above each region is `triangular', i.e. the $b$-coordinate of its lower (horizontal) boundary is less than the $b$-coordinate of its vertex. We show that its intersection with the triangular region obtained on the previous step is either empty or has `triangular' shape. The horizontal boundary of ${\mathcal T}^{m,m}_{\mathfrak{b}}$ has the $b$-coordinate given by the relation $T^mST^mSb=b/(b+1)$ (call it $\tilde b$).
We have
\[
T^mST^mS\underline b=T^mS\left(\frac{\underline b}{\underline b+1}\right)=T^mS\underline b-1=-\frac1{\underline b+1}<\frac{\underline b}{\underline b+1},
\]
so $\underline b<\tilde b$. On the other hand,
\[
T^mST^mS\bar b=T^mS\bar b=\bar b,
\]
which shows that the hyperbola $T^mST^mSb=b$ intersects the diagonal side $b=a+1$ at the point with $b$-coordinate $\bar b$. It follows that the region ${\mathcal T}^{m,m}_{\mathfrak{b}}$ is triangular and non-empty with $\underline b<\tilde b<\bar b$.
The upper boundary of ${\mathcal T}^{m,m+1}_{\mathfrak{b}}$ is given by the hyperbola $T^{m+1}ST^mSb=a+1$. Notice that, if $\underline a+1=T^mS\underline b$, then the point $(\underline a,\underline b)$ lies on the curves $T^mSb=a+1$ (obviously) and $T^{m+1}ST^mSb=a+1$ because
$$T^{m+1}ST^mS\underline b=T^{m+1}S(\underline b/(\underline b+1))=T^mS\underline b=\underline a+1\,.$$
This shows that the entire horizontal boundary of ${\mathcal T}^m_{\mathfrak{b}}$ belongs to that of ${\mathcal T}^{m,m+1}_{\mathfrak{b}}$.
Moreover, the hyperbola $T^{m+1}ST^mSb=a+1$ intersects the diagonal side $b-a=1$ at the point $\hat b$ satisfying $T^{m+1}ST^mS\hat b=\hat b$. Therefore, $T^{m}ST^mS\hat b=\hat b-1<\frac{\hat b}{\hat b+1}$, i.e. $\hat b<\tilde b$.
In this case we have
$\underline b<\hat b<\tilde b<\bar b$, and the two triangular regions ${\mathcal T}^{m,m}_{\mathfrak{b}}$ and ${\mathcal T}^{m,m+1}_{\mathfrak{b}}$ are disjoint and non-empty.
The situation becomes more complicated as we proceed recursively. Let ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$ be one of the regions obtained after $k$ steps of this construction, with $n_1=m$ and $n_i\in\{m,m+1\}$ for $2\le i\le k$. At the next step we get two new sets (possible empty) (see Figure \ref{fig-exc}):
$${\mathcal T}^{n_1,n_2,\dots,n_k,m}_{\mathfrak{b}}=\{(a,b)\in {\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}: \frac{b}{b+1}\le T^{m}ST^{n_k}S\dots T^{n_1}Sb\le a+1\}$$
$${\mathcal T}^{n_1,n_2,\dots,n_k,m+1}_{\mathfrak{b}}=\{(a,b)\in {\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}: \frac{b}{b+1}\le T^{m+1}ST^{n_k}S\dots T^{n_1}Sb\le a+1\}\,.$$
\begin{figure}
\caption{Set ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$ and its subdivision}
\label{fig-exc}
\end{figure}
As in the base case, the inequality $T^{m}ST^{n_k}S\dots T^{n_1}Sb\le a+1$ of ${\mathcal T}^{n_1,n_2,\dots,n_k,m}_{\mathfrak{b}}$ is satisfied by all points of ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$ because of the monotone increasing property of $T,S$ and the fact that $T^{n_k}S\dots T^{n_1}Sb\le a+1$ implies
$$T^{m}ST^{n_k}S\dots T^{n_1}Sb\le T^mS(a+1)\le T^mS(b)\le a+1\,.$$
Thus the upper boundary of the region ${\mathcal T}^{n_1,n_2,\dots,n_k,m}_{\mathfrak{b}}$ (if nonempty) is part of the upper boundary of ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$; it is the lower (horizontal) boundary that changes. Similarly, the defining inequality $\frac{b}{b+1}\le T^{m+1}ST^{n_k}S\dots T^{n_1}Sb$ of ${\mathcal T}^{n_1,n_2,\dots,n_k,m+1}_{\mathfrak{b}}$ is satisfied by all points of ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$ because
$$T^{m+1}ST^{n_k}S\dots T^{n_1}Sb\ge T^{m+1}S\frac{b}{b+1}=m-\frac{1}{b}=T^mSb\ge\frac{b}{b+1}\,.$$
Thus the lower boundary of ${\mathcal T}^{n_1,n_2,\dots,n_k,m+1}_{\mathfrak{b}}$ (if nonempty) is part of the lower boundary of ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$.
Therefore, we can describe the above sets as
\begin{eqnarray}
& &{\mathcal T}^{n_1,n_2,\dots,n_k,m}_{\mathfrak{b}} = \{(a,b)\in {\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}: \frac{b}{b+1}\le T^{m}ST^{n_k}S\dots T^{n_1}Sb\}\\
& &{\mathcal T}^{n_1,n_2,\dots,n_k,m+1}_{\mathfrak{b}} = \{(a,b)\in {\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}: T^{m+1}ST^{n_k}S\dots T^{n_1}Sb\le a+1\}\,.
\end{eqnarray}
By the same reason as in the base case, the two regions ${\mathcal T}^{n_1,\dots,n_k,m}_{\mathfrak{b}}$ and ${\mathcal T}^{n_1,\dots,n_k,m+1}_{\mathfrak{b}}$ do not overlap.
The set ${\mathcal E}^m_{\mathfrak{b}}$ is now obtained as the union of all sets of type
\begin{equation}\label{eni}
{\mathcal E}^{(n_i)}_{\mathfrak{b}}=\bigcap_{k=1}^\infty{\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}
\end{equation}
where
$n_1=m$, $n_i\in\{m,m+1\}$ if $i\ge 2$, and the sequence $(n_i)$ is not eventually periodic. If such a set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ is non-empty and $(a,b)$ belongs to it, then $b$ is uniquely determined from the $(a,b)$-expansion of $Sb=\lfloor -n_1,-n_2,\dots\rceil$.
First we need some additional lemmas:
\begin{lem}\label{lem-cf} $ $
\begin{itemize}
\item[(i)] A point $b\in [0,1]$ satisfying $T^{n_k}S\dots T^{n_1}Sb=b$ with $|n_i|\ge 2$ can be written formally using a periodic ``$-$" continued fraction expansion
\begin{equation}\label{bbar}
b=-1/(\overline{-n_1,-n_2,\dots,-n_k})=(0,\overline{-n_1,-n_2,\dots,-n_k})\,.
\end{equation}
If $b$ is in ${\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$, then $Sb$ has the $(a,b)$-continued fraction expansion
$$\lfloor Sb\rceil_{a,b}=\lfloor\overline{-n_1,-n_2,\dots,-n_k}\,\rceil\,.$$
\item[(ii)] A point $b$ in $[0,1]$ satisfying $T^{n_k}S\dots T^{n_1}Sb=b/(b+1)$ can be written formally using the periodic ``$-$'' continued fraction expansion
\begin{equation}\label{underb}
b=(0,-n_1,\overline{-n_2,\dots,-n_k,-(m+1)})\,.
\end{equation}
If the point $b\in {\mathcal T}^{n_1,n_2,\dots,n_k}_{\mathfrak{b}}$, then $\lfloor Sb\rceil_{a,b}=\lfloor -n_1,\overline{-n_2,\dots,-n_k,-(m+1)}\rceil$.
\end{itemize}
\end{lem}
\begin{proof}
One can verify directly that the point $b$ given by \eqref{bbar} is the fixed point of the hyperbolic transformation $T^{n_k}S\dots T^{n_1}S$ and $b\in [0,1]$ (see also \cite[Proposition 1.3]{KU1}).
The equation in part (ii) can be written as $STST^{n_k}S\dots T^{n_1}Sb=b$ and one verifies directly that the value $b$ given by \eqref{underb} is the fixed point of that hyperbolic transformation and $b\in [0,1]$.
\end{proof}
Notice that the relation $(0,-n_1,-n_2,\dots)=-(0,n_1,n_2,\dots)$ is satisfied, assuming that the formal ``$-$" continued fraction expansions are convergent (from the proof of Theorem \ref{convergence}, the convergence property holds if $|n_i|\ge 2$ for all $i\ge 1$).
\begin{defn}
We say that two sequences (finite or infinite) $\sigma_1=(n_i)$ and $\sigma_2=(p_j)$ of positive integers are in lexicographic order, $\sigma_1\prec \sigma_2$, if on the first position $k$ where the two sequences differ one has $n_k<p_k$, or if the finite sequence $(n_i)$ is a starting subsequence of $(p_j)$.
\end{defn}
The following property follows from the monotonicity of $T,S$.
\begin{lem}\label{order}
Given two infinite sequences $\sigma_1=(n_i)$ and $\sigma_2=(p_j)$ of integers $n_i\ge 2$ and $p_j\ge 2$ such that $\sigma_1\prec \sigma_2$ then
$$(0,n_1,n_2,\dots) < (0,p_1,p_2,\dots)\,.$$
\end{lem}
The next lemma provides necessary conditions for a set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ to be non-empty. Denote by $\mathfrak{l}_m$ the length of the initial block of $m$'s and by $\mathfrak{l}_{m+1}$ the length of the first block of $(m+1)$'s in $(n_i)$.
\begin{lem}\label{lemE} $ $
\begin{itemize}
\item[(i)] If a set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ in the upper region ${\mathcal T}^{m,m}_{\mathfrak{b}}$ is non-empty then the sequence $(n_i)$ contains no consecutive $(m+1)$'s and the length of any block of $m$'s is equal to $\frak{l}_m$ or $\frak{l}_m-1$.
\item[(ii)] If a set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ in the lower region ${\mathcal T}^{m,m+1}_{\mathfrak{b}}$ is non-empty then the sequence $(n_i)$ contains no consecutive $m$'s and the length of any block of $(m +1)$'s is
equal to $\frak{l}_{m+1}$ or $\frak{l}_{m+1}+1$.
\end{itemize}
\end{lem}
\begin{proof}
(i) Assume that the sequence $(n_i)$ contains two consecutive $(m+1)$'s.
Then some ${\mathcal T}^{n_1,n_2,\dots,n_k,m+1,m+1}_{\mathfrak{b}}$ (with $n_1=n_2=n_k=m$) is non-empty. The upper vertex of such a triangular set satisfies the inequality
\[
\begin{split}
\bar b&\le -(0,\overline{n_1,n_2,\dots,n_k,m+1,m+1})\\
&=-(0,m,m,\dots,m,m+1,\boxed{m+1},\dots)
\end{split}
\]
while the lower (horizontal) boundary satisfies
\[
\begin{split}
\underline{b}&\ge -(0,n_1,\overline{n_2,\dots,n_k,m+1})\\
&=-(0,m,m,\dots,m,m+1,\boxed{m},\dots)\qquad
\end{split}
\]
This implies that $\underline{b}>\bar b$ because the entries of the corresponding continued fractions
with positive entries are in lexicographic order (they coincide on the first $k+1$ places, and on the $(k+2)^{th}$
position the first continued fraction has digit $m+1$ while the second one has digit $m$), i.e. the set ${\mathcal T}^{n_1,n_2,\dots,n_k,m+1,m+1}_{\mathfrak{b}}$ is empty.
Now assume that there exists a non-empty set ${\mathcal T}^{n_1,n_2\dots,n_k, m, m, \dots, m}$ ($n_k=m+1$) with the final block of $m$'s of length greater than $\frak{l}_m$. The upper vertex of this set is given by
\[
\begin{split}
\bar b&\le -(0,\overline{n_1,n_2,\dots,n_k})=-(0,\overline{\underbrace{m,m,\dots,m}_{\frak{l}_m},m+1,\dots,n_k})\\
&=-(0,\underbrace{m,m,\dots,m}_{\frak{l}_m},m+1,\dots,n_k,\underbrace{m,m,\dots,m}_{\frak{l}_m},m+1,\dots)
\end{split}
\]
while the lower horizontal segment is given by
\[
\underline{b}\ge -(0,n_1,\overline{n_2,\dots,n_k,\underbrace{m,m,\dots,m}_q,m+1})\,.
\]
If $\frak{l}_m<q$ then the two continued fractions coincide on the first $k+\frak{l}_m$ entries. Looking at the $k+\frak{l}_m+1$ entry, we get that $\bar b<\underline{b}$, hence the set ${\mathcal T}^{n_1,n_2\dots,n_k, m, m, \dots, m}_{\mathfrak{b}}$ would be empty.
Assume now that there exists a non-empty set of type ${\mathcal T}^{n_1,n_2\dots,n_k, m, m, \dots, m,m+1}_{\mathfrak{b}}$ ($n_k=m+1$) with the last block of $m$'s of length $q$ strictly less than $\frak{l}_m-1$. Because $n_k=m+1$, $n_{k-1}=m$, and ${\mathcal T}^{n_1,n_2\dots,n_k, m, m, \dots, m,m+1}_{\mathfrak{b}}\subset {\mathcal T}^{n_1,n_2\dots,n_k}_{\mathfrak{b}}$ we have that the lower limit of the set ${\mathcal T}^{n_1,n_2\dots,n_k, m, m, \dots, m,m+1}_{\mathfrak{b}}$ satisfies the relation
\[
\begin{split}
\underline b&\ge -(0,n_1,\overline{n_2,\dots,n_{k-1},m+1})=-(0,n_1,\overline{n_2,\dots,n_{k-1},n_k})\\
&=-(0,\underbrace{m,m,\dots,m}_{\frak{l}_m},m+1,\dots,n_k,\underbrace{m,\dots,m}_{\frak{l}_m-1},m+1,\dots)
\end{split}
\]
while the upper limit of the same set satisfies the relation
\[
\bar b\le -(0,\overline{n_1, n_2,\dots,n_k,\underbrace{m,m,\dots,m}_q,m+1})\,.
\]
This implies that $\bar b<\underline b$ because the two continued fractions coincide on their first $k+q$ entries, and the $k+q+1$ entries are $m$, and $m+1$ respectively. Therefore the set ${\mathcal T}^{n_1,n_2\dots,n_k, m, m, \dots, m,m+1}_{\mathfrak{b}}$ is empty.
(ii) Assume that a set ${\mathcal T}^{n_1,n_2,\dots,n_k,m,m}_{\mathfrak{b}}$ (with $n_1=m$, $n_2=m+1$ and $n_k=m+1$) is non-empty. The upper vertex of such a set satisfies the inequality
\[
\bar b\le -(0,\overline{n_1,n_2,\dots,n_k})
=-(0,m,m+1,\dots,n_k,m,\boxed{m+1},\dots)
\]
while the lower horizontal segment satisfies the relation
\[
\underline{b}\ge -(0,n_1,\overline{n_2,\dots,n_k,m,m,m+1})
=-(0,m,m+1,\dots,n_k,m,\boxed{m},m+1,\dots).
\]
Then $\underline{b}>\bar b$ because the sequences of the corresponding continued fractions with positive entries are in lexicographic order, i.e. the set ${\mathcal T}^{n_1,n_2,\dots,n_k,m,m}_{\mathfrak{b}}$ is empty.
Now assume that there exists a non-empty set ${\mathcal T}^{n_1,n_2\dots,n_k, m+1, m+1, \dots, m+1}_{\mathfrak{b}}$ ($n_k=m$) with the final block of $(m+1)$'s of length $q$ greater than $\frak{l}_{m+1}+1$. The upper vertex of this set satisfies
\[
\bar b\le -(0,\overline{m,\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}},m,\dots,n_k,\underbrace{m+1,\dots,m+1}_q})
\]
while the lower horizontal segment satisfies the relation
\[
\begin{split}
\underline{b}&\ge -(0,n_1,\overline{n_2,\dots,n_k,m+1})\\
&=-(0,m,{\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}},m,\dots, n_k,\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}+1},m,\dots}).
\end{split}
\]
Since the two continued fraction expansions with positive entries coincide on the first $k+\frak{l}_{m+1}+1$ entries and their $k+\frak{l}_{m+1}+2$ entries are $m+1$ and $m$, respectively, we obtain $\bar b<\underline{b}$, i.e. the set ${\mathcal T}^{n_1,n_2\dots,n_k, m+1, m+1, \dots, m+1}_{\mathfrak{b}}$ is empty.
Finally, suppose that there exists a non-empty set ${\mathcal T}^{n_1,n_2\dots,n_k, m+1, m+1, \dots, m+1,m}_{\mathfrak{b}}$ ($n_k=m$) with the final block of $(m+1)$'s of length $q$ less than $\frak{l}_{m+1}$. The upper vertex of this set satisfies
\[
\bar b\le -(0,\overline{m,\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}},m,\dots,n_k,\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}}})
\]
while the lower horizontal segment satisfies the relation
\[
\begin{split}
\underline{b}&\ge -(0,n_1,\overline{n_2,\dots,n_k,\underbrace{m+1,\dots,m+1}_{q},m,m+1})\\
&=-(0,m,{\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}},m,\dots, n_k,\underbrace{m+1,\dots,m+1}_{q},m,\dots}).
\end{split}
\]
Since the two continued fraction expansions with positive entries coincide on the first $k+\frak{l}_{m+1}$ entries and their $(k+\frak{l}_{m+1}+1)^{th}$ entries are $(m+1)$ and $m$, respectively, we obtain $\bar b<\underline{b}$, i.e. the set ${\mathcal T}^{n_1,n_2\dots,n_k, m+1, m+1, \dots, m+1,m}_{\mathfrak{b}}$ is empty.
\end{proof}
In what follows, we describe in an explicit manner the symbolic properties of a sequence $(n_i)$ for which ${\mathcal E}^{(n_i)}_{\mathfrak{b}}\ne \emptyset$. Notice that in both cases of Lemma \ref{lemE} there are two admissible blocks that can be used to express the admissible sequence $(n_i)$:
case (i): $A^{(1)}=(\underbrace{m,\dots,m}_{\frak{l}_{m}},m+1)$ and $B^{(1)}=(\underbrace{m,\dots,m}_{\frak{l}_{m}-1},m+1)$;
case (ii): $A^{(1)}=(m,\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}})$ and $B^{(1)}=(m,\underbrace{m+1,\dots,m+1}_{\frak{l}_{m+1}+1})$.
\noindent with $\frak{l}_{m}\ge 2$, $\frak{l}_{m+1}\ge 1$. In both situations $A^{(1)}\prec B^{(1)}$.
One could think of $A^{(1)}$ as being the new `$m$' and $B^{(1)}$ the new `$m+1$', and treat the original sequence of $m$'s and $m+1$'s as a sequence of $A^{(1)}$'s and $B^{(1)}$'s. Furthermore, the next lemma shows that such a substitution process can be continued recursively to construct blocks $A^{(n)}$ and $B^{(n)}$ (for any $n\ge 1$), so that the original sequence $(n_i)$ may be considered to be a sequence of $A^{(n)}$'s and $B^{(n)}$'s. Moreover, only particular blocks of $A^{(n)}$'s and $B^{(n)}$'s warrant non-empty triangular regions of the next generation.
Let us also introduce the notations $A^{(0)}=m$ and $B^{(0)}=m+1$. Assume that ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ is a nonempty set. We have:
\begin{lem}\label{lem-rec}
For every $n\ge 0$, there exist integers $\frak{l}_{A^{(n)}}\ge 2$, $\frak{l}_{B^{(n)}}\ge 1$ such that the sequence $(n_i)$ can be written as a concatenation of blocks
\begin{equation}\label{case1}
A^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{\frak{l}_{A^{(n)}}},B^{(n)}) \,,\quad B^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{\frak{l}_{A^{(n)}}-1},B^{(n)})
\end{equation}
or
\begin{equation}\label{case2}
A^{(n+1)}=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}}) \,,\quad B^{(n+1)}=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}+1})\,.
\end{equation}
\end{lem}
\begin{proof}
Notice that Lemma \ref{lemE} proves the above result for $n=0$ with $\frak{l}_{A^{(0)}}=\frak{l}_{m}$, $\frak{l}_{B^{(0)}}=\frak{l}_{m+1}$.
We show inductively that
\begin{equation}\label{eqorder}
A^{(n)}\prec B^{(n)}
\end{equation}
and if a finite sequence $\sigma$ starts with an $A^{(n)}$ block and ends with a $B^{(n)}$ block, $\sigma=(A^{(n)},\tau,B^{(n)})$, then the lower boundary $\underline b(\sigma)$ of ${\mathcal T}^\sigma_{\mathfrak{b}}$ (if nonempty) satisfies
\begin{equation}\label{bng}
\underline b(\sigma)\ge-(0,A^{(n)},\overline{\tau,B^{(n)}})\,.
\end{equation}
Relation \eqref{eqorder} is obviously true for $n=0$; \eqref{bng} is also satisfied if $n=0$, since one applies Lemma \ref{lem-cf} part (ii) to the sequence $\tilde\sigma=(A^{(0)},\tau)$ where ${\mathcal T}^{\tilde\sigma}_{\mathfrak{b}}\supset {\mathcal T}^\sigma_{\mathfrak{b}}$.
We point out that by applying Lemma \ref{lem-cf} part (i) to the region ${\mathcal T}^{\sigma}$ we have
\begin{equation}\label{ubng}
\bar b(\sigma)\le-(0,\overline{\sigma})=-(0,\overline{A^{(n)},\tau,B^{(n)}})\,.
\end{equation}
To prove the inductive step, suppose that for some $n\ge 1$, we can rewrite the sequence $(n_i)$ using blocks $A^{(n+1)}$ and $B^{(n+1)}$ as in case \eqref{case1} or \eqref{case2}.
\noindent\textbf{Case 1.} Assume $A^{(n+1)}$ and $B^{(n+1)}$ are given by \eqref{case1}.
It follows immediately that $A^{(n+1)}\prec B^{(n+1)}$ since $A^{(n)}\prec B^{(n)}$. Also, if a sequence $\sigma$ starts with an $A^{(n+1)}$ block and ends with a $B^{(n+1)}$ block (thus, implicitly, $\sigma$ starts with an $A^{(n)}$ block and ends with a $B^{(n)}$ block),
$$\sigma=(A^{(n+1)},\tau,B^{(n+1)})=(\underbrace{A^{(n)},\dots,A^{(n)}}_{\frak{l}_{A^{(n)}}},B^{(n)},\tau,\underbrace{A^{(n)},\dots,A^{(n)}}_{\frak{l}_{A^{(n)}}-1},B^{(n)})\,$$ then, by applying \eqref{bng} to $\tilde \sigma=(\underbrace{A^{(n)},\dots,A^{(n)}}_{\frak{l}_{A^{(n)}}},B^{(n)},\tau)=(A^{(n)},B^{(n+1)},\tau)$ (which starts with $A^{(n)}$ and ends with $B^{(n)}$) we get
$$\underline b(\sigma)\ge \underline b(\tilde\sigma)\ge -(0,A^{(n)},\overline{B^{(n+1)},\tau})=-(0,A^{(n)},B^{(n+1)},\overline{\tau, B^{(n+1)}})\,.$$
Therefore, \eqref{bng} holds for $n+1$, since $(A^{(n)},B^{(n+1)})=A^{(n+1)}$.
Now assume that $(n_i)$ starts with a block of $A^{(n+1)}$'s of length $\frak{l}_{A^{(n+1)}}>1$. We prove that the sequence $(n_i)$ cannot have two consecutive $B^{(n+1)}$'s and any sequence of consecutive blocks $A^{(n+1)}$ has length $\frak{l}_{A^{(n+1)}}$ or $\frak{l}_{A^{(n+1)}}-1$. Suppose the sequence $(n_i)$ contains two consecutive blocks of type $B^{(n+1)}$:
\[
(n_i)=(A^{(n+1)},A^{(n+1)},\dots,A^{(n+1)},B^{(n+1)},B^{(n+1)},\dots).
\]
We look at the set
$${\mathcal T}^{A^{(n+1)}A^{(n+1)}\dots A^{(n+1)}B^{(n+1)}B^{(n+1)}}$$
and remark that the upper boundary satisfies (from \eqref{ubng})
\begin{equation}\label{tb}
\bar b\le -(0,\overline{A^{(n+1)},A^{(n+1)},\dots,A^{(n+1)},B^{(n+1)},B^{(n+1)}})
\end{equation}
and the lower boundary satisfies (from \eqref{bng})
\begin{equation}\label{lb}
\underline b\ge -(0,A^{(n+1)},\overline{A^{(n+1)},\dots,A^{(n+1)},B^{(n+1)}})\,.
\end{equation}
But \eqref{tb} and \eqref{lb} imply that $\underline b>\bar b$, because the two corresponding continued fractions
with positive entries are
in lexicographic order. Thus, there cannot be two consecutive $B^{(n+1)}$ blocks in the sequence $(n_i)$.
Now, let us check that the sequence $(n_i)$ cannot have a block of $A^{(n+1)}$'s of length $q>\frak{l}_{A^{(n+1)}}$. Assume the contrary,
$$(n_i)=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{\frak{l}_{A^{(n+1)}}},B^{(n+1)},\tau,B^{(n+1)}, \underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{q},B^{(n+1)},\dots)\,.$$
Then the set ${\mathcal T}^{(n_i)}_{\mathfrak{b}}$ has the upper bound $\bar b$ satisfying
$$\bar b\le -(0,\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{\frak{l}_{A^{(n+1)}}},B^{(n+1)},\tau,B^{(n+1)}})$$
while the lower bound $\underline b$ satisfies by \eqref{bng}
$$
\underline b\ge -(0,A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{\frak{l}_{A^{(n+1)}}-1},B^{(n+1)},\tau,B^{(n+1)},\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{q},B^{(n+1)}}).
$$
Comparing the two continued fractions, we get that $\bar b<\underline b$ (since $A^{(n+1)}\prec B^{(n+1)}$ and $q>\frak{l}_{A^{(n+1)}}$).
Now assume that $(n_i)$ starts with $A^{(n+1)}$ and then continues with a block of $B^{(n+1)}$'s of length $\frak{l}_{B^{(n+1)}}\ge 1$. We prove that the sequence $(n_i)$ cannot have two consecutive $A^{(n+1)}$'s and any sequence of consecutive blocks $B^{(n+1)}$ has length $\frak{l}_{B^{(n+1)}}$ or $\frak{l}_{B^{(n+1)}}+1$. Suppose the sequence $(n_i)$ contains two (or more) consecutive blocks of type $A^{(n+1)}$:
$$(n_i)=(A^{(n+1)},B^{(n+1)},\tau,B^{(n+1)},\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{q\ge 2},B^{(n+1)},\dots)\,.$$
We study the region ${\mathcal T}^{A^{(n+1)},B^{(n+1)},\tau,B^{(n+1)},A^{(n+1)},\dots,A^{(n+1)},B^{(n+1)}}$
and remark that its upper boundary satisfies (from \eqref{ubng})
\begin{equation}\label{ttb}
\bar b\le -(0,\overline{A^{(n+1)},B^{(n+1)},\tau,B^{(n+1)}})
\end{equation}
and the lower boundary satisfies (from \eqref{bng})
\begin{equation}\label{llb}
\underline b\ge -(0,A^{(n+1)},\overline{B^{(n+1)},\tau,B^{(n+1)},\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{q\ge 2},B^{(n+1)}}).
\end{equation}
But \eqref{ttb} and \eqref{llb} imply that $\underline b>\bar b$ because the two corresponding continued fractions
with positive entries are
in lexicographic order. Thus, there cannot be two consecutive $A^{(n+1)}$ blocks in the sequence $(n_i)$.
Now, let us check that the sequence $(n_i)$ cannot have a block of $B^{(n+1)}$'s of length $q>\frak{l}_{B^{(n+1)}}+1$. Assume the contrary,
$$(n_i)=(A^{(n+1)},\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{\frak{l}_{B^{(n+1)}}},A^{(n+1)},\tau,A^{(n+1)}, \underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{q},A^{(n+1)},\dots)\,.$$
Then the set ${\mathcal T}^{(n_i)}$ has the upper bound $\bar b$ satisfying
$$\bar b\le -(0,\overline{A^{(n+1)},\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{\frak{l}_{B^{(n+1)}}},A^{(n+1)},\tau,A^{(n+1)}, \underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{q},A^{(n+1)}})
$$
while the lower bound $\underline b$ satisfies by \eqref{bng}
$$
\underline b\ge -(0,A^{(n+1)},\overline{\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{\frak{l}_{B^{(n+1)}}},A^{(n+1)},\tau,A^{(n+1)}, B^{(n+1)}}).
$$
Comparing the two continued fractions, we get that $\bar b<\underline b$.
\noindent\textbf{Case 2.} Assume $A^{(n+1)}$ and $B^{(n+1)}$ are given by \eqref{case2}. It follows that $A^{(n+1)}\prec B^{(n+1)}$ since $A^{(n+1)}$ is the beginning block of $B^{(n+1)}$.
Also, if a sequence $\sigma$ starts with an $A^{(n+1)}$ block and ends with a $B^{(n+1)}$ block (thus, implicitly, $\sigma$ starts with an $A^{(n)}$ block and ends with a $B^{(n)}$ block),
$$\sigma=(A^{(n+1)},\tau,B^{(n+1)})=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}},\tau,A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}+1})\,$$
then by applying \eqref{bng} to $\tilde \sigma=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}},\tau,A^{(n)},B^{(n)})$, which starts with $A^{(n)}$ and ends with $B^{(n)}$, we get
\begin{equation*}
\begin{split}
\underline b(\sigma)\ge \underline b(\tilde\sigma)\ge -(0,A^{(n)},\overline{\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}},\tau,A^{(n)},B^{(n)}})\\=-(0,A^{(n+1)},\overline{\tau, A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{\frak{l}_{B^{(n)}}+1}})
\end{split}
\end{equation*}
so \eqref{bng} holds for $n+1$.
Assume that $(n_i)$ starts with a sequence of $A^{(n+1)}$'s of length $\frak{l}_{A^{(n+1)}}>1$. Similar to the analysis of the first case, one proves that the sequence $(n_i)$ cannot have two consecutive $B^{(n+1)}$'s and any sequence of consecutive blocks $A^{(n+1)}$ has length $\frak{l}_{A^{(n+1)}}$ or $\frak{l}_{A^{(n+1)}}-1$.
If the sequence $(n_i)$ starts with $A^{(n+1)}$ and then continues with a sequence of $B^{(n+1)}$'s of length $\frak{l}_{B^{(n+1)}}\ge 1$, one can prove that the sequence $(n_i)$ cannot have two consecutive $A^{(n+1)}$'s and any sequence of consecutive blocks $B^{(n+1)}$ has length $\frak{l}_{B^{(n+1)}}$ or $\frak{l}_{B^{(n+1)}}+1$.
\end{proof}
Additionally, we prove
\begin{lem}\label{lem-order}
If the block $\tau_1=(n_i,\dots, n_l)$ is a tail of $A^{(n)}$ and $\tau_2=(p_j,\dots,p_h)$ is a tail of $B^{(n)}$, then
$A^{(n)}\prec \tau_1$ and $B^{(n)}\prec \tau_2$.
\end{lem}
\begin{proof}
The statement is obviously true if $n=1$. Assume it is true for some $n$ both for $A^{(n)}$ and $B^{(n)}$. We analyze the case of $A^{(n+1)}$ being given by \eqref{case1}, $A^{(n+1)}=(\underbrace{A^{(n)},\dots, A^{(n)}}_{\frak{l}_{A^{(n)}}},B^{(n)})$. Consider an arbitrary tail $\tau$ of $A^{(n+1)}$; $\tau$ could start with a block $A^{(n)}$ or a tail of $A^{(n)}$ or $\tau$ coincides with $B^{(n)}$ or a tail of $B^{(n)}$. In all situations, the inductive hypothesis and the fact that $A^{(n)}\prec B^{(n)}$ prove that $A^{(n+1)}\prec \tau$. The case of $A^{(n+1)}$ given by \eqref{case2} is treated similarly.
\end{proof}
\begin{rem}\label{rem-ineq}
Using the relations \eqref{bng} and \eqref{ubng}, notice that a set ${\mathcal T}^{A^{(n+1)}}_{\mathfrak{b}}$ (if nonempty) has the upper vertex satisfying
\begin{equation}\label{bn}
\bar b_{n+1}\le -(0,\overline{A^{(n+1)}})
\end{equation}
and a lower horizontal boundary that satisfies
\begin{equation}\label{ubn1}
\underline b_{n+1}\ge -(0,A^{(n+1)},\overline{B^{(n+1)}})
\end{equation}
if $A^{(n+1)}$ is given by the substitution rule \eqref{case1}, and
\begin{equation}\label{ubn2}
\underline b_{n+1}\ge -(0,A^{(n)},\overline{B^{(n)}})
\end{equation}
if $A^{(n+1)}$ is given by \eqref{case2}.
\end{rem}
We will prove that the above inequalities are actually equality relations. For that we construct a starting subsequence of $A^{(n+1)}$ defined inductively as:
$$
\sigma^{(1)}=
\begin{cases}
(\underbrace{m,\dots,m}_{{\frak l}_m}) & \text{if } A^{(1)}=(\underbrace{m,\dots,m}_{{\frak l}_m},m+1)\\
({m}) & \text{if } A^{(1)}=(m,\underbrace{m+1,\dots,m+1}_{{\frak l}_{m+1}})\\
\end{cases}
$$
\noindent{Case 1.} If $A^{(n)}$ is given by a relation of type \eqref{case1}, i.e. $A^{(n)}=(A^{(n-1)}, \dots,A^{(n-1)}, B^{(n-1)})$, then
\begin{equation}\label{snc1}
\sigma^{(n+1)}=
\begin{cases}
(\underbrace{A^{(n)},\dots,A^{(n)}}_{{\frak l}_{A^{(n)}}-1},\sigma^{(n)}) & \text{if } A^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{{\frak l}_{A^{(n)}}},B^{(n)})\\
\sigma^{(n)} & \text{if } A^{(n+1)}=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}})
\end{cases}
\end{equation}
\noindent{Case 2.} If $A^{(n)}$ is given by a relation of type \eqref{case2}, i.e. $A^{(n)}=(A^{(n-1)}, B^{(n-1)}, \dots, B^{(n-1)})$, then
\begin{equation}\label{snc2}
\sigma^{(n+1)}=
\begin{cases}
(\underbrace{A^{(n)},\dots,A^{(n)}}_{{\frak l}_{A^{(n)}}},\sigma^{(n)}) & \text{if } A^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{{\frak l}_{A^{(n)}}},B^{(n)})\\
(A^{(n)},\sigma^{(n)}) & \text{if } A^{(n+1)}=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}})
\end{cases}
\end{equation}
We introduce the notation $f^{\sigma}$ to denote the transformation $T^{n_k}S\dots T^{n_1}S$ if $\sigma=(n_1,\dots,n_k)$.
\begin{lem}\label{relbn}
Let $\sigma^{(n+1)}$ be the starting block of $A^{(n+1)}$ defined as above. Then the equation
$$f^{\sigma^{(n+1)}}b=\frac{b}{b+1}$$ has a unique solution $b\in [0,1]$ given by
\begin{equation}\label{eqbn}
b_{n+1}=\begin{cases}
-(0,A^{(n+1)},\overline{B^{(n+1)}}) & \text{if } A^{(n+1)} \text{ given by } \eqref{case1}\\
-(0,A^{(n)},\overline{B^{(n)}}) & \text{if } A^{(n+1)} \text{ given by } \eqref{case2}
\end{cases}
\end{equation}
\end{lem}
\begin{proof}
We proceed with an inductive proof, and as part of it we also show that
\begin{equation}\label{sigma}
(\sigma^{(n+1)},m+1,\tilde A^{(n)})=
\begin{cases}
A^{(n+1)} & \text{if } A^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{{\frak l}_{A^{(n)}}},B^{(n)})\\
(A^{(n)},B^{(n)}) & \text{if } A^{(n+1)}=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}})
\end{cases}
\end{equation}
where $A^{(n)}=(m,\tilde A^{(n)})$.
The relation \eqref{eqbn} is true for $n=0$ due to Lemma \ref{lem-cf}(ii). Also, \eqref{sigma} follows immediately.
Suppose now that the inductive relations hold for some $n$. We analyze the solution of $f^{\sigma^{(n+2)}}b=\frac{b}{b+1}$.
Assume that $A^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{{\frak l}_{A^{(n)}}}, B^{(n)})$. We look at the two possible cases:
\noindent (i) If $A^{(n+2)}=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}}, B^{(n+1)})$,
$\sigma^{(n+2)}=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},\sigma^{(n+1)})$.
Using Lemma \ref{lem-cf}(ii), we have that the solution to $f^{\sigma^{(n+2)}}b=\frac{b}{b+1}$ is given by
\begin{equation*}
\begin{split}
b_{n+2}&=-(0,m,\overline{\tilde A^{(n+1)},\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-2},\sigma^{(n+1)},m+1})\\
&= -(0,m,\tilde A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-2},\sigma^{(n+1)},m+1,\tilde A^{(n+1)}})\\
&=-(0,m,\tilde A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-2},\sigma^{(n+1)},m+1,\tilde A^{(n)},B^{(n+1)}})\\
&=-(0,A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-2},A^{(n+1)},B^{(n+1)}})\\
&=-(0,A^{(n+1)},\overline{B^{(n+2)}})=-(0,A^{(n+2)},\overline{B^{(n+2)}}).
\end{split}
\end{equation*}
Also,
\begin{equation*}
\begin{split}
(\sigma^{(n+2)},m+1,\tilde A^{(n+1)})&=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},\sigma^{(n+1)},m+1,\tilde A^{(n)}, B^{(n+1)})\\
&=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},A^{(n+1)},B^{(n+1)})=A^{(n+2)}.
\end{split}
\end{equation*}
\noindent (ii) If $A^{(n+2)}=(A^{(n+1)},\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{{\frak l}_{B^{(n+1)}}})$, then
$
\sigma^{(n+2)}=\sigma^{(n+1)}\,,
$
and the induction step gives us the solution of $f^{\sigma^{(n+2)}}b=\frac{b}{b+1}$ as
$
b_{n+2}=-(0,A^{(n+1)},\overline{B^{(n+1)}})
$. Also,
\begin{equation*}
(\sigma^{(n+2)},m+1,\tilde A^{(n+1)})=(\sigma^{(n+1)},m+1,\tilde A^{(n)},B^{(n+1)})=(A^{(n+1)},B^{(n+1)}).
\end{equation*}
\noindent Now assume that $A^{(n+1)}=(A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}})$.
We look again at the two possible cases:
\noindent (i) If $A^{(n+2)}=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}}, B^{(n+1)})$,
$
\sigma^{(n+2)}=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}},\sigma^{(n+1)})
$.
Using Lemma \ref{lem-cf}(ii), we have that the solution to $f^{\sigma^{(n+2)}}b=\frac{b}{b+1}$ is given by
\begin{equation*}
\begin{split}
b_{n+2}&=-(0,m,\overline{\tilde A^{(n+1)},\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},\sigma^{(n+1)},m+1})\\
&= -(0,m,\tilde A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},\sigma^{(n+1)},m+1,\tilde A^{(n+1)}})\\
&=-(0,m,\tilde A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},\sigma^{(n+1)},m+1,\tilde A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}}})\\
&=-(0,m,\tilde A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},A^{(n)},B^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}}})\\
&=-(0,A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},B^{(n+1)}})=-(0,A^{(n+2)},\overline{B^{(n+2)}}).
\end{split}
\end{equation*}
A similar approach gives us that $(\sigma^{(n+2)},m+1,\tilde A^{(n+1)})=A^{(n+2)}$.
\noindent (ii) If $A^{(n+2)}=(A^{(n+1)},\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{{\frak l}_{B^{(n+1)}}})$, then
$
\sigma^{(n+2)}=(A^{(n+1)},\sigma^{(n+1)}).
$
Using Lemma \ref{lem-cf}(ii),
we have that the solution to $f^{\sigma^{(n+2)}}b=\frac{b}{b+1}$ is given by
\begin{equation*}
\begin{split}
b_{n+2}&=-(0,m,\overline{\tilde A^{(n+1)},\sigma^{(n+1)},m+1})\\
&= -(0,m,\tilde A^{(n+1)},\overline{\sigma^{(n+1)},m+1,\tilde A^{(n+1)}})\\
&=-(0,m,\tilde A^{(n+1)},\overline{\sigma^{(n+1)},m+1,\tilde A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}}})\\
&=-(0,m,\tilde A^{(n+1)},\overline{A^{(n)},B^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}}})\\
&=-(0,A^{(n+1)},\overline{B^{(n+1)}}).
\end{split}
\end{equation*}
Also,
\begin{equation*}
\begin{split}
(\sigma^{(n+2)},m+1,\tilde A^{(n+1)})&=(A^{(n+1)},\sigma^{(n+1)},m+1,\tilde A^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}})\\
&=(A^{(n+1)},A^{(n)},B^{(n)},\underbrace{B^{(n)},\dots,B^{(n)}}_{{\frak l}_{B^{(n)}}})=(A^{(n+1)},B^{(n+1)}).
\end{split}
\end{equation*}
\end{proof}
\begin{thm}\label{thm-exact}
Any sequence $(n_i)$ constructed recursively using relations \eqref{case1} and \eqref{case2} provides a non-empty set $\mathcal E^{(n_i)}_b$.
\end{thm}
\begin{proof}
We prove inductively that any set ${\mathcal T}^{A^{(n+1)}}_{\mathfrak{b}}$ is nonempty and the relations \eqref{bn} and \eqref{ubn1} or \eqref{ubn2}
are actual equalities, i.e.
\begin{equation}\label{bne}
\bar b_{n+1}= -(0,\overline{A^{(n+1)}})
\end{equation}
and a lower horizontal boundary that satisfies
\begin{equation}\label{ube1}
\underline b_{n+1}= -(0,A^{(n+1)},\overline{B^{(n+1)}})
\end{equation}
if $A^{(n+1)}$ is given by the substitution rule \eqref{case1} or
\begin{equation}\label{ube2}
\underline b_{n+1}= -(0,A^{(n)},\overline{B^{(n)}})
\end{equation}
if $A^{(n+1)}$ is given by \eqref{case2}. As part of the inductive proof, we also show that any tail block $\tau$ of $A^{(n+1)}$, $\tau\ne \tau^{(n+1)}$ satisfies $\tau\prec \tau^{(n+1)}$, where $\tau^{(n+1)}$ denotes the tail block of $A^{(n+1)}$ obtained by eliminating the starting block $\sigma^{(n+1)}$ defined by \eqref{snc1} or \eqref{snc2}.
Indeed for $n=0$, one can check directly that the sets ${\mathcal T}^{m,m,\dots,m,m+1}_{\mathfrak{b}}$ and ${\mathcal T}^{m,m+1,\dots,m+1}_{\mathfrak{b}}$ satisfy the above equalities using the fact that an ``$m$" digit does not change the position of the upper vertex, while an ``$m+1$'' digit does not change the position of the horizontal segment of such a triangular set. Also, for any tail $\tau\ne \tau^{(1)}$ of $A^{(1)}$, $\tau\prec \tau^{(1)}$.
Now, let us assume that ${\mathcal T}_{\mathfrak{b}}^{A^{(n+1)}}$ obtained from $A^{(n+1)}=(\underbrace{A^{(n)},\dots,A^{(n)}}_{\frak{l}_{A^{(n)}}},B^{(n)})$ is nonempty and satisfies \eqref{bne} and \eqref{ube1}.
For ${\mathcal T}^{A^{(n+2)}}_{\mathfrak{b}}$ we look at the two possible cases:
\noindent (i) $A^{(n+2)}=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}}, B^{(n+1)})$. By Remark \ref{rem-ineq},
\[
\bar b_{n+2}\le -(0,\overline{A^{(n+2)}})=-(0,\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{\frak{l}_{A^{(n+1)}}},B^{(n+1)}})=:\hat b
\]
and
\[
\underline b_{n+2}\ge -(0,A^{(n+2)},\overline{B^{(n+2)}})=-(0,A^{(n+1)},\overline{\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{\frak{l}_{A^{(n+1)}}-1},B^{(n+1)}})=:\tilde b
\]
where $\tilde b$ was obtained by applying Lemma \ref{lem-cf} part (ii) to the starting block
$$
\sigma^{(n+2)}=(\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_{{\frak l}_{A^{(n+1)}}-1},\sigma^{(n+1)})
$$
of $A^{(n+2)}$.
We prove first the other inductive step: any tail block $\tau$ of $A^{(n+2)}$, $\tau\ne\tau^{(n+2)}$ satisfies $\tau\prec \tau^{(n+2)}$. Notice that $\tau^{(n+2)}=(\tau^{(n+1)}, B^{(n+1)})$. There exists $\tau'$ a tail block of $A^{(n+1)}$ with the property that
$$\tau=(\tau',\underbrace{A^{(n+1)},\dots,A^{(n+1)}}_l,B^{(n+1)}), \quad 0\le l\le \frak{l}_{A^{(n+1)}}-1$$
or $\tau=\tau'$.
The latter case holds when $\tau$ is just a tail of $B^{(n+1)}$ (which itself is a tail of $A^{(n+1)}$). It is possible that $\tau'=\emptyset$, but in this case $\tau\prec\tau^{(n+2)}$
because $A^{(n+1)}\prec \tau^{(n+1)}$ by Lemma \ref{lem-order}. If $\tau'\ne\emptyset$, we also get
that $\tau\prec \tau^{(n+2)}$ by using the inductive hypothesis relation $\tau'\prec \tau^{(n+1)}$.
Now we show that the points $(\tilde b-1,\tilde b)$ and $(\hat b-1,\hat b)$ belong to the set ${\mathcal T}^{A^{(n+2)}}_{\mathfrak{b}}$. The point $(\hat b-1,\hat b)$ belongs to ${\mathcal T}^{A^{(n+1)}}_{\mathfrak{b}}$ so $f^{A^{(n+1)}}\hat b\le \hat b$. If $\sigma$ is an intermediate block between $A^{(n+1)}$ and $A^{(n+2)}$, $A^{(n+1)}\subset\sigma\subset A^{(n+2)}$, then
$$f^\sigma(\hat b)=-(0,\tau,\overline{A^{(n+2)}})\le -(0,\overline{A^{(n+2)}})=\hat b$$
The inequality is due to the fact that $\tau$ is a tail block of $A^{(n+2)}$ obtained by eliminating $\sigma$, so
$A^{(n+2)}\prec \tau$.
Now we show that $f^\sigma(\tilde b)\ge \tilde b/(\tilde b+1)$ for any intermediate block $\sigma$ between $A^{(n+1)}$ and $A^{(n+2)}$. We have that $f^{\sigma^{(n+2)}}(\tilde b)=\tilde b/(\tilde b+1)$ by Lemma \ref{relbn}, and
\[
f^{\sigma^{(n+2)}}(\tilde b)=-(0,\tau^{(n+2)},\overline{B^{(n+2)}})\, ,
\]
where $\tau^{(n+2)}=(\tau^{(n+1)},B^{(n+1)})$. Also $f^\sigma(\tilde b)=-(0,\tau,\overline{B^{(n+2)}})$ with $\tau$ being the tail block of $A^{(n+2)}$ obtained by eliminating $\sigma$. But $\tau\prec\tau^{(n+2)}$ as we have just proved, hence $f^\sigma(\tilde b)\ge f^{\sigma^{(n+2)}}(\tilde b)$.
In conclusion, any intermediate block
$\sigma$ between $A^{(n+1)}$ and $A^{(n+2)}$ satisfies
\[
\tilde b/(\tilde b+1)\le f^\sigma(\tilde b)\le f^\sigma(\hat b)\le \hat b\,,
\]
therefore the points $(\tilde b-1,\tilde b)$ and $(\hat b-1,\hat b)$ belong to the intermediate set ${\mathcal T}^\sigma_{\mathfrak{b}}$. This proves the induction step for ${\mathcal T}^{A^{(n+2)}}_{\mathfrak{b}}$.
\noindent (ii) $A^{(n+2)}=(A^{(n+1)},\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{{\frak l}_{B^{(n+1)}}})$. By Remark \ref{rem-ineq}, we have that
\[
\bar b_{n+2}\le -(0,\overline{A^{(n+2)}})=-(0,\overline{A^{(n+1)},\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{\frak{l}_{B^{(n+1)}}}})=:\hat b
\]
and
\[
\underline b_{n+2}\ge -(0,A^{(n+1)},\overline{B^{(n+1)}})=:\tilde b
\]
where $\tilde b$ was obtained by applying Lemma \ref{lem-cf} part (ii) to the starting block
$
\sigma^{(n+2)}=\sigma^{(n+1)}
$
of $A^{(n+2)}$.
We prove first the other inductive step: any tail block $\tau$ of $A^{(n+2)}$, $\tau\ne\tau^{(n+2)}$, satisfies $\tau\prec \tau^{(n+2)}$. There exists $\tau'$ a tail block of $A^{(n+1)}$ with the property that
$$\tau=(\tau',\underbrace{B^{(n+1)},\dots,B^{(n+1)}}_l), \quad 0\le l\le \frak{l}_{B^{(n+1)}}$$
(again, using the fact that $B^{(n+1)}$ is a tail block of $A^{(n+1)}$).
Since
$$\tau^{(n+2)}=(\tau^{(n+1)}, \underbrace{B^{(n+1)},\dots,B^{(n+1)}}_{{\frak l}_{B^{(n+1)}}})\,,$$
we get that $\tau\prec \tau^{(n+2)}$ by using the inductive hypothesis $\tau'\prec \tau^{(n+1)}$.
Now we show that the points $(\tilde b-1,\tilde b)$ and $(\hat b-1,\hat b)$ belong to the set ${\mathcal T}^{A^{(n+2)}}_{\mathfrak{b}}$. The point $(\hat b-1,\hat b)$ belongs to ${\mathcal T}^{A^{(n+1)}}_{\mathfrak{b}}$ so $f^{A^{(n+1)}}\hat b\le \hat b$. If $\sigma$ is an intermediate block between $A^{(n+1)}$ and $A^{(n+2)}$ then
$$f^\sigma(\hat b)=-(0,\tau,\overline{A^{(n+2)}})\le -(0,\overline{A^{(n+2)}})=\hat b$$
because $\tau$ is a tail block of $A^{(n+2)}$ obtained by eliminating $\sigma$, so
$A^{(n+2)}\prec \tau$.
Now we show that $f^\sigma(\tilde b)\ge \tilde b/(\tilde b+1)$. We have that $f^{\sigma^{(n+2)}}(\tilde b)=\tilde b/(\tilde b+1)$ by Lemma \ref{relbn}, and
\[
f^{\sigma^{(n+2)}}(\tilde b)=-(0,\tau^{(n+1)},\overline{B^{(n+1)}})\, \quad f^\sigma(\tilde b)=-(0,\tau,\overline{B^{(n+1)}})
\]
with $\tau$ being the end block of $A^{(n+2)}$ obtained by eliminating $\sigma$. But $\tau\prec\tau^{(n+2)}$ as we have just proved, hence $f^\sigma(\tilde b)\ge f^{\sigma^{(n+2)}}(\tilde b)$. In conclusion, any intermediate sequence $\sigma$ between $A^{(n+1)}$ and $A^{(n+2)}$ satisfies
\[
\tilde b/(\tilde b+1)\le f^\sigma(\tilde b)\le f^\sigma(\hat b)\le \hat b\,,
\]
therefore the points $(\tilde b-1,\tilde b)$ and $(\hat b-1,\hat b)$ belong to the intermediate set ${\mathcal T}^\sigma_{\mathfrak{b}}$.
We proved the induction step for ${\mathcal T}^{A^{(n+2)}}_{\mathfrak{b}}$, when $A^{(n+1)}$ is given by \eqref{case1}. A similar argument can be provided for the case when $A^{(n+1)}$ is given by \eqref{case2}, so the conclusion of the theorem is true.
\end{proof}
We prove now that each nonempty set ${\mathcal E}^{(n_i)}$ with $(n_i)$ a not eventually periodic sequence is actually a singleton.
\begin{thm}
Assume that $(n_i)$ is a sequence which is not eventually periodic and such that the set ${\mathcal E}^{(n_i)}_b$ is nonempty. Then the set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ is a point on the line segment $b-a=1$.
\end{thm}
\begin{proof}
The sequence $(n_i)$ satisfies the recursive relations \eqref{case1} or \eqref{case2}. We look at the set ${\mathcal T}^{A^{(n+1)}}_{\mathfrak{b}}$ and estimate the length of its lower base. In case \eqref{case1}
its upper vertex is given by \eqref{bne} and its lower base satisfies \eqref{ube1}. The lower base is a segment whose right end coordinate is
$$\underline a^r_{n+1}= -(0,A^{(n+1)},\overline{B^{(n+1)}})-1$$ and left end coordinate is
\[
\underline a^l_{n+1}=f^{A^{(n +1)}}(- (0,A^{(n+1)},\overline{B^{(n+1)}})) -1=-(0,\overline{B^{(n+1)}}) -1\,.
\]
Hence the length of the lower base is given by
$$
L_{n+1}=\underline a^r_{n+1}-\underline a^l_{n+1}=(0,\overline{B^{(n+1)}})-(0,A^{(n+1)},\overline{B^{(n+1)}})\,.
$$
In case \eqref{case2}, the lower base is a segment whose right end coordinate is
$$
\underline a^r_{n+1}=-(0,A^{(n)},\overline{B^{(n)}})-1
$$ and the left end coordinate is given by
\[
\underline a^l_{n+1}=f^{A^{(n +1)}}(- (0,A^{(n)},\overline{B^{(n)}})) -1=-(0,\overline{B^{(n)}}) -1\,.
\]
Hence the length of the lower base is given by
$$
L_{n+1}=\underline a^r_{n+1}-\underline a^l_{n+1}=(0,\overline{B^{(n)}})-(0,A^{(n)},\overline{B^{(n)}})\,.
$$
Notice that in the first case the two continued fraction expansions have in common at least the block $A^{(n)}$, while in the second case they have in common at least the block $A^{(n-1)}$. This implies that in both cases $L_{n+1}\rightarrow 0$ as $n\rightarrow \infty$.
Moreover, the bases of the sets ${\mathcal T}^{n_1,\dots n_k}_{\mathfrak{b}}$ have non-increasing length and we have found a subsequence of these bases whose lengths converge to zero. Therefore the set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ consists of only one point $(b-1,b)$, where
$b=-(0,n_1,n_2,\dots)$.
\end{proof}
The above result gives us a complete description of the set of exceptions ${\mathcal E}_{\mathfrak{b}}$ to the finiteness condition. It is a subset of the boundary segment $b=a+1$ of $\mathcal{P}$. Moreover, each set ${\mathcal E}^m_{\mathfrak{b}}$ is uncountable because the recursive construction of a nonempty set ${\mathcal E}^{(n_i)}_{\mathfrak{b}}$ allows for an arbitrary number of successive blocks $A^{(k)}$ at step $(k+1)$. Formally, one constructs a surjective map $\mathfrak j: {\mathcal E}^m_{\mathfrak{b}}\rightarrow \mathbb N^{\mathbb N}$ by associating to a singleton set ${\mathcal E}^{(n_i)}_b$ a sequence of positive integers defined as
\centerline{$\mathfrak j({\mathcal E}^{(n_i)}_b)(k)=\#$ of consecutive $A^{(k)}$-blocks at the beginning of $(n_i)$.}
The set ${\mathcal E}_{\mathfrak{b}}$ has one-dimensional Lebesgue measure $0$. The reason is that all associated formal continued fractions expansions of $b=-(0,n_1,n_2,\dots)$ have only two consecutive digits; such formal expansions $(0,n_1,n_2,\dots)$ are valid (-1,0)-continued fractions. Hence the set of such $b$'s has measure zero by Proposition \ref{bdigits1}.
Analogous conclusions hold for ${\mathcal E}_{\mathfrak{a}}$. Thus we have
\begin{thm} \label{ex}
For any $(a,b)\in\mathcal{P}$, $b\neq a+1$, the finiteness condition holds. The set of exceptions ${\mathcal E}$ to the finiteness condition
is an uncountable set of one-dimensional Lebesgue measure $0$ that lies on the boundary $b=a+1$ of $\mathcal{P}$.
\end{thm}
Now we are able to provide the last ingredient in the proof of part (b) of the Main Result:
\begin{prop}\label{open-dense}
The strong cycle property is an open and dense condition.
\end{prop}\begin{proof}
It follows from Theorems \ref{b-cycle} and \ref{a-cycle} that the condition is open. Theorem \ref{ex} asserts that for all $(a,b)\in\mathcal{P}$, $b\neq a+1$ the finiteness condition holds, i.e. all we need to show is that if $b$ has the weak cycle property or the $(a,b)$-expansions of $Sb$ and $T^{-1}b$ are eventually periodic, then in any neighborhood of it there is a $b$ with the strong cycle property. For, if $b$ has the weak cycle property, it is a rational number obtained from the equation $\hat{f}^nT^mSb=0$, and any small perturbation of it will have the strong cycle property. Similarly, if the $(a,b)$-expansions of $Sb$ and $T^{-1}b$ are eventually periodic, then $b$ is a quadratic irrationality (see Remark \ref{quadratic}), and any neighborhood of $b$ will contain values
satisfying the strong cycle property. A similar argument holds for $Sa$ and $Ta$.
\end{proof}
\section{Invariant measures and ergodic properties}\label{s:9}
Based on the finite rectangular geometric structure of the domain $D_{a,b}$ one can study the measure-theoretic properties of the Gauss-type map $\hat f_{a,b}:[a,b)\rightarrow [a,b)$,
\begin{equation}\label{1dGauss}
\hat f_{a,b}(x)=-\frac{1}{x}-\left\lfloor -\frac{1}{x}\right\rceil_{a,b}\,,\quad \hat f_{a,b}(0)=0
\end{equation}
and its associated natural extension map $\hat F_{a,b}:\hat D_{a,b}\rightarrow \hat D_{a,b}$
\begin{equation}\label{2dGauss}
\hat F _{a,b}=\left(\hat f_{a,b}(x),-\frac{1}{y-\lfloor -1/x\rceil_{a,b}}\right).
\end{equation}
We remark that $\hat F_{a,b}$ is obtained from the map $F_{a,b}$ induced on the set $D_{a,b}\cap \{(x,y)| a\le y <b\}$ by a change of coordinates $x'=y$, $y'=-1/x$. Therefore the domain
$\hat D_{a,b}$ is easily identified knowing $D_{a,b}$ and may be considered its ``compactification".
We present the simple case when $\displaystyle 1\le -\frac{1}{a}\le b+1$ and $a-1\le -\frac{1}{b}\le -1$. The general
theory is the subject of our paper in preparation \cite{KU6}.
The truncated orbits of $a$ and $b$ are
\begin{eqnarray*}
& &\mathcal{L}_a =\left\{a+1,-\frac{1}{a+1}\right\}, \quad \mathcal{U}_a=\left\{-\frac{1}{a},-\frac{a+1}{a}\right\} \\
& &\mathcal{L}_b =\left\{-\frac{1}{b},\frac{b-1}{b}\right\}, \quad \mathcal{U}_b=\left\{b-1,-\frac{1}{b-1}\right\}
\end{eqnarray*}
and the end points of the cycles are $c_a=\frac{a}{a+1}$, $c_b=\frac{b}{1-b}$.
\begin{thm} If $1\le -\frac{1}{a}\le b+1$ and $a-1\le -\frac{1}{b}\le -1$, then the domain $\hat D_{a,b}$ of $\hat F_{a,b}$ is given by
\[
\begin{split}
\hat D_{a,b}&=[a,-\frac{1}{b}+1]\times[-1,0]\cup [-\frac{1}{b}+1,a+1]\times[-1/2,0]\\
&\quad \cup [b-1,-\frac{1}{a}-1]\times [0,1/2]\cup [-\frac{1}{a}-1,b]\times [0,1]
\end{split}
\]
and $\hat F_{a,b}$ preserves the Lebesgue equivalent probability measure
\begin{equation}\label{dnu}
d\nu_{a,b}=\frac{1}{\log[(1+b)(1-a)]}\frac{dxdy}{(1+xy)^2}\,.
\end{equation}
\end{thm}
\begin{proof}
The description of $\hat D_{a,b}$ follows directly from the cycle relations and the finite rectangular structure. It is a standard computation that the measure $\frac{dxdy}{(1+xy)^2}$ is preserved by $\hat F_{a,b}$, by using the fact that any M\"obius transformation, hence $F_{a,b}$, preserves the measure $\frac{du\,dw}{(w-u)^2}$, and $\hat F_{a,b}$ is obtained from $F_{a,b}$ by coordinate changes $x=w, y=-1/u$.
Moreover, the density $\frac{1}{(1+xy)^2}$ is bounded away from zero on $\hat D_{a,b}$ and
$$
\int_{\hat D_{a,b}}\frac{dxdy}{(1+xy)^2}=\log[(b+1)(1-a)]<\infty
$$
hence the last part of the theorem is true.
\end{proof}
\begin{figure}
\caption{Typical domain $\hat D_{a,b}$.}
\end{figure}
The Gauss-type map $\hat f_{a,b}$ is a factor of $\hat F_{a,b}$
(projecting on the $x$-coordinate), so one can obtain its smooth
invariant measure $d\mu_{a,b}$ by integrating $d\nu_{a,b}$ over $\hat
D_{a,b}$ with respect to the $y$-coordinate as explained in
\cite{AF3}. Thus, if we know the exact shape of the set $D_{a,b}$, we
can calculate the invariant measure precisely.
The measure $d\mu_{a,b}$ is ergodic and the measure-theoretic entropy
of $\hat f_{a,b}$ can be computed explicitly using Rokhlin's formula.
\begin{thm} $ $
The map $\hat f_{a,b}:[a,b)\rightarrow [a,b)$ is ergodic with respect to Lebesgue equivalent invariant probability measure
\begin{equation}
d\mu_{a,b}=\frac{1}{C_{a,b}}\left(\frac{\chi_{(a,-\frac{1}{b}+1)}}{1-x}+\frac{\chi_{(-\frac{1}{b}+1,a+1)}}{2-x}+\frac{\chi_{(b-1,-\frac{1}{a}-1)}}{x+2}+
\frac{\chi_{(-\frac{1}{a}-1,b)}}{x+1}\right)dx
\end{equation}
where $C_{a,b}=\log[(1+b)(1-a)]$. The measure-theoretic entropy of $\hat f_{a,b}$ is given by
\begin{equation}\label{entropy}
h_{\mu_{a,b}}(\hat f_{a,b})=\frac{\pi^2}{3\log[(1-a)(1+b)]}\,.
\end{equation}
\end{thm}
\begin{proof}
The measure $d\mu_{a,b}$ is obtained by integrating $d\nu_{a,b}$ over $\hat D_{a,b}$. Ergodicity follows from a more general result concerning one-dimensional expanding maps (see \cite{AF3,Zw}). To compute the entropy, we use Rokhlin's formula
\[
\begin{split}
h_{\mu_{a,b}}(\hat f_{a,b})&=\int_a^b\log|\hat f'_{a,b}|d\mu_{a,b}=-2\int_a^b\log|x|d\mu_{a,b}\\
&=\frac{-2}{C_{a,b}}\left(\int_a^{-\frac{1}{b}+1}\frac{\log|x|}{1-x}dx+\int_{-\frac{1}{b}+1}^{a+1}\frac{\log|x|}{2-x}dx\right.\\
&\qquad+\left.\int_{b-1}^{-\frac{1}{a}-1}\frac{\log|x|}{x+2}dx+\int_{-\frac{1}{a}-1}^b\frac{\log|x|}{x+1}dx \right)
\end{split}
\]
Let $I(a,b)$ denote the sum of the four integrals. The function depends smoothly on $a,b$, hence we can compute the partial derivatives $\partial I/\partial a$ and $\partial I/\partial b$. We get that both partial derivatives are zero, hence $I(a,b)$ is constant. Using $a=-1, b=1$, we get
$$I(a,b)=I(-1,1)=2\int_0^1\frac{\log|x|}{1+x}dx=-\pi^2/6\,,$$
and the entropy formula \eqref{entropy}.
\end{proof}
\end{document} |
\begin{document}
\begin{abstract}
The package \texttt{numericalsgps} performs computations with and for numerical semigroups. Recently also affine semigroups are admitted as objects for calculations. This manuscript is a survey of what the package does, and at the same time of the trending topics on numerical semigroups.
\end{abstract}
\title{\texttt{numericalsgps}}
\section{Introduction}
The motivation for the implementation of \texttt{numericalsgps} was the lack of a package specifically devoted to computations related to numerical semigroups. We had several functions implemented in distinct programming languages, each with its own interface, which made the communication between them very difficult. Thus we decided to unify all these procedures in a single package written in an appropriate language. The choice of language was \texttt{GAP} (\cite{gap}), mainly because of the experience achieved by the first author.
The first version of \texttt{numericalsgps} was released in 2005. Since then, the authors of this note have been adding new functionalities and replacing, when appropriate, algorithms with newer and faster ones.
This makes the package to permanently reflect the state of the art in the area.
Also some algorithms have different implementations that are used taking into account either the information stored in the object to deal with, or what packages the user has installed/loaded in his \texttt{GAP} installation.
The current version is 1.0 and can be found in \cite{numericalsgps}; the development version is available in \url{https://bitbucket.org/gap-system/numericalsgps}. The manual is over 90 pages long.
The heart of this manuscript is Section~\ref{sec:Contents}, which consists of a brief description of the contents of the package. Aiming to make the paper self contained, we give definitions when necessary. The few examples given are simple illustrations that can guide the reader to produce his own (assuming he has a working installation of GAP). We give pointers to the literature where one can find the implemented algorithms, which also makes the paper a kind of review of the computational procedures to deal with numerical and affine semigroups.
The paper ends with a reference to interactions with other commutative algebra packages. The use of external software frequently improves very much the execution time of the functions offered for affine semigroups.
\section{Contents}\label{sec:Contents}
We briefly describe in this section the contents of the package, using as a guideline the chapters of the manual.
\subsection{Introduction}
In the introduction of the manual, the basic definitions concerning numerical semigroups are given. The author interested in the topic can find all these definitions in \cite{ns}.
\subsection{Numerical semigroups}
A \emph{numerical semigroup} $S$ is a submonoid of the set of nonnegative integers $\mathbb N$ under addition, and such that $\mathbb N\setminus S$ has finitely many elements. This section describes several ways to define a numerical semigroup.
The elements in the set $\mathbb N\setminus S$ are usually called \emph{gaps}, and its cardinality is the \emph{genus} of $S$. We say that a gap $g$ is a \emph{fundamental gap} of $S$ if it is maximal in the set of gaps with respect to division, or in other words, $g\not\in S$, $2g\in S$ and $3g\in S$. Gaps and fundamental gaps fully determine the semigroup $S$, and so, they can be used to describe a numerical semigroup. Functions are provided to determine if a list of nonnegative integers is a list of gaps or fundamental gaps; and also procedures to define numerical semigroups by means of these lists.
Since $\mathbb N\setminus S$ is finite, the maximum of $\mathbb Z\setminus S$ exists and it is known as the \emph{Frobenius number} of $S$ (there is actually a huge number of papers dealing with the computation/bounds of the Frobenius number of numerical semigroups; see for instance \cite{alfonsin}). The \emph{conductor} of $S$ is just the Frobenius number of $S$ plus one, and has the property that it is the least nonnegative integer $c$ such that $c+\mathbb N\subseteq S$. We call the elements in $S$ less than or equal to the conductor the \emph{small elements} of $S$. Clearly, the semigroup $S$ is uniquely determined by its small elements. A procedure is implemented to check if a list of integers is the set of small elements of a numerical semigroup, and also a function to define a numerical semigroup if this is the case in terms of this list.
If we take a closed interval $I=[a/b,c/d]$ with $a$, $b$, $c$ and $d$ positive integers such that $a/b<c/d$, then the set $ \bigcup_{k\in\mathbb N}(\mathbb N\cap kI)$ is a numerical semigroup (and coincides with the set of all numerators of rational elements in $I$). It can be shown that this class of semigroups coincides with the numerical semigroups that consist of nonnegative integer solutions to inequalities of the form $\alpha x\bmod \beta \le \gamma x$, which are known as \emph{proportionally modular} numerical semigroups. If $\gamma=1$, then they are simply called \emph{modular}. Hence we can also define a numerical semigroup in terms of the modular or proportionally modular inequality (giving a list with the parameters) or by an interval (providing its ends). Note that distinct intervals can yield the same numerical semigroup (and the same holds for proportionally modular inequalities). Membership to a numerical semigroup given by a proportionally modular inequality is trivial. Also specific fast algorithms exist for the computation of the Frobenius number if this is the case. For some kind of semigroups where testing being proportionally modular is fast, we perform this test and keep the inequality.
Another way to uniquely determine a numerical semigroup is by any of its Ap\'ery sets of its nonzero elements. Let $S$ be a numerical semigroup and let $n\in S\setminus\{0\}$. The \emph{Ap\'ery set} of $n$ in $S$ is the set $\{s\in S\mid s-n\not\in S\}$. This set has precisely $n$ elements, one for each congruence class modulo $n$. Once we know an Ap\'ery set, the cost of membership to $S$ is small, and also the Frobenius number and genus can be easily computed. Thus if the Ap\'ery set with respect to the least positive integer in $S$ (its \emph{multiplicity}) is computed, we store it as part of the object $S$. Many other invariants depend also on this specific Ap\'ery set as we will refer later. We provide a function to determine if a given list of integers is the Ap\'ery set of an element (the length of the list) in a numerical semigroup, and also to define a numerical semigroup by means of the Ap\'ery set.
Ap\'ery sets and proportionally modular inequalities can be seen as particular cases of periodic subadditive functions. We say that $f:\mathbb N\to \mathbb N$ is subadditive if $f(0)=0$ and $f(i+j)\le f(i)+f(j)$ for all $i,j\in \mathbb N$. Associated to $f$ we can define the semigroup of nonnegative integers $x$ such that $f(x)\le x$. This set is a numerical semigroup when $f$ is periodic (with positive period). We represent a periodic function by the values on the integers less than the period, and thus by a list of nonnegative integers. We give a function to test if a list corresponds to a subadditive function, and if so, a numerical semigroup can be defined by using this list as argument.
Let $A$ be a nonempty subset of $\mathbb N$. The monoid generated by $A$, denoted $\langle A\rangle$, is the set of all (finite) sums of elements of $A$. We say that $A$ \emph{generates} the numerical semigroup $S$ if $\langle A\rangle =S$. Observe that if this is the case, then the multiplicity of $S$ must be in $A$, and whenever two generators are congruent modulo the multiplicity, we do not need the largest one to generate the same semigroup. So we can always think of $A$ to be finite (since all its elements can be chosen to be incongruent modulo the multiplicity). Clearly, $S$ is uniquely determined by any of its systems of generators. Among these, there is only one minimal with respect to inclusion (actually also with respect to cardinality) which is $S^*\setminus (S^*+S^*)$, where $S^*=S\setminus\{0\}$. The cardinality of this set is known as the \emph{embedding dimension} of $S$. We give functions to define a numerical semigroup in terms of a generating set.
\begin{verbatim}
gap> s:=NumericalSemigroup("interval",71/5,153/8);
<Proportionally modular numerical semigroup satisfying 765x mod 10863 <= 197x >
gap> t:=NumericalSemigroup(15, 16, 17, 18, 19, 29, 43);
<Numerical semigroup with 7 generators>
gap> s=t;
true
\end{verbatim}
\subsection{Basic operations}
Among the basic operations of a numerical semigroup related to the contents of the preceding section, the package offers: computation of the multiplicity, generating system, minimal system of generators, small elements, gaps, embedding dimension, Ap\'ery sets, Frobenius number, conductor and fundamental gaps.
Some functions have different methods depending on what is known about the semigroup. As an example, if the Ap\'ery set is known, the Frobenius number will be computed by using Selmer's formulas (see for instance \cite[Proposition 2.12]{ns}).
Given $S$ a numerical semigroup, we also give a procedure to list the first $n$ integers in $S$, with $n$ a positive integer.
Associated to $S$ we can define the partial order relation $\le_S$ on $\mathbb Z$ as follows. We write
\[a\le_S b \hbox{ if } b-a\in S.\]
The set of maximal elements of $\mathbb Z\setminus S$ with respect to this order are known as \emph{pseudo-Frobenius} numbers (actually the Frobenius number is one of them), and their cardinality is the \emph{type} of $S$. We provide functions to compute the pseudo-Frobenius numbers (that can be also obtained from the Ap\'ery sets) and the type of $S$.
Let $m$ be the multiplicity of $S$. Then the elements in the Ap\'ery set of $m$ in $S$ are $w_i=k_im+i$ for $i\in\{0,\ldots, m-1\}$ with $k_0=0$ and $(k_1,\ldots, k_{m-1})$ fulfilling a set of inequalities (\cite{london}). In this way a numerical semigroup with multiplicity $m$ corresponds with a point inside a polytope. We give a function that outputs the set of inequalities describing this polytope, and also to compute $(k_1,\ldots,k_{m-1})$, which are known as the Kunz coordinates of $S$.
An element $s\in S$ is a minimal generator if $S\setminus \{s\}$ is again a numerical semigroup. Hence the dual of this property could be an element $g\not \in S$ such that $S\cup\{g\}$ is also a numerical semigroup. These elements are known as \emph{special gaps}. We give a function to compute them, that can be used to compute oversemigroups of a given semigroup (Section \ref{constructing}).
\subsection{Presentations of a numerical semigroup} \label{presentations}
Let $S$ be a numerical semigroup minimally generated by $\{n_1,\ldots, n_e\}$. Then the monoid morphism $\varphi:\mathbb N^e\to S$, $\varphi(a_1,\ldots , a_e)=\sum_{i=1}^e a_i n_i$ is an epimorphism, known as the \emph{factorization homomorphism} of $S$. Consequently $\mathbb N^e/\ker\varphi$ is isomorphic to $S$, where $\ker\varphi=\{ (a,b)\in \mathbb N^e\times \mathbb N^e\mid \varphi (a)=\varphi(b)\}$. A \emph{presentation} of $S$ is a generating system of the congruence $\ker\varphi$. A \emph{minimal presentation} of $S$ is a minimal generating system of $\ker\varphi$ (again, no matter if you think about minimal with respect to inclusion or to cardinality; both concepts coincide for numerical semigroups; see \cite[Chapter 7]{ns}).
Minimal presentations can be computed from graphs associated to elements in the numerical semigroup. Let $n$ be a nonzero element of $S$. We define the graph associated to $n$ as the graph with vertices the generators $n_i\in\{n_1,\ldots, n_e\}$ such that $n-n_i\in S$; and $n_in_j$ is an edge if $n-(n_i+n_j)\in S$. There is a function to compute the graph associated to $n$. A minimal presentation is constructed from those graphs that are not connected (there are finitely many of them and can be found by using, once more the Ap\'ery set of the multiplicity). The elements having an associated non connected graph are called \emph{Betti elements} of $S$. A procedure to find the set of Betti elements of $S$ is given in the package; and also to find a minimal presentation of $S$.
Some numerical semigroups admit essentially a unique minimal presentation, in the sense that if $\sigma$ and $\tau$ are two minimal presentations (and thus have the same cardinality), whenever $(a,b)\in \sigma$, either $(a,b)\in \tau$ or $(b,a)\in \tau$ (that is, unique up to permutation of the pairs of the presentation). In particular, generic numerical semigroups have unique minimal presentations (\cite{b-gs-g}). The semigroup $S$ is \emph{generic} if every pair $(a,b)$ in a minimal presentation of $S$ has the property that $a-b$ has no zero coordinates. We give procedures to detect whether or not $S$ is uniquely presented or generic.
A straight generalization of the graph associated to $n\in S$ is the following: we can construct the simplicial complex of subsets $A$ of $\{n_1,\ldots, n_e\}$ such that $n-\sum_{a\in A}a \in S$. This set is known as the shaded set of $n$ in $S$ and has some nice properties associated to the generating function of $S$ (\cite{s-w}).
The congruence $\ker\varphi$ is also a submonoid of $\mathbb N^e\times \mathbb N^e$, which is generated by its nonzero minimal elements with respect to the usual partial ordering on $\mathbb N^e\times \mathbb N^e$ (\cite[Chapter 8]{fg}). If $(a,b)$ is one of these minimal generators, then $\varphi(a)=\varphi(b)\in S$ is called a \emph{primitive element} of $S$. These elements play an important role in factorization properties of $S$, and consequently we provide a function to compute them.
\begin{verbatim}
gap> s:=NumericalSemigroup(5,7,9);
<Numerical semigroup with 3 generators>
gap> MinimalPresentationOfNumericalSemigroup(s);
[ [ [ 0, 2, 0 ], [ 1, 0, 1 ] ], [ [ 4, 1, 0 ], [ 0, 0, 3 ] ],
[ [ 5, 0, 0 ], [ 0, 1, 2 ] ] ]
\end{verbatim}
\subsection{Constructing numerical semigroups from others}\label{constructing}
We have already seen that adding a special gap to a numerical semigroup produces a new numerical semigroup, and the same holds if we remove a minimal generator. The intersection of two numerical semigroups also produces a numerical semigroup. Functions performing these tasks are provided in \texttt{numericalsgps}.
Let $p$ be a positive integer and $S$ be a numerical semigroup. The set $S/p=\{x\in \mathbb N\mid px \in S\}$ is again a numerical semigroup, called the \emph{quotient} of $S$ by $p$. A function is given to compute this new semigroup.
A kind of inverse is the notion of \emph{multiple} of a numerical semigroup: given an integer $a>1$ and a numerical semigroup $S$, then $aS$ is a submonoid of $\mathbb N$, but it is not a numerical semigroup. If we add to this set all the integers greater than or equal to a given positive integer, say $b$, then we obtain a numerical semigroup: $aS\cup \{b,\to\}$. If we start from $\mathbb N$, and we repeat this operation several times, then we construct the set of what is known in the literature as \emph{inductive numerical semigroups} (see for instance \cite{f-gs} and the references therein).
For a numerical semigroup $S$ the set of numerical semigroups $T$ with $S\subseteq T\subseteq \mathbb N$ is finite (the \emph{oversemigroups} of $S$), since the genus of $S$ is always finite by definition. We provide a function to compute the set of all oversemigroups of a given semigroup. Also there is a procedure to compute all numerical semigroups with given Frobenius number (this is done using the concept of fundamental gap as explained in \cite{fund-gap}) and another function to compute the set of all numerical semigroups with given genus $g$ (by constructing the tree of all numerical semigroups up to the level $g$).
\begin{verbatim}
gap> s:=NumericalSemigroup(5,7,9);
<Numerical semigroup with 3 generators>
gap> Length(OverSemigroupsNumericalSemigroup(s));
15
gap> Length(NumericalSemigroupsWithFrobeniusNumber(21));
1828
\end{verbatim}
We provide functions implementing the algorithms given in \cite{DGSRP15} to compute the set of all numerical semigroups having a given set as set of pseudo-Frobenius numbers.
\begin{verbatim}
gap> pf := [13,24,25];;
gap> NumericalSemigroupsWithPseudoFrobeniusNumbers(pf);
[ ]
gap> pf := [13,19,25];;
gap> NumericalSemigroupsWithPseudoFrobeniusNumbers(pf);
[ <Numerical semigroup>, <Numerical semigroup>, <Numerical semigroup> ]
\end{verbatim}
\subsection{Irreducible numerical semigroups}\label{irreducibles}
A numerical semigroup is \emph{irreducible} if it cannot be expressed as the intersection of two numerical semigroups properly containing it. This is equivalent to saying that it is maximal in the set of numerical semigroups with its same Frobenius number. Every numerical semigroup can be expressed (though not uniquely) as an intersection of irreducible numerical semigroups. We give a function to do this in \texttt{numericalsgps} (see \cite[Chapter 3]{ns} for a description of the algorithm).
We also give a procedure to compute all irreducible numerical semigroups with given Frobenius number: the procedure is based on \cite{bl-r-irr}. This is actually equivalent to computing all irreducible numerical semigroups with given genus. This is due to the fact that if $f$ is the Frobenius number of an irreducible numerical semigroup, then either $g=(f+1)/2$ or $g=(f+2)/2$, depending on the parity of $f$.
A numerical semigroup $S$ with Frobenius number $f$ is \emph{symmetric} if whenever $x\in\mathbb Z\setminus S$, $f-x\in S$. The class of symmetric numerical semigroups coincides with that of irreducible numerical semigroups with odd Frobenius number. Irreducible numerical semigroups with even Frobenius number are called \emph{pseudo-symmetric}. We give tests to detect if a numerical semigroup is in any of these classes.
A particular class of irreducible numerical semigroups is the set of numerical semigroups with the least possible number of relations in its minimal presentations. These semigroups are called \emph{complete intersections}, and it can be shown that every complete intersection numerical semigroup is either $\mathbb N$ or a gluing of two complete intersections (see for instance \cite[Chapter 8]{ns}). We say that $S=S_1+S_2$, with $S$ a numerical semigroup and $S_1$ and $S_2$ submonoids of $\mathbb N$, is a \emph{gluing} of $S_1$ and $S_2$ if $\gcd(S_1)\gcd(S_2)\in S_1\cap S_2$ and $\gcd(S_1)\neq 1\neq \gcd(S_2)$. We give procedures to detect if a numerical semigroup can be expressed as a gluing of two of its submonoids, and if it is a complete intersection.
We also implement the procedures presented in \cite{ci} to compute the set of all complete intersection numerical semigroups with fixed Frobenius number (equivalently fixed genus, since we are still dealing with irreducible numerical semigroups). We present procedures to detect if a numerical semigroup is \emph{free} (either $\mathbb N$ or a gluing of a free numerical semigroup with a copy of $\mathbb N$) and to calculate all free numerical semigroups with fixed Frobenius number. The same is done for \emph{telescopic} numerical semigroups (these are free numerical semigroups where the gluing is performed in the same order given by the generators) and numerical semigroups associated to irreducible planar curve singularities (a particular case of telescopic numerical semigroups; see \cite{ci} for more details).
A generalization of the concept of irreducible numerical semigroup is the following. We have seen that the genus $g$ of an irreducible numerical semigroup $S$ with Frobenius number $f$ is either $g=(f+1)/2$ if $f$ is odd (symmetric), or $g=(f+2)/2$ if $f$ is even (pseudo-symmetric). It turns out that the type of symmetric numerical semigroups is 1 and the type of pseudo-symmetric numerical semigroups is 2. So if $S$ is an irreducible numerical semigroup with genus $g$, Frobenius number $f$ and type $t$, then $g=(f+t)/2$. We say that a numerical semigroup $S$ is \emph{almost-symmetric} if its genus is one half of its Frobenius number plus its type. We give a function to test if a numerical semigroup is almost-symmetric and include the procedure presented in \cite{almost} to compute the set of almost symmetric numerical semigroups with fixed Frobenius number.
\begin{verbatim}
gap> s:=NumericalSemigroup(5,7,9);
<Numerical semigroup with 3 generators>
gap> DecomposeIntoIrreducibles(s);
[ <Numerical semigroup>, <Numerical semigroup> ]
gap> List(last,MinimalGeneratingSystem);
[ [ 5, 7, 8, 9 ], [ 5, 7, 9, 11 ] ]
gap> Length(TelescopicNumericalSemigroupsWithFrobeniusNumber(101));
86
gap> Length(AlmostSymmetricNumericalSemigroupsWithFrobeniusNumber(31));
1827
\end{verbatim}
\subsection{Ideals of numerical semigroups}\label{ideals}
A nonempty subset $I$ of $\mathbb Z$ is a \emph{relative ideal} of a numerical semigroup $S$ if $I+S\subseteq I$ and there exists $d\in \mathbb Z$ such that $d+I\subseteq S$ (the concept of relative ideal corresponds to that of fractional ideal in domains). Every relative ideal $I$ of $S$ can be expressed in the form $I=\{i_1,\ldots, i_n\}+S$ for some integers $i_j$. The set $\{i_1,\ldots, i_n\}$ is a \emph{generating set} of the ideal, and it is minimal if no proper subset generates the same ideal.
\begin{verbatim}
gap> s:=NumericalSemigroup(3,4,5);
<Proportionally modular numerical semigroup satisfying 5x mod 15 <= 2x >
gap> 5+s;
<Ideal of numerical semigroup>
gap> [-1,2]+s;
<Ideal of numerical semigroup>
gap> MinimalGeneratingSystem(last);
[ -1 ]
\end{verbatim}
We provide functions for computing the small elements of an ideal (the definition is analogous to that in numerical semigroups), Ap\'ery sets (and tables; see \cite{cbjza13}), the ambient numerical semigroup, membership, and also some basic operations as addition, union, subtraction ($I-J=\{z\in \mathbb Z\mid z+J\subseteq I\}$), set difference, multiplication by an integer, translation by an integer, intersection, blow-up ($\bigcup_{n\in \mathbb N} nI-nI$) and $*$-closure with respect to a family of ideals (\cite{spi}).
Numerical semigroups are ``local'' in the sense that there is a unique maximal ideal: the set of nonzero elements of the semigroup. Also there exists a \emph{canonical ideal}, which for a numerical semigroup $S$ with Frobenius number $f$ is defined as $\{z\in \mathbb Z\mid f-z\not\in S\}$ (see for instance \cite{bf06}).
The \emph{Hilbert function} associated to an ideal $I$ of a numerical semigroup $S$ is the function that maps every $n\in \mathbb N$ to $nI\setminus(n+1)I$. The \emph{reduction number} of $I$ is the least positive integer $n$ such that $\min(I)+nI=(n+1)I$. We give functions to compute the reduction number and the Hilbert function associated to an ideal. Also we give a procedure that computes the microinvariants of a numerical semigroup which are used to determine if the graded ring associated to the semigroup ring $K\llbracket S\rrbracket$ is Cohen-Macaulay (see \cite{bf06}).
Finally we give a function to test if a numerical semigroup is a monomial semigroup ring following \cite{mi02}. A numerical semigroup $S$ is said to be \emph{monomial} if for any ring $R$ with $K\subseteq R\subseteq K\llbracket x\rrbracket$ and such that the algebraic closure of $R$ is $K\llbracket x\rrbracket$ ($K$ a field with characteristic zero)
and $\mathrm v(R)=S$, we have that $R$ is a semigroup ring. Here, $\mathrm v$ denotes the usual valuation.
\subsection{Numerical semigroups with maximal embedding dimension}
Recall that the embedding dimension of a numerical semigroup is the cardinality of its unique minimal generating system. Clearly, two minimal generators cannot be congruent modulo the multiplicity of the semigroup (the least positive integer in the semigroup). As a consequence, the embedding dimension is at most the multiplicity of the semigroup. Thus we say that a numerical semigroup $S$ has \emph{maximal embedding dimension} if its embedding dimension equals its multiplicity.
The set of maximal embedding dimension numerical semigroups with fixed multiplicity, say $m$, is closed under intersection, and also if $S\neq \{0\}\cup(m+\mathbb N)$, then $S\cup\{f\}$ is also a maximal embedding dimension numerical semigroup, with $f$ the Frobenius number of $S$. This in particular implies that if we are given a numerical semigroup that is not of maximal embedding dimension, we can consider the set of all maximal embedding dimension numerical semigroups with its same multiplicity containing it, and then the intersection of all of them, obtaining in this way the maximal embedding dimension closure of the given semigroup. Following this idea one can define the concept of minimal generators with respect to this class: the elements in the semigroup so that the closure of them yields the given semigroup. These elements are precisely the elements $x$ in a maximal embedding dimension numerical semigroup $S$ (together with the multiplicity) such that $S\setminus\{x\}$ is a maximal embedding dimension numerical semigroup.
\begin{verbatim}
gap> s:=NumericalSemigroup(3,5,7);;
gap> MinimalMEDGeneratingSystemOfMEDNumericalSemigroup(s);
[ 3, 5 ]
\end{verbatim}
We also give functions to compute the maximal embedding dimension closure of an arbitrary numerical semigroup.
If $S$ is a numerical semigroup with multiplicity $m$, then $S$ has maximal embedding dimension if and only if for every $x,y\in S\setminus\{0\}$, $x+y-m\in S$. A natural generalization of this pattern is the following. We say that a numerical semigroup $S$ is Arf if for any $x,y,z\in S$ with $x\ge y\ge z$, then $x+y-z\in S$. Clearly, every Arf numerical semigroup has maximal embedding dimension. Also, the class of Arf numerical semigroups is closed under finite intersections and the adjoin of the Frobenius number (of course if we are considering semigroups other than $\mathbb N$). Thus the class of Arf numerical semigroups is a Frobenius variety (\cite[Chapter 6]{ns}). Again, it makes sense to talk about minimal generators with respect to this class, and also about the Arf closure of a given numerical semigroup (the intersection of all Arf numerical semigroups containing it). We give functions computing both things: Arf minimal generating sets and Arf closures. Also we provide a method to detect if a numerical semigroup is Arf, and a procedure that calculates the set of all Arf numerical semigroups with given Frobenius number.
Finally, we consider in this section the class of saturated numerical semigroups, which turns out to be again a Frobenius variety (closed under intersections and the adjoin of the Frobenius number). A numerical semigroup is \emph{saturated} if for every $s,s_1,\ldots, s_r\in S$ with $s_i\le s$ for all $i$ and every $z_1,\ldots, z_r\in\mathbb Z$ such that $z_1s_1+\cdots +z_rs_r\ge 0$ one gets $s+z_1s_1+\cdots +z_rs_r\in S$. We provide for saturated semigroups the analogous functions that we described in the above paragraph for Arf semigroups.
\subsection{Nonunique invariants for factorizations in numerical semigroups}
Let $S$ be a numerical semigroup minimally generated by $\{n_1,\ldots, n_e\}$. Recall that we defined a monoid epimorphism in Section \ref{presentations}, $\varphi:\mathbb N^{e}\to S$, $\varphi(a_1,\ldots, a_e)=a_1n_1+\cdots +a_en_e$. Observe that for $s\in S$, $\mathsf Z(s)=\varphi^{-1}(s)$ collects the different expressions of $s$ in terms of the generators of $S$. Thus we say that $\mathsf{Z}(s)$ is the set of \emph{factorizations} of $s$. The cardinality of $\mathsf{Z}(s)$ is usually known as the \emph{denumerant} of $s$. We use \texttt{RestrictedPartitions} to compute the set of factorizations of $s$.
The number of connected components of the graph associated to $s\in S$ (Section \ref{presentations}) coincides with the number of connected components of the graph with vertices given by $\mathsf Z(s)$ and $zz'$ is an edge provided that $z\cdot z'\neq 0$.
Given $z=(z_1,\ldots,z_e)$ a factorization of $s\in S$, we write $|z|$ to denote the \emph{length} of $z$, $|z|=z_1+\cdots +z_e$. The \emph{maximal denumerant} of $s$ is the number of factorizations of $s$ with maximal length. Even though the denumerant is not bounded while $s$ increases in $S$, the maximal denumerant is finite and can be effectively computed (\cite{max-den}). We include this algorithm in the package as well as tests for supersymmetry and additiveness (see \cite{max-den} for details).
Let $A$ be the Ap\'ery set of $n_e$ in $S$. A subset $L$ of $\mathbb N^{e-1}$ is an \textsf{L}shape associated to $S$ if (1) $L\subset \mathsf Z(A)$ (the set of factorizations of the elements in $A$), (2) for every $a\in A$, $\#(L\cap \mathsf Z(a))=1$, and (3) for every $l\in L$, if $l'\in \mathbb N^{e-1}$ is such that $l'\le l$, then $l'\in L$. These sets give information on the factorizations on numerical semigroups (\cite{lformas-fact}), and this is why we have included a procedure to compute them.
The set of lengths of factorizations of $s$ is always finite (due to Dickson's lemma) and consequently we can write it as $\{l_1<\cdots <l_t\}$. The set $\{l_2-l_1,l_3-l_2,\ldots, l_t-l_{t-1}\}$ is the \emph{Delta set} associated to $s$. The Delta set of $S$ is the union of all the Delta sets of $s$. This set is finite, and its maximum is achieved in one of the Betti elements of $S$ (\cite{deltas}).
The \emph{elasticity} of an element $s$ is the ratio between the maximal and minimal lengths of factorizations of $s$. It was introduced to measure how far is a domain from being half-factorial (all factorizations of all the elements have the same length). No numerical semigroup other than $\mathbb N$ is half-factorial, which is a unique factorization monoid. We give a procedure to compute this invariant.
Given $z=(z_1,\ldots, z_e), z'=(z_1',\ldots, z_e')\in \mathsf Z(s)$, we denote by $z\wedge z'=(\min(z_1,z_1'), \ldots, \min(z_e,z_e'))$, which corresponds to the ``common part'' of these factorizations. The \emph{distance} between $z$ and $z'$ is $\mathrm d(z,z')=\max(|z-z\wedge z'|, |z'-z\wedge z'|)$. The \emph{catenary degree} of $s$ is the least positive integer such that for any two factorizations of $s$, there exists a chain of factorizations such that the distance between two consecutive factorizations is bounded by this integer. The catenary degree of $S$ is defined as the supremum of the catenary degrees of its elements. This supremum is reached in one of its Betti elements (\cite{cat-tame}). We give procedures to compute the catenary degree of a set of factorizations and of a numerical semigroup. Also other variants of catenary degrees are included: adjacent, homogeneous, equal or monotone catenary degree (see \cite{g-hk, hom}). For the homogenization of a numerical semigroup we offer a series of auxiliary functions.
The \emph{tame degree} of $s\in S$ is the least positive integer $t$ such that for every factorization $z$ of $s$ and every integer $i\in\{1,\ldots, e\}$ such that $s-n_i\in S$, there exists another factorization $z'$ of $s$ with nonzero $i$th coordinate and such that the distance to $z$ is less than or equal to $t$ (there exists a factorization in which $n_i$ is involved at a distance at most $t$). The tame degree of the semigroup $S$ is the supremum of all the tame degrees of its elements, and it is reached in one of its primitive elements (also in an element with associated noncomplete graph). We give functions to compute the tame degree of a set of factorizations and that of the semigroup.
Recall that associated to the numerical semigroup $S$, we can define the partial order on $\mathbb Z$, $a\le_S b$ if $b-a\in S$. Thus $(\mathbb Z,\le_S)$ is a poset, and one can define the M\"obius function associated to it. We implement the procedure presented in \cite{mob}.
The last invariant we give procedures to compute is the $\omega$-primality, which determines how far is an element from being prime. The \emph{$\omega$-primality} of $s\in S$ is the least positive integer $\omega$ such that whenever $s \le_S \sum_{a\in A} a$ with $A\subseteq S$ finite, there exists $\Omega\subseteq A$ with $\#\Omega\le \omega$ such that $s\le_S \sum_{a\in \Omega}a$. Clearly, if the omega primality is one, then the element is prime, if we look at $\le_S$ as a division.
The $\omega$-primality of the semigroup is the maximum of the $\omega$-primalities of its minimal generators. Initially we used the algorithm presented in \cite{b-gs-g}. Now we use a faster procedure implemented by C. O'Neill (see Section \ref{contrib}).
\begin{verbatim}
gap> l:=FactorizationsIntegerWRTList(100,[10,11,13,15]);
[ [ 10, 0, 0, 0 ], [ 1, 7, 1, 0 ], [ 3, 4, 2, 0 ], [ 5, 1, 3, 0 ],
[ 0, 2, 6, 0 ], [ 3, 5, 0, 1 ], [ 5, 2, 1, 1 ], [ 0, 3, 4, 1 ],
[ 2, 0, 5, 1 ], [ 7, 0, 0, 2 ], [ 0, 4, 2, 2 ], [ 2, 1, 3, 2 ],
[ 0, 5, 0, 3 ], [ 2, 2, 1, 3 ], [ 4, 0, 0, 4 ], [ 1, 0, 0, 6 ] ]
gap> TameDegreeOfSetOfFactorizations(l);
5
gap> CatenaryDegreeOfSetOfFactorizations(l);
3
\end{verbatim}
\subsection{Polynomials, formal series and numerical semigroups}
Let $S$ be a numerical semigroup. The \emph{Hilbert series} (not to be confused with the Hilbert function in Section \ref{ideals}) is the formal series $\mathrm H_S(x)= \sum_{s\in S} x^s$. Clearly $\sum_{n\in \mathbb N} x^n=1/(1-x)=\sum_{s\in\mathbb N\setminus S}x^s+\mathrm H_S(x)$. Hence $\mathrm P_S(x)=1+(x-1)\sum_{s\in \mathbb N\setminus S} x^s=(1-x)\mathrm H_S(x)$ is a polynomial, which we call the \emph{polynomial associated} to $S$ (see \cite{moree}). We provide functions to compute both the polynomial and Hilbert series of a numerical semigroup.
It turns out that when $S$ is a complete intersection, the polynomial associated to $S$ has all its roots in the unit circumference (and zero is not a root, which by Kronecker's lemma means that all the roots are roots of unity, or equivalently, it is a product of cyclotomic polynomials). We give functions to determine if a monic polynomial with integer coefficients has all its roots in the unit circle, and to do this we need two auxiliary implementations: that of being cyclotomic and the computation of the Graeffe polynomial (see \cite{c-gs-m} for details). A numerical semigroup is said to be \emph{cyclotomic} if its associated polynomial has all its roots in the unit circle.
Symmetry (see Section \ref{irreducibles}) can also be characterized in terms of the associated polynomial: a numerical semigroup is symmetric if and only if its associated polynomial is self-reciprocal (a palindrome if we look at the coefficients).
Let $K$ be an algebraically closed field. And let $f\in K[x,y]$ represent an irreducible curve with one place at infinity. Take $g\in K[x,y]$ and set $\mathrm{int}(f,g)=\dim_K (K[x,y])/(f,g)$. Then the set $\{\mathrm{int}(f,g)\mid g\not \in (f)\}$ is a numerical semigroup. We give a procedure to implement it (see \cite{a-gs}). This kind of semigroups are generated by what is called a $\delta$-sequence. There is a function to compute all $\delta$-sequences with fixed Frobenius number (equivalently genus since these semigroups are complete intersections and thus symmetric). Also associated to any $\delta$-sequence there is a ``canonical'' planar curve, and we offer a method to compute it.
Let $F$ be a set of polynomials. Then the set of values (respectively degrees) of the series (respectively polynomials) in the algebra $K\llbracket F\rrbracket$ (respectively $K[F]$) is a submonoid of $\mathbb N$. Under certain conditions it is a numerical semigroup, and we provide functions to compute it, as well as to determine a basis of the algebra $K\llbracket F\rrbracket$ (or $K[F]$) such that the values (or degrees) minimally generate the semigroup of values of this algebra (see \cite{a-gs-m}).
\begin{verbatim}
gap> t:=Indeterminate(Rationals,"t");;
gap> l:=[t^4,t^6+t^7,t^13];
[ t^4, t^7+t^6, t^13 ]
gap> SemigroupOfValuesOfCurve_Local(l);
<Numerical semigroup with 4 generators>
gap> MinimalGeneratingSystem(last);
[ 4, 6, 13, 15 ]
gap> SemigroupOfValuesOfCurve_Local(l,"basis");
[ t^4, t^7+t^6, t^13, t^15 ]
\end{verbatim}
\subsection{Affine semigroups}
An \emph{affine semigroup} is a finitely generated submonoid of $\mathbb N^n$ for some positive integer $n$. In the package, affine semigroups can be defined by means of generators, as the set of elements in the positive orthant of a subgroup of $\mathbb Z^n$ (full semigroups) or as the set of elements in the positive orthant of a cone (normal semigroups). Our intention is to provide as many functions as possible for affine semigroups as we offer for numerical semigroups. Along this line, we present methods for membership, computing minimal presentations, determining gluings, Betti and primitive elements, and the whole series of procedures for nonunique factorization invariants (an overview of the existing methods for the calculation of these invariants can be found in \cite{overview_non_unique}). New procedures are now under development based on Hilbert functions and binomial ideals (\cite{chris}).
As an example, let us do some computations with $G\cap\mathbb N^3$, where $G$ is the subgroup of $\mathbb Z^3$ with defining equations $x+y\equiv 0\bmod 2$ and $x+z\equiv 0\bmod 2$ (this is actually the block monoid associated to $\mathbb Z_2^3$; see \cite{g-hk} for the definition of block monoid).
\begin{verbatim}
gap> a:=AffineSemigroup("equations",[[[1,1,0],[0,1,1]],[2,2]]);
<Affine semigroup>
gap> GeneratorsOfAffineSemigroup(a);
[ [ 0, 0, 2 ], [ 0, 2, 0 ], [ 2, 0, 0 ], [ 1, 1, 1 ] ]
gap> OmegaPrimalityOfAffineSemigroup(a);
3
gap> BettiElementsOfAffineSemigroup(a);
[ [ 2, 2, 2 ] ]
\end{verbatim}
\subsection{Random}\label{random}
Based on the methods provided by GAP to create ``random'' objects, we provide some functions for ``random'' affine and numerical semigroups. These are particularly useful for producing examples. Furthermore, they are extensively used each time new algorithms are implemented and tests need to be made.
\begin{verbatim}
gap> l:=List([1..20], _->RandomNumericalSemigroup(5,200));;
gap> ls:=Filtered(l, s-> 1+FrobeniusNumber(s)=GenusOfNumericalSemigroup(s)*2);;
gap> List(ls,MinimalGeneratingSystem);
[ [ 8, 103 ], [ 25, 109 ], [ 35, 57, 125 ], [ 3, 52 ], [ 15, 170, 178 ],
[ 3, 145 ], [ 21, 68, 153 ] ]
\end{verbatim}
\subsection{Contributions}\label{contrib}
There is a special section devoted to contributions. So far we are happy to include functions implemented by A. Sammartano and C. O'Neill (apart from those co-implementations with J. I. Garc\'\i a-Garc\'\i a and A. S\'anchez-R.-Navarro).
The functions implemented by Sammartano are mainly focused on deriving properties of the semigroup algebra $k[[S]]$ and its associated graded algebra from properties of the numerical semigroup $S$. He offers procedures to determine purity and $M$-purity of $S$ (\cite{br}), Buchbaum (\cite{d-m-m}), Gorenstein (\cite{d-m-s}) and complete intersection (\cite{d-m-s-13b}) property for the graded algebra; some special shapes of the Ap\'ery sets ($\alpha$, $\beta$ and $\gamma$-rectangular, see \cite{d-m-s-13a}); and the type sequence of a numerical semigroup (\cite{b-d-f}).
O'Neill on his side offers methods dealing with non unique factorization invariants: factorizations, $\omega$-primality and Delta sets for a list of elements in a numerical semigroup, Delta sets for the whole semigroup, and periodicity for the Delta sets (\cite{b-on-p}).
\section{Interaction with other packages}
Since the first release of the package many other packages have come onto the scene (some still under development). We have tried to take advantage of this. Dealing with affine and numerical semigroups translates in many cases to computing nonnegative integer solutions of linear Diophantine equations or Gr\"obner basis calculations of binomial ideals. Hence the interaction with \texttt{singular} (\cite{Singular}), \texttt{Normaliz} (\cite{normaliz}) and \texttt{4ti2} (\cite{4ti2}) was a step forward for us. For \texttt{singular} there are several options to consider: \cite{singular-gap}, \cite{GradedModules} and SingularInterface \url{https://github.com/gap-system/SingularInterface}. As for \texttt{4ti2}, we can use \cite{4ti2Interface} and \cite{4ti2gap}, which is under development. Finally there is an interface for \texttt{Normaliz} that can be found in \url{https://github.com/fingolfin/NormalizInterface}. We have implemented different methods for each procedure depending on which of the above packages the user has loaded/installed.
\end{document} |
\begin{document}
\title[GM varieties with many symmetries]{
Gushel--Mukai varieties with many symmetries and an explicit irrational Gushel--Mukai threefold}
\author[O.\ Debarre]{Olivier Debarre}
\thanks{This project has received funding from the European
Research Council (ERC) under the European
Union's Horizon 2020 research and innovation
programme (Project HyperK --- grant agreement 854361).}
\address{Universit\'e de Paris, CNRS,
IMJ-PRG, F-75013 Paris, France}
\email{{\tt [email protected]}}
\author[G.\ Mongardi]{Giovanni Mongardi}
\address{Dipartimento di Matematica, Universit\`a degli studi di Bologna, Piazza Di Porta San Donato 5, Bologna, Italia 40126}
\email{{\tt [email protected]}}
\date{\today}
\subjclass[2020]{14E08,
14J45, 14J42,
14J30, 14J35, 14J40, 14J50, 14K22, 14K30,
14C25,
14H52, 14J70
}
\keywords{Fano varieties, Gushel--Mukai varieties, hyperk\"ahler varieties,
EPW sextics, automorphisms, rationality, intermediate Jacobians, abelian varieties with complex multiplication.
}
\begin{abstract}
We construct an explicit complex smooth Fano threefold with Picard number 1, index 1, and degree 10 (also known as a Gushel--Mukai threefold) and prove that it is not rational by showing that its intermediate Jacobian has a faithful ${\bf P}SL(2,{\bf F}_{11}) $-action.\ Along the way, we construct Gushel--Mukai varieties of various dimensions with rather large (finite) automorphism groups.\
The starting point of all these constructions is an Eisenbud--Popescu--Walter sextic with a faithful ${\bf P}SL(2,{\bf F}_{11}) $-action discovered by the second author in 2013.
\end{abstract}
\maketitle
{\it To Fabrizio Catanese, on the occasion of his 70+1st birthday}
\section{Introduction}
The problem of the rationality of complex unirational smooth Fano threefolds has now been solved in most cases but there are still some unanswered questions.\ For example, Beauville established in \cite[Theorem~5.6(ii)]{bea1}, by a degeneration argument using the Clemens--Griffiths criterion, that a \emph{general} Fano threefold with Picard number 1, index 1, and degree 10 (also known as a Gushel--Mukai, or GM, threefold) is irrational, but not a single smooth example was known, although it is expected that all of these Fano threefolds are irrational.\ One of the main results of this article is the construction of a complete 2-dimensional family of such examples (Corollary~\ref{coro52}), including one such threefold defined (over~${\bf Q}$) by explicit equations (Section~\ref{se23}, Corollary~\ref{coro53}).
Our starting point was a remarkable EPW (for Eisenbud--Popescu--Walter) sextic hypersurface $Y_{\mathbb A}\subset {\bf P}^5$, constructed in \cite{monphd}, with a faithful action by the
simple group $\mathbb{G}:={\bf P}SL(2,{\bf F}_{11}) $ of order $660$ (Section~\ref{sect32}).\ We prove that the automorphism group of~$Y_{\mathbb A}$ is exactly~$\mathbb{G}$ (Proposition~\ref{prop:all_autom_A}) and that it is the only quasi-smooth EPW sextic with an automorphism of order $11$ (Theorem~\ref{th47}).\
From this sextic, one can construct GM varieties of various dimensions with exotic properties.\ Using \cite{dkeven}, we obtain for example families of GM varieties of dimensions $4$ or~$6$ with middle-degree Hodge groups of maximal rank~22 (Section~\ref{sect46}).\
Another application is the construction of GM varieties with large (finite) automorphism groups.\ The foremost example is
a GM fivefold $X^5_{\mathbb A}$ with automorphism group $\mathbb{G}$ (Corollary~\ref{cor48}(2)) but we also construct GM varieties of various dimensions with automorphism groups ${\bf Z}/11{\bf Z}$, $D_{12}$, ${\bf Z}/6{\bf Z}$, ${\bf Z}/3{\bf Z}$, $D_{10}$, ${\bf Z}/5{\bf Z}$, $\mathfrak A_4$, $({\bf Z}/2{\bf Z})^2$, or ${\bf Z}/2{\bf Z}$ (Table~\ref{tabaut}).\
By \cite{dkij}, the intermediate Jacobians of the GM varieties of dimension $3$ or $5$ obtained from the sextic $Y_{\mathbb A}$ are all isomorphic to a fixed principally polarized abelian variety\ $({\mathbb{J}},\theta)$ of dimension~$10$.\ This applies in particular to $X^5_{\mathbb A}$, and the $\mathbb{G}$-action on
$X^5_{\mathbb A}$ induces a faithful $\mathbb{G}$-action on $({\mathbb{J}},\theta)$.\ We use this fact
to prove that the GM threefolds that we construct from $Y_{\mathbb A}$ are not rational: by the Clemens--Griffiths criterion (\cite[Corollary~3.26]{cg}), it suffices to prove that their (common) intermediate Jacobian $({\mathbb{J}},\theta)$ is not a product of Jacobians of curves.\ For this, we
follow \cite{bea2,bea3} and use the fact that $({\mathbb{J}},\theta)$ has ``too many automorphisms'' (because of the $\mathbb{G}$-action).\ Note that the GM threefolds themselves may have no nontrivial automorphisms.\ This is how we produce a complete 2-dimensional family of irrational GM threefolds, all mutually birationally isomorphic.
The 10-dimensional principally polarized abelian variety\ $({\mathbb{J}},\theta)$ seems an interesting object of study.\ The 10-dimensional complex representation attached to the $\mathbb{G}$-action is irreducible and defined over ${\bf Q}$.\ This implies that $({\mathbb{J}},\theta)$ is indecomposable and isogeneous to the product of $10$ copies of an elliptic curve (Propositions~\ref{prop61} and~\ref{prop62}).\ We conjecture, but were unable to prove, that~$({\mathbb{J}},\theta)$ is isomorphic to an explicit 10-dimensional principally polarized abelian variety\ that we construct in Proposition~\ref{prop63}.\
The situation is reminiscent of that of the Klein cubic threefold $W\subset{\bf P}^4$: Klein proved in~\cite{kle} that $W$ has a faithful linear $\mathbb{G}$-action; one hundred years later, Adler proved in \cite{adl} that the automorphism group of $W$ is exactly~$\mathbb{G}$ and Roulleau showed in \cite{rou} that $W$ is the only smooth cubic
threefold with an automorphism of order 11.\ The intermediate Jacobian of~$W$ is a principally polarized abelian variety\ of dimension $5$ isomorphic to the product of $5$ copies of an elliptic curve with complex multiplication and Adler proved in \cite{adls} that it is the only abelian variety\ of dimension 5 with a faithful action of $\mathbb{G}$.\ This is the reason why we call our sextic $Y_{\mathbb A}$ the Klein EPW sextic.\ We also refer to \cite{cks} for the construction of a one-dimensional family of threefolds with $\mathfrak S_6$-actions whose intermediate Jacobians are isogeneous to the product of $5$ copies of varying elliptic curves (\cite[Remark~4.5]{cks}).
Our proofs heavily use the construction by O'Grady in~\cite{og7} of canonical double covers of quasi-smooth EPW sextics called double EPW sextics (see also \cite{dkcovers}).\ They are smooth {hyperk\"ahler}\ fourfolds whose automorphisms may, thanks to Verbitsky's Torelli Theorem, be determined using lattice theory.\ We also use the close relationship between EPW sextics and GM varieties developed in \cite{im,dkclass,dkeven,dkmoduli,dkij} and surveyed in \cite{debsur}.
The article is organized as follows.\ In Section~\ref{sect2}, we recall basic facts about EPW sextics and GM varieties.\ In Section~\ref{se3}, we describe explicitly the {Klein}\ Lagrangian ${\mathbb A}$ and the Klein EPW sextic~$Y_{\mathbb A}$, and we prove that the EPW sextic $Y_{\mathbb A}$ is quasi-smooth.\ In Section~\ref{sect4}, we prove that the automorphism group of $Y_{\mathbb A}$ is $\mathbb{G}$; we also prove that $Y_{\mathbb A}$ is the only quasi-smooth EPW sextic with an automorphism of order~$11$.\ We also discuss the possible automorphism groups and some Hodge groups of the various GM varieties that can be constructed from the Lagrangian~${\mathbb A}$.\
In Section~\ref{sect5}, we introduce the important surface~$\widetilde{Y}_{\mathbb A}^{\ge 2}$ (a double \'etale cover of the singular locus of $Y_{\mathbb A}$) and its Albanese variety $({\mathbb{J}},\theta)$.\ We prove our irrationality results for GM threefolds and discuss the structure of the 10-dimensional principally polarized abelian variety\
$({\mathbb{J}},\theta)$.\
The rest of the article consists of appendices.\ In the long
Appendix~\ref{appC}, we gather old and new general results on automorphisms of double EPW sextics and of double EPW surfaces.\
Appendix~\ref{sea2} recalls a few classical facts about representations of the group $\mathbb{G}$.\ Appendix~\ref{b2} discusses decomposition results for abelian varieties with automorphisms.
\noindent{\bf Notation.} Let $m$ be a positive integer; throughout this article, $V_m$ denotes a complex vector space of dimension~$m$ and we set
$\zeta_m:=e^{\frac{2\pi i}{m}}$.\ As we did above, we denote by
$\mathbb{G}$ the simple group ${\bf P}SL(2,{\bf F}_{11}) $
of order $660$.\
\noindent{\bf Acknowledgements.} We would like to thank B.~Gross, G.~Nebe, D.~Prasad, Yu.~Prokhorov, and O.~Wittenberg for fruitful exchanges.\ Special thanks go to A.~Kuznetsov, whose numerous comments and suggestions helped improve the exposition and the results of this article; in particular, Propositions~\ref{split1} and~\ref{split2} are his.
\section{Eisenbud--Popescu--Walter sextics and Gushel--Mukai varieties}\label{sect2}
We recall in this section a few basic facts about Eisenbud--Popescu--Walter (or EPW for short) sextics and Gushel--Mukai (or GM for short) varieties.
\subsection{EPW sextics and their automorphisms}\label{se1}
Let $V_6$ be a $6$-dimensional complex vector space.\ We endow $\bw3V_6$ with the $\bw6V_6$-valued symplectic form defined by wedge product.\
Given a Lagrangian subspace $A\subset \bw3V_6$ and a nonnegative integer $\ell$, one defines (see \cite[Section 2]{og1} or \cite[Appendix~B]{dkclass}) in ${\bf P}(V_6)$ the closed subschemes
\begin{equation*}\label{yabot}
Y_A^{\ge \ell}:=\bigl\{[x]\in{\bf P}(V_6) \mid \dim\bigl(A\cap (x \wedge\bw{2}{V_6} )\bigr)\ge \ell\bigr\}
\end{equation*}
and the locally closed subschemes
\begin{equation*}\label{yaell}
Y_A^\ell :=\bigl\{[x]\in{\bf P}(V_6) \mid \dim\bigl(A\cap (x \wedge\bw{2}{V_6} )\bigr)= \ell\bigr\} = Y_A^{\ge \ell} \smallsetminus Y_A^{\ge \ell + 1}.
\end{equation*}
We henceforth assume that
$A$ contains no decomposable vectors (that is, no nonzero products $x\wedge y\wedge z$).\ The scheme $Y_A:=Y_A^{\ge 1}$ is then an integral sextic hypersurface (called an {\em EPW sextic}) whose singular locus is the integral surface $Y_A^{\ge 2}$; the singular locus of that surface is the finite set $Y_A^{\ge 3}$ (see \cite[Theorem~B.2]{dkclass}) which is empty for $A$ general.\
One
has moreover (\cite[Proposition~B.9]{dkclass})
\begin{equation}\label{autya}
{\mathbb A}ut(Y_A)=\{ g\in {\bf P}GL(V_6)\mid (\bw3g)(A)=A\}
\end{equation}
and this group is finite.
\subsection{GM varieties and their automorphisms}\label{se22n}
A (smooth ordinary) GM variety of dimension $n\in\{3,4,5\}$ is the smooth complete intersection, in ${\bf P}(\bw2V_5)$, of the Grassmannian~$\mathbb{G}r(2,V_5)$ in its Pl\"ucker embedding, a linear space ${\bf P}^{n+4}$, and a quadric.\ It is a Fano variety with Picard number~$1$, index~$n-2$, and degree~$10$.
There is a bijection between the set of isomorphism classes of (smooth ordinary) GM varieties~$X$ of dimension $n$ and the set of isomorphism classes of triples $(V_6,V_5,A)$, where $A\subset\bw3 V_6$ is a Lagrangian subspace with no decomposable vectors and $V_5\subset V_6$ is a hyperplane such that
\begin{equation}\label{yperp}
\dim (A\cap \bw3V_5)=5-n
\end{equation}
(this bijection was first described in the proof of~\cite[Proposition~2.1]{im} when $n=5$; for the general case, see \cite[Theorem~3.10 and Proposition~3.13(c)]{dkclass} or~\cite[(2)]{debsur}).
By \cite[Lemma~2.29 and Corollary~3.11]{dkclass}, we have
\begin{equation}\label{autxa}
{\mathbb A}ut(X)\simeq \{ g\in {\mathbb A}ut(Y_A)\mid g(V_5)=V_5\}.
\end{equation}
\section{The {Klein}\ Lagrangian}\label{se3}
The following construction of an EPW sextic
with a faithful $\mathbb{G}$-action first appeared in \cite[Example~4.5.2]{monphd}.\
\subsection{The {Klein}\ Lagrangian ${\mathbb A}$ and the GM fivefold $X_{\mathbb A}^5$}\label{se31}
Let $\xi\colon\mathbb{G}\to\mathbb{G}L(V_\xi)$ be the irreducible
representation of~$ \mathbb{G}$ of dimension 5 described in Appendix~\ref{sea2}.\ From the existence of a unique (up to multiplication by a nonzero scalar) $\mathbb{G}$-equivariant
symmetric isomorphism
\begin{equation}\label{defw}
w\colon \bw2V_\xi\simeqlra \bw2V_\xi^\vee
\end{equation}
as in~\eqref{defu}, we infer that there is a unique $\mathbb{G}$-invariant quadric
\begin{equation}\label{defq}
Q \subset {\bf P}(\bw2V_\xi)
\end{equation}
and that it is smooth.\ Since its equation does not lie in the image of the $\mathbb{G}$-equivariant morphism
$$V_\xi\simeq \bw4V_\xi^\vee \ensuremath{\lhook\joinrel\relbar\joinrel\rightarrow} \Sym^2(\bw2V_\xi^\vee),
$$
which is the space of Pl\"ucker quadrics, the quadric ${\bf Q}Q$ does not contain
the Grassmannian~$\mathbb{G}r(2,V_\xi)$.\ Therefore, it defines a GM fivefold
\begin{equation}\label{defx}
X_{\mathbb A}^5:=Q\cap \mathbb{G}r(2,V_\xi)
\end{equation}
with a faithful $\mathbb{G}$-action (we will show below that $X_{\mathbb A}^5$ is smooth).\
The group $\mathbb{G}$ being simple nonabelian, the representation $\bw5\xi$ is trivial.\ The isomorphism~$w$ from~\eqref{defw} therefore induces an isomorphism of representations
\begin{equation}\label{defv}
v\colon \bw2V_\xi\simeqlra \bw2V_\xi^\vee \otimes \bw5V_\xi \simeqlra \bw3 V_\xi.
\end{equation}
Since $w$ is symmetric, $v$ satisfies
$v(x)\wedge y=x\wedge v(y)$ for all $x,y\in \bw2V_\xi$.\
Let $\chi_0\colon \mathbb{G}\to V_{\chi_0}$ be the trivial representation and consider the $\mathbb{G}$-representation
$$V_6:=V_{\chi_0}\oplus V_\xi.$$
The decomposition of~$\bw3V_6$ into irreducible $\mathbb{G}$-representations is
\begin{equation}\label{deco}
\bw3V_6=( V_{\chi_0}\wedge \bw2V_\xi)\oplus \bw3V_\xi
\end{equation}
and, if $e_0$ is a generator of $V_{\chi_0}$, the Lagrangian subspace ${\mathbb A}\subset \bw3V_6$ associated with the GM fivefold $X^5_{\mathbb A}$ according to the general procedure outlined in Section~\ref{se22n}
is the graph
$${\mathbb A}:=\{ e_0\wedge x+ v(x)\mid x\in \bw2V_\xi\}$$
of $v$.\
Conversely, $X^5_{\mathbb A}$ is the GM fivefold associated with the Lagrangian ${\mathbb A}$ and the hyperplane \mbox{$V_\xi\subset V_6$} (referring to~\eqref{yperp}, note that ${\mathbb A}\cap \bw3V_\xi=0$).
We will use the following notation.\ Let $c$ and $a$ be the elements of $\mathbb{G}$ defined in Appendix~\ref{sea2}
and let $(e_1,\dots,e_5)$ be a basis of $V_\xi$ in which $\xi(c)$ and $\xi(a)$ have matrices as in~\eqref{real}.\ Let $(e^\vee_1,\dots,e^\vee_5)$ be the dual basis of~$V_\xi^\vee$.\ We also set
$e_{i_1\cdots i_r}=e_{i_1}\wedge \dots \wedge e_{i_r}\in \bw{r}V_6$.
\begin{prop}\label{prop:GM5_smooth}
The GM fivefold $X^5_{\mathbb A}$ is smooth and the Lagrangian subspace ${\mathbb A}$ contains no decomposable vectors.
\end{prop}
\begin{proof}
The basis $(e_{ij})_{1\le i<j\le 5}$ of $\bw{2}V_\xi$ consists of eigenvectors of $\bw{2}\xi(c)$, with eigenvalues all the primitive $11^{\textnormal{th}}$ roots of $1$, and similarly for the dual basis $(e_{ij}^\vee)_{1\le i<j\le 5}$ of $\bw{2}V_\xi^\vee$.\
Looking at the corresponding eigenvalues, we see that we may normalize the isomorphism $w$ in~\eqref{defw} so that it satisfies $w(e_{12})=-e_{13}^\vee$ (both are eigenvectors of $\bw{2}\xi(c)$ with eigenvalue~$\zeta_{11}^{5}$).\ Applying $\bw{2}\xi(a)$, we find
$$
w(e_{12})=-e_{13}^\vee,\ w(e_{23})=-e_{24}^\vee,\ w(e_{34})=-e_{35}^\vee,\ w(e_{45})=e_{14}^\vee,\ w(e_{15})=-e_{25}^\vee.
$$
Since $w$ is symmetric, we also have
$$
w(e_{13})= -e_{12}^\vee,\ w(e_{24})=- e_{23}^\vee,\ w(e_{35})=- e_{34}^\vee,\ w(e_{14})= e_{45}^\vee,\ w(e_{25})= -e_{15}^\vee.$$
The quadric $Q$ from~\eqref{defq} is therefore defined by
\begin{equation}\label{eqQ}
x_{12}x_{13}+x_{23}x_{24}+x_{34}x_{35}-x_{45}x_{14}+x_{15}x_{25}=0.
\end{equation}
A computer check with \cite{m2}
now ensures that the GM fivefold $X^5_{\mathbb A}$ defined by~\eqref{defx} is smooth.\ It follows from \cite[Theorem~3.16]{dkclass} that ${\mathbb A}$ contains no decomposable vectors.
\end{proof}
The group $\mathbb{G}$ acts faithfully on the GM fivefold $X^5_{\mathbb A}$.\ Using
the isomorphism~\eqref{autxa}, we see that it also acts faithfully on the EPW sextic~$Y_{\mathbb A}$ by linear automorphisms that fix the hyperplane $V_\xi$.\ More precisely, the representation $\chi_0\oplus \xi\colon \mathbb{G}\hookrightarrow \mathbb{G}L(V_6)$ induces an embedding
$ \mathbb{G} \hookrightarrow {\mathbb A}ut(Y_{\mathbb A})\subset {\bf P}GL(V_6)$.\
We will prove in Proposition \ref{prop:all_autom_A} that the embedding
$ \mathbb{G} \hookrightarrow {\mathbb A}ut(Y_{\mathbb A})$ is in fact an isomorphism.
\subsection{Explicit equations}\label{sect32}
As we saw in the proof of Proposition~\ref{prop:GM5_smooth}, and with the notation of that proof, the isomorphism $v\colon \bw2V_\xi\simeqto \bw3 V_\xi$ from~\eqref{defv} may be defined by
\begin{equation}\label{v2}
\begin{aligned}
v(e_{12})=e_{245},\ v(e_{23})=e_{135},\ v(e_{34})&=e_{124},\ v(e_{45})=e_{235},\ v(e_{15})=-e_{134},\\
v(e_{13})= -e_{345},\
v(e_{24})= -e_{145},\ v(e_{35})&=- e_{125},\ v(e_{14})= e_{123},\ v(e_{25})=e_{234}.
\end{aligned}
\end{equation}
This gives
\begin{equation}\label{defA}
\begin{aligned}
{\mathbb A}= \langle &
e_{012}+ e_{245},
e_{013} - e_{345},
e_{014} + e_{123},
e_{015} - e_{134},
e_{023} + e_{135},\\
&\qquad e_{024}- e_{145} ,
e_{025} + e_{234} ,
e_{034}+ e_{124} ,
e_{035}- e_{125} ,
e_{045}+ e_{235} \rangle.
\end{aligned}
\end{equation}
One can readily see from this that the isomorphism $V_6\simeqto V_6^\vee$ that sends $e_0$ to $-e_0^\vee$ and $e_j$ to~$e_j^\vee$ for $j\in\{1,\dots,5\}$ maps ${\mathbb A}$ onto its orthogonal ${\mathbb A}^\bot$, a Lagrangian subspace of $\bw3V_6^\vee$; we say that ${\mathbb A}$ is {\em self-dual.}\
Also, if one starts from the dual representation~$\xi^\vee$, one obtains the same Lagrangian~${\mathbb A}$.
\begin{prop}\label{yaqs}
The EPW sextic~$Y_{\mathbb A}$ is defined by the equation
\begin{equation}\label{sextic_equation}
\begin{aligned}
&x_0^6+ 2x_0^3(x_1x_3^2+x_2x_4^2+x_3x_5^2+x_4x_1^2+x_5x_2^2)-4x_0(x_1^3x_2^2+ x_2^3x_3^2+ x_3^3x_4^2+x_4^3x_5^2+x_5^3x_1^2)\\
&{}+4x_0(x_1x_3x_4^3+x_2x_4x_5^3+x_3x_5x_1^3+ x_4x_1x_2^3 + x_5x_2x_3^3)-12x_0x_1x_2x_3x_4x_5\\
&{}+x_1^2x_3^4+x_2^2x_4^4 +x_3^2x_5^4+x_4^2x_1^4+x_5^2x_2^4 -4(x_1x_4x_5^4+x_2x_5x_1^4+x_3x_1x_2^4+x_4x_2x_3^4+x_5x_3x_4^4) \\
&{}-2(x_1x_3^3x_5^2+x_2x_4^3x_1^2+x_3x_5^3x_2^2+x_4x_1^3x_3^2+x_5x_2^3x_4^2)\\
&{}+6(x_1x_2x_3^2x_4^2+x_2x_3x_4^2x_5^2+x_3x_4x_5^2x_1^2+x_4x_5 x_1^2x_2^2 + x_5x_1x_2^2x_3^2)=0
\end{aligned}
\end{equation}
in ${\bf P}(V_6)$.\
The scheme $ Y^{\ge 2}_{\mathbb A}$ is a smooth irreducible surface, so that the scheme $Y^{\ge 3}_{\mathbb A}$ is empty.\
\end{prop}
\begin{proof} The scheme $Y_{\mathbb A}$ is the locus in ${\bf P}(V_6)$ where the map
$$ x\wedge \bw2 V_6\longrightarrow \bw3 V_6/{\mathbb A}$$
drops rank.\ In the decomposition~\eqref{deco}, the second summand is transverse to ${\mathbb A}$ and we can identify $\bw3 V_6/{\mathbb A}$ with $\bw3 V_\xi$.\ Moreover, in the affine open subset $U_0$ of ${\bf P}(V_6)$ defined by $x_0\neq 0$, one has $x\wedge \bw2 V_6=x\wedge \bw2 V_\xi$.\ In $U_0$, the scheme $Y_{\mathbb A}$ is therefore the locus where the map
$$ x\wedge \bw2 V_\xi\longrightarrow \bw3 V_\xi\xrightarrow{\ v^{-1}\ } \bw2 V_\xi$$
drops rank.\ Concretely, if $x=e_0+x_1e_1+\dots+x_5e_5$, we see, using~\eqref{defA} and~\eqref{v2}, that it maps
\begin{equation*}
\begin{aligned}
e_{12}&\longmapsto x\wedge e_{12}=e_{012}+x_3e_{123}+x_4e_{124}+x_5e_{125}\\
&\longmapsto -e_{245}+x_3e_{123}+x_4e_{124}+x_5e_{125}\\
&\longmapsto -e_{12}+x_3e_{14}+x_4e_{34}-x_5e_{35}.
\end{aligned}
\end{equation*}
All in all, using the basis $(e_{12},e_{13},e_{14},e_{15},e_{23},e_{24},e_{25},e_{34},e_{35},e_{45})$ of $\bw2V_\xi$, one sees that $Y_{\mathbb A}\cap U_0$ is defined as the determinant of the $10\times 10$ matrix
\begin{equation*}\label{matrixA}
\left(
\begin{smallmatrix}
-1&0&0&0&0&x_5&-x_4&0&0&x_2\\
0&-1&0&0&0&0&0&-x_5&x_4&-x_3\\
x_3&-x_2&-1&0&x_1&0&0&0&0&0\\
0&-x_4&x_3&-1&0&0&0&-x_1&0&0\\
0&x_5&0&-x_3&-1&0&0&0&x_1&0\\
0&0&-x_5&x_4&0&-1&0&0&0&-x_1\\
0&0&0&0&x_4&-x_3&-1&x_2&0&0\\
x_4&0&-x_2&0&0&x_1&0&-1&0&0\\
-x_5&0&0&x_2&0&0&-x_1&0&-1&0\\
0&0&0&0&x_5&0&-x_3&0&x_2&-1
\end{smallmatrix}\right).
\end{equation*}
We obtain the equation~\eqref{sextic_equation} by homogenizing this determinant, computed with Macaulay2 (\cite{m2}).\ We then check with Macaulay2 that $\Sing(Y_{\mathbb A})$ is a smooth surface (this reproves that ${\mathbb A}$ contains no decomposable vectors and proves in addition that $Y^{\ge 3}_{\mathbb A}$ is empty).
\end{proof}
\subsection{The GM threefold $X_{\mathbb A}^3$}\label{se23}
We keep the notation above.\ By Proposition \ref{yaqs}, $Y^{\ge 3}_{\mathbb A} $ is empty and, since ${\mathbb A}$ is self-dual, so is $Y^{\ge 3}_{{\mathbb A}^\bot}$.\ For all hyperplanes $V_5\subset V_6$, we thus have
\begin{equation}\label{y3vide}
\dim ({\mathbb A}\cap \bw3V_5)\le 2.
\end{equation}
Consider the hyperplane $V_5\subset V_6$ spanned by $e_0,\dots,e_4$.\
From the description~\eqref{defA}, one sees that there is an inclusion
\begin{equation*}
\langle e_{014} + e_{123}, e_{034}+ e_{124}\rangle \subset {\mathbb A}\cap \bw3 V_5
\end{equation*}
of vector spaces which, because of the inequality~\eqref{y3vide}, is an equality.\ The associated
GM variety is therefore smooth of dimension~$3$ (see Section~\ref{se22n}).\ Using the automorphism $\xi(a)$ of $V_6$ that permutes the vectors $e_1,\dots, e_5$, we see that we get isomorphic GM threefolds if we start from hyperplanes spanned by $e_0$ and any four vectors among $e_1,\dots, e_5$.\ We denote it by~$X^3_{\mathbb A}$.
Going through the procedure mentioned in Section~\ref{se22n}, A.~Kuznetsov found that $X^3_{\mathbb A}$ is the intersection, in ${\bf P}(\bw2 V_5)$, of the Grassmannian $\mathbb{G}r(2,V_5)$, the linear space ${\bf P}^7$ with equations
$$
x_{03} + x_{12} = x_{04} - x_{23} = 0,
$$
and the quadric with equation
$$
x_{01}x_{02} - x_{13}x_{14} - x_{24}x_{34} = 0.
$$
\section{EPW sextics and GM varieties with many automorphisms}\label{sect4}
As in Section~\ref{se1}, let $V_6$ be a $6$-dimensional complex vector space and let $A\subset \bw3V_6$ be a Lagrangian subspace with no decomposable vectors.\ It defines an integral EPW sextic $Y_A\subset {\bf P}(V_6)$.\ As explained in more detail in Appendix~\ref{se41}, there is a canonical double covering $\pi_A\colon \widetilde{Y}_A\to Y_A
$ and, when $Y^{\ge 3}_A=\varnothing$, the fourfold $\widetilde{Y}_A$ is a smooth {hyperk\"ahler}\ variety of K3$^{[2]}$-type.\
\subsection{Automorphisms of the EPW sextic $Y_{\mathbb A}$}\label{sec43}
We constructed at the end of Section~\ref{se31} an injection $\mathbb{G}\hookrightarrow {\mathbb A}ut(Y_{\mathbb A})$.\ It follows from Proposition~\ref{yaqs} that the double EPW sextic $\widetilde{Y}_{\mathbb A}$ is smooth and, by Proposition~\ref{split1}, the group ${\mathbb A}ut(Y_{\mathbb A})$ is isomorphic to the group~${\mathbb A}ut_H^s(\widetilde{Y}_{\mathbb A}) $ of symplectic isomorphisms of $\widetilde{Y}_{\mathbb A}$ that preserve the polarization $H$.\
\begin{prop}\label{prop:all_autom_A}
The automorphism group of the Klein EPW sextic $Y_{\mathbb A}$ is isomorphic to $\mathbb{G}$.
\end{prop}
\begin{proof}
It is enough to prove that $ {\mathbb A}ut^s_H(\widetilde{Y}_{\mathbb A}) $ is isomorphic to $\mathbb{G}$.\ Let $g\in{\mathbb A}ut^s_H(\widetilde{Y}_{\mathbb A})$.\
It acts on the orthogonal of $H$ in ${\bf P}ic(\widetilde{Y}_{\mathbb A} )$ which, by Corollary \ref{th14}, is the rank-20 lattice~${\mathsf S} $ discussed in Section~\ref{secc1} and the action is faithful.\ Let us prove that $g$ acts trivially on the discriminant group~$\Disc({\mathsf S})$.\
By Corollary~\ref{th14}, the lattice $H^\perp\simeq (-2)^{\oplus 2}\oplus E_8(-1)^{\oplus 2}\oplus U^{\oplus 2}\subset H^2(\widetilde{Y}_{\mathbb A},{\bf Z})$ (see~\eqref{defhperp}) primitively contains the lattices ${\bf T}r(\widetilde{Y}_{\mathbb A})\simeq (22)^{\oplus 2}$ and ${\mathsf S}$ and it is a finite extension of their direct sum.\ This extension is obtained by adding to ${\bf T}r(\widetilde{Y}_{\mathbb A})\oplus{\mathsf S}$ two elements $\frac{a_1+b_1}{11}$ and $\frac{a_2+b_2}{11}$, where~$a_1$ and~$a_2$ are orthogonal generators of ${\bf T}r(\widetilde{Y}_{\mathbb A})$ of square $22$, and $b_1$ and~$b_2$ are classes in~${\mathsf S}$ of divisibility 11.\ Since $g$ preserves $H^\perp$ and ${\bf T}r(\widetilde{Y}_{\mathbb A})$, it follows readily that $g(b_i)=b_i+11c_i$ for some $c_i\in{\mathsf S}$, which implies that $g$ acts trivially on $\Disc({\mathsf S})$, as claimed.
The proposition follows since, by \cite[Table 1, line 120]{HM}, the group of isometries of ${\mathsf S}$ that act trivially on $\Disc({\mathsf S})$ coincides with $\mathbb{G}$.\
\end{proof}
\subsection{GM varieties with many symmetries}\label{sec42n}
Proposition~\ref{prop:all_autom_A} can be used to determine the automorphism groups of the GM varieties constructed from the Lagrangian ${\mathbb A}$, and in particular the varieties~$X^5_{\mathbb A}$ and $X^3_{\mathbb A}$ defined in Sections~\ref{se31} and~\ref{se23}.\ By~\eqref{autxa}, all we have to do is determine the stabilizers of hyperplanes in $V_6$ under the $\mathbb{G}$-action.\ Since this action is conjugate to its dual, we might as well determine the stabilizers of lines in $V_6={\bf C} e_0\oplus V_\xi$.\ We proceed in three steps:
\begin{itemize}
\item determine the various fixed-point sets of all subgroups of $\mathbb{G}$, listed up to conjugacy in \cite[Figure~1]{bue};
\item compute the stabilizers of these fixed-points;
\item find in which stratum $Y_{\mathbb A}^\ell$ they lie.
\end{itemize}
A first useful remark is the following: {\em if $g\in\mathbb{G}$ is a nontrivial element of odd order, the fixed-point set of $g$ in~$Y_{\mathbb A}$ is finite.}\ Indeed, we will see below by a case-by-case analysis that
the fixed-point set ${\bf F}ix(g)$ of $g$ in ${\bf P}(V_6)$ is a union of lines and isolated points.\ Assume that a line $\Delta\subset {\bf F}ix(g)$ is contained in $Y_{\mathbb A}$.\
By Proposition~\ref{split1}, $g$ lifts to a symplectic automorphism~$\tilde g$ of~$\widetilde{Y}_{\mathbb A}$ which commutes with its covering involution $\iota$.\ For any $x$ in the curve $ \pi_{\mathbb A}^{-1}(\Delta)\subset \widetilde{Y}_{\mathbb A}$, one has either $\tilde g(x)=x$ or $\tilde g(x)=\iota(x)$, hence $\tilde g^2(x)=x$.\ The curve
$ \pi_{\mathbb A}^{-1}(\Delta)\subset \widetilde{Y}_A$ is therefore contained in the fixed-point set of the nontrivial symplectic automorphism $\tilde g^2$.\ But this fixed-point set is, on the one hand, a disjoint union of surfaces and isolated points and, on the other hand, contained in $ \pi_{\mathbb A}^{-1}({\bf F}ix(g^2))$, whose dimension is at most $1$ (because~$g^2$ is again nontrivial of odd order), so we reach a contradiction.\ Moreover, $1$ is not an eigenvalue for the action of $g$ on the tangent space at a fixed-point, hence any line in ${\bf F}ix(g)$ meets $Y^1_{\mathbb A}$ and $Y^2_{\mathbb A}$ transversely.
Furthermore, since $g$ itself can be written as a square, we see that the fixed-point set of its symplectic lift $\tilde g$ (which has the same order) is the inverse image in $\widetilde{Y}_A$ of ${\bf F}ix(g)$.
Our second tool will be the Lefschetz topological fixed-point theorem for an automorphism~$g$ {\em with finite fixed-point set} on the regular surface $Y_{\mathbb A}^{ \ge2}$.\ This theorem reads
\begin{equation*}
\#({\bf F}ix(g)\cap Y_{\mathbb A}^{ \ge2})=\sum_{i=0}^4(-1)^i{\bf T}r (g^*\vert_{H^i(Y_{\mathbb A}^{ \ge2},{\bf Q})})=2+{\bf T}r (g^*\vert_{H^2(Y_{\mathbb A}^{ \ge2},{\bf Q})}).
\end{equation*}
The group $ \mathbb{G}$ acts on ${\mathbb A}$ (via the representation $\bw2 V_\xi$) and $Y_A^{ \ge2}$ and, by Proposition~\ref{propc7}, the isomorphism $H^2(Y_{\mathbb A}^{ \ge2},{\bf C})\simeq \bw2({\mathbb A}\oplus \bar {\mathbb A})$ from~\eqref{h1} is equivariant for these actions.\ Using the fact that the representation $\bw2 V_\xi$ is self-dual and the formula
$$\chi_{\sbw2(\sbw2 V_\xi\oplus \sbw2 V_\xi)}(g)
=2\chi_{\sbw2(\sbw2 V_\xi)}(g)+\chi_{ \sbw2V_\xi\otimes \sbw2V_\xi}(g)=2\chi_{ \sbw2 V_\xi}(g)^2-\chi_{ \sbw2V_\xi}(g^2),
$$
one can then compute the numbers of fixed points of $g$ in $Y_{\mathbb A}^{ \ge2}$ given in Table~\ref{tabf}.
The Lefschetz theorem was also used to the same effect in \cite[Section~6.2]{monphd} on {hyperk\"ahler}\ varieties of K3$^{[2]}$-type.\ It gives, for symplectic automorphisms of $\widetilde{Y}_{\mathbb A}$ of prime order, the number (when finite) of fixed-points on $\widetilde{Y}_{\mathbb A}$.\ By the remark made above, this is the number of fixed points on $Y^{\ge 2}_{\mathbb A}$ (which we get from Table~\ref{tabf}) plus twice the number of fixed points on~$Y^1_{\mathbb A}$.\ So we get from \cite[Section~6.2]{monphd} the following numbers (except for the information between parentheses (when~$g$ has order $2$ or $6$), which will be a consequence of the discussion below---where it will not be used).
\begin{table}[h]
\renewcommand\arraystretch{1.5}
\begin{tabular}{|c|c|c|c|c|c|}
\hline
order of $g$&$11$&$5$&$6$&$3$&$2$
\\
\hline
$\# ({\bf F}ix (g)\cap Y^{ \ge2}_{\mathbb A})$
&$5$&$2 $&$ 3$ &$3 $ & $(\dim 1)$
\\
\hline
$\# ({\bf F}ix (g)\cap Y_{\mathbb A})$
&$5$&$8 $&$(7) $&$ 15$& $(\dim 2)$
\\
\hline
\end{tabular}
\captionsetup{justification=centering}
\caption{Number (when finite) of fixed-points on\\ the surface $Y^{ \ge2}_{\mathbb A}$ and the fourfold $Y_{\mathbb A}$}\label{tabf}
\end{table}
We will see in the discussion below that these sets are in fact always finite, except when~$g$ has order 2.\ We can now go through the list of all subgroups of $\mathbb{G}$ from \cite[Figure~1]{bue} and determine their various fixed-point sets.\ We will use the notation and results of Appendix~\ref{sea2}.
\subsubsection{The subgroups $\mathbb{G}$ and $ {\bf Z}/11{\bf Z}\rtimes {\bf Z}/5{\bf Z}$}\label{sec421} The subgroups $ {\bf Z}/11{\bf Z}\rtimes {\bf Z}/5{\bf Z}$ of $\mathbb{G}$ are all conjugate to the subgroup generated by the elements $a$ and $c$ of $\mathbb{G}$.\
We see from~\eqref{real} that their only fixed-point is~$[e_0]$.\ It is on $Y_{\mathbb A}^0$ hence defines a GM fivefold,~$X^5_{\mathbb A}$, already defined in Section~\ref{se31}, with automorphism group $\mathbb{G}$.
\subsubsection{The subgroups $ {\bf Z}/11{\bf Z}$}\label{sec422} The subgroups $ {\bf Z}/11{\bf Z} $ of $\mathbb{G}$ are all conjugate to the subgroup generated by the element $c$ of $\mathbb{G}$.\
We see from~\eqref{real} that there are $6$ fixed-points: the point~$[e_0]$ (on~$Y_{\mathbb A}^0$) and 5 other points.\ For these $5$ points, which are all in the same~$\mathbb{G}$-orbit, the stabilizers are exactly $ {\bf Z}/11{\bf Z}$ (because the only nontrivial oversubgroups are $ {\bf Z}/11{\bf Z}\rtimes {\bf Z}/5{\bf Z}$ and $\mathbb{G}$).\ Furthermore, using Table~\ref{tabf}, one sees that they are in $Y_{\mathbb A}^2$ (this was already observed in Section~\ref{se23}).\
So we get isomorphic GM threefolds,~$X^3_{\mathbb A}$, already defined in Section~\ref{se23}, with automorphism groups~$ {\bf Z}/11{\bf Z}$.
\subsubsection{The subgroups $ {\bf Z}/3{\bf Z}$, $ {\bf Z}/6{\bf Z}$, and $D_{12}$}\label{sec423}
The elements of order 6 of $\mathbb{G}$ are all conjugate to the element $b$ of $\mathbb{G}$.\
Since its character in the representation $\xi$ is $1$, it acts on $V_\xi$ with eigenvalues $1,\zeta_6,\zeta_6^2,\zeta_6^4,\zeta^5_6$, for which we choose eigenvectors $w_0,w_1,w_2,w_4,w_5$.\ The fixed-point set of $b$ consists of the line~$\Delta_6=\langle [e_0],[w_0]\rangle$ and the $4$ isolated points $[w_1]$, $[w_2]$, $[w_4]$, $[w_5]$.\ Any involution $\tau$ in $\mathbb{G}$ that, together with~$b$, generates a dihedral group $D_{12}$, exchanges the eigenspaces corresponding to conjugate eigenvalues.\ Looking at the subgroup pattern of $\mathbb{G}$, one sees that the stabilizers of the $4$ isolated points are~$ {\bf Z}/6{\bf Z}$, whereas those of points of $\Delta_6\smallsetminus\{[e_0]\}$ are $D_{12}$ (a maximal proper subgroup).\
The fixed-point set of an element of $\mathbb{G}$ of order 3 (such as~$b^2$; they are all conjugate) is the union of~$\Delta_6$ and two other disjoint lines,~$\Delta_3=\langle [w_1],[w_4]\rangle$ and~$\Delta'_3=\tau(\Delta_3)=\langle [w_2],[w_5]\rangle$.\
The fixed-point set of the subgroup $D_{6}=\langle b^2,\tau\rangle$ is therefore the line $\Delta_6$.\
Consider now the isomorphism of representations $v\colon \bw2V_\xi\simeqto \bw3 V_\xi$ from~\eqref{defv}.\ Looking at the eigenspaces for the action of $b$, we see that we can write
$$
v(w_0\wedge w_2)=\alpha w_1\wedge w_2\wedge w_5
$$
for some $\alpha\in {\bf C}$.\ By definition of ${\mathbb A}$, this implies $w_2\wedge (e_0\wedge w_0-\alpha w_1\wedge w_5)\in {\mathbb A}$.\ Similarly, one can write
$$
v(w_2\wedge w_5)= \beta w_1\wedge w_2\wedge w_4+\gamma w_0\wedge w_2\wedge w_5,
$$
for some $\beta,\gamma\in {\bf C} $, so that $w_2\wedge (e_0\wedge w_5+\beta w_1 \wedge w_4+\gamma w_0 \wedge w_5)\in {\mathbb A}$.\ This proves that $[w_2]$ is in~$Y^{\ge2}_{\mathbb A}$, and so is
$[w_4]=\tau([w_2])$.
Consider the length-$18$ scheme ${\bf F}ix(g^2)\cap Y_{\mathbb A}=Y_{\mathbb A}\cap (\Delta_6\cup \Delta_3\cup \Delta'_3)$.\ We see from Table~\ref{tabf} that it has 15 points, 3 of them in $Y^{\ge2}_{\mathbb A}$ (hence nonreduced) and fixed by $g$, therefore $12$ of them in $Y^1_{\mathbb A} $ (reduced by the remark made above), none fixed by $g$.\
Since the set ${\bf F}ix(g^2)\cap Y^{\ge2}_{\mathbb A}$ is $\tau$-invariant and contains $[w_2]$ and $ [w_4]$,
and
$g$ acts as an involution with no fixed-points on the set ${\bf F}ix(g^2)\cap Y^1_{\mathbb A}\cap \Delta_3$, whose cardinality is thus even, we see that
each line $\Delta_6$, $ \Delta_3$, $ \Delta'_3$ contains a single point of $Y^{\ge2}_{\mathbb A}$ and $4$ points of~$Y^1_{\mathbb A}$; the points $[w_1]$ and $[w_5]$ are in $Y^{0}_{\mathbb A}$.\ In particular, the set $ {\bf F}ix (g)\cap Y_{\mathbb A}$ has 7 points, as claimed in Table~\ref{tabf}.
So altogether, we get GM varieties of dimensions $3$, $4$, or $5$, with automorphism groups ${\bf Z}/3{\bf Z}$, of dimensions $3$ or $5$ with automorphism groups~${\bf Z}/6{\bf Z}$, and of dimensions $3$ or $4$ with automorphism groups $D_{12}$, and we see that no GM varieties $X_{{\mathbb A},V_5}$ have
automorphism groups the dihedral group~$D_{6}$ or the alternating group~$\mathfrak A_5$.
\subsubsection{The subgroups $ {\bf Z}/5{\bf Z}$ and $D_{10}$} The subgroups $ {\bf Z}/5{\bf Z} $ of $\mathbb{G}$ are all conjugate to the subgroup generated by the element $a$ of $\mathbb{G}$.\
Since its character is $0$, it acts on $V_\xi$ with eigenvalues $1,\zeta_5,\zeta_5^2,\zeta_5^3,\zeta_5^4$.\ Its fixed-point set in ${\bf P}(V_6)$ therefore consists of a line~$\Delta_5$ passing through~$[e_0]$ and~$4$ isolated points.\ Any involution $\tau$ in $\mathbb{G}$ that, together with~$a$, generates a dihedral group~$D_{10}$, exchanges the eigenspaces corresponding to conjugate eigenvalues.\ Looking at the subgroup pattern of $\mathbb{G}$, one sees that the stabilizers of the $4$ isolated points are~$ {\bf Z}/5{\bf Z}$, whereas those of points of~$\Delta_5$ contain $D_{10}$.\ Since we saw above that $\mathfrak A_5$-stabilizers are not possible, the stabilizers are therefore ~$D_{10}$ for all points of $\Delta_5\smallsetminus\{[e_0]\}$.
Since $\# ({\bf F}ix (g)\cap Y_{\mathbb A})=8$ (Table~\ref{tabf}), one sees that the line $\Delta_5$ meets $Y_{\mathbb A}$ in only 4 points.\ Since $Y^1_{\mathbb A}\cap \Delta_5$ is reduced, at least one of them must be in~$Y_{\mathbb A}^{\ge2}$.\ Among the $4$ isolated fixed-points, the involution $\tau$ acts with no fixed-points on the set of those that are in $Y_{\mathbb A}^{\ge2}$, hence its cardinality is even.\ Since $\# ({\bf F}ix (g)\cap Y_{\mathbb A}^{\ge2})=2$ (Table~\ref{tabf}), the only possibility is that $\Delta_5$ contain~$2$ points in~$Y_{\mathbb A}^1$ and 2 points in~$Y_{\mathbb A}^2$, and the $4$ isolated points are in~$Y_{\mathbb A}^1$.\ So altogether, we get GM fourfolds with automorphism groups~${\bf Z}/5{\bf Z}$ and
GM varieties of dimensions 3, 4, or~$5$ with automorphism groups $D_{10}$.\
\subsubsection{The subgroups $ {\bf Z}/2{\bf Z}$, $ ({\bf Z}/2{\bf Z})^2$, and $\mathfrak A_4$} Since its character is $1$, any order-2 element $g$ of~$\mathbb{G}$ acts on $V_\xi$ with eigenvalues $1,1,1,-1,-1$.\ Its fixed-point set in ${\bf P}(V_6)$ therefore consists of the disjoint union of a 3-space ${\bf P}(V_4)$ passing through $[e_0]$ and a line $\Delta_2$.\ Double EPW sextics with a symplectic involution were studied in \cite[Theorem~5]{cam} and \cite[Theorem~6.2.3]{monphd}: they prove that the fixed-point set is always the union of a smooth K3 surface and $28$ isolated points.\ By \cite[Proposition~17]{cam} (which holds under some generality assumptions which are satisfied by~${\mathbb A}$ because it contains no decomposable vectors), we obtain:
\begin{itemize}
\item ${\bf F}ix(g)\cap Y_{\mathbb A}$ is the union of a smooth quadric $Q$ and a Kummer quartic $S$, both contained in ${\bf P}(V_4)$, and the 6 distinct points of $Y_{\mathbb A}\cap \Delta_2$;
\item ${\bf F}ix(g)\cap Y_{\mathbb A}^{\ge2}$ is contained in $E_2$ and is the disjoint union of the smooth curve $Q\cap S$ and the $16$ singular points of $S$.\end{itemize}
The fixed K3 surface in $\widetilde{Y}_{\mathbb A}$ mentioned above is a double cover of $Q$ branched along $Q\cap S$.\ The images in $Y_{\mathbb A}$ of the $28$ fixed-points are the $6$ points of $Y_{\mathbb A}\cap \Delta_2$ and the $16$ singular points of $S$.
The fixed-point set of any subgroup $ ({\bf Z}/2{\bf Z})^2$ of $\mathbb{G}$ is a plane~${\bf P}i_4$ passing through $[e_0]$ and~$3$ isolated points.\ This plane is contained in ${\bf P}(V_4)$ and contains the line $\Delta_6$ fixed by any $D_{12}$ containing $ ({\bf Z}/2{\bf Z})^2$.\ For points in ${\bf P}i_4\smallsetminus \Delta_6$ and the $3$ isolated points, the stabilizers are either $ ({\bf Z}/2{\bf Z})^2$ or~$\mathfrak A_4$.\
As an $\mathfrak A_4$-representation, $V_6$ splits as the direct sum of the $3$ characters (which span the plane~${\bf P}i_4$) and the one irreducible representation of dimension~$3$.\ It follows that the
fixed-point set of any
$\mathfrak A_4$ containing $ ({\bf Z}/2{\bf Z})^2$ has $3$ points (corresponding to the $3$ characters), all in ${\bf P}i_4$.\ One of them is~$[e_0]$ and the stabilizer of the other two is indeed $\mathfrak A_4$.
The plane ${\bf P}i_4$ meets $Y_{\mathbb A}$ along the union of the conic ${\bf P}i_4\cap Q$ and the quartic curve ${\bf P}i_4\cap S$.\ Since the $1$-dimensional part of ${\bf F}ix(g)\cap Y_A^{\ge2}$ is the smooth octic curve $Q\cap S$, its intersection with the plane~${\bf P}i_4$ is finite nonempty.\ So we get points in ${\bf P}i_4\smallsetminus \Delta_6$ (with stabilizers $({\bf Z}/2{\bf Z})^2$) in each of the strata.
Finally, the two fixed-points of~$\mathfrak A_4$ are not in $Y_{\mathbb A}$: if they were, we would obtain a point of $\widetilde{Y}_{\mathbb A}$ fixed by a symplectic action of $\mathfrak A_4$; however there are no representations of $\mathfrak A_4$ in $\Sp({\bf C}^4)$ without trivial summands, so there are no points in $\widetilde{Y}_{\mathbb A}$ fixed by $\mathfrak A_4$. Therefore, we only get GM varieties of dimension~$5$ with automorphism groups~$\mathfrak A_4$.
We sum up our results in a table:
\begin{table}[h]
\renewcommand\arraystretch{1.5}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
aut. groups&$\mathbb{G}$&${\bf Z}/11{\bf Z}$&$D_{12}$&${\bf Z}/6{\bf Z}$&${\bf Z}/3{\bf Z}$&$D_{10}$&${\bf Z}/5{\bf Z}$&$\mathfrak A_4$&$({\bf Z}/2{\bf Z})^2$&${\bf Z}/2{\bf Z}$&$\{1\}$
\\
\hline
$\dim(X_{{\mathbb A},V_5})$
&$5$& $3 $& $ 3$, $4$ & $ 3$, $5$ &$ 3$, $ 4$, $5$&$ 3$, $4$, $5$& $ 4$& $5$& $ 3$, $4$, $5$& $ 3$, $4$, $5$&$ 3$, $4$, $5$
\\
\hline
\end{tabular}
\captionsetup{justification=centering}
\caption{Possible automorphisms groups of (ordinary) GM varieties associated with the Lagrangian ${\mathbb A}$}
\label{tabaut}
\end{table}
\subsection{EPW sextics with an automorphism of order $11$}\label{sect44}
We use the injectivity of the period map~\eqref{defp} to characterize quasi-smooth EPW sextics with an automorphism of prime order at least~$11$.
\begin{theo}\label{th47}
The only quasi-smooth EPW sextic with an automorphism of prime order~$p\ge11$ is the EPW sextic $Y_{\mathbb A}$, and $p=11$.
\end{theo}
\begin{proof}
Let $Y_A$ be a quasi-smooth EPW sextic with an automorphism $g$ of prime order~$p\ge11$.\
By Proposition~\ref{split1}, $g$ lifts to a symplectic automorphism of the same order of the smooth double EPW sextic $\widetilde{Y}_A$ which fixes the polarization~$H$.\ By Corollary~\ref{th14}, the transcendental lattice ${\bf T}r(\widetilde{Y}_A)$ is isomorphic to the lattice $T:= (22)^{\oplus 2}$ and is primitively embedded in the lattice~$H^\bot$, with orthogonal complement isomorphic to ${\mathsf S}$.
\begin{lemm}
Any two primitive embeddings of $T$ into the lattice $h^\bot$ with orthogonal complements isomorphic to ${\mathsf S}$ differ by an isometry in~$\widetilde O(h^\bot)$.
\end{lemm}
\begin{proof}
According to~\cite[Proposition~1.5.1]{nik} (see also \cite[Proposition~2.7]{bcs}),
to primitively embed the lattice $T$ into the lattice $h^\bot$, one needs subgroups $K_T\subset \Disc(T)\simeq ({\bf Z}/22{\bf Z})^2$ and $K_{h^\bot}\subset \Disc(h^\bot)\simeq ({\bf Z}/2{\bf Z})^2$ and
an isometry $u\colon K_T\simeqto K_{h^\bot}$ for
the canonical ${\bf Q}/2{\bf Z}$-valued quadratic forms on these groups.\ The discriminant of the orthogonal complement is then \mbox{$22^2\cdot 2^2/{\bf C}ard(K_T)^2$.}\
In our case, we want this orthogonal complement to be ${\mathsf S}$, with discriminant group $({\bf Z}/11{\bf Z})^2$.\ The only choice is therefore to take $K_T$ to be the $2$-torsion part of $\Disc(T)$ and $K_{h^\bot}=\Disc(h^\bot)$.\ There are only two choices for $u$ and they correspond to switching the two factors of $({\bf Z}/2{\bf Z})^2$.\ Any two such embeddings $T\hookrightarrow h^\bot$ therefore differ by an isometry of $h^\bot$ and, upon composing with the involution of $h^\bot$ that switches the two $(-2)$-factors, we may assume that this isometry is in~$\widetilde O(h^\bot)$.
\end{proof}
If we fix any embedding $T\hookrightarrow h^\bot$ as in the lemma, the period of $\widetilde{Y}_A$ therefore belongs to the (uniquely defined) image in the quotient $ \widetilde O(h^\bot)\backslash \Omega_{h}$ of the set
$ {\bf P}(T\otimes {\bf C})\cap \Omega_h$.\ This set consists of two conjugate points,
one on each component of $\Omega_h$, hence they are mapped to the same point in the period domain $ \widetilde O(h^\bot)\backslash \Omega_{h}$.\ The theorem now follows from the injectivity of the polarized period map, which implies that~$\widetilde{Y}_A$ and $\widetilde{Y}_{\mathbb A}$ are isomorphic by an isomorphism that respects the polarizations.\ Since these polarizations define the double covers $\pi_A$ and $\pi_{\mathbb A}$, this isomorphism descends to an isomorphism between~$Y_A$ and $Y_{\mathbb A}$.
\end{proof}
\begin{coro}\label{cor48}
{\rm(1)} The only smooth double EPW sextic with a symplectic automorphism of prime order~$p\ge11$ fixing the polarization $H$ is the {Klein}\ double sextic $\widetilde{Y}_{\mathbb A}$, and $p=11$.
{\rm(2)} The only (smooth ordinary) GM varieties with an automorphism of prime order~$p\ge11$ are the GM varieties $X_{\mathbb A}^3$ and $X_{\mathbb A}^5$, and $p=11$.
\end{coro}
\begin{proof}
Part (1) is only a rephrasing of Theorem~\ref{th47}, using the isomorphism $ {\mathbb A}ut_H^s(\widetilde{Y}_A)\simeqto {\mathbb A}ut(Y_A)$ from Proposition~\ref{split1}.\ For part (2), let $X$ be a (smooth ordinary) GM variety with an automorphism of prime order~$p\ge11$ and let $A$ be an associated Lagrangian.\ By \eqref{autxa}, the quasi-smooth EPW sextic $Y_A$ also has an automorphism of order~$p$.\ It follows from Theorem~\ref{th47} that we can take $A={\mathbb A}$ and that $p=11$.\ The result now follows from Sections~\ref{sec421} and~\ref{sec422}.
\end{proof}
\subsection{GM varieties of dimensions 4 and 6 with many Hodge classes}\label{sect46}
GM sixfolds do not appear in the definition given in Section~\ref{se22n}.\ This is because they are {\em special} (as opposed to {\em ordinary}): they are double covers $\gamma\colon X\to \mathbb{G}r(2,V_5)$ branched along the smooth intersection of $\mathbb{G}r(2,V_5)$ with a quadric (a GM fivefold!).\ To the GM fivefold correspond a Lagrangian $A$ and a hyperplane $V_5\subset V_6$ such that $A\cap \bw3V_5=\{0\}$.\ When $X$ is a GM fourfold, we let
$\gamma\colon X\to \mathbb{G}r(2,V_5)$ be the inclusion (in both cases, $\gamma$ is called the Gushel map in \cite{dkclass}).
One can use the results of~\cite{dkeven} to construct explicit GM varieties $X$ of even dimensions~$2m\in\{4,6\}$ with groups $\Hdg^m(X):=H^{m,m}(X)\cap H^{2m}(X,{\bf Z})$ of Hodge classes of maximal rank $h^{m,m}(X)=22$ (\cite[Proposition~3.1]{dkeven}).\
The main ingredient is~\cite[Theorem~5.1]{dkeven}: there is an isomorphism
$$(H^{2m}(X,{\bf Z})_{00},\smile)\simeq (H^2(\widetilde{Y}_A,{\bf Z})_0,(-1)^{ m-1}q_{BB})
$$
of polarized Hodge structures, where
$$H^{2m}(X,{\bf Z})_{00}:=\gamma^*H^{2m}(\mathbb{G}r(2,V_5),{\bf Z})^\bot\subset H^{2m}(X,{\bf Z})$$ and $H^2(\widetilde{Y}_A,{\bf Z})_0$ is, in our previous notation, $H^\bot\subset H^2(\widetilde{Y}_A,{\bf Z})$.\
If we start from the Lagrangian ${\mathbb A}$ and any hyperplane $V_5\subset V_6$ that satisfies the condition $\dim(A\cap \bw3V_5)=3-m$, we obtain, by Corollary~\ref{th14}, a family (parametrized by the fourfold~$Y_{\mathbb A}^1 $ when $m=2$ and by the fivefold~${\bf P}(V_6)\smallsetminus Y_{\mathbb A}$ when $m=3$) of GM $2m$-folds $X$ that satisfy
$$
\begin{aligned}
\Hdg^m(X)( (-1)^{m-1})&\simeq \gamma^* H^{2m}(\mathbb{G}r(2,V_5),{\bf Z})( (-1)^{m-1})\oplus {\mathsf S}\\
&\simeq (2)^{\oplus 2}
\oplus {\mathsf S} \\
&\simeq (2)^{\oplus 2}\oplus E_8( -1)^{\oplus 2}\oplus \begin{pmatrix}
-2 &-1 \\
-1 & -6
\end{pmatrix}^{\oplus 2}\!\!\!\! ,
\end{aligned}$$
a rank-$ 22$ lattice, the maximal possible rank (the last isomorphism follows from the last isomorphism in the statement of Corollary~\ref{th14}).\ Indeed, $\Hdg^m(X)((-1)^{ m-1})$ contains the lattice on the right and the latter has no overlattices (its discriminant group has no nontrivial isotropic elements; see Section~\ref{secc1}).
\begin{rema}\upshape
Take $m=2$.\
The integral Hodge conjecture in degree $2$ for GM fourfolds was recently proved in \cite[Corollary~1.2]{per}.\ Therefore, we get a family (parametrized by the fourfold~$Y_{\mathbb A}^1 $) of GM fourfolds $X$ such that all classes in $\Hdg^2(X)$ are classes of algebraic cycles.
\end{rema}
\begin{exam}\upshape
Take $m=3$ and $V_5=V_\xi$.\ We get a GM sixfold $X^6_{\mathbb A}$ which can be defined
inside ${\bf P}({\bf C} e_{00}\oplus \bw2V_\xi)$ by the quadratic equation
\begin{equation*}
x_{00}^2=x_{12}x_{13}+x_{23}x_{24}+x_{34}x_{35}-x_{45}x_{14}+x_{15}x_{25}
\end{equation*}
(the right side is the equation~\eqref{eqQ} of the $\mathbb{G}$-invariant quadric ${\bf Q}Q\subset {\bf P}(\bw2V_\xi)$) and the Pl\"ucker quadrics in the $(x_{ij})_{1\le i<j\le 5}$ that define $\mathbb{G}r(2,V_\xi)$ in ${\bf P}(\bw{2}V_\xi)$.\ Since the equation of ${\bf Q}Q$ is $\mathbb{G}$-invariant, we see that ${\bf Z}/2{\bf Z}\times \mathbb{G}$ acts on ${\bf C} e_{00}\oplus \bw2V_\xi$ component-wise, and this group is~${\mathbb A}ut(X^6_{\mathbb A})$.
The integral Hodge conjecture in degree $3$ is not known in general for GM sixfolds $X$, but it was proved in \cite[Corollary~8.4]{per} that the cokernel $V^3(X)$ (the {\em Voisin group}) of the cycle map
$${\bf C}H^3(X)\longrightarrow \Hdg^3(X)
$$
is $2$-torsion.\ When $X=X^6_{\mathbb A}$, since the cycle map is surjective for $\mathbb{G}r(2,V_\xi)$, the image of the cycle map, modulo $\gamma^* H^{2m}(\mathbb{G}r(2,V_5),{\bf Z})$, is a $\mathbb{G}$-invariant, not necessarily saturated, sublattice of ${\mathsf S}$ of index a power of $2$.
\end{exam}
\section{Irrational GM threefolds}\label{sect5}
\subsection{Double EPW surfaces and their automorphisms}\label{se51}
Let $Y_A\subset {\bf P}(V_6)$ be a quasi-smooth EPW sextic, where $A\subset \bw3V_6$ is a Lagrangian subspace with no decomposable vectors.\ Its singular locus is the smooth surface $Y_A^{\ge 2}$ and, as explained in Appendix~\ref{sec3}, there is a canonical connected \'etale double covering
$\widetilde{Y}_A^{\ge 2}\to Y_A^{\ge 2}$.
Let $X$ be
any (smooth) GM variety of dimension $3$ or $5$ associated with $A$ and let~${\mathbb{J}}ac(X)$ be its intermediate Jacobian.\ It is a 10-dimensional abelian variety\ endowed with a canonical principal polarization $\theta_X$.\ By \cite[Theorem~1.1]{dkij}, there is a canonical principal polarization $\theta$ on ${\mathbb A}lb (\widetilde{Y}_A^{\ge 2})$ and a
canonical isomorphism
\begin{equation}\label{jxa}
({\mathbb{J}}ac(X),\theta_X)\simeqlra ({\mathbb A}lb (\widetilde{Y}_A^{\ge 2}),\theta)
\end{equation}
between $10$-dimensional principally polarized abelian varieties.\ By~\eqref{h1}, the tangent spaces at the origin of these abelian varieties\ are isomorphic to $A$.
The subgroup ${\mathbb A}ut(X)$ of ${\mathbb A}ut(Y_A)$ (see~\eqref{autxa}) acts faithfully on both ${\mathbb{J}}ac(X) $ and ${\mathbb A}lb (\widetilde{Y}_A^{\ge 2})$ and, by~Proposition~\ref{propc8}, the isomorphism above is ${\mathbb A}ut(X)$-equivariant.
\subsection{Explicit irrational GM threefolds}\label{sec52}
Consider the
{Klein}\ Lagrangian ${\mathbb A}$.\ By Proposition~\ref{prop:all_autom_A}, we have ${\mathbb A}ut(Y_{\mathbb A})\simeq \mathbb{G}$ and the analytic representation of the action of that group on ${\mathbb A}lb (\widetilde{Y}_A^{\ge 2})$ is, by~Proposition~\ref{propc7}, the representation of $\mathbb{G}$ on ${\mathbb A}$, that is, the irreducible representation~$\bw2\xi$ of $\mathbb{G}$ (Section~\ref{se31}).\ In particular,~$\mathbb{G}$ acts faithfully on the $10$-dimensional principally polarized abelian variety\
\begin{equation}\label{defj}
({\mathbb{J}},\theta):=({\mathbb A}lb (\widetilde{Y}_{\mathbb A}^{\ge 2}),\theta)
\end{equation}
by automorphisms that preserve the principal polarization~$\theta$.\ By Lemma~\ref{lb3}, any $\mathbb{G}$-invariant polarization on ${\mathbb{J}}$ is proportional to~$\theta$.\
\begin{prop}\label{prop61}
The principally polarized abelian variety\ $({\mathbb{J}},\theta)$ is indecomposable.
\end{prop}
\begin{proof}
If $({\mathbb{J}},\theta)$ is isomorphic to a product of $m\ge 2$ nonzero indecomposable principally polarized abelian varieties, such a decomposition is unique up to the order of the factors hence induces a morphism $u\colon\mathbb{G}\to\mathfrak S_m$ (the group $\mathbb{G}$ permutes the factors).\ Since the analytic representation is irreducible, the image of $u$ is nontrivial and, the group $\mathbb{G}$ being simple, $u$ is injective; but this is impossible because~$\mathbb{G}$ contains elements of order $11$ but not $ \mathfrak S_m$, because $m\le 10$.
\end{proof}
We can now prove our main result.
\begin{theo}\label{main}
Any smooth GM threefold associated with the Lagrangian ${\mathbb A}$ is irrational.
\end{theo}
\begin{proof}
Let $X$ be such a threefold.\ By Proposition~\ref{propc7}, the isomorphism
$({\mathbb{J}}ac(X),\theta_X)\simeqto ( {\mathbb{J}},\theta)$ in~\eqref{jxa}
is $\mathbb{G}$-equivariant.\
We follow \cite{bea2,bea3}: to prove that~$X$ is not rational, we apply the Clemens--Griffiths criterion (\cite[Corollary~3.26]{cg}); in view of Proposition~\ref{prop61}, it suffices to prove that $( {\mathbb{J}},\theta)$ is not the Jacobian of a smooth projective curve.\
Suppose $( {\mathbb{J}},\theta)\simeq ({\mathbb{J}}ac(C),\theta_C)$ for some smooth projective curve $C$ of genus $10$.\ The group~$\mathbb{G}$ then embeds into the group of automorphisms of $({\mathbb{J}}ac(C),\theta_C)$; by the Torelli theorem, this group is isomorphic to ${\mathbb A}ut(C)$ if $C$ is hyperelliptic and to ${\mathbb A}ut(C)\times{\bf Z}/2{\bf Z}$ otherwise.\ Since any morphism from $\mathbb{G}$ to ${\bf Z}/2{\bf Z}$ is trivial, we see that $\mathbb{G}$ is a subgroup of ${\mathbb A}ut(C)$.\ This contradicts the fact that the automorphism group of a curve of genus $10$ has order at most $432$ (\cite{lmfd}).
\end{proof}
\begin{coro}\label{coro52}
There exists a complete family, with finite moduli morphism, parametrized by the smooth projective surface $Y^{\ge2}_{{\mathbb A}}$, of irrational smooth ordinary GM threefolds.
\end{coro}
\begin{proof}
This follows from the theorem and \cite[Example~6.8]{dkmoduli}.
\end{proof}
The theorem applies in particular to the GM threefold $X^3_{\mathbb A}$ defined in Section~\ref{se23}.
\begin{coro}\label{coro53}
The GM threefold $X^3_{\mathbb A}$ is irrational.
\end{coro}
\begin{rema}\label{rema53}
It is a general fact that all smooth GM varieties of the same dimension constructed from the same Lagrangian are birationally isomorphic (\cite[Corollary~4.16]{dkclass}); in particular, all threefolds in the family of Corollary~\ref{coro52} are mutually birationally isomorphic.\
\end{rema}
\begin{rema}\label{rema56a}
The Clemens--Griffiths component of a
principally polarized abelian variety\ is the product of its indecomposable factors that are not isomorphic to Jacobians of smooth projective curves, and the Clemens--Griffiths component of a Fano threefold is the Clemens--Griffiths component of its intermediate Jacobian; it
follows from the Clemens--Griffiths method that the Clemens--Griffiths component of a Fano threefold is a birational invariant.\ By Proposition~\ref{prop61}, the Clemens--Griffiths component of the GM threefolds constructed from the Lagrangian~${\mathbb A}$ is $({\mathbb{J}},\theta)$; in particular, these threefolds are not birationally isomorphic to any smooth cubic threefold (because their Clemens--Griffiths components all have dimension $5$).
\end{rema}
\begin{rema}\label{rema53a}
All GM fivefolds are rational (\cite[Proposition~4.2]{dkclass}).\ We do not know whether
the smooth GM fourfolds associated with the Lagrangian ${\mathbb A}$ are rational (folklore conjectures say that they should be irrational, because they have no associated K3 surfaces; see Proposition~\ref{assoc}).
\end{rema}
Let us go back to the
10-dimensional principally polarized abelian variety\ $({\mathbb{J}},\theta)$ defined by~\eqref{defj}.\ It is acted on faithfully by the group~$\mathbb{G}$, and the associated analytic representation
$\mathbb{G}\to \mathbb{G}L(T_{{\mathbb{J}},0})$
is the irreducible
representation $\bw2\xi$ of $\mathbb{G}$ (Sections~\ref{se51} and~\ref{sec52}).\
\begin{prop}\label{prop62}
The abelian variety ${\mathbb{J}} $ is isogeneous to $E^{10}$, for some elliptic curve $E$.
\end{prop}
\begin{proof}
Since the analytic representation is irreducible and defined over ${\bf Q}$ (Appendix~\ref{sea2}), the proposition follows from~Proposition~\ref{propb1}.
\end{proof}
Unfortunately, we were not able to say more about the elliptic curve $E$ in the proposition: as explained in Remark~\ref{remb4}, the mere existence of a $\mathbb{G}$-action on $E^{10}$ with prescribed analytic representation and of a $\mathbb{G}$-invariant polarization does not put any restriction on $E$.\
We suspect that this curve $E$ is isomorphic to the elliptic curve $E_\lambda:={\bf C}/{\bf Z}[\lambda]$, which has complex multiplication by~${\bf Z}[\lambda]$, where
$ \lambda:=\tfrac12(-1+\sqrt{-11})$.\
More precisely, we conjecture that~$({\mathbb{J}},\theta)$ is isomorphic to the principally polarized abelian variety\ constructed in Proposition~\ref{prop63}.
\appendix\section{Automorphisms of double EPW sextics}\label{appC}
\subsection{Double EPW sextics and their automorphisms}\label{se41}
As in Section~\ref{se1}, let $V_6$ be a $6$-dimensional complex vector space and let $A\subset \bw3V_6$ be a Lagrangian subspace with no decomposable vectors, with associated EPW sextic $Y_A\subset {\bf P}(V_6)$.\ There is a canonical double covering
\begin{equation}\label{piA}
\pi_A\colon \widetilde{Y}_A\longrightarrow Y_A
\end{equation}
branched along the integral surface $Y^{\ge 2}_A$.\ The fourfold $\widetilde{Y}_A$ is called a {\em double EPW sextic} and its singular locus is the finite set~$\pi_A^{-1}(Y^{\ge 3}_A)$ (\cite[Section~1.2]{og7} or \cite[Theorem~B.7]{dkclass}).\ It carries the canonical polarization $H:=\pi_A^*\mathcal{O}_{Y_A}(1)$ and the image of the associated morphism $\widetilde{Y}_A\to {\bf P}(H^0(\widetilde{Y}_A,H)^\vee)$ is isomorphic to $Y_A$.\
When $Y_A^{\ge3}=\varnothing$, we say that $Y_A$ is {\em quasi-smooth} and~$\widetilde{Y}_A$ is a smooth {hyperk\"ahler}\ variety of K3$^{[2]}$-type.
Every automorphism of $Y_A$ induces an automorphism of~$\widetilde{Y}_A$ (see the proof of \cite[Proposition~B.8(b)]{dkclass}) that fixes the class~$H$.\
Conversely, let ${\mathbb A}ut_H(\widetilde{Y}_A)$ be the group of automorphisms of $\widetilde{Y}_A$ that fix the class~$H$.\ It
contains the covering involution $\iota$ of~$\pi_A$.\
Any element of ${\mathbb A}ut_H(\widetilde{Y}_A)$ induces an automorphism of ${\bf P}(H^0(\widetilde{Y}_A,H)^\vee)\simeq {\bf P}( V_6)$ hence descends to an
automorphism of~$Y_A$.\ This gives a central extension
\begin{equation}\label{central}
0\to \langle \iota\rangle \to {\mathbb A}ut_H(\widetilde{Y}_A) \to {\mathbb A}ut(Y_A)\to 1.
\end{equation}
As we will check in~\eqref{h2o}, the space $H^2(\widetilde{Y}_A, \mathcal{O}_{\widetilde{Y}_A}) $ has dimension 1.\ It is acted on by the group of
automorphisms of $\widetilde{Y}_A$ and this defines
another extension
\begin{equation}\label{defm}
1\to {\mathbb A}ut_H^s(\widetilde{Y}_A) \to {\mathbb A}ut_H(\widetilde{Y}_A) \to{\boldsymbol \mu}_r\to 1.
\end{equation}
The image of ~$\iota$ in ${\boldsymbol \mu}_r$ is $-1$ and ${\mathbb A}ut_H^s(\widetilde{Y}_A)$ is the subgroup of elements of ${\mathbb A}ut_H(\widetilde{Y}_A)$ that act trivially on $H^2(\widetilde{Y}_A, \mathcal{O}_{\widetilde{Y}_A}) $ (when $Y_A^{\ge3}=\varnothing$, these are exactly, by Hodge theory, the symplectic automorphisms---those that leave any symplectic $2$-form on $\widetilde{Y}_A$ invariant).\
We will show in the next proposition (which was kindly provided by A. Kuznetsov) that these extensions are both trivial.\
For that, we construct an extension
\begin{equation}\label{exttilde}
1 \to {\boldsymbol \mu}_2 \to \widetilde{\mathbb A}ut(Y_A) \to {\mathbb A}ut(Y_A) \to 1
\end{equation}
as follows.\ Recall from~\eqref{autya} that there is an embedding ${\mathbb A}ut(Y_A) \hookrightarrow {\bf P}GL(V_6)$.\ Let $G$ be the inverse image of ${\mathbb A}ut(Y_A)$ via the canonical map $\SL(V_6)\to {\bf P}GL(V_6)$.\ It is an extension of~${\mathbb A}ut(Y_A)$ by~${\boldsymbol \mu}_6$ and we set $\widetilde{\mathbb A}ut(Y_A):=G/{\boldsymbol \mu}_3$.\
The action of $G$ on $V_6$ induces
an action on $\bw3V_6$ such that ${\boldsymbol \mu}_6$ acts through its cube,
hence the latter action factors through an action of
$
\widetilde{\mathbb A}ut(Y_A) $.\
The subspace $A \subset \bw3V_6$ is preserved by this action,
hence we have a morphism of central extensions
\begin{equation}\label{esss}
\begin{aligned}
\xymatrix
@R=5mm@M=2mm
{
1\ar[r]&{\boldsymbol \mu}_2\ar[r]\ar@{_(->}[d]&\widetilde{\mathbb A}ut(Y_A)\ar[r]\ar[d]&{\mathbb A}ut(Y_A)\ar[r]\ar[d]&1\\
1\ar[r]&{\bf C}^\times\ar[r]
&\mathbb{G}L(A)\ar[r]&{\bf P}GL(A)\ar[r]&1.
}
\end{aligned}
\end{equation}
\begin{lemm}\label{nlem}
The vertical morphisms in~\eqref{esss} are injective.
\end{lemm}
\begin{proof}
Let $g\in G\subset \SL(V_6)$.\ Assume that $g$ acts trivially on $A$.\ Then it also acts trivially on~$A^\vee$.\ There is a $G$-equivariant exact sequence $0 \to A \to \bw3V_6 \to A^\vee \to 0$ which splits $G$-equivariantly because~$G$ is finite.\ It follows that $G$ also acts trivially on $\bw3V_6$.\
The natural morphism
${\bf P}GL(V_6) \to {\bf P}GL(\bw3V_6)$ being injective, $g$ is in ${\boldsymbol \mu}_6$.\ Finally, ${\boldsymbol \mu}_6/{\boldsymbol \mu}_3 $
acts nontrivially on $A$, hence $g$ is in ${\boldsymbol \mu}_3$ and its image in $ \widetilde{\mathbb A}ut(Y_A)$ is~$1$.\ This proves that the middle vertical map in~\eqref{esss} is injective.
Assume now that $g $ acts as $\lambda \Id_A$ on $A$.\ Its eigenvalues on $\bw3V_6$ are then~$\lambda$ and $\lambda^{-1}$, both with multiplicity $10$.\ Let $\lambda_1,\dots,\lambda_6$ be its eigenvalues on $V_6$.\ For all $1\le i<j<k\le 6$, one then has $\lambda_i\lambda_j\lambda_k= \lambda$ or $\lambda^{-1}$.\ It follows that if $i,j,k,l,m$ are all distinct, $\lambda_i\lambda_j\lambda_k,\lambda_i\lambda_j\lambda_l,\lambda_i\lambda_j\lambda_m$ can only take 2 values, hence $\lambda_k,\lambda_l,\lambda_m$ can only take 2 values.\ So, there are at most $2$ distinct eigenvalues and one of the eigenspaces, say $E_{\lambda_1}$, has dimension at least~$ 3$.\ If $\lambda\ne \lambda^{-1}$, the eigenspace in $\bw3V_6$ for the eigenvalue $\lambda_1^3$, which is either $A$ or $A^\vee$, contains $\bw3E_{\lambda_1}$.\ This contradicts the fact that $A$ and $A^\vee$ contain no decomposable vectors.\ Therefore, $\lambda= \lambda^{-1}$ and~$g$ acts as $\pm \Id_A$, and the first part of the proof implies that the image of $\pm g$ in $ \widetilde{\mathbb A}ut(Y_A)$ is~$ 1$.\ This proves that the rightmost vertical map in~\eqref{esss} is injective.
\end{proof}
\begin{prop}[Kuznetsov]\label{split1}
Let $A\subset \bw3V_6$ be a Lagrangian subspace with no decomposable vectors.\ The extensions~\eqref{central} and~\eqref{defm} are trivial and $r=2$; more precisely, there is an isomorphism
$${\mathbb A}ut_H(\widetilde{Y}_A)\simeq {\mathbb A}ut(Y_A)\times \langle \iota\rangle$$
that splits~\eqref{central} and the factor ${\mathbb A}ut(Y_A)$ corresponds to the subgroup ${\mathbb A}ut_H^s(\widetilde{Y}_A)$ of ${\mathbb A}ut_H(\widetilde{Y}_A)$.
\end{prop}
\begin{proof}
We briefly recall from \cite[Section~1.2]{og7} (see also \cite{dkcovers}) the construction of the double cover $\pi_A\colon \widetilde{Y}_A\to Y_A$.\ In the terminology of the latter article, one considers the Lagrangian subbundles $\mathcal{A}_1:=A \otimesimes \mathcal{O}_{{\bf P}(V_6)}$ and $\mathcal{A}_2:=\bw2T_{{\bf P}(V_6)}(-3)$ of the trivial vector bundle $\bw3V_6 \otimesimes \mathcal{O}_{{\bf P}(V_6)}$, and the first Lagrangian cointersection sheaf
$
\mathcal{R}_1 := \coker(\mathcal{A}_2\hookrightarrow \mathcal{A}_1^\vee)
$, a rank-$1$ sheaf with support~$Y_A$.\
One sets (\cite[Theorem~5.2(1)]{dkcovers})
$$
\widetilde{Y}_A = \Spec(\mathcal{O}_{Y_A} \opluslus \mathcal{R}_1(-3)).
$$
In particular, one has
\begin{equation}\label{h2o}
H^2(\widetilde{Y}_A, \mathcal{O}_{\widetilde{Y}_A}) \simeq H^2(Y_A, \mathcal{R}_1(-3)) \simeq H^3({\bf P}(V_6), \mathcal{A}_2(-3))= H^3({\bf P}(V_6), \bw2T_{{\bf P}(V_6)}(-6))\simeq {\bf C}.
\end{equation}
The subbundles $\mathcal{A}_1$ and $\mathcal{A}_2$ are invariant for the action of $\widetilde{\mathbb A}ut(Y_A)$ on~$\bw3V_6$, hence
the sheaf~$\mathcal{R}_1 $ is $\widetilde{\mathbb A}ut(Y_A)$-equivariant.\ Finally, the line bundle $\mathcal{O}_{{\bf P}(V_6)}(-1) $
has a $G$-linearization (the subgroup $G\subset \SL(V_6)$ was defined right before Lemma~\ref{nlem}).\ It follows that $\mathcal{O}_{{\bf P}(V_6)}(-3)$ has an $\widetilde{\mathbb A}ut(Y_A)$-linearization, hence
the same is true for the sheaf $\mathcal{R}_1(-3)$.\ Therefore, the group $\widetilde{\mathbb A}ut(Y_A)$ acts on $\widetilde{Y}_A$ and fixes the polarization $H$.
Observe now that since the nontrivial element of $ {\boldsymbol \mu}_2 \subset \widetilde{\mathbb A}ut(Y_A)$ acts by $-1$ on $A$, hence also on $ \mathcal{R}_1$, and since it acts by $-1$ on $\mathcal{O}(-1)$, hence also
on $\mathcal{O}(-3)$, it follows that $ {\boldsymbol \mu}_2$ acts trivially on $\mathcal{R}_1(-3)$, hence also on $\widetilde{Y}_A$.\ Therefore, the morphism $\widetilde{\mathbb A}ut(Y_A)\to {\mathbb A}ut_H(\widetilde{Y}_A)$ factors through the quotient
$\widetilde{\mathbb A}ut(Y_A)/{\boldsymbol \mu}_2 = {\mathbb A}ut(Y_A)$.\ In other words, the surjection ${\mathbb A}ut_H(\widetilde{Y}_A) \to {\mathbb A}ut(Y_A)$ in~\eqref{central} has a section and this central extension is trivial.
The action of the group ${\mathbb A}ut(\widetilde{Y}_A)$ on the 1-dimensional vector space
$H^2(\widetilde{Y}_A, \mathcal{O}_{\widetilde{Y}_A})$ defines a morphism ${\mathbb A}ut(\widetilde{Y}_A)\to{\bf C}^\times$ that maps $\iota$ to $-1$.\
The lift $\widetilde{\mathbb A}ut(Y_A)\to {\mathbb A}ut(Y_A) \hookrightarrow {\mathbb A}ut_H(\widetilde{Y}_A)$ acts trivially on $H^2(\widetilde{Y}_A, \mathcal{O}_{\widetilde{Y}_A})$ because
its action is induced by the action of ${\bf P}GL(V_6)$, which has no nontrivial characters.\ This gives a surjection ${\mathbb A}ut_H(\widetilde{Y}_A) \to \langle \iota\rangle$ which is trivial on the image of the section ${\mathbb A}ut(Y_A) \hookrightarrow {\mathbb A}ut_H(\widetilde{Y}_A)$.\ This implies that the extension~\eqref{defm} is also trivial and $r=2$.\ The theorem is therefore proved.
\end{proof}
\subsection{Moduli space and period map of (double) EPW sextics} \label{sec42}
Quasi-smooth EPW sextics admit an affine coarse moduli space ${\mathbf{M}^{\mathrm{EPW},0}}$, constructed in \cite{og5} as a GIT quotient by ${\bf P}GL(V_6)$ of an affine open dense subset of the space of Lagrangian subspaces in~$\bw3V_6$.\
Let $\widetilde{Y}$ be a hyperk\"ahler fourfold of K3$^{[2]}$-type (such as a double EPW sextic).\ The lattice $H^2(\widetilde{Y},{\bf Z})$ (endowed with the Beauville--Bogomolov quadratic form $q_{BB}$) is isomorphic to the lattice
\begin{equation}\label{defL}
L:=U^{\opluslus 3}\opluslus E_8(-1)^{\opluslus 2}\opluslus (-2),
\end{equation}
where $U$ is the hyperbolic plane $\bigl( {\bf Z}^2, \bigl(\begin{smallmatrix} 0& 1\\ 1 & 0 \end{smallmatrix}\bigr)\bigr)$, $ E_8(-1)$ is the negative definite even rank-8 lattice, and $(m)$ is the rank-$1$ lattice with generator of square $m$.\
Fix a class $h\in L$ with $h^2=2$.\ These classes are all in the same $O(L)$-orbit and
\begin{equation}\label{defhperp}
h^\bot \simeq U^{\opluslus 2}\opluslus E_8(-1)^{\opluslus 2}\opluslus (-2)^{\opluslus 2}.
\end{equation}
The space
$$
\begin{aligned}
\Omega_{h} :={}& \{ [x]\in {\bf P}(L \otimesimes {\bf C})\mid x\cdot h=0,\ x\cdot x=0,\ x\cdot {\mathbf a}r x>0\}\\
{}={}& \{ [x]\in {\bf P}(h^\bot \otimesimes {\bf C})\mid x\cdot x=0,\ x\cdot {\mathbf a}r x>0\}
\end{aligned}
$$
has two connected components, interchanged by complex conjugation, which are Hermitian symmetric domains.\
It is acted on by the group
$$ \{g\in O(L)\mid g(h)=h\},$$
also with two connected components, which is the index-2 subgroup $\widetilde O(h^\bot)$ of $O(h^\bot)$ that consists of isometries that act trivially on the discriminant group $\Disc(h^\bot)\simeq ({\bf Z}/2{\bf Z})^2$.\
The quotient is an irreducible quasi-projective variety (Baily--Borel) and the {\em period map}
\begin{equation}\label{defp}
\wp\colon {\mathbf{M}^{\mathrm{EPW},0}} \longrightarrow \widetilde O(h^\bot){\mathbf a}ckslash \Omega_{h},\quad [\widetilde{Y}]\longmapsto [H^{2,0}(\widetilde{Y})]
\end{equation}
is algebraic (Griffiths).\ It is an open embedding by
Verbitsky's Torelli theorem (\cite{ver, marsur, huybki}).\
If $A\subset \bw3V_6$ is a Lagrangian such that $\widetilde{Y}_A$ is smooth with period $[x]\in{\bf P}(L \otimesimes {\bf C})$ (well defined only up to the action of $ \widetilde O(h^\bot)$), the Picard group ${\bf P}ic(\widetilde{Y}_A)$ is, by Hodge theory, isomorphic to $x^\bot\cap L$.\ It contains the class $h$ (of square $2$) but, as explained in \cite[Theorem~5.1]{dm}, no class orthogonal to $h$ of square $-2$.
\subsection{Automorphisms of prime order }\label{secc1}
Let $\widetilde{Y}$ be a hyperk\"ahler fourfold of K3$^{[2]}$-type.\ In the lattice $(H^2(\widetilde{Y},{\bf Z}),q_{BB})$ mentioned in Appendix~\ref{sec42}, we consider
the {\em transcendental lattice}
$${\bf T}r(\widetilde{Y}) :={\bf P}ic(\widetilde{Y})^\bot\subset H^2(\widetilde{Y},{\bf Z}) .$$
The automorphism group ${\mathbb A}ut(\widetilde{Y})$ acts faithfully by isometries on the lattice $(H^2(\widetilde{Y},{\bf Z}),q_{BB})$ and preserves the sublattices ${\bf P}ic(\widetilde{Y})$ and ${\bf T}r(\widetilde{Y})$.\
If $G$ is a subset of~${\mathbb A}ut(\widetilde{Y})$, we denote by
$T_G(\widetilde{Y})$
the invariant lattice (of elements of~$H^2(\widetilde{Y},{\bf Z}) $ that are invariant by all elements of $G$) and by $S_G(\widetilde{Y}):=T_G(\widetilde{Y})^\bot$ its orthogonal in $H^2(\widetilde{Y},{\bf Z}) $.\
Many results are known about automorphisms of prime order $p$ of {hyperk\"ahler}\ fourfolds.\ We restrict ourselves to the case $p\ge 11$.\ In the statement below, the rank-$20$ lattice ${\mathsf S}$ was defined in \cite[Example~2.9]{mon3} by an explicit $20\times 20$ Gram matrix (see also \cite[Example~2.5.9]{monphd}); it is negative definite, even, contains no $(-2)$-classes, its discriminant group is $({\bf Z}/11{\bf Z})^2$, and its discriminant form is $\left(\begin{smallmatrix} -2/11 & 0\\ 0 & -2/11 \end{smallmatrix}\right)$.\
\begin{theo}\label{thc1}
Let $\widetilde{Y}$ be a projective hyperk\"ahler fourfold of K3$^{[2]}$-type and let $g$ be a symplectic automorphism of~$\widetilde{Y}$ of prime order $p\ge11$.\ There are inclusions ${\bf T}r(\widetilde{Y})\subset T_g(\widetilde{Y})$ and $S_g(\widetilde{Y})\subset {\bf P}ic(\widetilde{Y})$, and \mbox{$p= 11$.}\ The lattice $S_g(\widetilde{Y})$ is
isomorphic to ${\mathsf S}$ and $\rho(\widetilde{Y})=21$.\ The possible lattices~$T_g(\widetilde{Y})$ are
$$ \begin{pmatrix}
2 &1 &0 \\
1 &6&0\\
0&0&22
\end{pmatrix}\quad \text{or} \quad
\begin{pmatrix}
6&2 &2 \\
2 &8&-3\\
2&-3&8
\end{pmatrix}.$$
\end{theo}
\begin{proof}
The proof is a compilation of previously known results on symplectic automorphisms.\
The bound $p\le 11$ is \cite[Corollary~2.13]{mon3}.\ The inclusions and the properties of the lattice~$S_g(\widetilde{Y})$ are in \cite[Lemma~3.5]{mon2}, the equality $\rho(\widetilde{Y})=21$ is in \cite[Proposition~1.2]{mon3}, the lattice $S_g(\widetilde{Y})$ is determined in \cite[Theorem~7.2.7]{monphd}, and the possible lattices $T_g(\widetilde{Y})$ in \cite[Section~5.5.2]{bns}.
\end{proof}
This theorem applies in particular to (smooth) double EPW sextics~$\widetilde{Y}_A$.\ We are interested in automorphisms that preserve the canonical degree-2 polarization $H$.\ By Proposition~\ref{split1}, the group of these automorphisms, modulo the covering involution $\iota$, is isomorphic to the group of automorphisms of the EPW sextic~$Y_A$.
\begin{coro}\label{th14}
Let $\widetilde{Y}_A $ be a smooth double EPW sextic and let $g$ be an automorphism of $\widetilde{Y}_A$ of prime order $p\ge 11$ that fixes the polarization~$H$.\ Then $p=11$ and\,\footnote{In the given decomposition of the lattice ${\bf P}ic(\widetilde{Y}_A)$, the summand $(2)$ is {\em not} generated by the polarization $H$, because~${\mathsf S}$ contains no $(-2)$-classes.}
$$
\begin{aligned}
S_{g}(\widetilde{Y}_A) \simeq {\mathsf S},\qquad T_{g}(\widetilde{Y}_A)&\simeq \begin{pmatrix}
2 &1 \\
1 &6
\end{pmatrix}\opluslus (22),\qquad {\bf T}r(\widetilde{Y}_A)\simeq (22)^{\opluslus 2},
\\
{\bf P}ic(\widetilde{Y}_A)= {\bf Z} H \opluslus {\mathsf S}&\simeq (2)\opluslus E_8(-1)^{\opluslus 2}\opluslus \begin{pmatrix}
-2 &-1 \\
-1 & -6
\end{pmatrix}^{\opluslus 2} .
\end{aligned}
$$
In particular, the fourfold $\widetilde{Y}_A$ has maximal Picard number $21$.\
\end{coro}
\begin{proof} By Proposition~\ref{split1}, the automorphism $g$ is symplectic (all nonsymplectic automorphisms have even order).\ Since $H\in T_{g}(\widetilde{Y}_A)$ and $q_{BB}(H)=2$, and the second lattice in Theorem~\ref{thc1} contains no classes of square $2$, there is only one possibility for~$T_{g}(\widetilde{Y}_A)$ (see also \cite[Section~7.4.4]{monphd}).\ There are only two (opposite) classes of square 2 in that lattice, so we find $
{\bf T}r(\widetilde{Y}_A)$ as their orthogonal.\
We know that ${\bf P}ic(\widetilde{Y} _A)$ is an overlattice of ${\bf Z} H\opluslus S_{g} (\widetilde{Y}_A)$.\ Since the latter has no nontrivial overlattices (its discriminant group has no nontrivial isotropic elements), they are equal.\ Finally, the negative definite lattices ${\mathsf S}$ and
$$S:= E_8(-1)^{\opluslus 2}\opluslus \begin{pmatrix}
-2 &-1 \\
-1 & -6
\end{pmatrix}^{\opluslus 2}$$
are in the same genus.\footnote{By Nikulin's celebrated result \cite[Corollary~1.9.4]{nik}, this means that they have same ranks, same signatures, and that their discriminant forms coincide.}\ They are not isomorphic (because ${\mathsf S}$ does not represent $-2$) but the indefinite lattices $(2)\opluslus {\mathsf S}$ and $(2)\opluslus S$ are by \cite[Corollary~1.13.3]{nik}.
\end{proof}
We prove in Theorem~\ref{th47} that the double EPW sextic $\widetilde{Y}_{\mathbb A}$ is the only smooth double EPW sextic with an automorphism of order $ 11$ that fixes the polarization $H$.\
In Hassett's terminology (recalled in \cite[Section~4]{dm}), a (smooth) double EPW sextic $\widetilde{Y}_A$ is {\em special of discriminant $d$} if there exists a primitive rank-2 lattice $K\subset {\bf P}ic(\widetilde{Y}_A)$ containing the polarization $H$ such that $\disc(K^\bot)=-d$ (the orthogonal is taken in $(H^2(\widetilde{Y}_A,{\bf Z}),q_{BB})$); this may only happen when $d\equiv 0,2,4\pmod{8}$ and $d>8$ (\cite[Proposition~4.1 and Remark~6.3]{dm}).\ The fourfold $\widetilde{Y}_A$ has an {\em associated K3 surface} if moreover the lattice $K^\bot$ is isomorphic to the opposite of the primitive cohomology lattice
of a pseudo-polarized K3 surface (necessarily of degree $d$); a necessary condition for this to happen is $d\equiv 2,4\pmod{8}$ (this was proved in \cite[Proposition~6.6]{dim} for GM fourfolds but the computation is the same).
\begin{prop}\label{assoc}
The double EPW sextic $\widetilde{Y}_{\mathbb A}$ is special of discriminant $d $ if and only if $d$ is a multiple of $8$ greater than~$8$.\ In particular, it has no associated K3 surfaces.
\end{prop}
\begin{proof}
Assume that $\widetilde{Y}_{\mathbb A}$ is special of discriminant $d $.\ Since ${\bf P}ic(\widetilde{Y}_{\mathbb A})\simeq {\bf Z} H\opluslus {\mathsf S}$, the required lattice~$K$ as above is of the form $\langle H,\mathbf kappa\rangle$, where $\mathbf kappa\in {\mathsf S}$ is primitive.\ Since $\Disc({\mathsf S})\simeq ({\bf Z}/11{\bf Z})^2$, the divisibility $\mathop{\rm div}\nolimits_{{\mathsf S}}(\mathbf kappa)$ divides $11$ and, since $\Disc(H^\bot)\simeq ({\bf Z}/2{\bf Z})^2$ (see \cite[(1)]{dm}), the divisibility $\mathop{\rm div}\nolimits_{H^\bot}(\mathbf kappa)$ divides~$2$, but also divides $\mathop{\rm div}\nolimits_{{\mathsf S}}(\mathbf kappa)$ (because ${\mathsf S}\subset H^\bot$).\ It follows that $\mathop{\rm div}\nolimits_{H^\bot}(\mathbf kappa)=1$.\ The lattice $\langle H,\mathbf kappa\rangle^\bot$ therefore has discriminant~$4\mathbf kappa^2$ by the formula \cite[(4)]{dm}.\
It follows that $\widetilde{Y}_{\mathbb A}$ is special of discriminant $d$ if and only if $d\equiv 0 \pmod8$ and ${\mathsf S}$ primitively represents $-d/4$.\
A direct computation shows that the lattice ${\mathsf S}$ contains the rank-$5$ lattice with diagonal quadratic form
$(-4,-4,-4,-6,-8)$.\ By \cite[Section 6(iii)]{Bharg}, the quadratic form on the last four variables represents every even negative integer with the exception of $-2$, and the first variable can be used to ensure that all these integers can be primitively represented.\ This proves the proposition.
\end{proof}
\subsection{Double EPW surfaces and their automorphisms}\label{sec3}
Let $Y_A\subset {\bf P}(V_6)$ be an EPW sextic, where $A\subset \bw3V_6$ is a Lagrangian subspace with no decomposable vectors.\ By \cite[Theorem~5.2(2)]{dkcovers}, there is a canonical connected double covering
\begin{equation}\label{piA2}
\widetilde{Y}_A^{\ge 2}\longrightarrow Y_A^{\ge 2}
\end{equation}
between integral surfaces, with covering involution $\tau$, branched over the finite set $Y_A^{\ge 3}$.
We compare automorphisms of $Y_A$ with those of $\widetilde{Y}_A^{\ge 2}$.\ Any automorphism of $Y_A$ induces an automorphism of its singular locus $Y_A^{\ge 2}$.\ This defines a morphism
${\mathbb A}ut(Y_A)\to {\mathbb A}ut(Y_A^{\ge 2})$.\ Since ${\mathbb A}ut(Y_A)$ is a subgroup of ${\bf P}GL(V_6)$ and the surface
$Y_A^{\ge 2}$ is not contained in a hyperplane, this morphism is injective.
\begin{prop}[Kuznetsov]\label{split2}
Let $A\subset \bw3V_6$ be a Lagrangian subspace with no decomposable vectors.\ Any element
of ${\mathbb A}ut(Y_A)$ lifts to an automorphism of $\widetilde{Y}_A^{\ge 2}$.\ These lifts form a subgroup of
$ {\mathbb A}ut(\widetilde{Y}_A^{\ge 2})$ which is isomorphic to the group $\widetilde{\mathbb A}ut(Y_A)$ in the extension~\eqref{exttilde} via an isomorphism that takes $\langle \tau\rangle$ to ${\boldsymbol \mu}_2$.
\end{prop}
\begin{proof}
The proof follows the exact same steps as the proof of Proposition~\ref{split1}, whose notation we keep.\ By \cite[Theorem~5.2(2)]{dkcovers}, the surface $\widetilde{Y}_A^{\ge 2}$ is defined as
\begin{equation}\label{y2}
\widetilde{Y}^{\ge 2}_A = \Spec(\mathcal{O}_{Y^{\ge 2}_A} \opluslus \mathcal{R}_2(-3)),
\end{equation}
where
$
\mathcal{R}_2 = (\bw2\mathcal{R}_1\vert_{Y^{\ge 2}_A})^{\vee\vee}$.\ As in the proof of Proposition~\ref{split1}, the group $\widetilde{\mathbb A}ut(Y_A) $ acts on $\widetilde{Y}^{\ge 2}_A$ and the nontrivial element of $ {\boldsymbol \mu}_2 $ acts by~$-1$ on both~$\mathcal{R}_1$
and $\mathcal{O}(-3)$.\ It follows that it acts by~$1$ on $ \mathcal{R}_2$ and by $-1$ on $ \mathcal{R}_2(-3)$, hence as the involution~$\tau$ on $\widetilde{Y}^{\ge 2}_A$.\ This proves the proposition.
\end{proof}
It is possible to deform the double cover~\eqref{piA2} to the canonical double \'etale covering associated with the (smooth) variety of lines on a quartic double solid (see the proof of \cite[Proposition~2.5]{dkij}), so we can use Welters' calculations in \cite[Theorem (3.57) and Proposition~(3.60)]{wel}.\ In particular, the abelian group $H_1(\widetilde{Y}_A^{\ge 2},{\bf Z})$ is free of rank $20$ (and $\tau$ acts as $-\Id$) and there are canonical isomorphisms (\cite[Proposition~2.5]{dkij})
\begin{equation}
\begin{aligned}
T_{{\mathbb A}lb (\widetilde{Y}_A^{\ge 2}),0}&\simeq H^1(\widetilde{Y}_A^{\ge 2},\mathcal{O}_{\widetilde{Y}_A^{\ge 2}})\simeq A,
\label{h1}\\
H^2(Y_A^{\ge 2},{\bf C})& \simeq \bw2 H^1(\widetilde{Y}_A^{\ge 2},{\bf C})\simeq \bw2(A\opluslus {\mathbf a}r A).
\end{aligned}
\end{equation}
The Albanese variety ${\mathbb A}lb (\widetilde{Y}_A^{\ge 2})$ is thus an abelian variety\ of dimension 10 and one can consider the analytic representation (see Section~\ref{sectb1})
$$\rho_a\colon {\mathbb A}ut(\widetilde{Y}_A^{\ge 2})\longrightarrow \mathbb{G}L(T_{{\mathbb A}lb (\widetilde{Y}_A^{\ge 2}),0})\simeq\mathbb{G}L(A).
$$
Recall from Proposition~\ref{split2} that there is an injective morphism $\widetilde{\mathbb A}ut(Y_A)\hookrightarrow {\mathbb A}ut(\widetilde{Y}_A^{\ge 2})$.
\begin{prop}\label{propc7}
Let $Y_A$ be a quasi-smooth EPW sextic.\
The restriction of the analytic representation $\rho_a$ to the subgroup $\widetilde{\mathbb A}ut(Y_A)$ of $ {\mathbb A}ut(\widetilde{Y}_A^{\ge 2})$ is the injective middle vertical map in the diagram~\eqref{esss}.
\end{prop}
\begin{proof}
The morphism $\rho_a$ is the representation of the group ${\mathbb A}ut(\widetilde{Y}_A^{\ge 2})$ on the vector space
$$
T_{{\mathbb A}lb (\widetilde{Y}_A^{\ge 2}),0}\simeq H^1(\widetilde{Y}_A^{\ge 2},\mathcal{O}_{\widetilde{Y}_A^{\ge 2}}).
$$
As in the proof of \cite[Proposition~2.5]{dkij}, there are canonical isomorphisms
$$
H^1(\widetilde{Y}_A^{\ge 2},\mathcal{O}_{\widetilde{Y}_A^{\ge 2}})\simeq H^1(Y_A^{\ge 2},\mathcal{R}_2(-3)) \simeq H^1(Y_A^{\ge 2},\mathcal{O}_{Y_A^{\ge 2}}(3))^\vee
,
$$
where the first isomorphism comes from~\eqref{y2} and the second one from Serre duality (because~$ \mathcal{R}_2 $ is the canonical sheaf of $Y_A^{\ge 2} $).
As in the proof of Proposition~\ref{split2}, the sheaf $ \mathcal{O}_{Y_A^{\ge 2}}(3)$ has an $\widetilde{\mathbb A}ut(Y_A) $-linearization, where~${\mathbb A}ut(Y_A)$ acts on $Y^{\ge 2}_A$ by restriction and the nontrivial element of~$ {\boldsymbol \mu}_2 $ acts by~$-1$ on~$ \mathcal{O}_{Y_A^{\ge 2}}(3)$.\
By construction, the resolution
$$0\to (\bw2\mathcal{A}_2)(-6)\to (\mathcal{A}_1^\vee\otimesimes \mathcal{A}_2)(-6)\to (\Sym^2\!\mathcal{A}_1)(-6)\opluslus\mathcal{O}_{{\bf P}(V_6)}(-6)\to
\mathcal{O}_{{\bf P}(V_6)}\to \mathcal{O}_{Y_A^{\ge 2}}\to 0
$$
given in \cite[(33)]{dkeven} is $\widetilde{\mathbb A}ut(Y_A) $-equivariant, hence induces an $\widetilde{\mathbb A}ut(Y_A)$-equivariant isomorphism
$$H^1(Y_A^{\ge 2},\mathcal{O}_{Y_A^{\ge 2}}(3))\simeq H^3({\bf P}(V_6),(\mathcal{A}_1^\vee\otimesimes \mathcal{A}_2)(-3)) =A^\vee\otimesimes H^3({\bf P}(V_6), \mathcal{A}_2(-3)).
$$
As already noted during the proof of Proposition~\ref{split1}, $\widetilde{\mathbb A}ut(Y_A)$ acts trivially on the $1$-dimensional vector space $H^3({\bf P}(V_6), \mathcal{A}_2(-3))=H^3({\bf P}(V_6), \bw2T_{{\bf P}(V_6)}(-6))$.\ All this proves that the action of~${\mathbb A}ut(\widetilde{Y}_A^{\ge 2})$ on $T_{{\mathbb A}lb (\widetilde{Y}_A^{\ge 2}),0}$ is indeed given by the desired morphism.
\end{proof}
\subsection{Automorphisms of GM varieties}\label{appc4}
Let as before $V_6$ be a 6-dimensional vector space and let $A\subset \bw3V_6$ be a Lagrangian subspace with no decomposable vectors.\ Let $V_5\subset V_6$ be a hyperplane and let $X$ be the associated (smooth ordinary) GM variety (Section~\ref{se22n}).\ One has (see~\eqref{autxa})
\begin{equation*}
{\mathbb A}ut(X)\simeq \{ g\in {\bf P}GL(V_6)\mid \bw3g(A)=A,\ g(V_5)=V_5\}.
\end{equation*}
Since the extension~\eqref{exttilde} splits (Proposition~\ref{split1}), there is a lift
\begin{equation}\label{repx2}
{\mathbb A}ut(X)\longrightarrow \mathbb{G}L(A)
\end{equation}
(see~\eqref{esss}) which is injective by Lemma~\ref{nlem}.
When the dimension of $X$ is either $3$ or $5$, its intermediate Jacobian~${\mathbb{J}}ac(X)$ is a 10-dimensional abelian variety.\
By \cite[Theorem~1.1]{dkij}, it is
canonically isomorphic to
$
{\mathbb A}lb (\widetilde{Y}_A^{\ge 2})
$ (see~\eqref{jxa}).\ Therefore, there is
an isomorphism
\begin{equation*}
T_{{\mathbb{J}}ac(X),0}\simeqlra T_{{\mathbb A}lb (\widetilde{Y}_A^{\ge 2}),0}.
\end{equation*}
Together with the isomorphism~\eqref{h1}, this gives an
analytic representation
$$\rho_{a,X}\colon {\mathbb A}ut(X)\longrightarrow \mathbb{G}L(T_{{\mathbb{J}}ac(X),0})\simeqlra \mathbb{G}L(A).$$
\begin{prop}\label{propc8}
The analytic representation
$\rho_{a,X}$ coincides with the injective morphism~\eqref{repx2}.\ Equivalently, the isomorphism~\eqref{jxa} is ${\mathbb A}ut(X)$-equivariant.
\end{prop}
\begin{proof}
Assume $\dim(X)=3$ and choose a line $L_0\subset X$.\ The isomorphism $
{\mathbb A}lb (\widetilde{Y}_A^{\ge 2})\simeqto {\mathbb{J}}ac(X)
$ was then constructed in \cite[Theorem~4.4]{dkij} from the Abel--Jacobi map
$${\mathbb A}J_{Z_{L_0}}\colon H_1( \widetilde{Y}_A^{\ge 2},{\bf Z})\longrightarrow H_3(X,{\bf Z})
$$
associated with a family $Z_{L_0}\subset X\times \widetilde{Y}_A^{\ge 2}$ of curves on $X$ parametrized by $\widetilde{Y}_A^{\ge 2}$.\ Although the family $Z_{L_0}$ does depend on the choice of $L_0$, the map ${\mathbb A}J_{Z_{L_0}}$ does not.
Let $g\in {\mathbb A}ut(X)$ (also considered as an automorphism of $\widetilde{Y}_A^{\ge 2}$).\ By the functoriality properties of the Abel--Jacobi map (\cite[Lemma~3.1]{dkij}), we obtain
$${\mathbb A}J_{Z_{L_0}}\circ g_*={\mathbb A}J_{(\Id_{X}\times g)^*(Z_{L_0})}=
{\mathbb A}J_{( g\times \Id_{\widetilde{Y}_A^{\ge 2}})_*(Z_{g^{-1}(L_0)})}=g_*\circ {\mathbb A}J_{Z_{g^{-1}(L_0)}},
$$
which proves the proposition.\
When $\dim(X)=5$, the proof is similar, except that $Z_{{\bf P}i_0}$ is now a family of surfaces in $X$ that depends on a plane ${\bf P}i_0\subset X$.
\end{proof}
\section{Representations of the group $\mathbb{G}$}\label{sea2}
The group $\mathbb{G}:={\bf P}SL(2,{\bf F}_{11})$ is the only simple group of order $660=2^2\cdot 3\cdot 5\cdot 11$.\
It can be generated by the classes
$$ a=\begin{pmatrix}
5 &0 \\
0 & 9
\end{pmatrix},\quad
b=\begin{pmatrix}
3 &5 \\
-5 & 3
\end{pmatrix},\quad
c=\begin{pmatrix}
1 & 1 \\
0 & 1
\end{pmatrix},
$$
and $a^5=-b^6=c^{11}=I_2$, the identity matrix.
The group $\mathbb{G}$ has 8 irreducible ${\bf C}$-representations, of dimensions $1$, $5$, $5$, $10$, $10$, $11$, $12$, and~$12$.\ Here is a character table for four of these
irreducible representations.
\begin{table}[h]
\renewcommand\arraystretch{1.5}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
Conjugation class&$[I_2]$&$[c]$&$[c^2]$&$[a]=[a^4]$&$[a^2]=[a^3]$&$[b]=[b^5]$&$[b^2]=[b^4]$&$[b^3]$
\\
Cardinality&$1$&$60$&$60$&$132$&$132$&$110$&$110$&$55$
\\
Order&$1$&$11$&$11$&$5$&$5$&$6$&$3$&$2$
\\
\hline
$\chi_0$
&$1$&$1 $&$1 $ &$1$ &$1 $ &$1 $&$1 $&$1 $
\\
\hline
$\xi$&$5$&$\lambda$&${\mathbf a}r\lambda$&$0$&$0$&$ 1$&$-1$&$1$
\\
\hline
$\xi^\vee$&$5$&${\mathbf a}r\lambda$&$\lambda$&$0$ &$0$ &$1$&$-1$&$1$
\\
\hline
$\bw2\xi $&$ 10$&$ -1$&$ -1$&$ 0$&$ 0$&$1 $&$ 1$&$ -2$
\\
\hline
\end{tabular}
\captionsetup{justification=centering}
\caption{Partial character table for $\mathbb{G}$}\label{tab1}
\end{table}
As before, we have set (where $\zeta_{11}=e^{\frac{2i\pi}{11}}$)
\begin{equation*}\label{defgamma}
\lambda:=\zeta_{11}^{1^2}+\zeta_{11}^{2^2}+\zeta_{11}^{3^2}+\zeta_{11}^{4^2}+\zeta_{11}^{5^2}=\zeta_{11}+\zeta_{11}^3+\zeta_{11}^4+\zeta_{11}^5+\zeta_{11}^9=\tfrac12(-1+\sqrt{-11}).
\end{equation*}
The representation $\xi$ has a realization in the matrix ring $\mathcal{M}_5({\bf C})$ for which
\begin{equation}\label{real}
\xi(a)= \begin{pmatrix}
0&0&0&0&1\\
1&0&0&0&0\\
0&1&0&0&0\\
0&0&1&0&0\\
0&0&0&1&0
\end{pmatrix},\quad
\xi(c)= \begin{pmatrix}
\zeta_{11}&0&0&0&0\\
0&\zeta_{11}^4&0&0&0\\
0&0&\zeta_{11}^5&0&0\\
0&0&0&\zeta_{11}^9&0\\
0&0&0&0&\zeta_{11}^3
\end{pmatrix}
.
\end{equation}
Every irreducible character of $\mathbb{G}$ has Schur index 1 (\cite[\S~12.2]{ser}, \cite[Theorem~6.1]{fei}).\
In particular, the
representation $\bw2\xi$, having an integral character,
can be defined over ${\bf Q}$ and even, by a theorem of Burnside (\cite{bur}), over ${\bf Z}$, that is, by a morphism $\mathbb{G}\to \mathbb{G}L(10,{\bf Z})$.\ The representation $\bw2\xi$ is self-dual, so there is a $\mathbb{G}$-equivariant isomorphism
\begin{equation}\label{defu}
w\colon \bw2V_\xi\simeqlra \bw2V_\xi^\vee,
\end{equation}
unique up to multiplication by a nonzero scalar, and it is symmetric (\cite[prop.~38]{ser}).\
\section{Decomposition of abelian varieties with automorphisms}\label{b2}
We gather here a few very standard notation and facts about abelian varieties.\ Let $X$ be a complex abelian variety.\
We denote by ${\bf P}ic(X)$ the group of isomorphism classes of line bundles on~$X$, by ${\bf P}ic^0(X)\subset {\bf P}ic(X)$ the subgroup of classes of line bundles that are algebraically equivalent to $0$, and by ${\bf N}S(X)$ the N\'eron--Severi group ${\bf P}ic(X)/{\bf P}ic^0(X)$, a free abelian group of finite rank.\ The group ${\bf P}ic^0(X)$ has a canonical structure of an abelian variety; it is called the dual abelian variety.\
Any endomorphism $u$ of $X$ induces an endomorphism $\widehat u$ of ${\bf P}ic^0(X)$.
Given the class $\theta\in {\bf N}S(X)$ of a line bundle $L$ on $X$, we let $\varphi_{\theta}$ be the morphism
$$
\begin{aligned}
X&\longrightarrow {\bf P}ic^0(X) \\
x& \longmapsto \tau_x^*L\otimesimes L^{-1}
\end{aligned}
$$
of abelian varieties, where $\tau_x$ is the translation by $x$ (it is independent of the choice of the representative $L$ of $\theta$).\ When $\theta$ is a polarization, that is, when $L$ is ample, $\varphi_{\theta}$ is an isogeny.\
We say that $\theta$ is a principal polarization when $\varphi_\theta$ is an isomorphism.\ If $n:=\dim(X)$, this is equivalent to saying that the self-intersection number $\theta^n$ is $n!$.\ The associated {\em Rosati involution} on $\End(X)$ is then defined by
$u\mapsto u':=\varphi_{\theta}^{-1}\circ \widehat u \circ \varphi_{\theta}$.\ The map
$$
\begin{aligned}
\iota_{\theta}\colon {\bf N}S(X) &\ensuremath{\lhook\joinrel\relbar\joinrel\rightarrow} \End(X) \\
\theta'& \longmapsto \varphi_{\theta}^{-1}\circ \varphi_{\theta'}
\end{aligned}
$$
is an injective morphism of free abelian groups
whose image is the group
$\End^s(X)$
of symmetric elements for the Rosati involution (\cite[Theorem~5.2.4]{bl}).\ If $u\in \End(X)$, one has $\varphi_{u^*\theta'}=\widehat u\circ \varphi_{\theta'}\circ u$ hence
\begin{equation}\label{for}
\iota_{\theta}(u^*\theta')=\varphi_{\theta}^{-1}\circ \varphi_{u^*\theta'}
=\varphi_{\theta}^{-1}\circ \widehat u\circ \varphi_{\theta'}\circ u=u'\circ \varphi_{\theta}^{-1} \circ \varphi_{\theta'}\circ u=
u'\circ \iota_{\theta}(\theta')\circ u.
\end{equation}
Set ${\bf N}S_{\bf Q}(X)={\bf N}S(X)\otimesimes {\bf Q}$ and $\End_{\bf Q}(X)=\End(X)\otimesimes {\bf Q}$ (both are finite-dimensional ${\bf Q}$-vector spaces).\ If the polarization $\theta$ is no longer principal, or if $\theta\in {\bf N}S_{\bf Q}(X)$ is only a ${\bf Q}$-polarization, the Rosati involution is still defined on $\End_{\bf Q}(X)$ by the same formula and
we may view $\iota_{\theta}$ as an injective morphism
$$
\begin{aligned}
\iota_{\theta}\colon {\bf N}S(X)_{\bf Q} &\ensuremath{\lhook\joinrel\relbar\joinrel\rightarrow} \End_{\bf Q}(X)
\end{aligned}
$$
with image $\End^s_{\bf Q}(X)$ (\cite[Remark~5.2.5]{bl}).\ Formula \eqref{for} remains valid for $u\in \End(X)$ and $\theta'\in {\bf N}S(X)_{\bf Q}$.
We will also need the so-called {\em analytic} representation
\begin{equation*}
\rho_a\colon \End_{\bf Q}(X) \ensuremath{\lhook\joinrel\relbar\joinrel\rightarrow}\End_{\bf C}(T_{X,0}).
\end{equation*}
It sends an endomorphism of $X$ to its tangent map at $0$.
\subsection{${\bf Q}$-actions on abelian varieties}\label{sectb1}
Let $X$ be an abelian variety\ and let $G$ be a finite group.\ A ${\bf Q}$-action of $G$ on $X$ is a morphism $\rho\colon {\bf Q}[G]\to \End_{\bf Q}(X)$ of ${\bf Q}$-algebras.\ The composition
$$
G\xrightarrow{\ \rho\ } \End_{\bf Q}(X) \xrightarrow{\ \rho_a\ } \End_{\bf C}(T_{X,0})
$$
is called the analytic representation
of $G$.
\begin{prop}\label{propb1}
Let $X$ be an abelian variety\ of dimension~$n$ with a ${\bf Q}$-action of a finite group~$G$.\ Assume that the analytic representation of $G$ is irreducible and defined over~${\bf Q}$.\ Then~$X$ is isogenous to the product of $n$ copies of an elliptic curve.
\end{prop}
\begin{proof}
This follows from~\cite[(3.1)--(3.4)]{es} (see also \cite[Section~1]{kr} and \cite[Proposition~13.6.2]{bl}).\ This reference assumes that we have a bona fide action of $G$ on $X$ but only uses the induced morphism ${\bf Q}[G]\to \End_{\bf Q}(X)$ of ${\bf Q}$-algebras.
\end{proof}
In the situation of Proposition~\ref{propb1}, we prove that any $G$-invariant ${\bf Q}$-polarization is essentially unique.
\begin{lemm}\label{lb3}
Let $X$ be an abelian variety\ with a ${\bf Q}$-action of a finite group $G$ and let~$\theta $ be a $G$-invariant polarization on $X$.\ If the analytic representation of $G$ is irreducible, any $G$-invariant ${\bf Q}$-polarization on $X$ is a rational multiple of $\theta$.
\end{lemm}
\begin{proof}
Let $g\in G$, which we view as an invertible element of $\End_{\bf Q}(X) $.\ Since~$\theta$ is $g$-invariant, identity~\eqref{for} (applied with $\theta'=\theta$ and $u=g$) implies $g'\circ g=\Id_X$.\ Let
$\theta'\in {\bf N}S(X)_{\bf Q}$.\ Applying~\eqref{for} again, we get
$$\iota_{\theta}(g^*\theta')=g'\circ \iota_{\theta}(\theta')\circ g=g^{-1}\circ \iota_{\theta}(\theta')\circ g.$$
If $\theta'$ is $G$-invariant, we obtain $\iota_{\theta}(\theta')=g^{-1}\circ \iota_{\theta}(\theta')\circ g$ for all $g\in G$.\
If the analytic representation of $G$
is irreducible, $\rho_a(\iota_{\theta}(\theta'))$ must, by Schur's lemma, be a multiple of the identity, hence $\theta'$ must be a multiple of $\theta$.
\end{proof}
\subsection{Polarizations on self-products of elliptic curves}
Let $E$ be an elliptic curve, so that $\mathfrak o_E:=\End(E)$ is either ${\bf Z}$ or an order in an imaginary quadratic extension of ${\bf Q}$.\ We have
$$\End(E^n)\simeq \mathcal{M}_{n}(\mathfrak o_E)\quad\textnormal{and}\quad \End_{\bf Q}(E^n)\simeq \mathcal{M}_{n}(\mathfrak o_E\otimes{\bf Q}), $$
and $\rho_a$ is the embedding of these matrix rings into the ring $\mathcal{M}_n({\bf C})$ induced by the choice of an embedding $\mathfrak o_E\hookrightarrow {\bf C}$.
Polarizations on $E^n$ were studied in particular by Lange in~\cite{lan}.\ We denote by $\theta_0$ the product principal polarization on $E^n$.
\begin{prop}\label{propb3}
Let $E$ be an elliptic curve.\
\begin{itemize}
\item The Rosati involution defined by $\theta_0$ on $\End(E^n)$ corresponds to the involution $M\mapsto \overline M^T$ on $\mathcal{M}_{n}(\mathfrak o_E)$.
\item Via the embedding $\iota_{\theta_0}$, polarizations $\theta$ on $E^n$ correspond to positive definite Hermitian matrices $M_\theta\in\mathcal{M}_{n}(\mathfrak o_E)$ and the degree of the polarization $\theta$ is $ \det(M_\theta)$.
\item The group of automorphisms ${\mathbb A}ut(E^n,\theta)$ is the unitary group
$${\bf U}(n,M_\theta):= \{M\in \mathcal{M}_{n}(\mathfrak o_E)\mid \overline M^T M_\theta\, M= M_\theta\}.$$
\end{itemize}
\end{prop}
\begin{proof}
If we write $E={\bf C}/({\bf Z}\oplus \tau{\bf Z})$, the period matrix for $E^n$ is $\begin{pmatrix}I_n&\tau I_n\end{pmatrix}$.\ The first item then follows from \cite[Lemma~2.3]{lan} and elements of $ {\bf N}S(E^n)$ correspond to Hermitian matrices.\ By \cite[Theorem~5.2.4]{bl}, polarizations correspond to positive definite Hermitian matrices and the degree of the polarization is the determinant of the matrix.\ More precisely, one has (\cite[Proposition~5.2.3]{bl})
$$\det(T I_n-M_\theta)=\sum_{j=0}^n (-1)^{n-j}\frac{\theta_0^j\cdot \theta^{n-j}}{j!(n-j)!}\,T^j
.$$
The last item follows from~\eqref{for}.
\end{proof}
\begin{rema}\label{remb4}
Let $G$ be a finite group with a ${\bf Q}$-representation $\rho\colon {\bf Q}[G]\to \mathcal{M}_{n}({\bf Q})$.\ For any elliptic curve $E$, this defines a ${\bf Q}$-action of $G$ on $E^n$.\
It follows from the proposition that
any positive definite symmetric matrix $M_\theta\in\mathcal{M}_{n}({\bf Q})$ such that, for all $g\in G$,
$$\rho(g)^T M_\theta\, \rho(g)= M_\theta$$
defines a $G$-invariant ${\bf Q}$-polarization on $E^n$.\
Such a matrix always exists: take for example $M_\theta:=\sum_{g\in G} \rho(g)^T \rho(g)$ (it corresponds to the~${\bf Q}$-polarization $ \sum_{g\in G} g^*\theta_0$).\
The analytic representation is $\rho_{\bf C}\colon {\bf C}[G]\to \mathcal{M}_{n}({\bf C})$.\ If it is irreducible, every $G$-invariant ${\bf Q}$-polarization on $E^{ n}$ is, by Lemma~\ref{lb3}, a rational multiple of $\theta$.\
\end{rema}
We end this section with the construction of an explicit abelian variety\ of dimension~$10$ with a $\mathbb{G}$-action, such that the associated analytic representation is the irreducible representation $\bw2\xi$, together with a $\mathbb{G}$-invariant {\em principal} polarization.\
Set $ \lambda:=\tfrac12(-1+\sqrt{-11})$ and consider the elliptic curve $E_\lambda:={\bf C}/{\bf Z}[\lambda]$, which has complex multiplication by~${\bf Z}[\lambda]$.\
\begin{prop}\label{prop63}
There exists a principal polarization $\theta$ on the abelian variety\ $E_\lambda^{10}$ and a faithful action $\mathbb{G}\hookrightarrow{\mathbb A}ut(E_\lambda^{10},\theta )$ such that the associated analytic representation is the irreducible representation $\bw2\xi$ of $\mathbb{G}$.
\end{prop}
\begin{proof}
By \cite[Table~1]{sch}, there is a positive definite unimodular ${\bf Z}[\lambda]$-sesquilinear Hermitian form~$H'$ on~${\bf Z}[\lambda]^5$ with an automorphism of order $11$.\ Its Gram matrix in the canonical ${\bf Z}[\lambda]$-basis $(e_1,\dots,e_5)$ of~${\bf Z}[\lambda]^5$ is
$$
\begin{pmatrix}
3 & 1-\bar\lambda &-\lambda&1&-\bar\lambda \\
1-\lambda & 3 & -1& - \lambda&1 \\
-\bar\lambda & -1 & 3&\lambda&-1+\lambda \\
1 & -\bar\lambda& \bar\lambda&3&1-\bar\lambda \\
- \lambda & 1 & -1+\bar\lambda& 1-\lambda&3
\end{pmatrix}
$$
and its unitary group has order $2^{3}\cdot 3\cdot 5\cdot 11=1\,320 $ (\cite{sch}).
By Proposition~\ref{propb3}, this form defines a principal polarization $\theta'$ on the abelian variety~$E_\lambda^5$ and the group ${\mathbb A}ut(E_\lambda^5,\theta')$ has order $1\,320 $; in particular, it contains an element of order~$11$.\ It follows from \cite{bb} that the group ${\mathbb A}ut(E_\lambda^5,\theta')$ is isomorphic to $\mathbb{G}\times\{\pm 1\} $ and the
faithful representation $\mathbb{G}\hookrightarrow{\mathbb A}ut(E_\lambda^5,\theta')\hookrightarrow {\bf U}(5,H')$ given by Proposition~\ref{propb3} is $\xi$.\footnote{The principally polarized abelian fivefold $(E_\lambda^5,\theta')$ was studied in \cite{adl,adls,gon,rou}: it is the intermediate Jacobian of the Klein cubic threefold
with equation
$x_1^2x_2+x_2^2x_3+x_3^2x_4+x_4^2x_5 +x_5^2x_1 =0$ in ${\bf P}^4$.}
The Hermitian form $H'$ on ${\bf Z}[\lambda]^5$ induces a positive definite unimodular Hermitian form~$H$ on $\bw2{\bf Z}[\lambda]^5={\bf Z}[\lambda]^{10}$ by the formula
$$
H(x_1\wedge x_2,x_3\wedge x_4):=H'(x_1,x_3)H'(x_2,x_4)-H'(x_1,x_4)H'(x_2,x_3).
$$
The matrix of $H$ (in the basis $(e_{12},e_{13},e_{14},e_{15},e_{23},e_{24},e_{25},e_{34},e_{35},e_{45})$) is
\begin{equation}\label{mat10}
\left(\begin{smallmatrix}
4& 2\lambda&-1 -2\lambda&-1-\lambda&-2+2\lambda &-\lambda &-1-2 \lambda&-2-\lambda &1& -2 \\
2\bar\lambda&6 &-1+2\lambda &-1+2\lambda&6+2\lambda &-2+\lambda &-4+\lambda&\lambda &-\lambda &2+\lambda \\
-1-2\bar\lambda&-1+2\bar\lambda & 8&5+2\lambda&-2-2\lambda & 5+2\lambda &3+2\lambda&1-2\lambda &1&-1-2\lambda \\
-1-\bar\lambda&-1+2\bar\lambda & 5+2\bar\lambda& 6&-1-2 \lambda&4&5+2\lambda&-1-\lambda &-1-\lambda&-1-\lambda \\
-2+2\bar\lambda& 6+2\bar\lambda&-2-2\bar\lambda &-1-2\bar\lambda & 8&2\lambda & -2+3\lambda& 2\lambda& -2-\lambda&3+\lambda \\
-\bar\lambda & -2+\bar\lambda& 5+2\bar\lambda &4 & 2\bar\lambda& 6& 5+2\lambda&0 &-1 &-\lambda \\
-1-2 \bar\lambda&-4+\bar\lambda &3+2\bar\lambda & 5+2\bar\lambda&-2+3\bar\lambda &5+2\bar\lambda & 8&2 &-1+\lambda&-1 -2\lambda \\
-2-\bar\lambda &\bar\lambda &1-2\bar\lambda &-1-\bar\lambda & 2\bar\lambda &0 &2 & 6&2+2 \lambda & -2\lambda \\
1&-\bar\lambda &1 &-1-\bar\lambda & -2-\bar\lambda & -1&-1+\bar\lambda &2+2\bar\lambda &4 & -2 \\
-2&2+\bar\lambda&-1-2\bar\lambda&-1-\bar\lambda&3+\bar\lambda &-\bar\lambda &-1 -2\bar\lambda & -2\bar\lambda &-2 & 4
\end{smallmatrix}\right).
\end{equation}
By Proposition~\ref{propb3} again, the form $H$ defines a principal polarization $\theta$ on the abelian variety~$E_\lambda^{10}$, the group ${\mathbb A}ut(E_\lambda^{10},\theta)$ contains $\mathbb{G} $, and the corresponding analytic representation is~$\bw2\xi$.\
\end{proof}
The $\mathbb{G}$-action on~$E_\lambda^{10}$ in the proposition is not the $\mathbb{G}$-action described in Remark~\ref{remb4} (otherwise, since $\mathbb{G}$-invariant polarizations are proportional, the matrix~\eqref{mat10} would, by Lemma~\ref{lb3}, have rational coefficients): these actions are only conjugate by a ${\bf Q}$-automorphism of $E_\lambda^{10}$.
\end{document} |
\begin{document}
\def\spacingset#1{\renewcommand{\baselinestretch}
{#1}\small\normalsize} \spacingset{1}
\if00
{
\title{\bf Flexible Modeling of Nonstationary Extremal Dependence Using Spatially-Fused LASSO and Ridge Penalties}
\author{Xuanjie Shao \\
Statistics Program, Computer, Electrical and Mathematical Sciences and\\
Engineering (CEMSE) Division, King Abdullah University of Science \\ and Technology (KAUST), Thuwal 23955-6900, Saudi Arabia\\
and \\
Arnab Hazra \\
Department of Mathematics and Statistics, \\Indian Institute of Technology Kanpur, Kanpur 208016, India\\
and \\
Jordan Richards \\
Statistics Program, Computer, Electrical and Mathematical Sciences and \\ Engineering (CEMSE) Division, King Abdullah University of Science \\ and Technology (KAUST), Thuwal 23955-6900, Saudi Arabia\\
and \\
Rapha\"el Huser\thanks{
This publication is based upon work supported by the King Abdullah University of Science and Technology (KAUST) Office of Sponsored Research (OSR) under Award No. OSR-CRG2020-4394.}\hspace{.2cm}\\
Statistics Program, Computer, Electrical and Mathematical Sciences and \\ Engineering (CEMSE) Division, King Abdullah University of Science \\ and Technology (KAUST), Thuwal 23955-6900, Saudi Arabia}
\date{}
\maketitle
} \fi
\if10
{
\begin{center}
{\LARGE\bf Flexible Modeling of Nonstationary Extremal Dependence Using Spatially-Fused LASSO and Ridge Penalties}
\end{center}
} \fi
\begin{abstract}
Statistical modeling of a nonstationary spatial extremal dependence structure is challenging. Parametric max-stable processes (MSPs) are common choices for modeling spatially-indexed block maxima, where an assumption of stationarity is usual to make inference feasible. However, this assumption is unrealistic for data observed over a large or complex domain. We develop a computationally-efficient method for estimating extremal dependence using a globally nonstationary but locally-stationary MSP construction, with the spatial domain divided into a fine grid of subregions, each with its own dependence parameters. We use LASSO ($L_1$) or ridge ($L_2$) penalties to obtain spatially-smooth parameter estimates. We then develop a novel data-driven algorithm to merge homogeneous neighboring subregions. The algorithm facilitates model parsimony and interpretability. To make our model suitable for high-dimensional data, we exploit a pairwise likelihood to perform inference and discuss its computational and statistical efficiency. We apply our proposed method to model monthly maximum temperature data at over 1400 sites in Nepal and the surrounding Himalayan and sub-Himalayan regions; we show significant improvements in model fit compared to a stationary model. Furthermore, we demonstrate that the estimated merged partition is interpretable from a geographic perspective and leads to better model diagnostics by adequately reducing the number of parameters.
\end{abstract}
\noindent
{\it Keywords:} domain partitioning, max-stable process, nonstationary extremal dependence, regularization, spatial extremes.
\spacingset{1.5}
\section{Introduction}
Over the past decades, various parametric max-stable processes (MSPs) have been proposed \citep[see, e.g.,][]{brown1977extreme, smith1990max, schlather2002models, reich2012hierarchical, opitz2013extremal}, among which the extremal-$t$ and Brown--Resnick classes are considered to be the most flexible. Most classical applications of MSPs assume stationarity and isotropy for simplicity and computational feasibility. Such a simplification may be unrealistic when we obtain the data over a large or complex geographical domain. Misspecification of the extremal dependence structure may cause issues for model fitting and estimation of spatial risk measures (e.g., quantiles of some spatially aggregated quantity); see \cite{huser2016non}. Therefore, building flexible models that can capture nonstationary extremal dependence is crucial.
To capture nonstationary extremal dependence, {\color{blue} \cite{blanchet2011spatial}} suggest splitting the study domain into distinct subregions and fitting separate stationary extremal dependence models within each subregion, yielding a process that is inconsistent at the subregion boundaries. Locally-stationary processes are also used in the geostatistical literature by \cite{fuentes2001high, fuentes2002interpolation} and \cite{muyskens2022partition}. Based on the Bayesian information criterion or likelihood ratio test, these authors also propose a subregion-merging procedure to define the boundaries of the local processes. Such models can parsimoniously capture global nonstationarity. Alternatively, the spatial deformation approach, advocated by \cite{sampson1992nonparametric} in the geostatistical literature and by \cite{richards2021spatial} in the spatial extremes literature, maps the original spatial locations onto a latent space, where an assumption of stationarity and isotropy is reasonable. However, the estimation and interpretation of this deformed space can be challenging. A simpler approach is to define the latent space based on covariates, such as climate variables, rather than estimating it \citep{cooley2007bayesian}; this method strongly relies on the choice of covariates for the latent space. Another way of modeling nonstationary extremal dependence is to define a stationary model on an expanded space through multidimensional scaling \citep{chevalier2021modeling}, but computations and interpretation may be tricky.
In the classical spatial statistics setting, \cite{parker2016fused} adopt a local stationarity perspective, and propose a computationally-efficient method for modeling global nonstationary spatial dependence using Gaussian processes (GPs) by first partitioning the domain of interest into a fine grid of subregions and assigning to each its own set of covariance parameters, then combined into a global model through the nonstationary covariance function proposed by \cite{paciorek2006spatial}. Their method fixes the subregion partition and fits an overdetermined model to data, with LASSO regularization utilized to reduce the number of parameters. These penalties are also used by {\color{blue} \cite{sass2021flexible}} in the extremes context to regularize nonstationary marginal parameters and return levels. For extremal-$t$ MSPs, \cite{huser2016non} extend the correlation function proposed by \cite{paciorek2006spatial} to incorporate covariates and model nonstationary spatial extremal dependence, but their approach is fairly rigid and relies on strong assumptions about the effect of specific covariates on the extremal dependence structure. By contrast, our proposed approach merges ideas from classical geostatistics and extreme value theory by developing a computationally-efficient model that can flexibly capture global nonstationarity in the extremal dependence with locally stationary subdomains. Specifically, our proposed methodology relies on MSPs and domain partitioning, akin to {\color{blue} \cite{parker2016fused}}, and then the efficient merging of subregions with similar parameter values in a data-driven way. While we constrain our focus to the Brown--Resnick model in this work, extensions to other MSPs are straightforward. 
Our proposed method also bears similarities with the local likelihood approach advocated by \citet{castro2020local} for modeling threshold exceedances, although we here focus on modeling maxima through max-stable processes instead and, unlike \citet{castro2020local}, our approach takes advantage of the whole dataset at once for estimating the nonstationary spatial dependence structure.
The full-likelihood for MSPs is intractable when the dimension of data exceeds $D = 13$ \citep{castruccio2016high, huser2019full}, and the analytical expression of the multivariate density is only available for a small collection of parametric models. Pairwise and triplewise likelihoods are commonly used to perform inference after being carefully studied by \cite{padoan2010likelihood} and \cite{huser2013composite}, with the latter concluding that triplewise likelihood inference has only moderate improvements over the pairwise variant. Similarly, \cite{huser2022vecchia} adopt the Vecchia likelihood approximation technique \citep{vecchia1988estimation, stein2004approximating}, originally proposed in the classical geostatistical literature, in the context of spatial extremes analysis with MSPs. Alternatively, several recent works \citep{gerber2021fast, lenzi2021neural, sainsbury2022fast} focus on estimating the spatial dependence structures using artificial neural networks in the context of both classical geostatistics and spatial extremes. However, both the Vecchia method and neural estimation are impractical in our context. The Vecchia method for spatial extremes can indeed still be relatively computationally intensive in high dimensions when using component likelihoods beyond the pairwise case, and a faster estimation technique is necessary for our iterative merging procedure, while neural estimation is also infeasible due to the large number of parameters in our model; we would need to train many neural networks with different architectures sequentially due to the nature of the merging algorithm, which would be inefficient. Hence, we prefer to use a pairwise likelihood for inference, which is both simple and quite fast. \cite{huser2016non} suggest a careful choice of a small fraction of observation pairs for achieving computational and statistical efficiency, and we investigate this thoroughly.
The paper is organized as follows. In Section \ref{maxstable}, we provide a brief summary of MSPs along with details of the pairwise likelihood approach to inference.
Besides the nonstationary Brown--Resnick model, we discuss the computational and statistical efficiency of our pairwise likelihood approach, along with our approach to merge subregions without expert knowledge in Section~\ref{inference}. We illustrate the efficacy of our approach with a simulation study in Section~\ref{simulation} and apply our proposed methodology to Nepalese temperature data in Section~\ref{application}. Section~\ref{discussion} concludes with a discussion of avenues for future research.
\section{Max-Stable Processes} \label{maxstable}
\subsection{Construction} \label{maxstableDef}
Suppose that $\{X_i(\bm{s}) : i=1,\ldots,m\}$ are independent and identically distributed (i.i.d.) random processes with continuous sample paths on $\mathcal{S}\subset\mathbb{R}^2$, and that there exist sequences of functions $\alpha_{m}(\bm{s})>0$ and $\beta_{m}(\bm{s})$ such that the renormalized process of pointwise maxima $\alpha_{m}(\bm{s})^{-1}[\max\{X_1(\bm{s}), \ldots, X_m(\bm{s})\} - \beta_{m}(\bm{s})]$ converges weakly, as $m\rightarrow\infty$, to a process $\{Z(\bm{s}), \bm{s}\in\mathcal{S}\}$ with non-degenerate margins. Then $Z(\bm{s})$ is a max-stable process, and its margins follow the generalized extreme value distribution, denoted by $\operatorname{GEV}\{\mu(\bm{s}), \varsigma(\bm{s}), \xi(\bm{s})\}$, with distribution function
\begin{equation}
G_{\bm{s}}(z) = \exp\left( -\left[ 1+\xi(\bm{s})\left\{\dfrac{z-\mu(\bm{s})}{\varsigma(\bm{s})}\right\} \right]^{-1/\xi(\bm{s})}_+ \right),
\label{eq:GEV}
\end{equation}
defined on $\{z \in \mathbb{R}:1+\xi(\bm{s})\{z-\mu(\bm{s})\}/\varsigma(\bm{s})\geq 0\}$, $a_+ = \max(0, a)$, and where $\mu(\bm{s})\in\mathbb{R}$, $\varsigma(\bm{s})>0$, and $\xi(\bm{s})\in\mathbb{R}$ are site-dependent location, scale, and shape parameters, respectively. Consider the standardized processes $Y_i(\bm{s}) = [1-F_{\bm{s}}\{X_i(\bm{s})\}]^{-1}$, $i = 1,\ldots,m$, with $F_{\bm{s}}(\cdot)$ being the marginal distribution function of $X_i(\bm{s})$. The limiting process of renormalized maxima $\max\{Y_1(\bm{s}), \ldots, Y_m(\bm{s})\}/m$ is a MSP with unit Fr\'echet margins, i.e., $\operatorname{GEV}(1,1,1)$. Following \cite{de1984spectral} and \cite{de2007extreme}, under mild conditions any MSP $\{Z(\bm{s}): \bm{s}\in \mathcal{S}\}$ with unit Fr\'echet margins can be expressed as
\begin{equation}
Z(\bm{s}) = \sup_{i\geq 1} W_i(\bm{s})/P_i
\label{maxstab},
\end{equation}
where for all $i \in \mathbb{N}$, $P_i$ are points of a Poisson process on $(0, \infty)$ with unit rate intensity, and $\{W_i(\bm{s}): \bm{s}\in \mathcal{S}\}$ are i.i.d. copies of a non-negative stochastic process satisfying $\mathbb{E}\{W(\bm{s})\} = 1$. The $D$-dimensional joint distribution of $Z$ at the sites $\bm{s}_1, \ldots, \bm{s}_D \in \mathcal{S}$ is
\begin{equation}
\operatorname{Pr}\left\{Z\left(\bm{s}_{1}\right) \leq z_{1}, \ldots, Z\left(\bm{s}_{D}\right) \leq z_{D}\right\}=\exp \left\{-V\left(z_{1}, \ldots, z_{D}\right)\right\},
\label{maxsta_cdf}
\end{equation}
where the exponent function $V$ is
\begin{equation}
V\left(z_{1}, \ldots, z_{D}\right)=\mathbb{E}\left[\max \left\{\dfrac{W\left(\bm{s}_{1}\right)}{z_{1}}, \ldots, \dfrac{W\left(\bm{s}_{D}\right)}{z_{D}}\right\}\right].
\label{exponent}
\end{equation}
The function $V$ has an explicit form only for specific choices of $W(\bm{s})$ with one such example arising from the construction of the Brown--Resnick model (see Section~\ref{ch2:BR}).
To measure extremal dependence between the process observed at any two sites $\bm{s}_i$ and $\bm{s}_j$, where $i,j \in \{1,\ldots,D\}$, we can use the extremal coefficient $\theta(\bm{s}_i, \bm{s}_j) = V_{(\bm{s}_i, \bm{s}_j)}(1,1)\in [1,2]$ \citep{schlather2003dependence}, where $V_{(\bm{s}_i, \bm{s}_j)}$ denotes the restriction of the $D$-variate function $V$ to the variables at sites $\bm{s}_i$ and $\bm{s}_j$ only. The case $1 \leq \theta(\bm{s}_i, \bm{s}_j) < 2$ corresponds to asymptotic dependence with increasing strength of dependence as $\theta$ decreases, and $\theta(\bm{s}_i, \bm{s}_j) = 2$ corresponds to perfect independence between limiting block maxima $Z(\bm{s}_i)$ and $Z(\bm{s}_j)$. The $F$-madogram {\color{blue} \citep{cooley2006variograms}}, defined by
\begin{equation*}
\nu(\bm{s}_i, \bm{s}_j) = \dfrac{1}{2}\mathbb{E}[ |F\{Z(\bm{s}_i)\} - F\{Z(\bm{s}_j)\}| ],
\end{equation*}
where $F(\cdot)$ here denotes the standard Fr\'echet distribution function, can be used to estimate the extremal coefficient, $\theta(\bm{s}_i, \bm{s}_j) = \frac{1+2\nu(\bm{s}_i, \bm{s}_j)}{1-2\nu(\bm{s}_i, \bm{s}_j)}$; the empirical counterpart $\widetilde{\nu}(\bm{s}_i, \bm{s}_j)$ can be obtained by replacing expectations by sample averages, $F$ by the empirical CDF, and $Z$ by independent replicates of the MSP at those sites. The empirical extremal coefficient can thus naturally be defined as $\widetilde{\theta}(\bm{s}_i, \bm{s}_j) = \frac{1+2\widetilde{\nu}(\bm{s}_i, \bm{s}_j)}{1-2\widetilde{\nu}(\bm{s}_i, \bm{s}_j)}$.
\subsection{Brown--Resnick Model} \label{ch2:BR}
\cite{brown1977extreme} and \cite{kabluchko2009} propose the Brown--Resnick (BR) process, defined by specifying $W(\bm{s}) = \exp\{\epsilon(\bm{s}) - \sigma^2(\bm{s})/2\}$ in (\ref{maxstab}), for a zero-mean Gaussian process $\{\epsilon(\bm{s}): \bm{s}\in\mathcal{S}\}$ with variance $\sigma^2(\bm{s})$ at location $\bm{s} \in \mathcal{S}$ and semivariogram $\gamma(\cdot, \cdot): \mathcal{S} \times \mathcal{S} \rightarrow [0,\infty)$. The bivariate exponent function defined in (\ref{exponent}) is
\begin{equation} \label{eq:V}
V_{(\bm{s}_i, \bm{s}_j)}\left(z_{i}, z_{j}\right)=\dfrac{1}{z_{i}} \Phi\left\{\dfrac{a}{2}-\dfrac{1}{a} \log \left(\dfrac{z_{i}}{z_{j}}\right)\right\}+\dfrac{1}{z_{j}} \Phi\left\{\dfrac{a}{2}-\dfrac{1}{a} \log \left(\dfrac{z_{j}}{z_{i}}\right)\right\},
\end{equation}
with $a = \sqrt{2\gamma(\bm{s}_i, \bm{s}_j)}$ and where $\Phi$ is the standard Normal distribution function. Here we use the notation $\gamma(\bm{s}_i, \bm{s}_j)$ as the semivariogram may be nonstationary, i.e., a function of both $\bm{s}_i$ and $\bm{s}_j$ rather than their distance. The theoretical extremal coefficient for the BR model is
\begin{equation}
\theta\left(\bm{s}_i, \bm{s}_j\right)= V_{(\bm{s}_i, \bm{s}_j)}(1,1) =2 \Phi\left[\dfrac{\left\{2 \gamma(\bm{s}_i, \bm{s}_j)\right\}^{1 / 2}}{2}\right],
\label{eq:extCoef}
\end{equation}
where $\gamma(\bm{s}_i, \bm{s}_j)$ controls the strength and decay of dependence within the process.
\subsection{Composite Likelihood Inference}
Full likelihood inference for max-stable processes observed at a large number of sites is computationally expensive, as evaluation of the $D$-dimensional joint density involves the differentiation of (\ref{maxsta_cdf}) with respect to $z_1, \ldots, z_D$, which leads to a summation indexed by all possible partitions of $\{1,\ldots,D\}$. The number of terms grows at a sup-exponential rate with $D$. More precisely, the full $D$-dimensional density is computationally intractable even for moderate $D$, i.e., $D>12$ \citep{padoan2010likelihood, castruccio2016high,huser2019full}. To tackle this issue, (composite) pairwise likelihood (PL) inference is often used in practice. Denoting $z_{t,i}$ as the $t$-th block maxima recorded at $\bm{s}_i$ for $t=1,\ldots,T$ and $i=1,\ldots,D$, the pairwise log-likelihood $\ell_{PL}$ with $p$-dimensional parameter set $\bm{\psi}$ is
\begin{equation}
\ell_{PL}(\bm{\psi}) = \sum^T_{t=1}\sum_{(i,j)\in\mathcal{O}}\left[ \log\left\{ V_i(z_{t,i}, z_{t,j})V_j(z_{t,i}, z_{t,j}) - V_{ij}(z_{t,i}, z_{t,j})\right\} - V_{(\bm{s}_i, \bm{s}_j)}(z_{t,i}, z_{t,j}) \right],
\label{pairwiseLike}
\end{equation}
where $V_i=\frac{\partial V_{(\bm{s}_i, \bm{s}_j)}}{\partial z_i}$ and $V_{ij}=\frac{\partial^2 V_{(\bm{s}_i, \bm{s}_j)}}{\partial z_i\partial z_j}$, and the function arguments have been omitted for simplicity, and $\mathcal{O} \subset \mathcal{O}_{total}$ with $\mathcal{O}_{total} = \{(i, j): 1\leq i < j\leq D\}$ taken to be all unique pairs of site indices. If $\mathcal{O} = \mathcal{O}_{total}$, all available observation pairs are utilized and this leads to inefficient inference due to the use of redundant information. Therefore, a careful choice of a significantly smaller number of pairs is suggested by \cite{huser2016non} to achieve computational and statistical efficiency. A possibility is to include a small fraction of strongly dependent pairs \citep{bevilacqua2012estimating}, i.e., the closest ones in the stationary and isotropic case, or using more distant pairs of sites (\cite{huser2013statistical}, Chapter~3; \cite{huser2014space}). For a nonstationary model, selecting pairs using the lowest pre-computed empirical extremal coefficients, i.e., the ones estimated with the strongest dependence, leads to bias in parameter estimates \citep{huser2016non}; instead of sampling only the closest observation pairs, we want to include pairs with various distances to fully explore the extremal dependence structure. To this end, we propose two strategies: a simple and a stratified sampling scheme. Our simple strategy is to randomly sample a small fraction of observation pairs with uniform probability from all those available. With stratified sampling, we stratify observation pairs into predefined distance classes, and then draw the same percentage of pairs from each class; this approach ensures that the distribution of distances among sampled pairs is approximately uniform so that the distant pairs have a similar \enquote{weight} in the pairwise likelihood compared to the close pairs. 
Our simulations in Section \ref{SelectionOfPairs} investigate the statistical efficiency of both sampling schemes.
Assuming independence between temporal replicates, the maximum pairwise likelihood estimator, denoted $\hat{\bm{\psi}}$, is generally asymptotically Gaussian with convergence rate $\sqrt{T}$. Specifically, if $\bm{\psi}_0$ is the true parameter vector, then under mild regularity conditions the distribution of $\hat{\bm{\psi}}$ for large $T$ is
$\hat{\bm{\psi}} \overset{\cdot}{\sim} \textrm{Normal}\left(\bm{\psi}_0, \bm{J}(\bm{\psi}_0)^{-1}\bm{K}(\bm{\psi}_0)\bm{J}(\bm{\psi}_0)^{-1} \right)$,
where $\bm{J}(\bm{\psi}) = \mathbb{E}\{-\partial^2\ell_{PL}(\bm{\psi})/(\partial\bm{\psi}\partial\bm{\psi}^T)\} \in\mathbb{R}^{p\times p}$ and $\bm{K}(\bm{\psi}) = \operatorname{Var}\{\partial\ell_{PL}(\bm{\psi})/\partial\bm{\psi}\} \in\mathbb{R}^{p\times p}$ are the (pairwise) expected information matrix and variance of the score function, respectively. Evaluating $\bm{J}(\bm{\psi})$ and $\bm{K}(\bm{\psi})$ at an estimate $\hat{\bm{\psi}}$, we can obtain the approximate asymptotic variance of $\hat{\bm{\psi}}$. Model selection in this composite likelihood framework may be performed using either the composite likelihood information criterion (CLIC) or the composite Bayesian information criterion (CBIC), defined by $\operatorname{CLIC} = -2\ell_{PL}(\hat{\bm{\psi}}) + 2\operatorname{tr}\{\bm{J}(\hat{\bm{\psi}})^{-1}\bm{K}(\hat{\bm{\psi}})\}$, and $\operatorname{CBIC} = -2\ell_{PL}(\hat{\bm{\psi}}) + \log(T)\operatorname{tr}\{\bm{J}(\hat{\bm{\psi}})^{-1}\bm{K}(\hat{\bm{\psi}})\}$, respectively, where a model with a lower CLIC or CBIC value is preferred \citep{ng2014model}.
\section{Methodology} \label{inference}
\subsection{Nonstationary Variogram for Brown--Resnick Model} \label{ch3:NonstaBR}
Similarly to \cite{huser2016non}, we model nonstationarity in extremal dependence by combining the BR max-stable process with a nonstationary exponential variogram constructed using a convolution-based nonstationary covariance model {\color{blue} \citep{paciorek2006spatial}}. We begin by writing the variogram as
\begin{equation}
2\gamma(\bm{s}_i, \bm{s}_j) = \operatorname{Var}\{\epsilon(s_i)-\epsilon(s_j)\} = \operatorname{Var}\{\epsilon(\bm{s}_i)\} + \operatorname{Var}\{\epsilon(\bm{s}_j)\} - 2\operatorname{Cov}\{\epsilon(\bm{s}_i), \epsilon(\bm{s}_j)\}.
\label{eq:relationship}
\end{equation}
We then allow $\operatorname{Cov}(\cdot, \cdot)$ to be the nonstationary covariance function
\begin{equation}
\operatorname{Cov}\{\epsilon(\bm{s}_i), \epsilon(\bm{s}_j)\} = \sigma\left(\bm{s}_{i}\right) \sigma\left(\bm{s}_{j}\right) \rho\left(\bm{s}_{i}, \bm{s}_{j}\right),
\label{nonsta-cov}
\end{equation}
where $\sigma(\bm{s})>0$ is a location-dependent sill parameter and $\rho\left(\bm{s}_{i}, \bm{s}_{j}\right): \mathcal{S}\times\mathcal{S}\rightarrow [0,1]$ is a nonstationary (positive definite) correlation function. Although other choices are possible, we here define $\rho$ in terms of a stationary isotropic exponential correlation function $\rho^*(h) = \exp(-h)$, for $h \geq 0$, as
\begin{equation}
\rho\left(\bm{s}_{i}, \bm{s}_{j}\right)=\left|\bm{\Omega}\left(\bm{s}_{i}\right)\right|^{\frac{1}{4}}\left|\bm{\Omega}\left(\bm{s}_{j}\right)\right|^{\frac{1}{4}}\left|\dfrac{\bm{\Omega}\left(\bm{s}_{i}\right)+\bm{\Omega}\left(\bm{s}_{j}\right)}{2}\right|^{-\frac{1}{2}} \rho^*\left(d_{ij}\right),
\label{nonsta-rho}
\end{equation}
with $\bm{\Omega}\left(\bm{s}\right)$ a location-dependent $2\times 2$ positive definite kernel matrix controlling the range and shape of local dependence at $\bm{s}$, and where $d_{ij}$ is the Mahalanobis distance between sites, with the form
$d_{ij}=\left[\left(\bm{s}_{i}-\bm{s}_{j}\right)^{T}\left\{\dfrac{\bm{\Omega}\left(\bm{s}_{i}\right)+\bm{\Omega}\left(\bm{s}_{j}\right)}{2}\right\}^{-1}\left(\bm{s}_{i}-\bm{s}_{j}\right)\right]^{1/2}$.
Here, we focus on the locally isotropic case, for which $\bm{\Omega}\left(\bm{s}\right) = \phi(\bm{s})\bm{I}_2$, where $\phi(\bm{s}) \geq 0$ is a location-dependent range parameter. More complex non-isotropic cases are possible with general covariance matrices $\bm{\Omega}(\bm{s})$. Substituting (\ref{nonsta-cov}) into (\ref{eq:relationship}) yields the nonstationary exponential variogram
$2\gamma(\bm{s}_i, \bm{s}_j) = \sigma^2\left(\bm{s}_{i}\right) + \sigma^2\left(\bm{s}_{j}\right) - 2\sigma\left(\bm{s}_{i}\right) \sigma\left(\bm{s}_{j}\right) \rho\left(\bm{s}_{i}, \bm{s}_{j}\right)$,
which can be substituted into \eqref{eq:V} for constructing nonstationary BR processes.
\subsection{Domain Partitioning and Parameter Regularization} \label{ParDefPenTun}
To reduce the number of parameters that are needed to represent the dependence structure, we partition the whole domain into a base partition $\mathcal{P}$ which consists of $R$ subregions $\{\mathcal{R}_1, \mathcal{R}_2, \ldots, \mathcal{R}_{R} \}$. This partition should be constructed to compromise parameter estimation accuracy and model flexibility. If the subregions are too small, the estimation of parameters in each subregion can be numerically unstable; if subregions are too large, the model may be incapable of capturing fine-scale nonstationarity in the extremal dependence. Similarly to {\color{blue} \cite{parker2016fused}}, we impose a penalty to regularize the dependence parameters and fit the model by maximizing the penalized pairwise log-likelihood (PPL)
\begin{equation}
\ell_{PPL}(\bm{\psi}) = \ell_{PL}(\bm{\psi}) - \sum_{i=1}^{2} \lambda_{i} \sum_{r_{1} \sim r_{2}}\left|\psi_{i r_{1}}-\psi_{i r_{2}}\right|^{q},
\label{penPair}
\end{equation}
where $\psi_{1 r} = \log(\sigma^2_r)$ and $\psi_{2 r} = \log(\phi_r)$ are the log-transformed sill and range parameters, respectively, for subregion $\mathcal{R}_r$, and the operation $r_1\sim r_2$ refers to neighboring subregions $\mathcal{R}_{r_1},\mathcal{R}_{r_2}$, sharing a common boundary. Equation (\ref{penPair}) with $q = 1$ and $q = 2$ corresponds to LASSO ($L_1$) and ridge ($L_2$) regularization, respectively. Both LASSO and ridge regularization enforce spatial ``smoothness'' in the dependence parameters, but in different ways: while the LASSO regularization penalizes more small discrepancies (and can even enforce parameters in neighboring subregions to be equal), the ridge regularization penalizes more large discrepancies and tends to produce parameter surfaces that are smoother overall.
Instead of fixing the domain partition as in \cite{parker2016fused}, we iteratively update the partition $\mathcal{P}$ and reduce the number of parameters required for our model. Alongside increased parsimony, this approach allows us to better identify stationary subdomains of $\mathcal{S}$, which also leads to easier model interpretation. We achieve this in a data-driven way, without the need for domain knowledge or covariate information. Our strategy is to start with a fine collection of subregions as a base partition and then iteratively update the partition by merging neighboring subregions, according to some quantifiable improvement in model fit.
Starting with the base partition, denoted $\mathcal{P}^0$, a new partition $\mathcal{P}^j$ is generated by merging neighboring subregions at subsequent steps $j = 1, 2, 3, \ldots$, according to some criteria. Different criteria have been proposed, including the BIC \citep{fuentes2001high} and deviance \citep{muyskens2022partition}, but these approaches are not applicable when the likelihood is penalized. Instead, we select a partition using the PPL, computed over a single holdout set $\bm{z}_{\mathrm{hold}}$. We denote this $\ell_{PPL}(\hat{\bm{\psi}}; \bm{z}_{\mathrm{hold}})$, where the estimate $\hat{\bm{\psi}}$ maximizes $\ell_{PPL}$ on a training set $\bm{z}_{\mathrm{train}}$. We can also use the average holdout PPL over data folds, which involves a higher computational burden but a more stable model fitting. We take this approach in our data application. The penalty term in (\ref{penPair}) inherently includes information about the number of parameters in the model, with the total number of neighboring subregion pairs, i.e., the number of model parameters, reducing whenever some subregions merge. By selecting the model with the larger PPL, we balance model flexibility and parsimony.
To choose candidate subregions to merge at step $j$, \cite{muyskens2022partition} suggest randomly sampling neighboring pairs from all available neighboring subregions, and testing a merge; this is performed exhaustively, without replacement, until acceptance. We instead merge all neighboring pairs of subregions with estimated parameter difference $d(\hat{\bm{\psi}}^{j-1}_{r_{1}}, \hat{\bm{\psi}}^{j-1}_{r_{2}})>0$ smaller than some predefined threshold $\eta^{j}>0$, which is dependent on step $j$. This procedure allows us to reduce computational time by merging multiple subregions simultaneously.
At step $j$, we choose a sequence of descending candidate thresholds $\bm{\mathcal{H}}^j = \{\eta^{j}_{1:H}: \eta^{j}_1 > \cdots > \eta^{j}_{H}\}$, where $H = H(j)\in \mathbb{N}_{+}$ that may change with iteration $j$.
We start by generating a candidate for the next partition $\mathcal{P}^j$ by merging all neighboring pairs of subregions with estimated parameter difference smaller than $\eta^j_1$. If the model fitted with this candidate partition increases the holdout PPL, then the candidate partition is accepted for $\mathcal{P}^j$. Otherwise, we create a new candidate similarly using the smaller threshold $\eta^{j}_{h}$ and repeat this procedure, for all $h$. If no improvement is observed using the final threshold in $\bm{\mathcal{H}}^j$, the procedure stops and the final partition $\hat{\mathcal{P}}$ is $\mathcal{P}^{j-1}$.
The number of subregions in the final partition $\hat{\mathcal{P}}$ is of interest. As we take a data-driven approach, the final partition $\hat{\mathcal{P}}$ may be sensitive to the choice of holdout set and observation pairs used for optimization. Hence, we advocate estimation of a number of different $\hat{\mathcal{P}}$ using different holdout sets and observation pairs, and selecting the most reasonable.
We tune $\bm{\lambda}=(\lambda_1,\lambda_2)$ using a similar approach to \cite{parker2016fused}. For the initial partition $\mathcal{P}^0$, we begin with the fully stationary model, i.e., with $\lambda_1 = \lambda_2 = \infty$, and then iteratively fit two models with new $\bm{\lambda}$ values. Each time $\bm{\lambda}$ is updated according to descending grids $\bm{\Lambda}^0_i = \{\infty, \lambda^0_{i,2}, \ldots, \lambda^0_{i,K^0_i}\}$ for $i = 1,2$, until acquiring an optimal selection, denoted $\hat{\bm{\lambda}}^0$. Specifically, two different models are fitted, one with $(\lambda^0_{1,2}, \infty)$ and one with $(\infty, \lambda^0_{2,2})$. If a larger pairwise log-likelihood on the holdout set is achieved by one of them, we choose the model with the largest improvement. We repeat this procedure until there is no improvement. As $\bm{\lambda}$ controls model complexity, it should vary with the partition, and so we tune $\bm{\lambda}$ for each new partition $\mathcal P^j$, yielding the sequence of penalty parameters $\hat{\bm{\lambda}}^j$ for step $j$. We denote by $\hat{\bm{\lambda}}$ the estimated penalty parameter $\bm{\lambda}$ for the final estimated partition $\hat{\mathcal{P}}$. After tuning $\bm{\lambda}$, the PPL is calculated on the holdout sets and used for selecting a new partition. We describe the selection of candidate thresholds $\bm{\mathcal{H}}$ and the descending grid $\bm{\Lambda} = \{\bm{\Lambda}_1, \bm{\Lambda}_2\}$ in Section \ref{Supp:MASDiag} of supplementary materials. The full algorithm for estimating $\hat{\mathcal{P}}$ is detailed in Algorithm~\ref{alg:comb}.
\RestyleAlgo{ruled}
\SetKwComment{Comment}{/* }{ */}
\begin{algorithm}[t!]
\SetKwData{Left}{left}\SetKwData{This}{this}\SetKwData{Up}{up}
\SetKwFunction{LambdaTuning}{LambdaTuning}\SetKwFunction{UpdateGrid}{UpdateGrid}
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\caption{Partition selection and $\bm{\lambda}$ tuning}\label{alg:comb}
\Input{Base partition $\mathcal{P}^0 = \{r_{1}, \ldots, r_{R^{0}}\}$, descending grid $\bm{\Lambda}^0$, a holdout set $\bm{z}_{\mathrm{hold}}$}
\Output{A final partition $\hat{\mathcal{P}}$}
$\hat{\bm{\lambda}}^0, \ell_{PPL}^0(\bm{z}_{\mathrm{hold}}; \hat{\bm{\psi}}^0, \bm{z}_{\mathrm{train}}) \gets$ \LambdaTuning{$\mathcal{P}^0$, $\bm{\Lambda}^0$, $\bm{\lambda}^0$}\;
$j \gets 1$ \Comment*[r]{Iteration $j$.}
\Repeat{$h = H$}{
$\bm{\lambda}^{j} \gets \hat{\bm{\lambda}}^{j-1}$\;
$\bm{\Lambda}^j \gets$ \UpdateGrid{$\hat{\bm{\lambda}}^{j-1}$, $\bm{\lambda}^{j-1}$, $\bm{\Lambda}^{j-1}$}\;
Calculate $\{d(\hat{\bm{\psi}}^{j-1}_{r_{1}}, \hat{\bm{\psi}}^{j-1}_{r_{2}}) : r_{1} \sim r_{2}, r_{1},r_{2}=1,\ldots,R^{j-1}\}$\;
Choose $\bm{\mathcal{H}}^j = \{\eta^{j}_{1:H}: \eta^{j}_1 > \cdots > \eta^{j}_{H}\}$\;
$h \gets 1$ \Comment*[r]{$h$-th element of the threshold candidates.}
\Repeat{$\ell_{PPL}^j(\bm{z}_{\mathrm{hold}}; \hat{\bm{\psi}}^{j}, \bm{z}_{\mathrm{train}}) > \ell_{PPL}^{j-1}(\bm{z}_{\mathrm{hold}}; \hat{\bm{\psi}}^{j-1}, \bm{z}_{\mathrm{train}})$}{
$\eta^{j} \gets \eta^{j}_{h}$\;
$r_{1}^j \gets (r_{1}^{j-1}, r_{2}^{j-1})$ if $d(\hat{\bm{\psi}}^{j-1}_{r_{1}}, \hat{\bm{\psi}}^{j-1}_{r_{2}}) < \eta^{j}$ \& $r_{1}^{j-1}\sim r_{2}^{j-1}$ and acquire a candidate partition $\mathcal{P}^j_{\mathrm{cand}} = \{\mathcal{R}^j_1, \ldots, \mathcal{R}^j_{R^j_{\mathrm{cand}}}\}$\;
$ \hat{\bm{\lambda}}_{\mathrm{cand}}^j, \ell_{PPL,\mathrm{cand}}^j(\bm{z}_{\mathrm{hold}}; \hat{\bm{\psi}}^{j}, \bm{z}_{\mathrm{train}}) \gets$ \LambdaTuning{$\mathcal{P}^j_{\mathrm{cand}}$, $\bm{\Lambda}^j$, $\bm{\lambda}^j$}\;
$h \gets h + 1$\;
}
Update $\mathcal{P}^j \gets \mathcal{P}^j_{\mathrm{cand}}$; $\hat{\bm{\lambda}}^j \gets \hat{\bm{\lambda}}_{\mathrm{cand}}^j$; $\ell_{PPL}^j(\bm{z}_{\mathrm{hold}}; \hat{\bm{\psi}}^{j}, \bm{z}_{\mathrm{train}}) \gets \ell_{PPL,\mathrm{cand}}^j(\bm{z}_{\mathrm{hold}}; \hat{\bm{\psi}}^{j}, \bm{z}_{\mathrm{train}})$\;
$j \gets j + 1$ \;
}
$\hat{\mathcal{P}} = \mathcal{P}^{j-1}$ \Comment*[r]{Final merged partition.}
\end{algorithm}
\section{Simulation Study} \label{simulation}
\subsection{Overview}
In the simulation study, we first investigate the statistical properties of the maximum PL estimator using different proportions of pairs for inference in Section \ref{SelectionOfPairs}. We then study the model performance on simulated data in terms of model fit and true partition recovery in Sections \ref{SimulationFit} and \ref{SimulationRecover}, respectively.
We generate data on a $\mathcal{D} = 40\times 40$ grid of sites in the unit square $[0,1]\times[0,1]$, thus with $D=1600$ spatial locations. We then partition $\mathcal{D}$ into four subdomains, $\mathcal{D}_r, r = 1,\ldots,4$, under two cases: i) a square grid or ii) a triangular grid, see Figure \ref{pic:4truePar}. Each subdomain has its own sill, $\sigma^2$, and range parameter, $\phi$, as defined in (\ref{nonsta-rho}). We consider two different cases for the extremal dependence structure and enforce that only one of the two parameters varies across subregions. The other remains fixed over $\mathcal{D} = \bigcup^4_{r=1}\mathcal{D}_r$. The two cases are:
\begin{itemize}
\item Case 1: $\sigma^2$ varies with values $\{0.5, 2, 2, 5\}$ for the subdomains $\mathcal{D}_1$, $\mathcal{D}_2$, $\mathcal{D}_3$, and $\mathcal{D}_4$, respectively, with $\phi = 2$ fixed over $\mathcal{D}$.
\item Case 2: $\phi$ varies with values $\{0.05, 0.2, 0.2, 0.5\}$ for the subdomains $\mathcal{D}_1$, $\mathcal{D}_2$, $\mathcal{D}_3$, and $\mathcal{D}_4$, respectively, with $\sigma^2 = 0.2$ fixed over $\mathcal{D}$.
\end{itemize}
These two cases can help us to understand and compare the influence of each parameter on the extremal dependence structure. Approximate samples of a BR process are then generated in each case; for a single replicate, we generate $m^*$ independent realizations $\epsilon_i(\bm{s})$, $i = 1, \ldots, m^*$, of a Gaussian process $\{\epsilon(\bm{s}): \bm{s}\in\mathcal{S}\}$ with zero mean and covariance matrix constructed using (\ref{nonsta-cov}). Points $P_1, \ldots, P_{m^*}$ are sampled independently from a unit rate Poisson point process and ordered increasingly. Setting $W_i(\bm{s}) = \exp\{\epsilon_i(\bm{s}) - \sigma^2(\bm{s})/2\}$ for $i = 1,\ldots,m^*$, and then computing $Z(\bm{s}) = \max_{1 \leq i \leq m^*}\{W_i(\bm{s})/P_i\}$ gives an approximate realization of the target BR process. With the accuracy of the approximation increasing with $m^*$, we found that setting $m^* = 10^5$ produced reasonable replicates of a BR process. We repeat this to obtain $T = 100$ independent temporal replicates of $Z(\bm{s})$, and the overall experiment is repeated $N = 100$ times to assess the performance of our estimation approach.
\begin{figure}
\caption{True partitions for simulation study. Left: true partition i), square grid; right: true partition ii), triangular grid.}
\label{pic:4truePar}
\end{figure}
\subsection{Selecting Observation Pairs} \label{SelectionOfPairs}
Suppose $\hat{\bm{\psi}}_{\mathcal{O}} = \arg\max_{{\bm{\psi}}}\ell_{PL}(\bm{\psi}; \mathcal{O})$ is the maximum pairwise likelihood estimator, evaluated using the subset of observation pairs $\mathcal{O} \subset \mathcal{O}_{\mathrm{total}}$. Using different percentages of observation pairs, varying from $0.005\%$ to $100\%$, and nonstationary data, we estimate $\hat{\bm{\psi}}_{\mathcal{O}}$ to study the variability of the estimator around the true parameter values. We select the observation pairs with the methods discussed in Section \ref{inference}, i.e., simple and stratified random sampling, with the latter using ten equal-length distance classes. Nonstationary models are fitted with true partitions i) and ii), and we consider both cases 1 and 2 for parameter specification; results for true partition ii) were very similar and so we omit these for brevity. We quantify the estimation accuracy through the root mean squared error (RMSE). For example, with estimated parameter, $\hat{\sigma}^2_{n,r}$, and its true value, $\sigma^2_{r}$, for the subdomain $\mathcal{D}_r$ and each experiment $n = 1,\ldots,N$ in case 1, we compute $\operatorname{RMSE}(\hat{\bm{\sigma}}_n^2) = \sqrt{\frac{1}{4}\sum^4_{r=1}(\hat{\sigma}^2_{n,r} - \sigma^2_{r})^2}$, and similarly for the range parameter $\phi$.
\begin{figure}
\caption{Boxplots of the RMSE for $\hat{\bm{\psi}}_{\mathcal{O}}$ over $N = 100$ experiments, as a function of the percentage of observation pairs used for fitting, under simple and stratified random sampling. Left: case 1, $\operatorname{RMSE}(\hat{\bm{\sigma}}_n^2)$; right: case 2, $\operatorname{RMSE}(\hat{\bm{\phi}}_n)$.}
\label{pic:percentageOfPairs-nonsta}
\end{figure}
Figure \ref{pic:percentageOfPairs-nonsta} (left and right panels) reports the RMSE boxplots for cases 1 and 2, i.e., $\operatorname{RMSE}(\hat{\bm{\sigma}}_n^2)$ and $\operatorname{RMSE}(\hat{\bm{\phi}}_n)$, respectively. We observe that there is no significant difference in the performance of different sampling strategies if more than $0.05\%$ of observation pairs are used for fitting. However, the stratified sampling scheme tends to slightly outperform the simple one when only $0.01\%$ pairs or less are available, and usually provides less biased estimates in both cases 1 and 2, especially for the estimation of the range parameter; hence we advocate using the stratified sampling method if only a small fraction of observation pairs are available for inference. In our application, we use only $0.1\%$ of observation pairs for fitting, to balance computational time and statistical efficiency. We use simple random sampling in Section \ref{SimulationFit}, but stratified sampling in our application (see Section \ref{application}).
\subsection{Model Performance} \label{SimulationFit}
We now compare the performance of the three extremal dependence models we have discussed: a fully stationary BR model (model S), a nonstationary BR model using the base partition (Model~B), and a nonstationary BR model using the fully-merged partition (Model~M); in the case of Models B and M, we consider both $L_1$ and $L_2$ regularizations to optimally ``smooth'' parameter estimates. We simulate data with both extremal dependence cases 1 and 2 on both true partitions i) and ii).
To study the influence of base partition $\mathcal{P}^0$ misspecification (with respect to the true partition) on the final model fit, we consider two scenarios: one where the base partition $\mathcal{P}^0$ is well-specified and it is feasible to obtain $\hat{\mathcal{P}}=\mathcal{P}_0$, for true partition $\mathcal{P}_0$, and one where $\mathcal{P}^0$ is misspecified, i.e., $\hat{\mathcal{P}}=\mathcal{P}_0$ is infeasible. In both scenarios, the base partition is constructed from 100 subregions. For true partition i) (square grid), the well-specified $\mathcal{P}^0$ is constructed by considering a regular subgrid, whereas the misspecified $\mathcal{P}^0$ is generated with a $k$-means clustering algorithm using spatial coordinates as inputs, and where some subregions overlap the true boundaries. For true partition ii), we only consider a misspecified base partition constructed using a similar $k$-means algorithm.
Among the $D = 1600$ observation locations, $15\%$ are randomly selected as a validation set ($D_{\mathrm{valid}} = 240$). For the nonstationary models, another $15\%$ locations are randomly selected as a holdout set ($D_{\mathrm{hold}} = 240$) whilst ensuring that at least one location is within each subregion in the base partition.
To evaluate parameter estimation, we adopt the integrated RMSE, e.g., for the sill estimates $\hat{\bm{\sigma}}^2$, we have $\operatorname{IntRMSE}(\hat{\bm{\sigma}}^2) = \sqrt{\frac{1}{N}\sum^N_{n=1}\frac{1}{D}\sum^D_{j=1}\left(\hat{\sigma}^2_n(\bm{s}_j) - \sigma^2(\bm{s}_j)\right)^2},$ where $\sigma^2(\bm{s}_j)$ and $\hat{\sigma}_n^2(\bm{s}_j)$ denote the true and the fitted value, respectively, for experiment $n$ at site $\bm{s}_j$. To assess full model fits, we compute the PL on the validation set, the PPL on the holdout set, and the CLIC and CBIC on the training set for each model. The results of our studies regarding model fit are given in Tables \ref{tab:extVarySill} and Tables \ref{tab:extVaryRange} and \ref{tab:triangle} in supplementary materials.
\begin{table}[t!]
\centering
\caption{Models' performances for case 1 (varying $\sigma^2$) with true partition i). The relative factors shown in subscript parentheses highlight the ratios between the nonstationary and stationary models. In the first four rows, the value of the best model has been subtracted. The best value in each row is in bold.}
\resizebox{\columnwidth}{!}{
\begin{tabular}{l|l|ll|ll|ll|ll}
\hline
& Sta. & \multicolumn{8}{c}{Nonstationary}\\
\cline{3-10}
& & \multicolumn{4}{c|}{Model~B} & \multicolumn{4}{c}{Model~M}\\
\cline{3-10}
& & \multicolumn{2}{c|}{well-specified} & \multicolumn{2}{c|}{misspecified} & \multicolumn{2}{c|}{well-specified} & \multicolumn{2}{c}{misspecified}\\
\cline{3-10}
& & $L_1$ & $L_2$ & $L_1$ & $L_2$ & $L_1$ & $L_2$ & $L_1$ & $L_2$\\
\hline
PL Diff. & $-80769$ & $\textbf{0}$ & $-73$ & $-9878$ & $-9951$ & $-522$ & $-331$ & $-11664$ & $-11137$ \\
PPL Diff. & -- & $-27782$ & $-27017$ & $-5589$ & $-1878$ & $-24601$ & $-24458$ & $-909$ & $\textbf{0}$ \\
CLIC Diff. & $7971778$ & $371200$ & $30100$ & $987205$ & $1883071$ & $46107$ & $\textbf{0}$ & $733784$ & $686710$ \\
CBIC Diff. & $7912086$ & $896595$ & $103570$ & $1480716$ & $3538099$ & $91714$ & $\textbf{0}$ & $750201$ & $718914$ \\
IntRMSE $\hat{\bm{\sigma}}^2$ & $1.65$ & $0.54_{(0.33)}$ & $0.54_{(0.33)}$ & $0.73_{(0.44)}$ & $0.73_{(0.44)}$ & $0.39_{(0.23)}$ & $\bm{0.36_{(0.22)}}$ & $0.71_{(0.43)}$ & $0.71_{(0.43)}$ \\
IntRMSE $\hat{\bm{\phi}}$ & $0.04$ & $0.09_{(2.26)}$ & $0.09_{(2.27)}$ & $0.09_{(2.32)}$ & $0.09_{(2.33)}$ & $0.05_{(1.16)}$ & $\bm{0.04_{(0.86)}}$ & $0.06_{(1.57)}$ & $0.06_{(1.54)}$ \\
\hline
\end{tabular}}
\label{tab:extVarySill}
\end{table}
When only the sill varies (Table \ref{tab:extVarySill}), all nonstationary models exhibit larger PL and PPL values, and lower CLIC and CBIC values, than the stationary model, which indicates that the nonstationary models provide better fits as expected. Model~M outperforms Model~B as we observe larger PPL, and lower CLIC and CBIC. The nonstationary models provide a more accurate estimation of the spatially-varying $\sigma^2$ (with both penalties), observable through the reduced integrated RMSE. However, for $\phi$ estimation, some estimation accuracy is sacrificed when using nonstationary models (approximately $2.3$ times integrated RMSE for Model~B, and only about 1--1.5 times for Model~M, compared to the stationary model), but, from a practical standpoint, these errors are not large. It is noteworthy that nonstationary models with a misspecified base partition outperform the stationary model, which validates the efficacy of nonstationary models. We further note that Model~M provides lower integrated RMSE than Model~B for both the sill and range parameters, which indicates that merging subregions improves parameter estimation. In general, $L_2$ regularization performs better in the well-specified and misspecified cases for Model~M. As expected, nonstationary models with a well-specified partition have better performance.
Tables \ref{tab:extVaryRange} and \ref{tab:triangle} in the supplementary materials show the results for case 2 of the extremal dependence structure with true partition i) and both dependence cases with true partition ii), respectively. Similarly to case 1 with true partition i), we again observe that nonstationary models show their advantages in terms of both model fit and estimation of spatially-varying parameters, and Model M further generally outperforms Model~B. We need to sacrifice some accuracy for the fixed parameter estimation when using nonstationary models, but Model~M, compared to Model~B, demonstrates ability to reduce the estimation error. In general, $L_2$ regularization performs better than $L_1$.
\subsection{True Partition Recovery} \label{SimulationRecover}
To evaluate the accuracy of an estimated partition, we use the Rand index (RI). As a measure of the similarity between two data clusters, the RI is defined as the proportion of points that are correctly grouped together based on the true model partition $\mathcal{P}_0$ and the estimated partition $\hat{\mathcal{P}}$, i.e.,
\begin{equation*}
\operatorname{RI} = \frac{O_{ss} + O_{dd}}{\binom{D}{2}},
\label{eq:rand}
\end{equation*}
where $O_{ss}$ ($O_{dd}$) is the number of pairs of sites that are simultaneously in the same (different) subset of both $\mathcal{P}_0$ and $\hat{\mathcal{P}}$.
We also introduce a local Rand index (LRI) to assess the partition accuracy for each site. For each specific site $\bm{s}_j$, for $j = 1,\ldots,D$, the $\operatorname{LRI}(\bm{s}_j)$ is defined as
\begin{equation}
\begin{aligned}
\operatorname{LRI}(\bm{s}_j) = \frac{1}{T}\sum^T_{t=1} \frac{1}{D-1}\sum_{i\neq j} \mathbb{I}( &\bm{s}_i, \bm{s}_j\in \text{ the same subregion in both } \mathcal{P}_0 \text{ and } \hat{\mathcal{P}} \text{ or } \\
& \bm{s}_i, \bm{s}_j\in \text{ different subregions in both } \mathcal{P}_0 \text{ and } \hat{\mathcal{P}}),
\end{aligned}
\end{equation}
where $\mathbb{I}(\cdot)$ denotes the indicator function. Larger values of $\operatorname{LRI}(\bm{s}_j)$ correspond to better classification of site $\bm{s}_j$.
\begin{table}[t!]
\centering
\caption{Rand index values for the well-specified and misspecified variants of the true partition designs in Figure~\ref{pic:4truePar}, with both choices of regularization and dependence regimes. The best value in each row and for each setting (well-specified or misspecified) is in bold.}
\begin{tabular}{l|l|l|l|l|l|l|l}
\hline
\multicolumn{2}{c|}{Base partition} & \multicolumn{3}{c|}{Well-specified} & \multicolumn{3}{c}{Misspecified} \\
\hline
\multicolumn{2}{c|}{} & Model~B & \multicolumn{2}{c|}{Model~M} & Model~B & \multicolumn{2}{c}{Model~M} \\
\cline{4-5} \cline{7-8}
\multicolumn{2}{c|}{} & & $L_1$ & $L_2$ & & $L_1$ & $L_2$ \\
\hline
true partition i) & Case 1 & 0.760 & 0.982 & \textbf{0.998} & 0.759 & 0.852 & \textbf{0.859} \\
(Square) & Case 2 & 0.760 & 0.954 & \textbf{0.964} & 0.759 & 0.832 & \textbf{0.838} \\
\hline
true partition ii) & Case 1 & - & - & - & 0.758 & 0.741 & \textbf{0.769} \\
(Triangular) & Case 2 & - & - & - & 0.758 & 0.787 & \textbf{0.790} \\
\hline
\end{tabular}
\label{tab:RI}
\end{table}
Table \ref{tab:RI} summarizes the performance of models in terms of their recovery of true partitions i) and ii). The results show that Model~M generally outperforms Model~B by providing larger RI values overall, with the $L_2$ regularization consistently performing better. Results in Tables \ref{tab:extVarySill}--\ref{tab:RI} suggest that the $L_2$ regularization is generally preferable.
\begin{figure}
\caption{Average LRI for Model~B (first row) and Model~M (second row) for each site with true partition i) and misspecified base partition (based on $k$-means clustering).}
\label{pic:LRI+}
\end{figure}
Figures \ref{pic:LRI+}, \ref{pic:LRI+well} and \ref{pic:LRIx} (see supplementary materials for the latter two) give the LRI for each site for the three scenarios discussed in Section \ref{SimulationFit}, which illustrate the performance of the merged or base partitions on each pixel in terms of region classification. Model~M with a well-specified base partition generally gives the correct classification of each site as shown from the results in Figure \ref{pic:LRI+well}. When the base partition is misspecified, the merged partitions generally provide smaller LRI for the few sites lying near the boundaries of the true partition and considerably larger LRI for the many sites within the interior of the true subdomains, indicating a better classification of individual sites overall. We note that the LRI of the sites in subdomains $\mathcal{D}_2$ and $\mathcal{D}_3$ in Figure \ref{pic:4truePar} are smaller than those in subdomains $\mathcal{D}_1$ and $\mathcal{D}_4$ for both true partition cases, as the parameter values in the former subdomains are here assumed to be equal and hence they are sometimes merged together by our algorithm. This affects partition estimation using Model~M. However, with accurate parameter estimation, such misspecification does not impact our characterization of the extremal dependence structure, and grouping more locations with similar dependence characteristics together in the same cluster may even improve parameter estimation.
\section{Nepal Temperature Data Analysis} \label{application}
Nepal is a south Asian country covering the Himalayan foothills and the highest peaks of the Himalayan mountains. A strong nonstationary behavior in the dependence of temperature extremes is therefore expected, and to investigate this hypothesis we obtain daily temperature data, generated by version 2 of the NASA Global Land Data Assimilation System {\color{blue} \citep{rodell2004global}}, from 2004 to 2019. To avoid potential edge effects, we include $D = 1419$ observation locations within and surrounding Nepal, see Figure \ref{pic:Nepal_elev+clu}. At these locations, we derive $T = 192$ observations of monthly maxima.
\begin{figure}
\caption{Left: topographic map of the study region; right: elevation subdomains.}
\label{pic:Nepal_elev+clu}
\end{figure}
To model extremal dependence, we must first transform the monthly maxima data onto the unit Fréchet scale. To account for spatial nonstationarity in the margins, we require estimates of location-dependent GEV parameters as in (\ref{eq:GEV}). To accommodate temporal nonstationarity, e.g., seasonality, in the time series, harmonic terms are imposed in the modeling of the location $\mu(\bm{s})$ and scale $\varsigma(\bm{s})$. The shape parameter $\xi(\bm{s})$ is fixed with respect to time for each location. The marginal model for monthly maxima $Y_t(\bm{s}_i)$ at site $\bm{s}_i \in \mathcal{S}$ and month $t$ is
\begin{equation*}
Y_t(\bm{s}_i) \overset{\mathrm{ind}}{\sim} \operatorname{GEV}\{\mu_t(\bm{s}_i), \varsigma_t(\bm{s}_i), \xi(\bm{s}_i)\},
\end{equation*}
for all $i = 1,\ldots,D$ and $t= 1,\ldots,T$, with
\begin{equation} \label{spacetime_mu_sigma}
\begin{aligned}
&\mu_t(\bm{s}_i) = \mu_0(\bm{s}_i) + \mu_1(\bm{s}_i)\sin(2\pi t_{\mathrm{month}}/12) + \mu_2(\bm{s}_i)\cos(2\pi t_{\mathrm{month}}/12), \text{ and} \\
&\log[\varsigma_t(\bm{s}_i)] = \varsigma_0(\bm{s}_i) + \varsigma_1(\bm{s}_i)\sin(2\pi t_{\mathrm{month}}/12) + \varsigma_2(\bm{s}_i)\cos(2\pi t_{\mathrm{month}}/12),
\end{aligned}
\end{equation}
where $t_{\mathrm{month}}$ is the index of the corresponding month of each observation, and $\mu_0(\bm{s})$, $\mu_1(\bm{s})$, and $\mu_2(\bm{s})$ ($\varsigma_0(\bm{s})$, $\varsigma_1(\bm{s})$, and $\varsigma_2(\bm{s})$) are coefficients for covariates of the location (scale) structure. Bayesian inference for spatially-varying marginal parameters may be performed flexibly and quickly using the Max-and-Smooth method, introduced by {\color{blue} \cite{hrafnkelsson2021max}}, {\color{blue} \cite{johannesson2022approximate}}, and {\color{blue} \cite{hazra2021latent}}, where unknown GEV parameters are transformed using specific link functions and then assumed to follow a multivariate Gaussian distribution with fixed and random effects at a latent level. Approximate Bayesian inference is performed using the stochastic partial differential equations approach and MCMC methods (see references for full details). Using the posterior mean of each marginal parameter, we transform the original data onto the unit Fréchet scale. We draw the quantile-quantile (QQ) plot for transformed data and conduct a hypothesis testing based on Kolmogorov--Smirnov distance, whose results show that the marginal fit is satisfactory overall (see Section \ref{Supp:MASDiag} in the supplementary materials for details).
We then fit five BR max-stable models to the standardized data: a stationary model (model S), and our proposed nonstationary model with a base and fully merged partition (Model~B and M), using both $L_1$ and $L_2$ regularization; we compare the performance of all five models. The base partition (Figure \ref{pic:nepal-BaseAndMerged}) consists of 80 subregions and is generated using $k$-means clustering on standardized longitude and latitude coordinates. To evaluate model performance, a validation set ($15\%$) is randomly sampled from all observation sites. To tune the smoothness penalty $\bm{\lambda}$ and merge subregions, we split the remaining locations into 5 holdout folds, ensuring that there is at least one holdout site in each subregion for each fold. The initial descending grid for the $\bm{\lambda}$-tuning procedure is chosen to be $\bm{\Lambda}^0 = (\infty, 2^5, 2^4, 2^3, 2^2, 2^1, 2^0, 2^{-1}, 2^{-2}, 0)$; see Algorithm~\ref{alg:comb} in Section \ref{ParDefPenTun}. We compute the PL on the validation set, the holdout PPL averaged over folds, and the CLIC and CBIC metrics for the remaining data excluding the validation set.
\begin{figure}
\caption{Left: base partition (80 subregions). Estimated merged partitions with $L_1$ (centre) and $L_2$ (right) regularization.}
\label{pic:nepal-BaseAndMerged}
\end{figure}
\begin{table}[t!]
\centering
\caption{Model performance metrics ($\times10^3$). The number of parameters for each model is provided in brackets. The best value in each row is in bold.}
\begin{tabular}{c|c|c|c|c|c}
\hline
\multirow{2}{*}{} & \multirow{2}{*}{Sta.(2)} & \multicolumn{2}{c|}{$L_1$} & \multicolumn{2}{c}{$L_2$} \\
\cline{3-6}
& & Base (160) & Merged (28) & Base (160) & Merged (28) \\
\hline
PL & $-18066$ & $-17975$ & $\textbf{-17960}$ & $-17966$ & $-17963$ \\
PPL & -- & $-10400$ & $-10395$ & $-10394$ & $\textbf{-10391}$ \\
CLIC & $1180692$ & $1171161$ & $1169603$ & $1170135$ & $\textbf{1169505}$ \\
CBIC & $1181132$ & $1173024$ & $\textbf{1170104}$ & $1171403$ & $1170314$ \\
\hline
\end{tabular}
\label{tab:Nepal_performance}
\end{table}
\begin{figure}
\caption{Estimated sill (left block) and range (right block) parameters with base partition (top row) and merged partition (bottom row). Each 2-by-2 block displays Model~B (top) and M (bottom) with $L_1$ (left column) and $L_2$ (right column) regularization.}
\label{pic:nepal-estimates}
\end{figure}
Table \ref{tab:Nepal_performance} shows that all nonstationary models provide a better fit than the stationary one, as we observe improvement in all goodness-of-fit metrics; we further observe that Model~M outperforms Model~B, regardless of the regularization used. For Model~M, we find that $L_2$ regularization performs slightly better, when performing the comparison using PPL or CLIC, but the opposite holds in terms of PL and CBIC. Figure \ref{pic:nepal-BaseAndMerged} shows the final estimated partitions for both regularization schemes, while estimates of the sill and range parameters are given in Figure \ref{pic:nepal-estimates}. The partitions roughly identify the plain and mountainous areas shown in Figure \ref{pic:Nepal_elev+clu}, suggesting that there is significantly different extremal dependence behaviour in these areas. All nonstationary models provide larger sill estimates and smaller range estimates in mountainous regions compared to the plains, suggesting that extremal dependence in these areas decays faster with distance; the converse holds for the plains.
As a further model diagnostic, we compare the theoretical extremal coefficients, defined in (\ref{eq:extCoef}) from the fitted models with their empirical counterparts; empirical estimates $\widetilde{\nu}(\bm{s}_i, \bm{s}_j)$ are obtained using the $F$-madogram (see Section \ref{maxstableDef}). As the resulting empirical extremal coefficient $\widetilde{\theta}(\bm{s}_i, \bm{s}_j)$ does not necessarily lie in the interval $[1,2]$, we truncate estimates outside of this interval. Computation was conducted using the $\texttt{R}$ package \texttt{SpatialExtremes} {\color{blue} \citep{ribatet2011spatialextremes}}. To assess model fits, we partition the domain into 3 subdomains according to low, medium, and high elevation (see Figure \ref{pic:Nepal_elev+clu}), and compute pairwise extremal coefficients for all pairs of sites within each subdomain. Figure~\ref{pic:nepal_extDep_nonsta} in the supplementary materials displays the theoretical extremal coefficients for the different model fits against their empirical counterparts. We further compute the mean absolute difference (MAD) between the theoretical and empirical extremal coefficients for all pairs of sites in the whole domain and in each elevation subdomain, and report the results in Table~\ref{tab:Nepal_extDep}.
From Figure \ref{pic:nepal_extDep_nonsta}, we observe that the stationary model fails completely at identifying the varied extremal dependence behavior within each subdomain, while the nonstationary models, regardless of the partition and penalty used, provide a much better description of different extremal dependence behaviour. While the theoretical extremal coefficients for the nonstationary models may appear to slightly underestimate the extremal dependence (comparing to the empirical ones), it is important to note that this apparent lack of fit may be due to the high variability of the empirical estimates. We here have 192 independent monthly maxima replicates, and hence the empirical extremal coefficients may not be estimated well, especially in weak-dependence settings when $\theta(\bm{s}_i, \bm{s}_j)\approx2$. Another reason might be that the exponential variogram we used has limited flexibility for capturing long-range independence. Model~M better describes nonstationary extremal dependence behaviours overall, especially in low-elevation subdomain, which is confirmed by the MADs (see Table \ref{tab:Nepal_extDep}). Nonstationary models significantly outperform the stationary one in the low-elevation and middle-elevation subdomains in terms of MAD, with Model~M slightly outperforming Model~B. In the middle-elevation subdomain, models S and B are comparable, but Model~M provides the smallest MAD. In the high-elevation subdomain, model S gives a slightly better fit than the nonstationary models, which is reasonable because nonstationary models give more realistic (larger) fitted values with increasing uncertainty in the high-elevation subdomain. However, Model~M again outperforms Model~B. Finally, the total MAD between theoretical and empirical extremal coefficients for the whole domain is smaller for the nonstationary models, with Model~M outperforming Model~B. 
Therefore, we conclude that the nonstationary models provide better estimates of the extremal dependence structure of Nepal temperatures, and our proposed merging procedure can further improve the fit, with $L_2$ regularization generally outperforming $L_1$ regularization.
\begin{table}[t!]
\centering
\caption{Mean absolute differences for pairwise extremal coefficients, with pairs in the whole domain (``Total'') or stratified by elevation subdomain (``Low'', ``Medium'', and ``High'').}
\begin{tabular}{c|c|c|c|c|c}
\hline
\multirow{2}{*}{} & \multirow{2}{*}{Sta.} & \multicolumn{2}{c|}{$L_1$} & \multicolumn{2}{c}{$L_2$} \\
\cline{3-6}
& & Model~B & Model~M & Model~B & Model~M \\
\hline
Total & 0.203 & 0.156 & 0.150 & 0.152 & \textbf{0.149} \\
Low & 0.250 & 0.109 & 0.108 & 0.103 & \textbf{0.095} \\
Medium & 0.248 & 0.169 & 0.164 & 0.168 & \textbf{0.163} \\
High & \textbf{0.119} & 0.253 & 0.213 & 0.245 & 0.216 \\
\hline
\end{tabular}
\label{tab:Nepal_extDep}
\end{table}
\section{Concluding Remarks} \label{discussion}
In this paper, we propose a flexible, yet relatively parsimonious model for nonstationary extremal dependence using the Brown--Resnick max-stable process, and we develop a new computationally-efficient method to simultaneously estimate unknown parameters, identify stationary subregions, and reduce the number of model parameters. More precisely, our model is created by constructing a nonstationary variogram and partitioning the study domain into locally-stationary subregions. We further propose practical strategies for merging subregions, to create a parsimonious model in a data-driven way, without expert knowledge. Inference is performed using a pairwise likelihood to mitigate computational expense, and its statistical efficiency is investigated. In both our simulation study and application, we show that our model provides good fits to data, and better captures nonstationarity in extremal dependence, significantly improving on models previously seen in the literature.
We find in our simulation study that even when the base partition is misspecified, we still observe improved model fits over stationary models, but even better fits are observable with a well-specified base partition. Choosing the base partition for our model can be problematic, as subregions in the base partition may overlap with the boundaries in the true partition (if one exists). To mitigate this, we could divide the domain into finer subregions, but this may lead to unreliable parameter estimation. Another possible approach could be to allow subregions to split at each step as well as merge.
Our nonstationary Brown--Resnick max-stable process model uses an exponential variogram, which is bounded above as the distance between locations $\bm{s}_i$ and $\bm{s}_j$ increases, and hence our model can only capture asymptotic dependence; it cannot capture situations where two sufficiently distant sites are independent. In a relatively small domain of study, asymptotic dependence everywhere may be a reasonable assumption to make, but this may not be the case for large, or topographically complex regions; creating valid unbounded nonstationary variograms would be required to make our methodology more flexible. Moreover, even with an unbounded variogram, the model would not be able to capture forms of asymptotic independence other than perfect independence, as it uses max-stable processes. However, asymptotic independence can be accommodated by adapting our methodology to inverted MSPs \citep{wadsworth2012dependence}, or other types of asymptotic independence models, such as certain types of Gaussian location-scale mixtures \citep{opitz2016modeling, huser2017bridging, hazra2022realistic, zhang2022modeling}, which are more flexible to capture sub-asymptotic extremal dependence \citep{huser2022advances}.
Further work includes an extension to incorporate local anisotropy and adapting our methodology to other max-stable processes, e.g., extremal-$t$; the former can be easily accommodated by parameterizing the matrix $\Omega(\bm{s})$ in (\ref{nonsta-rho}). Our approach could also be extended to other spatial extreme models, such as Pareto processes {\color{blue} \citep{de2018high}} or the spatial conditional extremes model {\color{blue} \citep{WADSWORTH2022100677}}.
\begin{center}
{\large\bf SUPPLEMENTARY MATERIAL}
\end{center}
\begin{description}
\item[PDF Supplement:] This supplement contains further details on the algorithm; additional simulation results; and further results for the data application (marginal and dependence goodness-of-fit diagnostics) (PDF file)
\item[\texttt{R} Code:] This supplement contains the implementation of our proposed method in \texttt{R}, along with the real Nepal temperature dataset, and a small simulation example (zip file)
\end{description}
\end{document} |
\begin{document}
\title{Taut foliations in branched cyclic covers and left-orderable groups}
\begin{center}
\today
\end{center}
\begin{abstract}
We study the left-orderability of the fundamental groups of cyclic branched covers of links which admit co-oriented taut foliations. In particular we do this for cyclic branched covers of fibred knots in integer homology $3$-spheres and cyclic branched covers of closed braids. The latter allows us to complete the proof of the L-space conjecture for closed, connected, orientable, irreducible $3$-manifolds containing a genus $1$ fibred knot. We also prove that the universal abelian cover of a manifold obtained by generic Dehn surgery on a hyperbolic fibred knot in an integer homology $3$-sphere admits a co-oriented taut foliation and has left-orderable fundamental group, even if the surgered manifold does not, and that the same holds for many branched covers of satellite knots with braided patterns. A key fact used in our proofs is that the Euler class of a universal circle representation associated to a co-oriented taut foliation coincides with the Euler class of the foliation's tangent bundle. Though known to experts, no proof of this important result has appeared in the literature. We provide such a proof in the paper.
\end{abstract}
\blfootnote{2010 Mathematics Subject Classification. Primary 57M50, 57R30, 20F60; Secondary 57M25, 57M99, 20F36}
\blfootnote{Key words and phrases. Cyclic branched covers, left-orderable groups, fractional Dehn twist coefficient, taut foliation, contact structure.}
\section{Introduction}
\label{sec:intro}
In this paper we study the left-orderability of the fundamental groups of rational homology $3$-spheres $M$ which admit co-oriented taut foliations. Our primary motivation is the {\it L-space conjecture}:
\begin{conjecture}[Conjecture 1 in \cite{BGW}, Conjecture 5 in \cite{Ju}]
\label{conj: lspace}
{\it Assume that $M$ is a closed, connected, irreducible, orientable $3$-manifold. Then the following statements are equivalent.
$(1)$ $M$ is not a Heegaard Floer $L$-space,
$(2)$ $M$ admits a co-orientable taut foliation,
$(3)$ $\pi_1(M)$ is left-orderable. }
\end{conjecture}
The conjecture is known to hold in a variety of situations, most notably when $M$ has positive first Betti number (\cite{BRW,Ga1}), or is a non-hyperbolic geometric $3$-manifold (\cite{BGW, BRW, LS}), or is a graph manifold (\cite{BC, HRRW}). Condition (2) of the conjecture is known to imply condition (1) (\cite{OS1, KR2, Bn}). Gordon and Lidman introduced the term {\it excellent} for manifolds satisfying conditions (2) and (3), and therefore (1), of the conjecture, and {\it total L-space} for manifolds satisfying neither (1) nor (3), and therefore not (2). It is clear that Conjecture \ref{conj: lspace} holds for manifolds which are either excellent or total L-spaces and that the conjecture is equivalent to the statement that a closed, connected, irreducible, orientable $3$-manifold is either excellent or a total $L$-space.
Given a closed, connected, irreducible, orientable $3$-manifold $M$, the available techniques for verifying that $M$ satisfies conditions (1) and (2) of Conjecture \ref{conj: lspace} are far in advance of those available for verifying (3). An equivalent condition for (3) is the existence of a non-trivial homomorphism $\pi_1(M) \to \hbox{Homeo}_+(\mathbb R)$ (\cite[Theorem 1.1]{BRW}), but these are difficult to construct in general. One method for producing them is to consider a non-trivial representation $\rho: \pi_1(M) \to PSL(2, \mathbb R)$ whose Euler class vanishes (cf. \S \ref{sec: euler class}). Such a $\rho$ lifts to a representation $\pi_1(M) \to \widetilde{SL_2} \leq \hbox{Homeo}_+(\mathbb R)$, and so $\pi_1(M)$ is left-orderable. A drawback of this approach is that it gives no insight into potential connections between condition (3) and conditions (1) and (2). To address this point, suppose that $M$ satisfies (2) and let $\rho: \pi_1(M) \to \hbox{Homeo}_+(S^1)$ be a non-trivial representation obtained through Thurston's universal circle construction applied to a co-oriented taut foliation on $M$ (cf. \S \ref{sec:taut foliations circle}). As before, there is a characteristic class $e(\rho) \in H^2(M)$ whose vanishing implies the left-orderability of $\pi_1(M)$ (see \S \ref{sec: euler class}). It is known that $e(\rho)$ coincides with the Euler class of the foliation's tangent bundle (see Proposition \ref{prop:Euler class}), and while the latter does not always vanish, one goal of this paper is to show that it does in topologically interesting situations. In particular, we use this approach to investigate Conjecture \ref{conj: lspace} in the context of manifolds obtained as branched covers of knots and links in rational homology $3$-spheres.
Gordon and Lidman initiated such a study for links in $S^3$ (\cite{GLid1, GLid2}), focusing on torus links and certain families of satellite knots, including cables. Here we will be mainly concerned with cyclic branched covers of hyperbolic links. In this case, the cyclic branched covers are almost always hyperbolic (\cite{BPH,Dun}).
Hyperbolic $2$-bridge knots form one of the simplest families of hyperbolic knots and various aspects of Conjecture \ref{conj: lspace} have been studied for their branched covers. For instance, work of Dabkowski, Przytycki, and Togha \cite{DPT} combines with that of Peters \cite{Pe} to show that the branched covers of many genus one $2$-bridge knots, including the figure eight knot, are total L-spaces. The second named author showed that for large $n$, the fundamental group of the $n$-fold branched cyclic cover of the $(p,q)$ $2$-bridge knot is left-orderable if $q \equiv 3$ (mod $4$) \cite{Hu}. More generally, Gordon showed that the same conclusion holds for any $2$-bridge knot with non-zero signature \cite{Gor}.
Before we state our results, we introduce some notation and terminology. See \S \ref{sec: background} for the details.
Given a $3$-manifold $V$ with a connected toroidal boundary, a slope on $\partial V$ is a $\partial V$-isotopy class of essential simple closed curves contained in $\partial V$. We identify slopes with $\pm$-classes of primitive elements of $H_1(\partial V)$, in the usual way, and often represent them by primitive classes $\alpha \in H_1(\partial V)$. The Dehn filling of $V$ determined by a slope $\alpha$ on $\partial V$ will be denoted by $V(\alpha)$.
Let $K$ be an oriented null-homologous knot in an oriented rational homology sphere $M$. We use $X(K)$ to denote its exterior in $M$ and $\mu, \lambda \in H_1(\partial X(K))$ to denote, respectively, the longitudinal and meridional classes of $K$ (cf. \S \ref{subsec:knot exterior}). Since $K$ is null-homologous, $\{\mu, \lambda\}$ is a basis of $H_1(\partial X(K))$.
For each $n \geq 1$, $X_n(K) \to X(K)$ will be the canonical $n$-fold cyclic cover of $X(K)$ and $\Sigma_n(K)\to M$ the associated $n$-fold cyclic cover branched over $K$.
There is a basis $\{\mu_n, \lambda_n\}$ of $H_1(\partial X_n(K))$ where the image of $\mu_n$ in $H_1(\partial X(K))$ is $n \mu$ and that of $\lambda_n$ is $\lambda$. By construction, $\Sigma_n(K)=X_n(K)(\mu_n)$
(\S \ref{sec:cyclic branched cover}).
Given a fibred knot $K$ in an irreducible rational homology sphere, we use $c(h)$ to denote the fractional Dehn twist coefficient of its monodromy $h$ (\S \ref{sec:fractional Dehn twist coefficient}). When $K$ is hyperbolic and $c(h)\neq 0$, work of Roberts (\cite{Rob}) can be used to show that if $n|c(h)| \geq 1$, the $n$-fold cyclic cover branched cover of such $K$ admits co-oriented taut foliations (\cite[Theorem 4.1]{HKM2}). We use the universal circle construction to show that under the same conditions, the branched covers have left-orderable fundamental groups:
\begin{theorem}
\label{thm:conjecture fibre knots}
Let $K$ be a hyperbolic fibred knot in an oriented integer homology $3$-sphere $M$ with monodromy $h$.
$(1)$ $\Sigma_n(K)$ is excellent for $n|c(h)| \geq 1$. In particular, if the fractional Dehn twist coefficient $c(h) \ne 0$ and $g$ is the genus of $K$, then $\Sigma_n(K)$ is excellent for $n\geq 2(2g-1)$.
$(2)$ More generally, for $n \geq 1$, $X_n(K)(\mu_n + q \lambda_n)$ is excellent whenever $|nc(h) - q| \geq 1$.
\end{theorem}
For a fixed $n$, there are at most two values of $q$ for which $|nc(h) - q| < 1$ and if two, they are successive integers. Such exceptional values of $q$ are necessary as, for instance, $X_n(K)(\mu_n + q \lambda_n)$ could have a finite fundamental group. Compare Corollary \ref{cor:universal abelian cover}.
It is known that the fractional Dehn twist coefficients of the monodromies of hyperbolic, fibred, strongly quasipositive knots are non-zero (\cite{Hed, HKM1}). In particular, this is true for $K$ an L-space knot, as they are fibred and strongly quasipositive (cf. \cite[Theorem 1.2]{Hed}, \cite[Corollary 1.3]{Ni} and the calculations of \cite{OS2}).
\begin{corollary}
\label{cor:cyclic branched covers of SQP}
Suppose that $K$ is a hyperbolic, fibred, strongly quasipositive knot with monodromy $h$. Then $\Sigma_n(K)$ is excellent for $n \geq \frac{1}{|c(h)|}$. In particular, $\Sigma_n(K)$ is excellent if $n\geq 2(2g-1)$.
\qed
\end{corollary}
Boileau, Boyer and Gordon have investigated the $n$-fold branched cyclic covers of strongly quasipositive knots \cite{BBG} and have shown that in the fibred case they are not L-spaces for $n \geq 6$. Since $c(h)$ can be arbitrarily small for such knots, the disparity between the sufficient condition $n \geq 6$ for condition (1) of the conjecture to hold and $n \geq \frac{1}{|c(h)|}$ for conditions (2) and (3) to hold is arbitrarily large. A major challenge is to develop techniques to bridge this gap.
\begin{remarks} \label{rem: main theorem}$\;$
(1) Theorem \ref{thm:conjecture fibre knots} and its corollaries (Corollary \ref{cor:universal abelian cover}, Corollary \ref{cor: knots rational homology sphere}) hold for hyperbolic fibred knots in oriented rational homology spheres under the assumption that the Euler class of the tangent plane bundle of the fibring of the exterior of the knot is zero (Proposition \ref{prop: e=0 and c>1 implies lo}).
(2) In Theorem \ref{thm:conjecture fibre knots}, the inequality $|nc(h) - q| \geq 1$ can be recast in terms of the {\it distance} $\Delta(\alpha, \beta)$ between slopes $\alpha, \beta$ on $\partial X(K)$. Thinking of $\alpha$ and $\beta$ as primitive classes in $H_1(\partial X(K))$ and using $\alpha \cdot \beta$ to denote their algebraic intersection number, $\Delta(\alpha, \beta)$ is defined to be $|\alpha \cdot \beta|$. If $c(h) = \frac{a}{b}$ where $a, b$ are coprime integers, then the {\it degeneracy slope} of $K$ is represented by the primitive class $\delta = b \mu + a \lambda$\ (\cite{GO, KR1}). Then $|nc(h) - q| < 1$ if and only if $\Delta(n \mu + q \lambda, \delta) = |na - qb| < |b| = \Delta(\lambda, \delta)$. Thus the theorem says that $X_n(K)(\mu_n + q \lambda_n)$ is excellent if $\Delta(n \mu + q \lambda, \delta) \geq \Delta(\lambda, \delta)$.
\end{remarks}
The {\it universal abelian cover} of a manifold $W$ is the regular cover $\widetilde{W} \to W$ corresponding to the abelianisation homomorphism $\pi_1(W) \to H_1(W)$. It is simple to see that if $\gcd(n, q) = 1$, there is a universal abelian cover $X_n(K)(\mu_n + q \lambda_n) \to X(K)(n\mu + q \lambda)$.
\begin{corollary}
\label{cor:universal abelian cover}
Let $K$ be a hyperbolic fibred knot in an integer homology $3$-sphere with monodromy $h$. Given coprime integers $n \geq 1$ and $q$, then the universal abelian cover of $X(K)(n\mu+q\lambda)$ is excellent for
$q \not \in \left\{
\begin{array}{cl}
\{nc(h)\} & \hbox{ if } nc(h) \in \mathbb Z \\
\{\lfloor nc(h) \rfloor, \lfloor nc(h) \rfloor+ 1\} & \hbox{ if } nc(h) \not \in \mathbb Z
\end{array} \right.$.
\end{corollary}
Corollary \ref{cor:universal abelian cover} is striking in that it says that the universal abelian cover of the generic Dehn surgery on a hyperbolic fibred knot in an integer homology $3$-sphere is excellent even when the surgered manifold is not. Consider, for instance, a hyperbolic L-space knot $K \subset S^3$. Up to replacing $K$ by its mirror image, we can suppose that $n/q$-surgery of $K$ is an L-space if and only if $n/q \geq 2g(K) - 1$. The corollary implies that if $n/q \geq 2g(K) - 1$, then avoiding the specified values of $q$, $n/q$-surgery of $K$ is a non-excellent manifold whose universal abelian cover is excellent.
Assuming the truth of Conjecture \ref{conj: lspace}, the corollary holds for all hyperbolic knots in the $3$-sphere. For instance, if $K$ is a non-fibred hyperbolic knot in $S^3$, it admits no non-trivial surgeries which yield L-spaces (\cite{Ghi, Ni}). Conjecture \ref{conj: lspace} then implies that for $n$ and $q$ as in the corollary, the rational homology sphere $X(K)(n \mu + q \lambda)$ admits a co-orientable taut foliation. Hence the same is true for its universal abelian cover $X_n(K)(\mu_n + q \lambda_n)$. This cover also has a left-orderable fundamental group, and is therefore excellent, by Remark \ref{rem: rhouniv non-trivial} and \cite[Lemma 3.1]{BRW}.
\begin{conjecture}
{\it Let $n, q$ be coprime integers with $nq \ne 0$ and let $K$ be a hyperbolic knot in $S^3$. If the universal abelian cover of $X(K)(n \mu + q \lambda)$ is not excellent, then $K$ is fibred and if $h$ is its monodromy, $q \in \left\{
\begin{array}{cl}
\{nc(h)\} & \hbox{ if } nc(h) \in \mathbb Z \\
\{\lfloor nc(h) \rfloor , \lfloor nc(h) \rfloor + 1\} & \hbox{ if } nc(h) \not \in \mathbb Z
\end{array} \right.$.}
\end{conjecture}
\begin{problem}
{\it Determine necessary and sufficient conditions for the universal abelian cover of an irreducible rational homology $3$-sphere $M$ to be excellent. }
\end{problem}
For instance, is the existence of a representation $\pi_1(M) \to \mbox{{\rm Homeo}}_+(S^1)$ with non-abelian image necessary and sufficient for the universal abelian cover of an irreducible rational homology $3$-sphere $M$ to be excellent?
Fix a knot $K$ in an integer homology $3$-sphere $M$ and coprime integers $p > 0$ and $q$. Let $m$ be a positive integer and set $n = mp$. We can generalize Theorem \ref{thm:conjecture fibre knots}(1) and Corollary \ref{cor:universal abelian cover} by considering the orbifold with underlying space $X(K)(p\mu + q \lambda)$ and singular set the core of the filling solid torus with isotropy groups $\mathbb Z/m$. Here $H_1(\mathcal{O}) \cong \mathbb Z/n$ and the universal abelian cover of $\mathcal{O}$ corresponds to an $n$-fold cyclic cover $X_n(K)(\mu_n + mq \lambda_n) \to X(K)(p\mu + q \lambda)$ branched over the core of the $(p\mu + q \lambda)$-filling torus with branching index $m$. When $p = 1$ and $q = 0$ this is the branched cover $\Sigma_n(K) \to M$.
\begin{corollary}
\label{cor: knots rational homology sphere}
Let $K$ be a hyperbolic fibred knot in an integer homology $3$-sphere $M$ with monodromy $h$ and consider coprime integers $p > 0$ and $q$ as well as a positive integer $m\geq 1$. The universal abelian cover of the orbifold with underlying space $X(K)(p\mu + q \lambda)$ and singular set the core of the filling solid torus with isotropy group $\mathbb Z/m$ is an excellent $3$-manifold if $m|pc(h) - q| \geq 1$.
\end{corollary}
We also have results on cyclic branched covers of non-fibred hyperbolic links in $S^3$. Here is a special case of Theorem \ref{thm:taut foliation in cyclic covers of a braid}.
\begin{theorem}
\label{thm:conjecture cyclic braids}
Let $b \in B_{m}$ be an odd-strand pseudo-Anosov braid and let $c(b)$ denote its fractional Dehn twist coefficient. If $|c(b)| \geq 2$, then all even order cyclic branched covers of $\hat{b}$ are excellent.
\end{theorem}
\begin{remark}
It is useful to note that under the hypothesis that $|c(b)| \geq 2$, work of Ito and Kawamuro (\cite[Theorem 8.4]{IK}) implies that $b$ is a pseudo-Anosov braid if and only if $\hat b$ is a hyperbolic link.
\end{remark}
Theorem \ref{thm:conjecture cyclic braids} combines with results of Baldwin (\cite{Bal}) and Li-Watson (\cite{LW}) to prove that:
\begin{theorem}
\label{thm:lspace genus one open book decomposition}
Conjecture \ref{conj: lspace} holds for irreducible $3$-manifolds which admit genus one open book decompositions with connected binding.
\end{theorem}
In its turn, Theorem \ref{thm:lspace genus one open book decomposition} combines with Theorem \ref{thm:conjecture fibre knots} to determine precisely which branched covers of genus one fibred knots $K$ are excellent and which are total L-spaces. To describe this, let $T_1$ be the fibre of such a knot. It is known that the mapping class group ${\rm Mod}(T_1)$ is generated by two right-handed Dehn twists $T_{c_1}$ and $T_{c_2}$ (cf. \S \ref{sec:L-space conjecture genus one open books}, especially Figure \ref{fig:double cover 3-punctured disk}). Let
$$\delta = (T_{c_1}T_{c_2})^3$$
and note that $\delta^2$ is the right-handed Dehn twist along $\partial T_1$.
\begin{corollary}
\label{cor: branched cover genus 1}
Suppose that $K$ is a genus one fibred knot with monodromy $h$ in a closed, connected, orientable and irreducible $3$-manifold $M$. Then for each $n \geq 2$, $\Sigma_n(K)$ is either excellent or a total L-space. Further, $\Sigma_n(K)$ is a total L-space if and only if
$(1)$ $h$ is pseudo-Anosov, $c(h) = 0$, and $n \geq 2$.
$(2)$ $h$ is periodic and up to replacing it by a conjugate homeomorphism, $h$ and $n$ are given by
$$h = \left\{ \begin{array}{rll}
T_{c_1}^{-1} T_{c_2}^{-1} & \hbox{ and } & n \leq 5 \\
\delta T_{c_1}^{-1} T_{c_2}^{-1} & \hbox{ and } & n= 2 \\
T_{c_1}^{-2} T_{c_2}^{-1} & \hbox{ and } & n \leq 3 \\
\delta T_{c_1}^{-2} T_{c_2}^{-1} & \hbox{ and } & n \leq 3 \\
T_{c_1}^{-3} T_{c_2}^{-1} & \hbox{ and } & n = 2 \\
\delta T_{c_1}^{-3} T_{c_2}^{-1} & \hbox{ and } & n \leq 5 .
\end{array} \right.$$
\end{corollary}
Next we consider satellite links.
In \cite{GLid1}, Gordon and Lidman studied the cyclic branched covers of $(p,q)$-cable knots in $S^3$. These are satellite knots whose patterns are $(p,q)$-torus knots embedded standardly as a $q$-braid in a solid torus. They showed that the $n$-fold cyclic branched covers of $(p,q)$-cable knots are always excellent, except possibly for the case $n=q=2$ (\cite[Theorem 1.3]{GLid1}). In the latter case they showed that the $2$-fold branched covers of a $(p,2)$-cable knots are never L-spaces \cite[Theorem 1]{GLid2}, and hence the truth of Conjecture \ref{conj: lspace} would imply that they are excellent.
\begin{conjecture} {\rm (Gordon-Lidman)}
\label{conj:cyclic satellite}
{\it The $n$-fold cyclic branched cover of a prime, satellite knot is excellent.}
\end{conjecture}
Satellite links whose patterns are closed braids and whose companions are fibred are a particularly interesting class to investigate as, for instance, all satellite L-space knots in $S^3$ fall into this category (\cite[Theorem 7.3, Theorem 7.4]{BM}; also see \cite[Theorem 35]{HRW} and \cite[Proposition 3.3]{Hom}). Theorem \ref{thm:satellite c(h)>0 n>>0} and Corollary \ref{cor:satellite c(h)>0 n>>0} verify special cases of Conjecture \ref{conj:cyclic satellite}.
\begin{theorem}
\label{thm:satellite c(h)>0 n>>0}
Assume that $L$ is a satellite link in an integer homology $3$-sphere $M$ whose pattern is contained in its solid torus as the closure of an $m$-strand pseudo-Anosov braid and whose companion is a fibred hyperbolic knot in $M$ with monodromy $h$.
$(1)$ If $c(h)=0$, then the $n$-fold cyclic branched cover of $L$ is excellent whenever $\gcd(m,n)=1$.
$(2)$ If $c(h)\neq 0$, then the $n$-fold cyclic branched cover of $L$ is excellent when $\gcd(m,n) =1$ and $n\geq \frac{2}{|c(h)|}$.
\end{theorem}
By Proposition \ref{prop:lower bound FDTC}, if $c(h) \ne 0$, then $|c(h)| \geq \frac{1}{2(2g(C)-1)}$ where $g(C)$ is the genus of the companion knot $C$ in Theorem \ref{thm:satellite c(h)>0 n>>0}. Hence the condition $n\geq \frac{2}{|c(h)|}$ in Theorem \ref{thm:satellite c(h)>0 n>>0}(2) holds if $n\geq 4(2g(C)-1)$.
\begin{corollary}
\label{cor:satellite c(h)>0 n>>0}
Assume that $L$ is a satellite link in an integer homology $3$-sphere $M$ whose pattern is contained in its solid torus as the closure of an $m$-strand pseudo-Anosov braid and whose companion is a fibred hyperbolic knot. Then the $n$-fold cyclic branched cover of $L$ is excellent when $\gcd(m,n) =1$ and $n \gg 0$.
\qed
\end{corollary}
Consider an L-space satellite knot $K$. Baker and Motegi have shown that the pattern is a closed braid \cite[\S 7]{BM}. Further, Hanselman, Rasmussen and Watson \cite{HRW} have shown that the companion is also an L-space knot. Hence the companion knot $C$ is fibred and strongly quasipositive (cf. \cite[Theorem 1.2]{Hed}, \cite[Corollary 1.3]{Ni} and the calculations of \cite{OS2}), so its fractional Dehn twist coefficient is non-zero (cf. \cite{Hed, HKM2}). Up to replacing $K$ by its mirror image, we can suppose that the fractional Dehn twist of the monodromy of the companion knot $C$ is positive.
Boileau, Boyer and Gordon have shown that the cyclic branched covers of satellite L-space knots are never L-spaces \cite[Corollary 6.4]{BBG}.
In the case that both pattern and companion are hyperbolic, and the fractional Dehn twist coefficient of the pattern braid is nonnegative (cf. \cite[Question 1.8]{Hom}), Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative} shows that $\Sigma_n(K)$ is excellent whenever $n$ is relatively prime to the braid index of the pattern.
\begin{theorem}
\label{thm: satellite knot c(b) and c(h) nonnegative}
Assume that $L$ is a satellite link in an integer homology $3$-sphere $M$ whose pattern is contained in its solid torus as the closure of an $m$-strand pseudo-Anosov braid $b$ and whose companion is a fibred hyperbolic knot with monodromy $h$. Suppose that the fractional Dehn twist coefficients $c(b)$ and $c(h)$ are non-negative. Then for $n\geq 2$ relatively prime to $m$, the $n$-fold cyclic branched cover of $L$ is excellent.
\end{theorem}
Here is the plan of the paper. In \S \ref{sec: background} we introduce background material and notational conventions. Section \ref{sec:braids MCG} covers some basic concepts regarding mapping class groups and braids. Section \ref{sec:fractional Dehn twist coefficient} introduces fractional Dehn twist coefficients from two perspectives: isotopies (\S \ref{subsec:FDTC isotopy}) and translation numbers (\S \ref{subsec:FDTC translation number}). The Euler classes of representations with values in $\hbox{Homeo}_+(S^1)$ and of oriented circle bundles are defined and related in \S \ref{sec: euler class}. Section \ref{sec:taut foliations circle} is devoted to a description of the universal circle and the universal circle representation associated to a rational homology $3$-sphere $M$ endowed with a co-oriented taut foliation. In \S \ref{sec:euler class universal circle} we give a detailed proof of the fact, due to Thurston, that the Euler class of the universal circle representation coincides with that of the associated foliation's tangent bundle (Proposition \ref{prop:Euler class}), and \S \ref{sec: lo and taut foliations} uses this to deduce the left-orderabilty of $\pi_1(M)$ when this Euler class vanishes. The material of the previous sections is combined in \S \ref{sec: fdtc and lo} to study the left-orderability of $3$-manifolds given by open books. In particular, Theorem \ref{thm:conjecture fibre knots}, Corollary \ref{cor:universal abelian cover} and Corollary \ref{cor: knots rational homology sphere} are proved here. In \S \ref{sec:cyclic closed braids} we prove Theorems \ref{thm:conjecture cyclic braids} and \ref{thm:taut foliation in cyclic covers of a braid}, which are used in \S \ref{sec:L-space conjecture genus one open books} to deduce Theorem \ref{thm:lspace genus one open book decomposition} and Corollary \ref{cor: branched cover genus 1}. 
Finally in \S \ref{sec:LO cyclic cover satellite knots}, we apply the results of \S \ref{sec: fdtc and lo} and \S \ref{sec:cyclic closed braids} to study cyclic branched covers of satellite knots in order to prove Theorems \ref{thm:satellite c(h)>0 n>>0} and \ref{thm: satellite knot c(b) and c(h) nonnegative}.
{\bf Acknowledgement}.
The authors would like to thank Jonathan Bowden for pointing out the possibility of avoiding the use of \cite[Theorem 1.4]{HKP} in our arguments and for discussions which led us to add Lemma \ref{lem:Euler class vanishes links} in its place. They also thank Bill Menasco for an enlightening correspondence concerning relations between fractional Dehn twist coefficients and open book foliations. Finally they thank the anonymous referee for suggestions which led to an improved exposition.
\section{Some background results, terminology and notation}
\label{sec: background}
We set some conventions in this section which will be used throughout the paper.
\subsection{Link exteriors in rational homology spheres}
\label{subsec:knot exterior}
Let $M$ be an oriented rational homology $3$-sphere and $L$ be an oriented null-homologous link in $M$. We use $N(L)$ to denote a closed tubular neighbourhood of $L$ and $X(L) = \overline{M \setminus N(L)}$ to denote the exterior of $L$ in $M$.
If $L = \bigsqcup_i K_i$ is the decomposition of $L$ into its component knots, then $N(L) = \bigsqcup_i N(K_i)$ where $N(K_i)$ is a tubular neighbourhood of $K_i$.
A {\it meridional disk} of $K_i$ is any essential properly embedded disk in $N(K_i)$ which is oriented so that its intersection with the oriented knot $K_i$ is positive.
The {\it meridional slope} of $K_i$ is represented by a primitive class $\mu_i \in H_1(\partial N(K_i))$ corresponding to the oriented boundary of a meridional disk of $K_i$.
A {\it meridional class of $K_i$} in $H_1(X(L))$ is the image of $\mu_i$ under the inclusion-induced homomorphism $H_1(\partial N(K_i)) \to H_1(X(L))$.
The assumption that $L$ is null-homologous implies that there is a compact, connected, oriented surface $S$ properly embedded in $X(L)$ which intersects each component $\partial N(K_i)$ of $\partial X(L)$ in an oriented simple closed curve $\lambda_i$ isotopic in $N(K_i)$ to $K_i$. It is clear from the construction that
$$\mu_i \cdot \lambda_i = 1$$
for each $i$.
In the case that $L$ is a knot, $\lambda_1$ represents the longitudinal class of $L = K_1$ in $H_1(\partial X(K_1))$.
\begin{lemma}
\label{lemma: homology of exterior}
Suppose that $K$ is a null-homologous knot in a rational homology $3$-sphere $M$ with exterior $X(K)$. Then $H_1(X(K)) \cong H_1(M) \oplus \mathbb Z$ where the second factor is generated by a meridional class of $K$. Further, the inclusion-induced homomorphism $H^2(M) \to H^2(X(K))$ is an isomorphism.
\end{lemma}
\begin{proof}
Excision implies that
$$H_r(M, X(K)) \cong H_r(N(K), \partial N(K)) \cong \left\{ \begin{array}{ll} \mathbb Z & \hbox{ if } r = 2,3 \\ 0 & \hbox{ otherwise} \end{array} \right.$$
where $H_2(M, X(K)) \cong \mathbb Z$ is generated by the class $\eta$ carried by a meridional disk of $N(K)$. Then the exact sequence of the pair $(M, X(K))$ yields a short exact sequence
\begin{equation}
\label{eqn: sequence}
0 \to H_2(M, X(K)) \xrightarrow{\partial} H_1(X(K)) \to H_1(M) \to 0
\end{equation}
where $\partial(\eta)$ is a meridional class $\mu$ of $K$. Since $K$ is null-homologous in $M$, there is a properly embedded, compact, connected, oriented surface $S$ in $X(K)$ whose boundary represents the longitudinal class $\lambda$ of $K$ in $H_1(\partial X(K))$. Then $\partial([S]) \cdot \mu = \lambda \cdot \mu = \pm 1$, where $[S] \in H_2(X(K), \partial X(K))$ corresponds to the fundamental class of $S$. Hence the homomorphism $H_1(X(K)) \to H_2(M, X(K)), \alpha \mapsto (\alpha \cdot [S]) \eta$, splits the sequence (\ref{eqn: sequence}) up to sign, which proves the first assertion of the lemma.
For the second, consider the connecting map $H^1(X(K)) \xrightarrow{\delta} H^2(M, X(K))$ from the cohomology exact sequence of the pair $(M, X(K))$. Excision shows that
$$H^2(M, X(K)) \cong H^2(N(K), \partial N(K)) \cong \hbox{Hom}(H_2(N(K), \partial N(K)), \mathbb Z) \cong \mathbb Z$$
is generated by the homomorphism which takes the value $1$ on the class $\eta \in H_2(M, X(K)) = H_2(N(K), \partial N(K))$. On the other hand, since $H_1(X(K)) \cong H_1(M) \oplus \mathbb Z$ where the $\mathbb Z$ factor is generated by $\partial \eta$, if $\nu \in \hbox{Hom}(H_1(X(K)), \mathbb Z) \cong H^1(X(K))$ is the homomorphism which takes the value $1$ on a meridian of $K$ and $0$ on $H_1(M)$, then $\delta(\nu)$ is a generator of $H^2(N(K), \partial N(K))$. Thus $\delta$ is surjective. It then follows from the exact cohomology sequence of the pair $(M, X(K))$ that the homomorphism $H^2(M) \to H^2(X(K))$ is an isomorphism, which completes the proof.
\end{proof}
\subsection{Cyclic branched covers of null-homologous links}
\label{sec:cyclic branched cover}
Given a null-homologous oriented link $L= \bigsqcup_i K_i$ in an oriented rational homology sphere $M$ and a compact, connected, oriented surface $S$ properly embedded in $X(L)$ as above, let $[S] \in H_2(X(L), \partial X(L))$ correspond to the fundamental class of $S$. For each $n \geq 1$, the epimorphism
$$H_1(X(L)) \xrightarrow{\;\; \alpha \mapsto \alpha \cdot [S] \;\;} \mathbb{Z} \xrightarrow{\tiny \;\; \hbox{(mod $n$) reduction} \;\;} \mathbb Z/n$$
determines an $n$-fold cyclic cover
$$X_n(L) \to X(L)$$
and an $n$-fold cyclic cover
$$(\Sigma_n(L), \widetilde L) \xrightarrow{\;\; p \;\;} (M, L)$$
branched over $L$.
The link $\widetilde L$ decomposes into components $\widetilde L = \bigsqcup_i \widetilde K_i$
where $\widetilde K_i=p^{-1}(K_i)$. Similarly, its closed tubular neighbourhood $N(\widetilde{L}) = \overline{\Sigma_n(L) \setminus X_n(L)}$ splits into components $N(\widetilde{L}) = \bigsqcup_i N(\widetilde K_i)$ where $N(\widetilde K_i)$ is a tubular neighbourhood of $\widetilde K_i$.
For each $i$ there is a basis $\{\widetilde \mu_{i}, \widetilde \lambda_{i}\}$ of $H_1(\partial N(\widetilde K_i))$ determined by the property that
$$\widetilde \mu_i \xrightarrow{\;\;\; p_* \;\;} n \mu_i $$
and
$$\widetilde \lambda_i \xrightarrow{\;\;\; p_* \;\;} \lambda_i.$$
The surface $S$ lifts to a properly embedded surface $\widetilde S \subset X_n(L)$ which intersects $\partial N(\widetilde K_i)$ in an oriented simple closed curve representing $\widetilde \lambda_i$.
By construction, $\Sigma_n(L)$ is the $(\widetilde \mu_1, \widetilde \mu_2, \ldots, \widetilde \mu_n)$-Dehn filling of $X_n(L)$.
\subsection{Lifting contact structures to branched covers}
\label{subsec: contact}
Let $M$ be an oriented rational homology $3$-sphere and $L$ an oriented null-homologous link in $M$. Fix a compact, connected, oriented surface $S$ properly embedded in $X(L)$ and let $p: (\Sigma_n(L), X_n(L), \widetilde L) \xrightarrow{\;\; p \;\;} (M, X(L), L)$ be as above, where $n \geq 1$.
Let $\xi=\ker(\alpha)$ be the positive contact structure on $M$ determined by a smooth, nowhere zero $1$-form $\alpha$ and suppose that $L$ is positively transverse to $\xi$. There is a lift of $\xi$ to $\Sigma_n(L)$, denoted by $\widetilde{\xi}$, which is the kernel of the pull-back form $p^*(\alpha)$ on $X_n(L)$ and is positively transverse to $\widetilde{L}$ (cf. \cite[\S 2.5]{HKP}, \cite[Theorem 7.5.4]{Gei}). More precisely, $\widetilde{\xi}$ can be constructed as follows.
Recall that $L=\bigsqcup_i K_i$ and $\widetilde L=\bigsqcup_i \widetilde K_i$ where $\widetilde K_i=p^{-1}(K_i)$. For each $i$, there are suitable tubular neighborhoods $N(K_i)$ and $N(\widetilde{K}_i)$, and cylindrical coordinates $(r,\theta,z)$ and $(\tilde{r},\tilde{\theta},\tilde{z})$ over the tubular neighborhoods $N(K_i)$ and $N(\widetilde{K}_i)$ respectively, such that the contact form $\alpha$ restricted to $N(K_i)$ is in the standard form $\alpha|_{N(K_i)}=dz+r^2d\theta$ (\cite[Example 2.5.16]{Gei}) and the cyclic branched cover $p$ restricted to $N(\widetilde{K}_i)\setminus \widetilde{K}_i$ sends $(\tilde{r},\tilde{\theta},\tilde{z})$ to $(r,n\theta, z)$. The pull-back $p^*(\alpha|_{{N(K_i)}\setminus K_i})=d\tilde{z}+n\tilde{r}^2d\tilde{\theta}$ is a contact form over $N(\widetilde{K}_i) \setminus \widetilde{K}_i$ which extends smoothly to $N(\widetilde{K}_i)$ by letting $\tilde\alpha|_{\tilde K_i} = d\tilde z$. Extending $p^*(\alpha)|_{\Sigma_n(L)\setminus \widetilde{L}}$ in this way over each tubular neighborhood, we produce the desired contact form on $\Sigma_n(L)$, denoted by $\widetilde \alpha$. Let $\widetilde{\xi}=\ker(\widetilde \alpha)$.
\subsection{Fibred knots and open books}
In this section, we review the definitions of fibred knots and open books. See \cite[\S 10H and \S 10K]{Rol} for the details.
An oriented knot $K$ in $M$ is called {\it fibred} with {\it fibre} $S$ if $S$ is a compact, connected, orientable surface properly embedded in $X(K)$ which has connected boundary and there is a locally-trivial fibre bundle $X(K) \to S^1$ with fibre $S$. Note that $S \cap \partial X(K)$ carries the longitudinal slope $\lambda$ of $K$.
A {\it monodromy} of $K$ is an orientation-preserving homeomorphism $h:S \to S$ such that $h|_{\partial S}$ is the identity, $X(K) \cong (S \times I)/((x,1) \sim (h(x), 0))$, and if $x \in \partial S$, then the loop on $\partial X(K)$ determined by $\{x\} \times I$ carries the meridional slope $\mu$ of $K$. If $K$ is a knot in a rational homology $3$-sphere, its monodromy is well-defined up to conjugation and an isotopy fixed on $\partial S$. See \cite[Proposition 5.10]{BZ} for a proof of this claim in the case that $K \subset S^3$.
Conversely, given an orientation-preserving homeomorphism $h$ of a compact, connected, orientable surface $S$ with connected boundary which restricts to the identity on $\partial S$, there is a well-defined closed, connected, orientable $3$-manifold $M$ obtained from the Dehn filling of $(S \times I)/((x,1) \sim (h(x), 0))$ along the slope determined by the image of $\{x\} \times I$ for $x \in \partial S$. The core of the filling solid torus is a knot $K$ in $M$ which is fibred with fibre $S$ and monodromy $h$. The meridian of $K$ is carried by the image of $\{x\} \times I$. The pair $(S, h)$ is called an {\it open book} decomposition of $M$ with {\it binding} $K$.
\section{Mapping class groups and closed braids}
\label{sec:braids MCG}
Throughout this section $S$ will denote an $m$-punctured ($m \geq 0$) smooth orientable compact surface with nonempty boundary. All diffeomorphisms of $S$ will be assumed to be orientation-preserving.
We use ${\rm Mod}(S)$ to denote the mapping class group of isotopy classes of diffeomorphisms of $S$ which restrict to the identity on $\partial S$. Isotopies are assumed to be fixed on $\partial S$.
From time to time we will identify an element of ${\rm Mod}(S)$ with one of its representative diffeomorphisms, though only when discussing properties held by all such representatives.
\subsection{The Nielsen-Thurston classification of mapping classes}
\label{subsec:nielsen-thurson classification}
A homeomorphism $\varphi:S\rightarrow S$ is called pseudo-Anosov if it preserves a pair $(\mathcal{F}^s, \mu^s)$ and $(\mathcal{F}^u,\mu^u)$ of mutually transverse, measured, singular foliations on $S$, and there is a number $\lambda>1$ such that $\varphi$ scales the transverse measure $\mu^s$ by $\lambda^{-1}$ and the transverse measure $\mu^u$ by $\lambda$. Here $(\mathcal{F}^s, \mu^s)$ and $(\mathcal{F}^u,\mu^u)$ are called
the {\it stable foliation} and {\it unstable foliation} of $\varphi$ respectively. We refer the reader to \cite[Chapter 13]{FM} for more precise details on pseudo-Anosov homeomorphisms as well as other results stated in this section.
By the Nielsen-Thurston classification \cite{Thu2}, each element $f$ in ${\rm Mod}(S)$ is freely isotopic to a map $\varphi:S\rightarrow S$ which is either
\begin{itemize}
\item a periodic diffeomorphism, i.e. $\varphi^n=1$ for some $n>0$, or
\item a reducible diffeomorphism, i.e. there exists a nonempty collection $\mathcal C=\{c_1,\cdots, c_r\}$ of pairwise disjoint essential simple closed curves in $S$ such that $\varphi(\mathcal{C})= \mathcal{C}$, or
\item a pseudo-Anosov homeomorphism.
\end{itemize}
The homeomorphism $\varphi$ is called a {\it Nielsen-Thurston representative} of $f$; $f$ is called periodic, reducible or pseudo-Anosov if its Nielsen-Thurston representative has the corresponding property.
It is known that a pseudo-Anosov mapping class is neither periodic nor reducible \cite{Thu2}. A fundamental result of Thurston is that the interior of the mapping torus of $f \in {\rm Mod}(S)$ is a finite volume hyperbolic manifold if and only if $f$ is pseudo-Anosov \cite{Thu1}. It contains an essential torus if $f$ is reducible and it is a Seifert fibred manifold if $f$ is periodic.
\subsection{The braid group $B_m$ and ${\rm Mod}(D_m)$}
\label{subsec:braid group mcg}
We use $B_m$ to denote the group of isotopy classes of smooth $m$-strand braids, where each strand of a braid is oriented upward (Figure \ref{fig:fig34}(A)). Let $\sigma_i$ be the standard $i^{th}$ Artin generator of the braid group $B_m$, $i=1,\cdots, m-1$ (Figure \ref{fig:fig34}(A)).
\begin{figure}[ht]
\centering
\begin{subfigure}{.4\textwidth}
\begin{tikzpicture}[scale=0.8]
\draw [thick, ->] (0.5,1) -- (0.5,3);
\node [above] at (0.5,3) {$1$};
\draw [thick, ->] (1.5,1) -- (1.5,3);
\node [above] at (1.5,3) {$2$};
\node [right] at (2,2) {$\cdots$};
\draw [thick, ->] (3.5,1) -- (5,3);
\draw [thick] (5,1) -- (4.3,1.9);
\draw [thick,->] (4.16,2.1) -- (3.5,3);
\node [right] at (5.1,2) {$\cdots$};
\draw [thick,->] (6.3, 1) -- (6.3,3);
\node [above] at (6.3,3) {$m$};
\node [above] at (5,3) {$i+1$};
\node [above] at (3.5,3) {$i$};
\node at (0,1) {\large \color{white}{d}};
\end{tikzpicture}
\caption{}
\end{subfigure}
\begin{subfigure}{.55\textwidth}
\centering
\begin{tikzpicture}[scale=0.7]
\node [below] at (1,3) {\small $1$};
\draw (1,3) circle (0.05);
\node [right] at (1.07,3) {$\cdots$};
\draw (2,3) circle (0.05);
\node [below] at (2,3) {\small $i$};
\draw (3,3) circle (0.05);
\node [below] at (3,3) {\small $i+1$};
\node [right] at (3.05,3) {$\cdots$};
\draw (4,3) circle (0.05);
\node [below] at (4,3) {\small $m$};
\draw [blue] (2.5, 4.8) -- (2,3);
\draw [blue] (2.5,4.8) -- (3,3);
\draw [thick] (2.5,3) circle (1.8);
\draw [thick, ->] (5.3,3) -- (7.3,3);
\node [above] at (6.3,3) {$\sigma_i$};
\node [below] at (8.5,3) {\small $1$};
\draw (8.5,3) circle (0.05);
\node [right] at (8.57,3) {$\cdots$};
\draw (9.8,3) circle (0.05);
\draw [blue] (10,4.8)--(9.8,3);
\draw [blue] (10,4.8) to [in=90,out=240] (9.5,3) to [in=180, out=270] (10,2.5) to [out=0,in=240] (10.5,3);
\draw (10.5,3) circle (0.05);
\node [right] at (10.55,3) {$\cdots$};
\draw (11.5,3) circle (0.05);
\node [below] at (11.5,3) {\small $m$};
\node [below] at (9.8,3) {\small $i$};
\node [below] at (10.5,3) {\small $i+1$};
\draw [thick] (10,3) circle (1.8);
\end{tikzpicture}
\caption{}
\end{subfigure}
\caption{(A) $\sigma_i$ in $B_m$; (B) $\sigma_i$ in ${\rm Mod}(D_m)$}
\label{fig:fig34}
\end{figure}
The braid group $B_m$ is isomorphic to the mapping class group ${\rm Mod}(D_m)$, where $D_m$ denotes the $m$-punctured disk obtained by removing $m$ points on the real line from the interior of the unit disk $D^2$. See \cite[Chapter 9]{FM} for instance. We identify these two groups through the following correspondence. Given an $m$-strand braid the corresponding diffeomorphism of $D_m$ is obtained by sliding the $m$-punctured disk along the braid from the bottom to the top (see Figure \ref{fig:fig34}).
The product of two braids $b_1$ and $b_2$ is the braid obtained by placing $b_1$ on the top of $b_2$. When $b_1$ and $b_2$ are viewed as diffeomorphisms of the punctured disk $D_m$, we have $b_1b_2(x)=b_1(b_2(x))$ for all $x\in D_m$.
A braid $b\in {\rm Mod}(D_m)$ is called {\it pseudo-Anosov}, respectively {\it periodic}, respectively {\it reducible}, if it is freely isotopic to a homeomorphism of $D_m$ with the corresponding property.
\subsection{Hyperbolic links as closures of pseudo-Anosov braids}
\label{subsec:hyperbolic pseudo-Anosov}
The {\it closure} of a braid $b$, denoted $\widehat{b}$, is an oriented link in $S^3$ obtained by closing the braid $b$ as illustrated in Figure \ref{fig:branched_cover_braid_fig0}. A classical theorem of Alexander \cite{Al} asserts that for any oriented link $L$ in $S^3$, there is an $m \geq 1$ and a braid $b \in B_m$ such that $L$ is isotopic to $\widehat b$.
\begin{figure}[ht]
\centering
\begin{tikzpicture}[scale=0.85]
\draw [gray, thick] (4, 4.5) ellipse (0.75 and 0.3);
\draw [gray,thick] (4, 1) ellipse (0.75 and 0.3);
\draw [gray, thick,->] (4,0.7) -- (4.05,0.7);
\draw [gray, thick] (3.25,1) -- (3.25, 4.5);
\draw [gray,thick] (4.75,1) -- (4.75, 4.5);
\draw [gray,thick, ->] (4.75,2.75) -- (4.75,2.8);
\draw [thick] (3.5,2) rectangle (4.5,3.5);
\node at (4,2.75) {$b$};
\begin{scope}[thick,decoration={
markings,
mark=at position 0.5 with {\arrow{>}}}
]
\draw[postaction={decorate}] (3.6,3.5)--(3.6,4.5);
\draw[postaction={decorate}] (3.85,3.5)--(3.85,4.5);
\draw[postaction={decorate}] (4.4,3.5)--(4.4,4.5);
\draw[postaction={decorate}] (3.6,1)--(3.6,2);
\draw[postaction={decorate}] (3.85,1)--(3.85,2);
\draw[postaction={decorate}] (4.4,1)--(4.4,2);
\end{scope}
\filldraw (4,3.8) circle (0.015);
\filldraw (4.1,3.8) circle (0.015);
\filldraw (4.2,3.8) circle (0.015);
\filldraw (4,1.65) circle (0.015);
\filldraw (4.1,1.65) circle (0.015);
\filldraw (4.2,1.65) circle (0.015);
\filldraw (3.6,1) circle (0.035);
\filldraw (3.85,1) circle (0.035);
\filldraw (4.4,1) circle (0.035);
\filldraw (3.6,4.5) circle (0.035);
\filldraw (3.85,4.5) circle (0.035);
\filldraw (4.4,4.5) circle (0.035);
\draw [->] (5.5, 2.8) -- (7.5,2.8);
\draw [gray, dashed] (8.25, 2.65) to [out=90, in=180] (9,3) to [out=0, in=90] (9.65, 2.65);
\draw [thick] (8.5,2) rectangle (9.5,3.5);
\begin{scope}[thick,decoration={
markings,
mark=at position 0.1 with {\arrow{>}}}
]
\draw[postaction={decorate}] (9.4,3.5) to [out=90, in=270] (9.4,3.7) to [out=90, in=180] (9.8,4) to [out=0, in=90] (10.2,2.75) to [out=270, in=0] (9.8, 1.5) to [out=180, in=270] (9.4,1.8) to [out=90,in=270] (9.4,2);
\draw[postaction={decorate}] (8.85,3.5) to [out=90, in=270] (8.85, 3.7) to [out=90, in=180] (9.8, 4.5) to [out=0,in=90] (10.55, 2.75) to [out=270, in=0] (9.8, 1.1) to [out=180, in=270] (8.85,1.8) to [out=90,in=270](8.85,2);
\draw[postaction={decorate}] (8.6,3.5) to [out=90,in=270] (8.6,3.65) to [out=90,in=180] (9.8, 4.75) to [out=0,in=90] (10.75,2.75) to [out=270,in=0] (9.8,0.9) to [out=180, in=270] (8.6, 1.85) to [out=90,in=270] (8.6, 2);
\end{scope}
\begin{scope}[decoration={
markings,
mark=at position 0 with {\arrow{>}}}
]
\draw [blue, postaction={decorate}] (9.65,2.9) -- (9.65, 2.93);
\end{scope}
\draw [gray,thick] (8.25, 2.8) to [out=90, in=270] (8.25, 3.5) to [out=90, in=180] (9.7, 5) to [out=0, in=90] (11.1,2.75) to [out=-90, in=0] (9.7, 0.5) to [out=180, in=270] (8.25, 1.7) to [out=90, in=-90] (8.25,2.8);
\node at (9,2.75) {$b$};
\begin{scope}[decoration={
markings,
mark=at position 0.5 with {\arrow{>}}}
]
\draw [gray,thick, postaction={decorate}] (8.25, 2.65) to [out=270, in=180] (9,2.3) to [out=0, in=270] (9.65, 2.65);
\end{scope}
\draw [blue, thick] (9.8, 2.75) ellipse (0.15 and 0.7);
\node at (9.8, 3.65) {{\color{blue}\small $\nu$}};
\filldraw (9,3.8) circle (0.015);
\filldraw (9.1,3.8) circle (0.015);
\filldraw (9.2,3.8) circle (0.015);
\filldraw (9,1.65) circle (0.015);
\filldraw (9.1,1.65) circle (0.015);
\filldraw (9.2,1.65) circle (0.015);
\end{tikzpicture}
\caption{}
\label{fig:branched_cover_braid_fig0}
\end{figure}
Recall that a link $L$ in $S^3$ is {\it hyperbolic} if its exterior $S^3\setminus L$ is hyperbolic, i.e., it admits a complete finite volume Riemannian metric of constant curvature $-1$. Ito has shown \cite[Theorem 1.3]{Ito2} that if the absolute value of the Dehornoy floor (Definition \ref{def:Dehornoy floor}) of $b \in B_m$ is at least $2$ and $\widehat b$ is a knot, then $\widehat b$ is hyperbolic if and only if $b$ is pseudo-Anosov. More recently, Ito and Kawamuro have shown \cite[Theorem 8.4]{IK} that if the absolute value of the fractional Dehn twist coefficient of $b$ is larger than $1$, then $\widehat b$ is a hyperbolic link if and only if $b$ is a pseudo-Anosov braid.
\section{Fractional Dehn twist coefficients}
\label{sec:fractional Dehn twist coefficient}
In this section we suppose that $S$ is an oriented hyperbolic surface with nonempty geodesic boundary. Given $h: S \rightarrow S$ representing an element of ${\rm Mod}(S)$, let $H_t: S\rightarrow S$ denote a free isotopy between $H_0 = h$ and its Nielsen-Thurston representative $H_1 = \varphi$.
We are interested in the fractional Dehn twist coefficient of $h$ with respect to a boundary component of $S$. Intuitively, this is a rational number representing the amount of twisting $\partial S$ undergoes during the isotopy $H_t$ from $h$ to $\varphi$. The concept was introduced in \cite{HKM1} to study the tightness of the contact structure supported by an open book $(S, h)$. When $h$ is pseudo-Anosov, it is closely related to the degeneracy slope of a pseudo-Anosov homeomorphism \cite{GO}. If $\partial S$ is connected, it can be used to formulate a convenient criterion, due to Honda, Kazez and Mati\'c, for the existence of co-oriented taut foliations on the open book $(S,h)$ (cf. Theorem \ref{thm:cgeq1}).
We give two equivalent definitions of the fractional Dehn twist coefficient in Section \ref{subsec:FDTC isotopy} and Section \ref{subsec:FDTC translation number} below, and will take advantage of both points of view. For simplicity, we also assume that $\partial S$ is connected and leave the simple task of extending the definition to the case that $\partial S$ is not connected to the reader (also see \cite{HKM1}).
The following theorem summarizes results from \cite{HKM2}. See Theorem 4.1, Theorem 4.3, and Lemma 4.4 of that paper for the details. (We remark that the results of \cite{Bn, KR2} are needed for the proof of \cite[Theorem 4.3]{HKM2}.)
\begin{theorem}[\cite{HKM2}] \label{thm:cgeq1}
Assume that $(S,h)$ is an open book decomposition of a closed oriented $3$-manifold $M$, where $\partial S$ is connected and $h$ is freely isotopic to a pseudo-Anosov homeomorphism. If the fractional Dehn twist coefficient of $h$ satisfies $c(h)\geq 1$, then there exists a co-orientable taut foliation on $M$ which is transverse to the binding of $(S,h)$ and is homotopic to the contact structure supported by $(S,h)$.
\end{theorem}
\subsection{Fractional Dehn twist coefficients via isotopies}
\label{subsec:FDTC isotopy}
Here, we define the fractional Dehn twist coefficient following the ideas of \cite{HKM1}, where the pseudo-Anosov case is dealt with. The extension to all Nielsen-Thurston types is immediate.
Let $C \cong S^1$ denote the boundary of $S$ and fix a periodic orbit $\{p_0,\cdots, p_{n-1}\} \sigmaubset C$ of the Nielsen-Thurston representative $\varphi$ of $h$ as follows: When $\varphi$ is pseudo-Anosov, we may choose $\{p_0,\cdots, p_{n-1}\}$ to be a subset of the singular points on $C$ of the stable singular foliation of $\varphi$ (see \cite[\S 5.1]{FLP} for examples of singularities on the boundary). In the case that $\varphi$ is reducible, let $S_0$ be the subsurface of $S$ that contains $C$. We require that $\varphi|_{S_0}$ is either pseudo-Anosov or periodic.
Assume that $p_0,\cdots, p_{n-1}$ are indexed cyclically according to the orientation on $C$ induced by that on $S$. Since the set $\{p_0,\cdots, p_{n-1}\}$ is preserved under $\varphi$, there exists an integer $k\in \{0, 1, \cdots, n-1\}$ such that $\varphi(p_0)=p_k$. Then $H_t|_{p_0}: [0,1]\rightarrow C$ defines a path on the boundary component $C$ connecting $H_0(p_0)=p_0$ to $H_1(p_0)=p_k$.
The orientation of $C$ determines an oriented subarc $\gamma_{p_0p_k}$ of $C$ from $p_0$ to $p_k$. Let $\bar \gamma_{p_0p_k}$ denote the same arc with the opposite orientation. Then $H_t(p_0) * \bar \gamma_{p_0p_k}$, the concatenation of the paths $H_t(p_0)$ and $\bar \gamma_{p_0p_k}$, is a loop in $C$ based at $p_0$. Hence there is a unique integer $m$ such that
\begin{equation}
[H_t|_{p_0} * \bar \gamma_{p_0p_k}]= [C]^m \in \pi_1(C, p_0)
\label{equ:defhomotopyI}
\end{equation}
where $[C]$ is the generator of $\pi_1(C, p_0) \cong \mathbb Z$ determined by the orientation of $C$.
\begin{definition}[\cite{HKM1}]
The {\it fractional Dehn twist coefficient} $c(h)$ of the diffeomorphism $h$ is defined to be
$$c(h)=m+\frac{k}{n}.$$
\label{def:fractional dehn twist}
\end{definition}
\begin{remark}
Since the connected components of ${\rm Homeo}(S)$ are contractible when $S$ is hyperbolic (see Theorem 1.14 in \cite{FM} and the references therein), any two paths $H_t$ and $H'_t$ between $h$ and $\varphi$ (as above) are homotopic rel $\{h, \varphi\}$. Thus the paths $H_t|_{p_0}$ and $H'_t|_{p_0}$ are homotopic rel $\{0, 1\}$, which shows that $c(h)$ is independent of the choice of $H_t$. A similar argument shows that $c(h)$ depends only on the class of $h$ in ${\rm Mod}(S)$, and so determines an invariant for each mapping class in ${\rm Mod}(S)$.
\end{remark}
\begin{proposition}{\rm (cf. \cite[Theorem 4.4]{KR1})} \label{prop:lower bound FDTC}
Let $S$ be a compact orientable hyperbolic surface with connected boundary $C$ and let $h$ be a diffeomorphism of $S$ which restricts to the identity on $\partial S$. If $h$ is pseudo-Anosov and $c(h)\neq 0$, then
\begin{displaymath}
|c(h)|\geq \frac{1}{-2\chi(S)}.
\end{displaymath}
\end{proposition}
\begin{proof}
This is a straightforward consequence of the Euler-Poincar\'e formula \cite[Proposition 11.4]{FM}.
Let $\mathcal{F}^s$ be the stable singular foliation of the pseudo-Anosov homeomorphism $\varphi$ that is freely isotopic to $h$. By definition, $c(h)$ can be written as a possibly unreduced fraction $p/q$ with $q > 0$ being the number of the singular points of $\mathcal{F}^s$ on $C$.
Let $\{x_i\}$ be the singular points of $\mathcal{F}^s$ contained in the interior of $S$. For each $i$, $n_i \geq 3$ will denote the number of prongs of $\mathcal{F}^s$ at $x_i$. Then by the Euler-Poincar\'e formula, we have
\begin{displaymath}
2(\chi(S) +1)= (2-q) + \sum_{i} (2-n_i).
\end{displaymath}
Since $n_i\geq 3$, $\sum_{i} (2-n_i) \leq 0$ with equality only if $\{x_i\}$ is empty. It follows that
$-2\chi(S)\geq q$. By assumption, $c(h)\neq 0$. Therefore,
\begin{displaymath}
|c(h)|=\frac{|p|}{q}\geq \frac{1}{q}\geq \frac{1}{-2\chi(S)}.
\end{displaymath}
\end{proof}
\subsection{Fractional Dehn twist coefficients via translation numbers}
\label{subsec:FDTC translation number}
Recall that the group ${\rm Homeo}_+(S^1)$ has the following central extension:
\begin{equation}
1 \longrightarrow \mathbb Z \longrightarrow {\widetilde{\rm{Homeo}}}_+(S^1) \stackrel{\pi}{\longrightarrow} {\rm Homeo}_+(S^1) \longrightarrow 1,
\label{equ:homeoext}
\end{equation}
where ${\widetilde{\rm{Homeo}}}_+(S^1)$ is the universal covering group of ${\rm Homeo}_+(S^1)$, consisting of the elements of ${\rm Homeo}_+(\mathbb{R})$ which commute with translation by $1$, which we denote by ${\rm sh}(1)$. The kernel of the covering homomorphism $\pi$ is the group of integral translations of the real line.
Poincar\'e showed that for $\tilde f \in {\widetilde{\rm{Homeo}}}_+(S^1)$ and $x_0 \in \mathbb R$, the limit
\begin{displaymath}
\lim_{n\to \infty} \frac{\tilde f^n(x_0)-x_0}{n}
\end{displaymath}
exists and is independent of $x_0$ (see \cite[\S 5]{Ghy}). He defined the {\it translation number} $\tau(\tilde f)$ of $\tilde f$ to be this common limit.
Let $\tilde S$ be the universal cover of $S$ and $\tilde{C}\subset\tilde S$ be a lift of $\partial S=C$. By construction, we can take $\tilde S$ to be a closed subset of $\mathbb H^2$ with geodesic boundary. In particular, $\tilde C$ is geodesic. We use $\partial_{\infty} \tilde{S}$ to denote the intersection of the Euclidean closure of $\tilde S$ in $\overline{\mathbb H}^2$ with $\partial \overline{\mathbb{H}}^2$. Then $\partial \tilde S \cup \partial_{\infty} \tilde{S}$ is homeomorphic to a circle. The complement of the closure of $\tilde C$ in this circle is homeomorphic to $\mathbb{R}$ which we parameterise and orient so that the lift of the Dehn twist along $C$, denoted by $T_{\partial S}$, is the translation ${\rm sh}(1)$.
Given any element $f\in {\rm Mod}(S)$, let $\tilde f: \tilde{S}\rightarrow \tilde{S}$ denote the unique lift of $f$ satisfying $\tilde f|_{\tilde{C}}=id_{\tilde{C}}$. This correspondence defines an embedding of groups
\begin{displaymath}
\iota: {\rm Mod}(S)\rightarrow {\widetilde{\rm{Homeo}}}_+(S^1),
\end{displaymath}
with $\iota(T_{\partial S})={\rm sh}(1)$. It was shown in \cite{Mal} (see also \cite[Theorem 4.16]{IK}) that the fractional Dehn twist coefficient of $h$ in ${\rm Mod}(S)$ satisfies
\begin{displaymath}
c(h)=\tau(\iota(h)).
\end{displaymath}
Here are a few properties of fractional Dehn twist coefficients inherited from those of translation numbers (cf. \cite[\S 5]{Ghy}).
\begin{lemma} \label{lem:poincare translation number}
The fractional Dehn twist coefficient map $c: {\rm Mod}(S) \to \mathbb Q$ takes the value $1$ on $T_{\partial S}$ and is invariant under conjugation. If $h_1, h_2 \in {\rm Mod}(S)$ commute, then $c(h_1h_2) = c(h_1) + c(h_2)$. In particular, $c(h^n)=nc(h)$ for any $h \in {\rm Mod}(S)$ and $n \in \mathbb{Z}$.
\end{lemma}
\section{Euler classes of circle bundles and representations}
\label{sec: euler class}
In this section, we first review the definition of the Euler class of an oriented $S^1$-bundle over a CW complex $X$ and how it relates to the problem of lifting a representation $\rho: \pi_1(X)\rightarrow {\rm Homeo}_+(S^1)$ to a representation into $\widetilde{\rm Homeo}_+(S^1)$ (see (\ref{equ:homeoext})).
\subsection{Euler classes of circle bundles}
\label{subsec: Euler class circle bundle}
Let $\xi$ be an oriented circle bundle $E \to X$ where $X$ is a CW complex. The Euler class $e(\xi) \in H^2(X)$ is the obstruction to finding a section of $\xi$ and its vanishing is equivalent to the triviality of $\xi$ as a bundle. A representative cocycle for $e(\xi)$ is constructed as follows. See Chapter 4 of \cite{CC}, and in particular \S 4.3 and \S 4.4, for the details.
Since $S^1$ is a $K(\mathbb{Z},1)$, the only obstruction to the existence of a section of $\xi$ arises when one tries to extend a section over the $1$-skeleton $X^{(1)}$ of $X$ to the $2$-skeleton $X^{(2)}$. Fix a section $\sigma: X^{(1)}\rightarrow E $ and define a cellular $2$-cochain $c_\sigma: C_2(X)\rightarrow \mathbb{Z}$ as follows. Let $\varphi_\alpha: D^2\rightarrow X^{(2)}$ be the characteristic map of a $2$-cell $e_\alpha$ and let $\xi_{D^2}=(E_{D^2}\rightarrow D^2)$ denote the pull-back of $\xi$ through $\varphi_\alpha$. Then $\sigma$ defines a section of $\xi_{D^2}$ over $\partial D^2$. Since $D^2$ is contractible, $\xi_{D^2}$ is trivial. By fixing a trivialization $E_{D^2}\rightarrow D^2\times S^1$, one has the following composite map from $S^1$ to $S^1$
\begin{displaymath}
S^1 = \partial D^2\rightarrow E_{D^2}\rightarrow D^2\times S^1 \rightarrow S^1.
\end{displaymath}
The value of $c_\sigma$ on $e_\alpha$ is defined to be the degree of this map. This $2$-cochain is actually a cocycle whose cohomology class $[c_\sigma]$ is independent of the choices made in its construction. Further, the class is equal to the Euler class $e(\xi)$.
Given an oriented $2$-disk-bundle or an oriented $\mathbb R^2$-bundle, there is an associated oriented circle bundle $\xi$ over $X$. The Euler class of the $2$-disk-bundle or the $\mathbb R^2$-bundle is defined to be $e(\xi)$.
\sigmaubsection{Euler classes and Thom classes}
\label{subsec: euler and thom}
For later use, we record how to express the Euler class of an oriented $S^1$-bundle $\xi$ in terms of the Thom class of the associated disk bundle. For details, see \cite[\S 5.7]{Spa}, where the Thom class is referred to as the {\it orientation class} and the Euler class is referred to as the {\it characteristic class}.
Consider the mapping cylinder $D_\xi \to X$ of an oriented circle bundle $E \to X$. This is an oriented $2$-disk bundle and as such has a Thom class $u_\xi \in H^2(D_\xi, E)$ uniquely characterised by the condition that for each disk fibre $D$ of $D_\xi$, the image of $u_\xi$ under the restriction homomorphism $H^2(D_\xi, E) \to H^2(D, \partial D)$ is the orientation generator. The Euler class $e(\xi)$ of $\xi$ is the image of $u_\xi$ under the composition $H^2(D_\xi, E) \to H^2(D_\xi) \xrightarrow{\cong} H^2(X)$.
\subsection{Lifting representations with values in ${\rm Homeo}_+(S^1)$}
Fix a representation $\rho:\pi_1(X)\rightarrow {\rm Homeo}_+(S^1)$. There is an associated oriented circle bundle $E_\rho \to X$ whose total space is defined by
\begin{displaymath}
E_\rho = \widetilde X \times S^1 /(x, v)\sim (g\cdot x, \rho(g) v),
\end{displaymath}
where $\widetilde X$ is the universal cover of $X$. The projection map $\widetilde X \times S^1 \to \widetilde X$ descends to the bundle map $E_\rho \to X$.
\begin{lemma}[\cite{Mil} Lemma 2]
A representation $\rho: \pi_1(X) \rightarrow {\rm Homeo}_+(S^1)$ lifts to a representation $\tilde{\rho}: \pi_1(X)\rightarrow {\widetilde{\rm{Homeo}}}_+(S^1)$ if and only if the Euler class of the circle bundle $E_\rho$ vanishes.
\qed
\label{lem:milnor Euler class}
\end{lemma}
\begin{remark}
\label{rem: Euler class of a representation}
Each central extension of a group $G$ by an abelian group $A$ determines a class $e \in H^2(G; A)$, called the characteristic class of the extension, and the correspondence is bijective (cf.~\cite[\S 6.1]{Ghy}). Such an extension is isomorphic to the trivial extension $1 \to A \to G \times A \to G \to 1$ if and only if its characteristic class is zero. It is known that the characteristic class $e_{S^1}$ of the extension (\ref{equ:homeoext}) generates $H^2({\rm Homeo}_+(S^1))\cong \mathbb{Z}$. See \cite[Example 2.12]{MM}. Hence given a representation $\rho: G\rightarrow {\rm Homeo}_+(S^1)$, $\rho$ admits a lift to a representation with values in ${\widetilde{\rm{Homeo}}}_+(S^1)$ if and only if $e(\rho)=\rho^*(e_{S^1})= 0$. When $G = \pi_1(M)$, the two obstruction classes described above coincide. More precisely, when $M$ is irreducible, we have $e(\rho)= \pm e(E_\rho) \in H^2(M)=H^2(\pi_1(M))$. See the proof of \cite[Lemma 2]{Mil}.
\end{remark}
\subsection{The vanishing of the Euler class of certain lifted contact structures}
\label{subsec: euler and contact}
Let $M$ be an oriented rational homology $3$-sphere and $L= \bigsqcup_i K_i$ an oriented null-homologous link in $M$. Fix a compact, connected, oriented surface $S$ properly embedded in $X(L)$ and let
$$(\Sigma_n(L), X_n(L), \widetilde L) \xrightarrow{\;\; p \;\;} (M, X(L), L)$$
be as in \S \ref{sec:cyclic branched cover}, where $n \geq 1$.
Let $\xi=\ker(\alpha)$ be a positive contact structure on $M$ determined by a smooth, nowhere zero $1$-form $\alpha$ and suppose that $L$ is positively transverse to $\xi$. Recall the lift $\widetilde \xi$ of $\xi$ to $\Sigma_n(L)$ described in \S \ref{subsec: contact}.
\begin{lemma}
\label{lem:Euler class vanishes links}
If $e(\xi) = 0$, then $e(\widetilde \xi)=0$.
\end{lemma}
\begin{proof}
To simplify notation, we write $N_i$ for $N(K_i)$ and $\widetilde N_i$ for $N(\widetilde{K}_i)$. Let $D_i$ be a meridian disk of $N_i$ oriented coherently with $K_i$ and $M$. Then $\widetilde D_i=p^{-1}(D_i)$ is an oriented meridional disk in $\widetilde N_i$. We use $\widetilde m_i$ to denote the oriented boundary of $\widetilde D_i$. By construction, the $n$-fold cyclic branched cover $\Sigma_n(L)$ is obtained from $X_n(L)$ by attaching the meridian disk $\widetilde D_i$ to $X_n(L)$ along $\widetilde m_i$ for each $i$ and then plugging the boundary of the resultant $3$-manifold with $3$-cells.
Set $\alpha_0=\alpha$ where $\alpha$ is the smooth, nowhere zero $1$-form with $\xi = \mbox{ker}(\alpha)$ described above, and fix a one-parameter family of co-oriented $2$-plane fields $\xi_t=\ker(\alpha_t)$ ($t\in [0,1]$) on $\Sigma_n(L)$ such that $\alpha_t|_{N_i}=dz_i+(1-t)r_i^2d\theta_i$. Note that $\xi_1|_{D_i}$ is the tangent bundle of $D_i$.
Let $\widetilde \alpha_t=p^*(\alpha_t)$ over $\Sigma_n(L)\setminus \widetilde L$ where $\Sigma_n(L) \xrightarrow{p} M$ is the branched cover. According to our choice of tubular neighborhoods $N_i$, $\widetilde N_i$ and the coordinate systems on them (\S \ref{subsec: contact}), we have
\begin{displaymath}
\widetilde \alpha_t|_{\widetilde N_i\setminus \widetilde K_i}=d\tilde z_i+n(1-t)\tilde r_i^2d\tilde \theta_i.
\end{displaymath}
Then $\widetilde \alpha_t$ extends smoothly to $\Sigma_n(L)$ with $\widetilde \alpha_t=d\tilde z_i$ over $\widetilde K_i$. Hence it defines a homotopy between co-oriented $2$-plane fields $\tilde \xi=\ker(\widetilde \alpha_0)$ and $\widetilde \xi_1=\ker(\widetilde \alpha_1)$. To show $e(\widetilde \xi)=0$, we will show $e(\widetilde \xi_1)=0$.
By assumption $e(\xi_1) = e(\xi)=0$ in $H^2(M)$ so in particular, $\xi_1$ admits a nowhere zero section $\sigma$. Since $\xi_1|_{D_i}$ is the tangent bundle of $D_i$, we may suppose that $\sigma|_{D_i}=\partial_{x_i}$, where $x_i=r_i\cos(\theta_i)$ over $D_i$. See Figure \ref{fig:branched cover of a disk}.
Let $\widetilde \sigma: X_n\rightarrow \widetilde \xi_1$ be a section of the restriction of $\widetilde \xi_1$ to $X_n$ obtained by lifting $\sigma$. Then there is a $2$-cocycle $c_{\widetilde \sigma}$ which vanishes on $X_n$ determined by $\widetilde \sigma$ with $[c_{\widetilde\sigma}]=e(\widetilde \xi_1)$ (cf. \S \ref{subsec: Euler class circle bundle}).
Let $i: X_n(L) \rightarrow \Sigma_n(L)$ and $j: (\Sigma_n(L), \emptyset) \to (\Sigma_n(L), X_n(L))$ be the inclusions and consider the exact sequence
\begin{equation}
\label{equ:cohomology sequence of the pair (un)branched covers}
\ldots \rightarrow H^1(\Sigma_n(L)) \rightarrow H^1(X_n(L)) \xrightarrow{\delta} H^2(\Sigma_n(L),X_n(L)) \xrightarrow{j^*} H^2(\Sigma_n(L)) \xrightarrow{i^*} H^2(X_n(L))\rightarrow \ldots \nonumber
\end{equation}
Since $\widetilde \sigma$ is defined over $X_n(L)$, we have $i^*([c_{\widetilde \sigma}]) = 0$ and hence $c_{\widetilde \sigma}$ represents a cohomology class in $H^2(\Sigma_n(L),X_n(L))$, which we also denote by $[c_{\widetilde \sigma}]$. We will show that this latter class lies in the image of $\delta$ and therefore $e(\tilde\xi_1)$, which equals $j^*([c_{\widetilde \sigma}])$, is $0$.
Note that as $H_1(\widetilde N,\partial \widetilde N)\cong H^2(\widetilde N)=0$ we have
$$H^2(\Sigma_n(L),X_n(L)) \cong H^2(\widetilde N, \partial \widetilde N) \cong {\rm Hom}(H_2(\widetilde N, \partial \widetilde N),\mathbb{Z}) \cong \oplus_i {\rm Hom}(H_2(\widetilde N_i, \partial \widetilde N_i),\mathbb{Z}),$$
where $\widetilde N=\bigsqcup_i \widetilde N_i$, as above.
It follows that $[c_{\widetilde \sigma}] \in H^2(\Sigma_n(L),X_n(L))$ is determined by the value of $c_{\widetilde \sigma}$ on the classes $[\widetilde D_i]$ carried by the fundamental classes of the disks $\widetilde D_i$.
\begin{figure}[ht]
\begin{tikzpicture}[scale=0.95]
\centering
\filldraw[light-gray] (3,3) circle(1.5);
\foreach \x in {0,22.5,...,360} {
\draw [help lines,->] ({3+1.5*cos(\x)}, {3+1.5*sin(\x)}) -- ({3+1.9*cos(\x)}, {3+1.9*sin(\x)});
}
\draw (3,3) circle(1.5);
\foreach \x in {0, 90, ..., 270}{
\draw [blue,->] ({3+1.5*cos(\x)}, {3+1.5*sin(\x)}) -- ({3+1.9*cos(\x)}, {3+1.9*sin(\x)});
\filldraw ({3+1.5*cos(\x)}, {3+1.5*sin(\x)}) circle (0.04);
\node at ({3+1.5*cos(\x+22.5)}, {3+1.5*sin(\x+22.5)}) {\footnotesize $\times$};
\draw [blue,->] ({3+1.5*cos(\x+22.5)},{3+1.5*sin(\x+22.5)}) -- ({3+0.4*cos(\x-65.5)+1.5*cos(\x+22.5)}, {3+0.4*sin(\x-65.5)+1.5*sin(\x+22.5)});
\draw [blue, <-] ({3+1.1*cos(\x+45)}, {3+1.1*sin(\x+45)}) -- ({3+1.5*cos(\x+45)}, {3+1.5*sin(\x+45)});
\filldraw [white] ({3+1.5*cos(\x+45)}, {3+1.5*sin(\x+45)}) circle (0.05);
\draw ({3+1.5*cos(\x+45)}, {3+1.5*sin(\x+45)}) circle (0.05);
\draw [blue,->] ({3+1.5*cos(\x+67.5)},{3+1.5*sin(\x+67.5)}) -- ({3+0.4*cos(\x+157.5)+1.5*cos(\x+67.5)}, {3+0.4*sin(\x+157.5)+1.5*sin(\x+67.5)});
\node at ({3+1.5*cos(\x+67.5)}, {3+1.5*sin(\x+67.5)}) {\footnotesize $\ast$};
}
\node at ({3+1.1*cos(0)}, {3+1.1*sin(0)}) {\tiny $\tilde A_1$};
\node at ({3+1.1*cos(90)}, {3+1.1*sin(90)}) {\tiny $\tilde A_2$};
\node at ({3+1.1*cos(180)}, {3+1.1*sin(180)}) {\tiny $\tilde A_3$};
\node at ({3+1.1*cos(270)}, {3+1.1*sin(270)}) {\tiny $\tilde A_4$};
\node [blue] at (0.7,3) {\footnotesize $\tilde\sigma|_{\partial\widetilde D}$};
\node at (3,3) {\footnotesize $\widetilde D$};
\filldraw[light-gray] (11,3) circle(1.5);
\foreach \x in {0,22.5,...,360} {
\draw [help lines,->] ({11+1.5*cos(\x)}, {3+1.5*sin(\x)}) -- ({11+1.9*cos(\x)}, {3+1.9*sin(\x)});
\draw [->, blue] ({11+1.5*cos(\x)}, {3+1.5*sin(\x)}) -- ({11.4+1.5*cos(\x)}, {3+1.5*sin(\x)});
}
\draw (11,3) circle(1.5);
\filldraw ({11+1.5*cos(0)}, {3+1.5*sin(0)}) circle (0.04);
\node [left] at ({11+1.5*cos(0)}, {3+1.5*sin(0)}) {\tiny $A$};
\node at ({11+1.5*cos(90)}, {3+1.5*sin(90)}) {{\footnotesize $\times$}};
\filldraw [white]({11+1.5*cos(180)}, {3+1.5*sin(180)}) circle (0.05);
\draw ({11+1.5*cos(180)}, {3+1.5*sin(180)}) circle (0.05);
\node at ({11+1.5*cos(270)}, {3+1.5*sin(270)}) {{\footnotesize $\ast$}};
\node [blue,right] at (13,3) {\footnotesize $\sigma|_D=\partial_x$};
\node at (11,3) {\footnotesize $D$};
\draw [thick,->] (6,3) --(8,3);
\node [above] at (7,3) {\footnotesize $p$};
\end{tikzpicture}
\caption{\footnotesize The normal vector field $\partial_r$ along $\partial D$ lifts to the normal vector field $\partial_{\tilde r}$ along $\partial \tilde D$. Also for any given point $\tilde x\in \partial \tilde D$, the angle between ${\partial_{\tilde r}}|_{\tilde x}$ and $\tilde \sigma|_{\tilde x}$ is the same as the angle between $\partial_r|_{p(\tilde x)}$ and $\sigma|_{p(\tilde x)}$. Based on these two observations, it is easy to draw $\tilde \sigma$ along $\partial \tilde D$. In this figure, we illustrate $\tilde \sigma$ along the boundary of a meridional disk $\tilde D$ for $4$-fold cyclic branched covers. It is also easy to see that from the point $\tilde A_1$ to $\tilde A_2$, the vector field $\tilde \sigma$ rotates by an angle of $(2\pi-\frac{2\pi}{4})$ clockwise. Hence the total rotation of $\tilde \sigma$ along $\partial \tilde D$ is $-\frac{2\times 3\pi}{4}\times 4=-3\times 2\pi$. Therefore, by the construction of $c_{\tilde \sigma}$, we have $c_{\tilde \sigma}([\tilde D])=-3$.}
\label{fig:branched cover of a disk}
\end{figure}
Figure \ref{fig:branched cover of a disk} illustrates the calculation of the value $c_{\widetilde \sigma}$ on a meridional disk. In particular, by our choice of $\widetilde \sigma$, it follows that the value $c_{\widetilde\sigma}([\widetilde D_i])=1-n$ is independent of $i$. On the other hand, if we denote by $u \in H^1(X_n(L))$ the Poincar\'e dual of the element of $H_2(X_n(L), \partial X_n(L))$ carried by the fundamental class of the lift $\widetilde S$ of a Seifert surface $S$, our assumptions on $S$ and $L$ imply that $\delta(u) \in H^2(\Sigma_n(L), X_n(L))$ evaluates to $1$ on each $[\widetilde D_i]$. It follows that $[c_{\widetilde \sigma}] \in H^2(\Sigma_n(L), X_n(L))$ is $(1-n)\delta(u)$. In particular, it lies in the image of $\delta$, which completes the proof.
\end{proof}
\section{Universal circle actions}
\label{sec:taut foliations circle}
Throughout this section and the next, $M$ will denote a closed connected oriented $3$-manifold and $\mathcal{F}$ a topological co-oriented taut foliation on $M$. Such foliations are known to be isotopic to foliations whose leaves are smoothly immersed and whose tangent planes vary continuously across $M$ (\cite{Cal1}). We assume below that $\mathcal{F}$ satisfies this degree of smoothness. Consequently, the tangent planes of $\mathcal{F}$ determine a $2$-plane subbundle $T\mathcal{F}$ of $TM$. Orient $T\mathcal{F}$ so that its orientation together with the co-orientation of $\mathcal{F}$ determines the orientation of $M$.
Suppose that there is a Riemannian metric $g$ on $M$ whose restriction to the leaves of $\mathcal F$ has constant curvature $-1$. By this we mean that the restriction of $g$ to each plaque of $\mathcal F$ satisfies this curvature condition. Ostensibly, this appears to be a strong constraint on $M$ and $\mathcal{F}$. It precludes, for instance, the existence of a leaf of $\mathcal{F}$ which is homeomorphic to either a $2$-sphere or torus. However, work of Plante and Candel (\cite{Can,Pla}) shows that when $M$ is a rational homology $3$-sphere, such a metric always exists. See the proof of Theorem \ref{prop:taut foliation left orderability}.
Given such a metric $g$ on $M$, Thurston constructed a circle $S^1_{univ}$ associated to $\mathcal F$ and a non-trivial homomorphism $\rho_{univ}: \pi_1(M) \rightarrow {\rm Homeo}_+(S^1_{univ})$ determined by the geometry of $g$. In this section, we review the construction of $\rho_{univ}$ following the approach found in \cite{CD} (see also \cite{Cal2}).
\subsection{Bundles from circles at infinity}
\label{subsubsec:circle bundles at infinity}
Let $(M, g, \mathcal F)$ be as above. As $\mathcal F$ is taut, the inclusion map induces an injection from the fundamental group of each of its leaves to $\pi_1(M)$ (\cite{Nov}). Hence each leaf of the pull-back foliation $\widetilde{\mathcal{F}}$ on the universal cover $\widetilde M$ of $M$ is simply-connected.
The {\it leaf space} $\mathcal{L}$ of $\widetilde{\mathcal{F}}$ is the quotient space of $\widetilde{M}$ obtained by collapsing each leaf of $\widetilde{\mathcal{F}}$ to a point. The simple-connectivity of $\widetilde M$ implies that transversals to $\widetilde{\mathcal F}$ map homeomorphically to their images in $\mathcal L$ and as such, the co-orientation on $\mathcal{F}$ determines an orientation on these images. Globally, $\mathcal{L}$ is an oriented, though not necessarily Hausdorff, $1$-manifold (cf. \cite[Corollary D.1.2]{CC}).
We use the Poincar\'e disk model for the hyperbolic plane $\mathbb{H}^2$. In particular, the underlying space of $\mathbb{H}^2$ is the open unit ball in $\mathbb R^2$ whose closure, denoted by $\overline{\mathbb{H}}^2$, is the unit disk. The boundary of $\overline{\mathbb{H}}^2$ is the unit circle $S^1$ and is called the boundary of $\mathbb{H}^2$ at infinity. Given a point $p$ in $\mathbb{H}^2$ and a unit tangent vector $v\in UT_p\mathbb{H}^2$, there is a unique geodesic ray $\gamma_{p,v}: [0, \infty) \to \mathbb{H}^2$ for which $\gamma_{p,v}(0) = p$ and $\dot{\gamma}_{p,v}(0) = v$, and this geodesic ray limits to a unique point of $\partial \overline{\mathbb{H}}^2$. This correspondence determines a canonical homeomorphism between $UT_p\mathbb{H}^2$ and $\partial \overline{\mathbb{H}}^2$ for any $p\in \mathbb{H}^2$.
Since each leaf $\lambda$ of $\widetilde{\mathcal{F}}$ is isometric to the hyperbolic plane $\mathbb{H}^2$ with respect to the pull-back $\tilde g$ of $g$ to $\widetilde M$, each $\lambda$ gives rise to a circle at infinity which we denote by $\partial_\infty \lambda$. This association allows us to define two related $S^1$-bundles with fibres $\partial_\infty\lambda$. The first, denoted by $\bar{E}_\infty$, has base $\mathcal{L}$ and the second, denoted by $E_\infty$, has base $\widetilde M$. The topologies of these bundles are defined similarly.
Let $\{(U_\alpha, \varphi_\alpha)\}$ be a regular foliated atlas of $\widetilde{\mathcal{F}}$ such that $\varphi_\alpha(U_\alpha)\cong R_\alpha \times B_\alpha$ where $R_\alpha$ is a rectangular region in $\mathbb{R}^2$ and $B_\alpha$ is an open interval in $\mathbb{R}$. We use $\mathcal F_\alpha$ to denote the foliation on $R_\alpha \times B_\alpha$ determined by the plaques $R_\alpha \times \{x\}$.
Recall that we have assumed that the transition maps $\varphi_\beta\circ \varphi_\alpha^{-1}$ are horizontally smooth. In particular, the differential ``$D(\varphi_\beta\circ \varphi_\alpha^{-1})$'' is
defined and varies continuously over $T\mathcal F_\alpha|_{\varphi_\alpha(U_\alpha \cap U_\beta)}$. Hence if $UT\widetilde{\mathcal{F}}$ and $UTR_\alpha$ denote the unit tangent bundles of $\widetilde{\mathcal F}$ and $R_\alpha$, the atlas $\{(U_\alpha, \varphi_\alpha)\}$ determines local trivialisations $\{\widetilde{\varphi}_\alpha\}$ of $UT\widetilde{\mathcal F}$:
$$UT\widetilde{\mathcal F}|_{U_\alpha}\rightarrow (UTR_\alpha) \times B_\alpha \equiv (R_\alpha \times S^1) \times B_\alpha \equiv (R_\alpha \times B_\alpha) \times S^1 \cong U_\alpha \times S^1$$
whose transition functions $\widetilde{\varphi}_\beta \circ \widetilde{\varphi}_\alpha^{-1}: \varphi_\alpha(U_\alpha \cap U_\beta) \times S^1 \to \varphi_\beta(U_\alpha \cap U_\beta) \times S^1$ are continuous.
Consider the fibre-preserving bijection
\begin{equation} \label{eqn: G}
G: UT\widetilde{\mathcal{F}} \rightarrow E_\infty, (\tilde p, v) \mapsto \gamma_{(\tilde p, v)}
\end{equation}
where $\gamma_{(\tilde p, v)}(t)$ is the geodesic ray on the leaf of $\widetilde{\mathcal F}$ containing $\tilde p$ which satisfies $\gamma_{(\tilde p, v)}(0)=\tilde{p}$ and $\dot{\gamma}_{(\tilde p, v)}(0)=v$. We use this bijection to topologise $E_\infty$ and endow it with the structure of a locally-trivial oriented $S^1$-bundle over $\widetilde M$ with transition functions $\{\widetilde{\varphi}_\beta \circ \widetilde{\varphi}_\alpha^{-1}\}$.
Defining the topology on $\overline{E}_\infty$ is similar. Fix a transversal $\tau$ to $\widetilde{\mathcal F}$. The simple-connectivity of $\widetilde M$ implies that $\tau$ embeds in $\mathcal L$ with image $l$, say. As above, there is a bijection
\begin{equation} \label{eqn: Gtau}
G_\tau: UT\widetilde{\mathcal{F}}|_\tau \rightarrow \bar E_\infty|_{l}, (\tilde p, v) \mapsto \gamma = \gamma_{(\tilde p, v)}
\end{equation}
which we declare to be a homeomorphism. Distinct transversals with the same image in $\mathcal L$ determine the same topology on $\bar E_\infty|_{l}$ since the geometry of the leaves of $\widetilde{\mathcal F}$ varies continuously over compact subsets of $\widetilde M$. See \cite[\S 2.8]{CD} for more details and discussion.
\begin{remark} \label{remark: bundle action}
By construction, the deck transformations of the cover $\widetilde M \to M$ determine isometries between the leaves of $\widetilde{\mathcal F}$ and as such, induce homeomorphisms between the fibres of $\bar{E}_\infty$ and $E_\infty$. The naturality of the topologies on $\bar{E}_\infty$ and $E_\infty$ is reflected in the fact that these homeomorphisms determine actions of $\pi_1(M)$ on $\bar{E}_\infty$ and $E_\infty$ by bundle maps.
\end{remark}
\subsection{Circular orders and monotone maps}
\label{subsubsec:sections and universal circle}
A circular order on a set $O$ of cardinality $4$ or more is a collection of linear orders $<_p$ on $O \setminus \{p\}$, one for each $p \in O$, such that for $p, q \in O$, the linear orders $<_p$ and $<_q$ differ by a {\it cut} on $O \setminus \{p, q\}$. (See \cite[Definition 2.34]{Cal2} for the details.) If $O = \{x, y, z\}$ has three elements, we add the condition that $y <_x z$ if and only if $z <_y x$. Subsets of cardinality $3$ or more of circularly ordered sets inherit circular orders in the obvious way.
The archetypal example of a circularly ordered set is an oriented circle where the linear orders $<_p$ on $S^1 \setminus \{p\}$ are those determined by the orientation. More generally, any subset of cardinality $3$ or more of an oriented circle inherits a circular order from the orientation on the circle.
Given a circularly ordered set $O$ of four or more elements, we define an ordered triple $(x, y, z) \in O^3$ to be {\it positively ordered} if there is a $p \in O \setminus \{x, y, z\}$ such that $x <_p y <_p z$. It is called {\it negatively ordered} if there is a $p \in O \setminus \{x, y, z\}$ such that $y <_p x <_p z$. We leave it to the reader to verify that a positively ordered triple is never negatively ordered and vice versa. Further, a triple of distinct points $(x, y, z)$ is positively ordered, respectively negatively ordered, if and only if $(y, z, x)$ is positively ordered, respectively negatively ordered.
A totally ordered set $S$ admits a natural topology with basis consisting of the open interval $(x, y) = \{p \in S : x <_S p <_S y\}$. A map $f: S \to T$ between totally-ordered sets is called {\it monotone} if it is surjective and if $f^{-1}(t)$ is a closed interval $[x, y] = \{p \in S : x \leq_S p \leq_S y\}$ for each $t \in T$. Monotone maps are continuous and satisfy $f(s_1) \leq_T f(s_2)$ whenever $s_1 <_S s_2$.
Analogous definitions are made for circularly ordered sets. Given distinct points $x, y$ in a circularly ordered set $O$, the {\it open interval} $(x, y)$ is $\{p \in O : (x, p, y) \hbox{ is positively ordered}\}$. Closed intervals are defined similarly. The complement of an open, respectively closed, interval is a closed, respectively open, interval.
The set of open intervals in $O$ forms a basis of the {\it order topology} on $O$. Closed intervals are closed in this topology. If $O$ is a subset of an oriented circle with the induced circular order, then the order topology coincides with the subspace topology and the open intervals of $O$ are intersections of $O$ with open arcs of the circle.
A map $f: O_1 \to O_2$ between circularly ordered sets is called {\it monotone} if it is surjective and point inverses are closed intervals. Then for any $p_2 \in O_2$ and $p_1 \in f^{-1}(p_2) \subset O_1$, the restriction of $f$ to $(O_1 \setminus f^{-1}(p_2), <_{p_1}) \to (O_2 \setminus \{p_2\}, <_{p_2})$ is a monotone map of totally ordered sets. Monotone maps are continuous.
\subsection{Sections of $\bar{E}_\infty$ and universal circles}
The key to the Calegari-Dunfield proof of the existence of a universal circle $S^1_{univ}$ of $\mathcal F$ is the construction of a certain set of sections $\mathcal{S} = \{\sigma: \mathcal{L}\rightarrow \bar{E}_\infty\}$ of the circle bundle $\bar{E}_\infty$ which is circularly orderable and is closed under the action of $\pi_1(M)$ on $\bar{E}_\infty$ (\cite[\S 6]{CD}). The set $\mathcal{S}$ is separable with respect to the order topology and contains no pair of distinct elements $\sigma_1, \sigma_2$ such that $(\sigma_1, \sigma_2) = \emptyset$ (i.e. $ \mathcal{S}$ has no {\it gaps}), so it can be embedded into an oriented circle as a dense ordered subspace. This circle turns out to be the universal circle $S^1_{univ}$.
The action of $\pi_1(M)$ on $\mathcal{S}$ is order-preserving and continuous in the order topology, which implies that it extends to an orientation-preserving action on $S^1_{univ}$, yielding a homomorphism $\rho_{univ}: \pi_1(M)\rightarrow {\rm Homeo}_+(S_{univ}^1)$.
For each leaf $\lambda$ of $\mathcal{L}$, the evaluation map $e_\lambda: \mathcal{S} \rightarrow \partial_\infty\lambda$ is continuous and sends the closed interval $[\sigma_1, \sigma_2]$ into the interval $[\sigma_1(\lambda), \sigma_2(\lambda)]$. It extends to a monotone map $\phi_\lambda: S^1_{univ} \to \partial_\infty \lambda$ which, in particular, is continuous of degree one.
It is shown in \cite{CD} that these objects satisfy the conditions of the following definition.
\begin{definition} {\rm (\cite[Definition 6.1]{CD})}
Let $\mathcal{F}$ be a co-oriented taut foliation of a closed oriented $3$-manifold $M$ and suppose that $M$ admits a Riemannian metric whose restriction to each leaf of $\mathcal F$ has constant curvature $-1$. A {\it universal circle} for $\mathcal{F}$ is a circle $S_{univ}^1$ together with the following data:
$(1)$ A nontrivial representation
\begin{displaymath}
\rho_{univ}: \pi_1(M)\rightarrow {\rm Homeo}_+(S_{univ}^1)
\end{displaymath}
$(2)$ For every leaf $\lambda$ of the pull-back foliation $\widetilde{\mathcal{F}}$, there is a monotone map
\begin{displaymath}
\phi_\lambda: S_{univ}^1\rightarrow \partial_{\infty}\lambda
\end{displaymath}
$(3)$ For every leaf $\lambda$ of $\widetilde{\mathcal{F}}$ and every $\alpha\in \pi_1(M)$, the following diagram commutes:
\begin{figure}[ht]
\centering
\begin{tikzpicture}[scale=0.8]
\draw [thick, ->] (2.8, 3) -- (5,3);
\draw [thick, ->] (2.8, 1) -- (5,1);
\draw [thick, ->] (2, 2.5) -- (2, 1.5);
\draw [thick, ->] (5.4, 2.5) -- (5.4, 1.5);
\node at (2,3) {$S_{univ}^1$};
\node at (2,1) {$\partial_{\infty} \lambda$};
\node [right] at (5,3) {$S_{univ}^1$};
\node [right] at (5,1) {$\partial_{\infty} (\alpha\cdot \lambda)$};
\node [right] at (5.5, 2) {$\phi_{\alpha\cdot \lambda}$};
\node [left] at (2, 2) {$\phi_\lambda$};
\node [above] at (3.9,3) {$\rho_{univ}(\alpha)$};
\node [above] at (3.9,1) {$\alpha\cdot $};
\end{tikzpicture}
\end{figure}
$(4)$ If $\lambda$ and $\mu$ are incomparable leaves of $\widetilde{\mathcal{F}}$, then the core of $\phi_\lambda$ is contained in the closure of a single gap of $\phi_\mu$ and vice versa.
\label{def:universal circle}
\end{definition}
We refer the reader to \cite{CD} for more details on condition (4), which will not play a role below.
\begin{remark} \label{rem: rhouniv non-trivial}
Suppose that $\mathcal F$ is a co-oriented taut foliation on $M$ which admits, as above, a universal circle. We claim that the image of $\rho_{univ}$ is an infinite group which is non-abelian if $M$ is a rational homology $3$-sphere. To see this, first note that $\mathcal F$ must have a non-simply connected leaf. Otherwise each of its leaves is isometric to $\mathbb H^2$ and in particular homeomorphic to $\mathbb R^2$, which implies that $M$ is the $3$-torus (\cite{Ros}, \cite{Ga2}). But this is impossible; transverse loops to $\mathcal{F}$ are homotopically non-trivial (see \cite[Theorem 4.35(3)]{Cal2}, for instance), so the assumption that each of the leaves of $\mathcal{F}$ is isometric to $\mathbb H^2$ implies that $\pi_1(M) \cong \mathbb Z^3$ would have exponential growth (\cite[Lemma 7.2]{Pla}), a contradiction. Thus $\mathcal F$ has a non-simply-connected leaf $\bar \lambda$. There is a leaf $\lambda$ of $\widetilde{\mathcal F}$ contained in the inverse image of $\bar \lambda$ which is invariant under the deck transformations corresponding to $\pi_1(\bar \lambda) \leq \pi_1(M)$. Since $\pi_1(\bar \lambda)$ acts on the hyperbolic plane $\lambda$ by isometries, it induces a faithful action of $\pi_1(\bar \lambda)$ on $\partial_\infty \lambda$. Hence as $\pi_1(\bar \lambda)$ is non-trivial, it is infinite. So by (3) of the definition of a universal circle, the image of $\rho_{univ}$ is an infinite group. If $M$ is a rational homology $3$-sphere, the image cannot be abelian as otherwise it would be finite.
\end{remark}
\section{The Euler class of the universal circle action}
\label{sec:euler class universal circle}
Recall that $T\mathcal{F}$ denotes the oriented $2$-plane field over $M$ determined by $\mathcal{F}$. The goal of this section is to prove the following proposition.
\begin{proposition} {\rm (Thurston)}
Assume that $\mathcal{F}$ is a co-oriented taut foliation on a closed oriented $3$-manifold $M$ and that there is a Riemannian metric $g$
on $M$ which restricts to a metric of constant curvature $-1$ on each leaf $\lambda$ of $\mathcal{F}$. Let $\rho_{univ}:\pi_1(M)\rightarrow {\rm Homeo}_+(S_{univ}^1)$ be a universal circle representation associated to $(M,g,\mathcal{F})$. Then the Euler class of the oriented circle bundle $E_{\rho_{univ}}$ equals that of $T\mathcal{F}$.
\label{prop:Euler class}
\end{proposition}
\begin{remark}
We know of no proof of this fundamental result in the literature. We were made aware of it on separate occasions by Ian Agol, Danny Calegari, and Nathan Dunfield.
\end{remark}
\begin{proof}
As above, $(\widetilde M, \tilde g,\widetilde{\mathcal{F}})$ denotes the universal cover of $M$ equipped with the pull-back foliation $\widetilde{\mathcal{F}}$ and the pull-back metric $\tilde g$.
Let $\Phi: \widetilde M \times S^1_{univ} \to E_\infty$ be the fibre-preserving map sending $(\tilde{p},\sigma)$ to $\phi_\lambda(\sigma)$, where $\lambda$ is the leaf of $\widetilde{\mathcal F}$ containing $\tilde p$ and $\phi_\lambda:S^1_{univ}\rightarrow \partial_\infty \lambda$ is the degree one monotone map of Definition \ref{def:universal circle}(2). The continuity of $\Phi$ will be verified in Lemma \ref{lem: continuity of Phi} and we assume it for now.
Recall the bundle isomorphism $G:UT\widetilde{\mathcal{F}}\rightarrow E_\infty$ with $(\tilde{p},v)\mapsto \gamma_{(\tilde{p},v)}$ defined in \S\ref{subsubsec:circle bundles at infinity}. By composing $\Phi$ with $G^{-1}$, we obtain a fibre-preserving map
\begin{displaymath}
\widetilde F:=G^{-1}\circ \Phi: \widetilde M\times S_{univ}^1\rightarrow UT\widetilde{\mathcal{F}}
\end{displaymath}
which restricts to a degree one monotone map between fibres.
We claim that $\widetilde F$ descends to a fibre-preserving map $F: E_{\rho_{univ}} \rightarrow UT\mathcal{F}$. To see this, note that $\pi_1(M)$ acts on both $\widetilde{M}\times S^1_{univ}$ and $UT\widetilde{\mathcal{F}}$ with quotient spaces $E_{\rho_{univ}}$ and $UT\mathcal{F}$ respectively. The existence of $F$ will follow if we can show that $h\widetilde F=\widetilde Fh$ for any $h \in \pi_1(M)$. But given $\tilde p\in \lambda$ on $\widetilde{M}$ and $\sigma\in S^1_{univ}$, we have $$\widetilde F(h\cdot(\tilde p, \sigma)) =\widetilde F(h\tilde{p}, \rho_{univ}(h)\sigma)= G^{-1}\circ\phi_{h\lambda}(\rho_{univ}(h)\sigma).$$
By Definition \ref{def:universal circle}(3), we have $\phi_{h\lambda}(\rho_{univ}(h)\sigma)=h\phi_\lambda(\sigma)$. Hence
\begin{align*}
\widetilde F(h\cdot(\tilde p, \sigma))=G^{-1}\circ h\phi_\lambda(\sigma).
\end{align*}
Since $\pi_1(M)$ acts on $(\widetilde{M},\tilde{g})$ by isometries, $$\widetilde F(h\cdot(\tilde p, \sigma))=G^{-1}\circ h\phi_\lambda(\sigma)=hG^{-1}\circ \phi_\lambda(\sigma)=h\widetilde F(\tilde{p},\sigma),$$
which is what we needed to show.
Let $D_{\rho_{univ}}$ and $DT\mathcal{F}$ be the oriented disk bundles associated to $E_{\rho_{univ}}$ and $UT\mathcal{F}$ respectively (cf. \S \ref{sec: euler class}), and let $F_D: D_{\rho_{univ}}\rightarrow DT\mathcal{F}$ denote the map induced by $F$. We have the following commutative diagram in which $D_\infty$ denotes the fibre of $D_{\rho_{univ}}$ at $p\in M$:
\begin{figure}[ht]
\centering
\begin{tikzcd}
(D_{\infty}, S_{univ}^1) \arrow[hookrightarrow]{r} \arrow{d}{(F_D,F)|_p}
& (D_{\rho_{univ}}, E_{\rho_{univ}}) \arrow{d}{(F_D,F)}
\\
(DT_p\mathcal{F}, UT_p\mathcal{F}) \arrow[hookrightarrow]{r}
& (DT\mathcal{F}, UT\mathcal{F})
\varepsilonsilonnd{tikzcd}
\varepsilonsilonnd{figure}
Since $F$ restricts to a degree one map between fibres, $(F_D,F)|_p^*$ is an isomorphism which sends the orientation class in $H^2(DT_p\mathcal{F}, UT_p\mathcal{F})$ to the orientation class in $H^2(D_{\infty}, S_{univ}^1)$. Hence $(F_D,F)^*$ sends the Thom class of $DT\mathcal{F}$ in $H^2(DT\mathcal{F}, UT\mathcal{F})$ to the Thom class of $D_{\rho_{univ}}$ in $H^2(D_{\rho_{univ}}, E_{\rho_{univ}})$. By definition, then, we have $e(T\mathcal{F})=e(UT\mathcal{F})=e(E_{\rho_{univ}})$ (see \S \ref{sec: euler class}).
\end{proof}
To complete the proof, it remains to prove that $\Phi$ is continuous.
\begin{lemma} \label{lem: continuity of Phi}
The map $\Phi: \widetilde M \times S^1_{univ} \to E_\infty$ is continuous.
\end{lemma}
\begin{proof}
It suffices to show that for any foliation chart $(U_\alpha, \varphi_\alpha)$ of $\widetilde{\mathcal{F}}$, the restriction $\Phi|_{U_\alpha}: U_\alpha\times S^1_{univ}\rightarrow E_\infty|_{U_\alpha}$ is continuous. (See \S \ref{subsubsec:circle bundles at infinity}.)
Let $l_\alpha$ be the open interval on $\mathcal{L}$ corresponding to a transversal in $U_\alpha$ and $e_{l_\alpha}: \mathcal{S}\rightarrow \mathcal{S}|_{l_\alpha}$ the map which restricts a section in $\mathcal{S}$ to $l_\alpha$. Now define $\mathcal{S}_{l_\alpha}$ to be the image of $e_{l_\alpha}$. That is,
\begin{displaymath}
\mathcal{S}_{l_\alpha} = \{\sigma|_{l_\alpha} \, : \, \sigma \in \mathcal{S}\}.
\end{displaymath}
The inverse image by $e_{l_\alpha}$ of an element of $\mathcal{S}_{l_\alpha}$ is a closed interval in $\mathcal{S}$, since sections in $\mathcal{S}$ do not cross each other (\cite[\S 6.14]{CD}). Hence, the circular order on $\mathcal{S}$ defines a circular order on $\mathcal{S}_{l_\alpha}$ and if we equip $\mathcal{S}_{l_\alpha}$ with the associated order topology, then $e_{l_\alpha}$ is a monotone map between two circularly ordered sets. In particular, $e_{l_\alpha}$ is continuous.
On the other hand, $\mathcal{S}_{l_\alpha}$ is a subset of the set of continuous functions $C^0(l_\alpha, \bar{E}|_{l_\alpha})$ from $l_\alpha$ to $\bar{E}|_{l_\alpha}$. One can check that the order topology on $\mathcal{S}_{l_\alpha}$ agrees with the subspace topology induced by the compact-open topology on $C^0(l_\alpha, \bar{E}|_{l_\alpha})$. We denote the closure of $\mathcal{S}_{l_\alpha}$ in $C^0(l_\alpha, \bar{E}|_{l_\alpha})$ by $\bar{\mathcal{S}}_{l_\alpha}$.
Note that for any leaf $\lambda\in l_\alpha$, the evaluation map $e_\lambda: \mathcal{S}\rightarrow \partial_\infty \lambda$ factors through $\mathcal{S}_{l_\alpha}$. That is, the left-hand diagram immediately below commutes and its maps extend by continuity to yield the right-hand diagram.
\begin{center}
\begin{tikzpicture}[scale=0.8]
\node at (8, 4.5) {\small $U_\alpha \times \mathcal{S}$};
\node at (13, 4.5) {\small $E_\infty|_{U_\alpha}$};
\draw [thick, ->] (9, 4.5) --(12.1,4.5);
\node at (10.5, 4.9) {\small $\Phi$};
\node at (8, 1.5) {\small $U_\alpha \times \mathcal{S}_{l_\alpha}$};
\draw [->, thick] (9,2) --(12.5, 4);
\node [right] at (10.8, 2.7) {\small $\Phi_{l_\alpha}$};
\draw [thick, ->] (8, 4) -- (8,2);
\node at (7, 3) {\small $1_{U_\alpha} \times e_{l_\alpha}$};
\end{tikzpicture}
\hspace{1cm}
\begin{tikzpicture}[scale=0.8]
\node at (8, 4.5) {\small $U_\alpha \times S^1_{univ}$};
\node at (13, 4.5) {\small $E_\infty|_{U_\alpha}$};
\draw [thick, ->] (9, 4.5) --(12.1,4.5);
\node at (10.5, 4.9) {\small $\Phi$};
\node at (8, 1.5) {\small $U_\alpha \times \overline{\mathcal{S}}_{l_\alpha}$};
\draw [->, thick] (9,2) --(12.5, 4);
\node [right] at (10.8, 2.7) {\small $\Phi_{l_\alpha}$};
\draw [thick, ->] (8, 4) -- (8,2);
\node at (7, 3) {\small $1_{U_\alpha} \times \overline{e}_{l_\alpha}$};
\end{tikzpicture}
\end{center}
Since the evaluation map from $C^0(l_\alpha, \bar{E}|_{l_\alpha}) \times l_\alpha \to \bar{E}|_{l_\alpha}$ is continuous with respect to the compact-open topology, it follows that $\Phi_{l_\alpha}: U_\alpha\times \bar{\mathcal{S}}_{l_\alpha}\rightarrow E_\infty|_{U_\alpha}$ is continuous. Therefore, $\Phi$ is continuous over $U_\alpha\times S^1_{univ}$.
\end{proof}
\section{Left-orderability of $3$-manifold groups and universal circles}
\label{sec: lo and taut foliations}
A group $G$ is said to be {\it left-orderable} if it is nontrivial and there exists a strict total order $<$ on $G$ such that if $a, b, c \in G$ and $a<b$, then $ca<cb$.
The group ${\rm Homeo}_+(\mathbb{R})$ is left-orderable (see, for instance, the proof of \cite[Theorem 6.8]{Ghy}) and serves as a universal host for countable left-orderable groups. Indeed, a countable group $G \ne \{1\}$ is left-orderable if and only if it admits a faithful representation into ${\rm Homeo}_+(\mathbb{R})$ (cf. \cite[Theorem 6.8]{Ghy}). If $G$ is the fundamental group of an orientable irreducible $3$-manifold, the condition that the representation be faithful can be removed.
\begin{theorem} {\rm (\cite[Theorem 1.1]{BRW})} \label{thm:brw}
Assume that $M$ is a compact, orientable, irreducible $3$-manifold. Then $\pi_1(M)$ is left-orderable if and only if it admits a homomorphism to ${\rm Homeo}_+(\mathbb{R})$ with non-trivial image. Equivalently, $\pi_1(M)$ is left-orderable if and only if it admits a left-orderable quotient.
\end{theorem}
Consequently,
\begin{corollary}[\cite{HS, BRW}]
\label{cor:b1 is not zero lo}
Let $M$ be a compact, orientable, prime $3$-manifold and let $b_1(M)$ denote its first Betti number. If $b_1(M) > 0$, then $\pi_1(M)$ is left-orderable.
\qed
\end{corollary}
The following theorem states a known criterion for the left-orderability of the fundamental group of a rational homology $3$-sphere.
\begin{theorem} \label{prop:taut foliation left orderability}
Let $M$ be a rational homology $3$-sphere which admits a co-orientable taut foliation whose tangent plane field has zero Euler class. Then $\pi_1(M)$ is left-orderable.
\end{theorem}
\begin{proof}
First we observe that rational homology $3$-spheres which admit co-orientable taut foliations are irreducible \cite{Nov}. Hence $\pi_1(M)$ will be left-orderable if it admits a homomorphism to ${\rm Homeo}_+(\mathbb{R})$ with non-trivial image (Theorem \ref{thm:brw}).
Fix a Riemannian metric $g$ on $M$. Since $\mathcal{F}$ is orientable, any leaf of $\mathcal F$ which is not conformally negatively curved with respect to the induced metric gives rise to a nontrivial homology class in $H_2(M; \mathbb{R})$ (\cite[Corollary 6.4]{Pla}), contrary to the fact that $M$ is a rational homology sphere. Consequently, each leaf of $\mathcal{F}$ is conformally hyperbolic. Then by \cite[Theorem 4.1]{Can}, $g$ is conformal to a metric $g'$ whose restriction to each leaf has constant curvature $-1$. Hence there exists a universal circle action $\rho:\pi_1(M)\rightarrow {\rm Homeo}_+(S^1)$ which is non-trivial by Remark \ref{rem: rhouniv non-trivial}. By Proposition \ref{prop:Euler class}, we have $e(E_\rho)=e(T\mathcal{F})$, which is zero by hypothesis. Hence $\rho$ lifts to a non-trivial action of $\pi_1(M)$ on the real line (Lemma \ref{lem:milnor Euler class}) and therefore by Theorem \ref{thm:brw}, the fundamental group $\pi_1(M)$ is left-orderable.
\end{proof}
\section{The left-orderability of the fundamental groups of cyclic branched covers of fibred knots}
\label{sec: fdtc and lo}
In this section we consider cyclic branched covers of hyperbolic fibred knots and prove Theorem \ref{thm:conjecture fibre knots}, Corollary \ref{cor:universal abelian cover} and Corollary \ref{cor: knots rational homology sphere}.
Recall that a $3$-manifold is called excellent if it is not an L-space, admits a co-orientable taut foliation and its fundamental group is left-orderable.
\begin{proposition}\label{prop: e=0 and c>1 implies lo}
Let $M$ be an oriented rational homology sphere admitting an open book $(S,h)$ with binding a knot $K$ and pseudo-Anosov monodromy $h$. Let $\mathcal{F}_0$ denote the foliation on the exterior of $K$ given by the locally-trivial fibre bundle structure. Suppose that $e(T\mathcal{F}_0) = 0$.
If $|c(h)| \geq 1$, then $M$ is excellent.
\end{proposition}
\begin{proof}
If $c(h)\leq -1$, we can consider the open book decomposition $(-S, h^{-1})$ of $M$. By Lemma \ref{lem:poincare translation number}, $c(h^{-1})=-c(h)\geq 1$. Hence, we may assume that $c(h)\geq 1$.
By Theorem \ref{thm:cgeq1}, $M$ admits a co-oriented taut foliation $\mathcal F$ whose tangent plane field is homotopic to the contact structure $\xi$ supported by $(S,h)$. In particular, the restriction of $\xi$ to the knot complement $X(K)$ is homotopic to $\mathcal{F}_0$. It follows that the Euler class $e(\xi)$ is sent to $0$ under the inclusion-induced homomorphism $H^2(M) \to H^2(X(K))$. By Lemma \ref{lemma: homology of exterior}, this homomorphism is an isomorphism and hence, $e(T\mathcal{F})=e(\xi)= 0$. This implies that $M$ has a left-orderable fundamental group by Theorem \ref{prop:taut foliation left orderability}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:conjecture fibre knots}]
Let $K$ be a hyperbolic fibred knot in an oriented integer homology $3$-sphere $M$ with fibre $S$ and monodromy $h$. Then $X_n(K)(\mu_n + q \lambda_n)$ has open book decomposition $(S, T_{\partial}^{-q} h^n)$ with binding the core of the filling solid torus, which we denote by $\widetilde K$. The exterior of $\widetilde K$ in $X_n(K)(\mu_n + q \lambda_n)$ is $X_n(K)$.
Since $T_\partial^{-q}h^n$ is freely isotopic to $h^n$, there is a fibre-preserving homeomorphism between the mapping tori of $T_\partial^{-q}h^n$ and $h^n$. Hence the foliation on $X_n(K)$ determined by the open book $(S, h^n)$, denoted by $\widetilde{\mathcal{F}}_0$, is isomorphic to the one determined by the open book $(S,T_\partial^{-q}h^n)$ and the same holds for their tangent plane fields. We show that $e(T\widetilde{\mathcal{F}}_0)=0$.
Since $M$ is an integer homology $3$-sphere, $H^2(X(K)) \cong 0$, so that if $\mathcal{F}_0$ is the foliation of $X(K)$ determined by the open book $(S, h)$, then $e(T\mathcal{F}_0) = 0$. Since the Euler class $e(T\widetilde{\mathcal{F}}_0)$ is the image of $e(T\mathcal{F}_0)$ under the homomorphism $H^2(X(K))\to H^2(X_n(K))$, it is also zero.
By Lemma \ref{lem:poincare translation number}, we have $|c(T_\partial^{-q} h^n)|=|nc(h)-q|\geq 1$. Hence by Proposition \ref{prop: e=0 and c>1 implies lo}, if $X_n(K)(\mu_n + q \lambda_n)$ is a rational homology sphere, it is excellent. Otherwise, the first Betti number of $X_n(K)(\mu_n + q \lambda_n)$ is positive and it is also excellent (\cite{BRW, Ga1}). This proves part (2) of Theorem \ref{thm:conjecture fibre knots}. Part $(1)$ is an immediate consequence of part (2) and Proposition \ref{prop:lower bound FDTC}.
\end{proof}
\begin{proof}[Proof of Corollary \ref{cor:universal abelian cover}]
Assume that $n, q$, and $h$ are given as in the statement of the corollary and that $q \not \in \left\{
\begin{array}{cl}
\{nc(h)\} & \hbox{ if } nc(h) \in \mathbb Z \\
\{\lfloor nc(h) \rfloor, \lfloor nc(h) \rfloor+ 1\} & \hbox{ if } nc(h) \not \in \mathbb Z
\end{array} \right.$. We must show that the $n$-fold cyclic cover of $X(K)(n\mu+q\lambda)$ is excellent. Since this cover is homeomorphic to the Dehn filling $X_n(K)(\mu_n+q\lambda_n)$ and the latter is excellent if $|nc(h)-q|\geq 1$ by Theorem \ref{thm:conjecture fibre knots}(2), we need only verify that this inequality holds.
But $|nc(h)-q|<1$ if and only if either $q = n c(h) \in \mathbb Z$ or $nc(h) \notin \mathbb{Z}$ and $q$ is either
$\lfloor nc(h) \rfloor$ or $ \lfloor nc(h) \rfloor + 1$. As each of these cases has been excluded, the corollary holds.
\end{proof}
\begin{proof}[Proof of Corollary \ref{cor: knots rational homology sphere}]
Under the assumption, the $n$-fold cyclic branched cover of $X(K)(p\mu+q\lambda)$ is homeomorphic to $X_n(K)(\mu_n+mq\lambda_n)$, where $n=mp$ and $(p,q)=1$. The claim therefore follows from Theorem \ref{thm:conjecture fibre knots}(2).
\end{proof}
\section{The left-orderability of the fundamental groups of cyclic branched covers of closed braids}
\label{sec:cyclic closed braids}
In this section we examine Conjecture \ref{conj: lspace} for cyclic branched covers of hyperbolic closed braids.
\subsection{Open book decomposition of cyclic branched covers of closed braids}
\label{subsec:lift open book}
Given a punctured surface $S$, we let $\bar S$ denote the compact surface obtained from $S$ by filling its punctures.
If $f: S\rightarrow S'$ is a proper continuous map between two punctured surfaces, we use $\bar f: \bar S\rightarrow \bar S'$ to denote its continuous extension.
Given an $m$-braid $b: D_m\rightarrow D_m$, the pair $(\bar{D}_m,\bar{b})$ defines an open book decomposition of $S^3$ with pages diffeomorphic to the interior of the unit disk $\bar D_m$ and the monodromy $\bar{b} : \bar{D}_m \rightarrow \bar{D}_m$ is the extension of the mapping class $b$ to $\bar{D}_m$. This open book decomposition of $S^3$ lifts to an open book decomposition of the $n$-fold cyclic branched cover of $S^3$ along the closed braid $\hat{b}$, as we describe now.
Let $\mathfrak{p}:S_n \rightarrow D_m$ be the $n$-fold cyclic cover of the $m$-punctured disk associated with the epimorphism
$\pi_1(D_m; p)\rightarrow \mathbb{Z}/n$ which maps each
generator $x_i$ to the class of $1$ in $\mathbb{Z}/n$. See Figure \ref{fig:punctured disk generators basepoint}.
\begin{figure}[ht]
\centering
\begin{tikzpicture}[scale=0.75]
\draw (1.3,2.5) circle (0.05);
\node [below] at (1.3, 2.5) {\small $p_1$};
\node at (1.75, 2.5) {$\cdots$};
\draw (2.5,2.5) circle (0.05);
\draw [blue, thick] (2.5,2.5) circle (0.3);
\draw [blue, ->, thick] (2.49,2.2) -- (2.51,2.2);
\draw [blue, thick] (2.5, 4.3) -- (2.5, 2.8);
\node [below] at (2.5,2.16) {\small $p_i$};
\node at (3.35, 2.5) {$\cdots$};
\draw (3.7,2.5) circle (0.05);
\node [below] at (3.7,2.5) {\small $p_m$};
\node [blue] at (2.2,3) {$x_i$};
\filldraw (2.5,4.3) circle (0.05);
\node [above] at (2.5, 4.3) {$p$};
\draw [thick, ->] (2.5, 4.3) to [in=90,out=180] (0.7,2.5) to [in=180,out=270] (2.5,0.7);
\draw [thick] (2.5,0.7) to [in=270, out=0] (4.3,2.5) to [in=0, out=90] (2.5,4.3);
\filldraw (2.5,4.3) circle (0.05);
\end{tikzpicture}
\caption{Generators $x_i$ of $\pi_1(D_m)$.}
\label{fig:punctured disk generators basepoint}
\end{figure}
The automorphism $b_*: \pi_1(D_m;p)\rightarrow \pi_1(D_m;p)$ induced by $b$ sends each generator $x_i$ to a conjugate of a generator $x_j$ for some $j$. Consequently, $b_*$ restricts to an automorphism of the kernel of $\pi_1(D_m;p)\rightarrow \mathbb{Z}/n$ and therefore $b: D_m\rightarrow D_m$ lifts to a diffeomorphism $\psi: S_n \rightarrow S_n$ such that $\psi|_{\partial S_n}$ is the identity and $b \circ \mathfrak{p} = \mathfrak{p} \circ \psi$. Hence $\bar b \circ \bar{\mathfrak{p}} = \bar{\mathfrak{p}} \circ \bar \psi$ where $\bar{\mathfrak{p}}: \bar{S}_n\rightarrow \bar{D}_m$ is an $n$-fold cyclic branched cover of the disk $\bar D_m$ along $m$ points.
It is routine to check that the pair $(\bar{S}_n,\bar{\psi})$ defines an open book decomposition of the $n$-fold cyclic branched cover $\Sigma_n(\hat{b})$ of $S^3$ along the closed braid $\hat{b}$.
\begin{lemma}
If $b:D_m\rightarrow D_m$ is a pseudo-Anosov braid, then $\bar{\psi}: \bar{S}_n\rightarrow \bar{S}_n$ is also pseudo-Anosov.
\label{lem:lift pseudo Anosov map}
\end{lemma}
\begin{proof}
Let $h_t:D_m\rightarrow D_m$ be a free isotopy from $b$ to its pseudo-Anosov representative, denoted by $\beta$, whose stable and unstable measured singular foliations are denoted by $(\mathcal{F}^s,\mu^s)$ and $(\mathcal{F}^u,\mu^u)$. The isotopy $h_t$ lifts to an isotopy $H_t:S_n\rightarrow S_n$ with
$H_0=\psi$, where $\psi:S_n\rightarrow S_n$ is the lift of the braid $b$ as defined above.
By construction, $H_t$ leaves each component of $\partial S_n$ invariant.
Set $\varphi :=H_1: S_n\rightarrow S_n$ and note that $\varphi$ is a pseudo-Anosov homeomorphism whose stable and unstable singular foliations are lifts of singular foliations $(\mathcal{F}^s,\mu^s)$ and $(\mathcal{F}^u,\mu^u)$, which we denote by $(\widetilde{\mathcal{F}}^s,\tilde\mu^s)$ and $(\widetilde{\mathcal{F}}^u,\tilde\mu^u)$ respectively. Note that under this lift, any $1$-pronged puncture on $D_m$ is lifted to an $n$-pronged puncture on $S_n$.
\begin{figure}[ht]
\centering
\begin{tikzpicture}[scale=0.67]
\draw [blue, thick] (3,2.5) -- (1.5, 3.5);
\draw [blue, thick] (3,2.5) -- (4.5, 3.5);
\draw [blue, thick] (3,2.5) -- (3,1.2);
\filldraw [white] (3, 2.5) circle (0.06);
\draw [blue] (3, 2.5) circle (0.07);
\draw (2.7, 1.2) to [out=90, in=305] (2.3,2.4) to [out=125, in=330] (1.5, 3.1);
\draw (3.3, 1.2) to [out=90, in=235] (3.7,2.4) to [out=55, in=210] (4.5, 3.1);
\draw (2.4, 1.2) to [out=90, in=305] (2.1,2.1) to [out=125, in=330] (1.5, 2.7);
\draw (3.6, 1.2) to [out=90, in=235] (3.9,2.1) to [out=55, in=210] (4.5, 2.7);
\draw (1.8,3.7) to [out=320, in=180] (3, 3.1) to [out=0, in=220] (4.2,3.7);
\draw (2.1,3.85) to [out=320, in=180] (3, 3.4) to [out=0, in=220] (3.9,3.85);
\draw [thick,->] (5.5, 2.5) -- (7.5, 2.5);
\node [above] at (6.5,2.5) {$\mathfrak{p}$};
\node at (6.5,2) {\small $3$-fold cover};
\draw [thick, blue] (9,2.5) -- (11, 2.5);
\filldraw [white] (9, 2.5) circle (0.064);
\draw [blue] (9, 2.5) circle (0.07);
\draw (11,2.75) to (9,2.75) to [out=180, in=90] (8.75,2.5) to [out=270,in=180] (9, 2.25) to (11,2.25);
\draw (11,3) to (9,3) to [out=180, in=90] (8.5,2.5) to [out=270,in=180] (9, 2) to (11,2);
\end{tikzpicture}
\caption{A $1$-pronged singular point in $D_m$ is covered by an $n$-pronged singular point in $S_n$.}
\label{fig:singularity}
\end{figure}
Hence $\widetilde{\mathcal{F}}^s$ and $\widetilde{\mathcal{F}}^u$ extend to a transverse pair of measured singular foliations on the branched cover $\bar{S}_n$ which are invariant under $\bar\varphi: \bar{S}_n\rightarrow \bar{S}_n$. Moreover, the extension $\bar H_t: \bar{S}_n\rightarrow \bar{S}_n$ of the isotopy $H_t:S_n\rightarrow S_n$ defines a free isotopy between $\bar \psi$ and the pseudo-Anosov homeomorphism $\bar\varphi$. By definition, this shows that $\bar\psi$ is pseudo-Anosov.
\end{proof}
It is easy to check that the boundary of $S_n$ has $(m,n)$ components. In particular, $\partial S_n$ is not connected when $m$ and $n$ are not coprime. On the other hand, since the isotopy $H_t$ in the proof of Lemma \ref{lem:lift pseudo Anosov map} is equivariant with respect to the deck transformations of $\mathfrak{p}: S_n\rightarrow D_m$, the fractional Dehn twist coefficients of $\psi$ with respect to any two boundary components of $S_n$ are equal. We denote this common value by $c(\psi)$. Similarly, the fractional Dehn twist coefficients of $\bar\psi$ are equal with respect to all boundary components of the branched cover $\bar{S}_n$, and we denote their common value by $c(\bar\psi)$.
\begin{lemma}
$c(\bar \psi) = c(\psi) = \frac{(m,n)}{n}c(b)$.
\label{lem:FDTC of lifting monodromy}
\end{lemma}
\begin{proof}
We continue to use the notation developed in the proof of Lemma \ref{lem:lift pseudo Anosov map}.
First of all, $c(\bar\psi)=c(\psi)$ since the two isotopies $H_t$ and $\bar H_t$ are identical over the collar neighborhoods of $\partial S_n$ and $\partial \bar{S}_n$. It remains to show that $c(\psi)=\frac{(m,n)}{n}c(b)$.
Assume first that $(m,n)=1$. In this case, $\partial S_n$ is connected, and we denote it by $C$, so the restriction $\mathfrak{p}|_C: C \rightarrow \partial D_m$ is an $n$-fold cyclic cover. The proof that $c(\psi)=\frac{(m,n)}{n}c(b)$ is essentially contained in Figure \ref{fig:fractional Dehn twist lifting map}. To write it down more precisely, we need some notation.
Let $\{p_0,\cdots, p_{N-1}\}$ be the set of singular points on $\partial D_m$ of the stable foliation $\mathcal{F}^s$. Fix a preimage $q_0$ of $p_0$ on $C=\partial S_n$. For $k=0, \cdots, n-1$, let $q^k_i \in C$ be the $k^{th}$ lift of the singular point $p_i$. That is, if $\gamma_{p_0p_i}$ is the subarc of $\partial D_m$ with endpoints $p_0$ and $p_i$ (cf. \S \ref{subsec:FDTC isotopy}), then $q^k_i$ is the end point of the unique lift of the path $C^k\cdot \gamma_{p_0p_i}$ starting at $q_0$. To simplify notation, we denote $q^0_i$ by $q_i$. In particular, $q_0^0=q_0$. Note that $\{q^k_i\}$ is the set of singular points on $\partial S_n=C$ of the stable foliation $\widetilde{\mathcal{F}}^s$ of $\varphi$.
\begin{figure}[ht]
\centering
\begin{tikzpicture}[scale=0.9]
\node at (3.5, 0) {\footnotesize $\partial S_n\times [0,1]$};
\node at (10.5, 0) {\footnotesize $\partial D_m\times [0,1]$};
\draw [thick, ->] (6, 2.5) -- (8,2.5);
\node [above] at (7, 2.5) {\footnotesize $\mathfrak{p}|_C$ is a};
\node [below] at (7,2.5) {\footnotesize $3$-fold cover};
\filldraw [light-gray] (3.5,2.5) circle (1.5);
\filldraw [white] (3.5,2.5) circle (0.8);
\draw (3.5,2.5) circle (1.5);
\draw (3.5,2.5) circle (0.8);
\filldraw [light-gray] (10.5,2.5) circle (1.5);
\filldraw [white] (10.5,2.5) circle (0.8);
\draw (10.5,2.5) circle (1.5);
\draw (10.5,2.5) circle (0.8);
\begin{scope}[decoration={
markings,
mark=at position 0.5 with {\arrow{>}}}
]
\draw [blue,postaction={decorate}] (10.5, 1.7) to [out=-10, in=270] (11.45, 2.5) to [out=90,in=0] (10.5, 3.5) to [out=180, in=90] (9.5,2.5) to [out=270, in=180] (10.5, 1.4) to [out=0, in=250] (12, 2.5);
\node at (11.6, 3.2) {\color{blue} \tiny $h_t|_{p_0}$};
\draw [blue, postaction={decorate}] (3.52, 1.67) to [out=-30, in=230] (4.5,2) to [out=50, in=280] (4.7,3) to [out=100, in=310] (4.25, 3.8);
\node at (4, 1.4) {\color{blue}\tiny $H_t|_{q_0}$};
\end{scope}
\foreach \a in {0}
{
\draw [yshift=2.5cm, xshift=3.5cm] (\a*360/4-90: 0.6) node {\tiny $q_{\a}$};
\filldraw [yshift=2.5cm, xshift=3.5cm] (\a*360/4-90: 0.8) circle (0.03);
}
\foreach \a in {0,1,2,3}
{
\draw [yshift=2.5cm, xshift=10.5cm] (\a*360/4-90: 1.75) node {\tiny $p_{\a}$};
\filldraw [yshift=2.5cm, xshift=10.5cm] (\a*360/4-90: 1.5) circle (0.03);
}
\foreach \a in {0}
{
\draw [yshift=2.5cm, xshift=10.5cm] (\a*360/4-90: 0.6) node {\tiny $p_{\a}$};
\filldraw [yshift=2.5cm, xshift=10.5cm] (\a*360/4-90: 0.8) circle (0.03);
}
\foreach \a in {0,1,2,3}
{
\draw [yshift=2.5cm, xshift=3.5cm] (\a*120/4-90: 1.75) node {\tiny $q_{\a}$};
\filldraw [yshift=2.5cm, xshift=3.5cm] (\a*120/4-90: 1.5) circle (0.03);
}
\foreach \a in {0,1,2,3}
{
\draw [yshift=2.5cm, xshift=3.5cm] (\a*120/4+30: 1.75) node {\tiny $q^1_{\a}$};
\filldraw [yshift=2.5cm, xshift=3.5cm] (\a*120/4+30: 1.5) circle (0.03);
}
\foreach \a in {0,1,2,3}
{
\draw [yshift=2.5cm, xshift=3.5cm] (\a*120/4+150: 1.75) node {\tiny $q^2_{\a}$};
\filldraw [yshift=2.5cm, xshift=3.5cm] (\a*120/4+150: 1.5) circle (0.03);
}
\draw [->] (4.3,2.45)--(4.3,2.5);
\draw [->] (11.3,2.45)--(11.3,2.5);
\end{tikzpicture}
\caption{In the figure, $\{q^k_0, \cdots, q^k_3\}_{k=0,1,2}$ and $\{p_0, \cdots, p_3\}$ are singular points of the stable foliations $\widetilde{\mathcal{F}}^s$ and $\mathcal{F}^s$ respectively. The path $H_t|_{q_0}$ is the unique lift of the path $h_t|_{p_0}$ starting at $q_0$. By Definition \ref{def:fractional dehn twist}, we have $c(b)=1+\frac{1}{4}=\frac{5}{4}$ and $c(\psi)=\frac{5}{12}=\frac{1}{3}c(b)$.}
\label{fig:fractional Dehn twist lifting map}
\end{figure}
Let $c(b)=l+\frac{a}{N}$. By Definition \ref{def:fractional dehn twist}, this means that the path
$h_t|_{p_0}$ is homotopic to the path $[\partial D_m]^l\cdot\gamma_{p_0p_a}$ on $\partial D_m$.
We write $l=ns+r$ where $0\leq r<n$. Then by the uniqueness of path lifting, $H_t|_{q_0}$ is homotopic to the path $C^s\cdot \gamma_{q_0q^r_0}\cdot \gamma_{q^r_0q^r_a}$, which is equal to $C^s\cdot\gamma_{q_0q^r_a}$. Therefore,
\begin{align*}
c(\varphi) & =s+\frac{rN+a}{Nn} \\
& = \frac{N(sn+r)+a}{Nn}=\frac{Nl+a}{Nn}\\
& = \frac{1}{n}c(b).
\end{align*}
This deals with the case that $(m, n) = 1$.
In case that $(m,n)\neq 1$, the degree of the covering map $\mathfrak{p}: S_n\rightarrow D_m$ restricted to each boundary component of $S_n$ is $\frac{n}{(m,n)}$. Proceed as in the case that $(m,n) = 1$ to complete the proof.
\subsection{The L-space conjecture and cyclic branched covers of closed braids}
In this section we study the left-orderability of branched covers of closed braids. We begin with the proof of Theorem \ref{thm:conjecture cyclic braids}: {\it Let $b\in B_{m}$ be an odd-strand braid whose closure $\hat{b}$ is an oriented hyperbolic link $L$ and let $c(b) \in \mathbb Q$ be the fractional Dehn twist coefficient of $b$. Suppose that $|c(b)| \geq 2$. Then all even order cyclic branched covers of $\hat{b}$ are excellent.}
\begin{proof}[Proof of Theorem \ref{thm:conjecture cyclic braids}]
First of all, the equivariant sphere theorem (\cite{MSY}) and the positive solution of the Smith Conjecture (\cite{MB}) imply that as $\hat{b}$ is prime, all cyclic branched covers of $\hat{b}$ are irreducible.
For each $n \geq 1$, there is an $n$-fold cyclic branched cover $\mathfrak{p}_n: \Sigma_{2n}(\hat{b}) \to \Sigma_{2}(\hat{b})$, branched over the lift $\tilde L$ of $\hat b$ to $\Sigma_{2n}(\hat{b})$, which the reader will verify is surjective on the level of fundamental groups. Hence if $b_1(\Sigma_2(\hat{b}))>0$, then $b_1(\Sigma_{2n}(\hat{b}))>0$ for all $n$. As such, Conjecture \ref{conj: lspace} holds for all even order cyclic branched covers of $\hat{b}$.
Suppose then that $\Sigma_2(\hat{b})$ is a rational homology sphere. If $c(b)\leq -2$, then $c(b^{-1})=-c(b)\geq 2$ by Lemma \ref{lem:poincare translation number}. Since $\widehat{b^{-1}}$ is the mirror image of $\hat{b}$, their cyclic branched covers are diffeomorphic, so without loss of generality we may assume that $c(b)\geq 2$.
By hypothesis, $b$ is a pseudo-Anosov mapping class of $D_{m}$. Then Lemma \ref{lem:lift pseudo Anosov map} shows that the $2$-fold cyclic branched cover of $\hat{b}$ admits an open book decomposition $(\bar{S}_2,\bar\psi)$, where $\bar{S}_2$ is the $2$-fold cyclic branched cover of the unit disk branched over $m$ points and the monodromy $\bar\psi$ is pseudo-Anosov. By Lemma \ref{lem:FDTC of lifting monodromy},
\begin{displaymath}
c(\bar{\psi})=\frac{c(b)}{2}\geq 1.
\end{displaymath}
Since $b$ is a braid on an odd number of strands, $\partial \bar{S}_2$ is connected. Then by Theorem \ref{thm:cgeq1}, there exists a co-orientable taut foliation $\mathcal{F}$ on $\Sigma_2(\hat{b})$ and hence it cannot be an L-space \cite{OS1,Bn,KR2}. Moreover, Theorem \ref{thm:cgeq1} says that the tangent plane field of the foliation $\mathcal{F}$ is homotopic to the contact structure supported by the open book $(\bar{S}_2,\bar\psi)$. On the other hand, this contact structure is isotopic to the lift of the contact structure on $S^3$ that is supported by the open book $(\bar{D}_{m},\bar{b})$. (Here ``lift" is used in the sense of \S \ref{subsec: contact}.) Therefore, by Lemma \ref{lem:Euler class vanishes links} we have $e(T\mathcal{F})=0$. Applying Theorem \ref{prop:taut foliation left orderability}, we conclude that $\pi_1(\Sigma_2(\hat{b}))$ is left-orderable. This completes the proof for the $2$-fold cyclic branched cover.
Now consider $\Sigma_{2n}(\hat{b})$ where $n > 1$ and recall that there is an $n$-fold branched cyclic cover $\mathfrak{p}_n: \Sigma_{2n}(\hat{b}) \to \Sigma_{2}(\hat{b})$, branched over $\tilde L$. We noted above that $\mathfrak{p}_n$ is surjective on the level of $\pi_1$, so $\pi_1(\Sigma_{2n}(\hat{b}))$ is left-orderable by Theorem \ref{thm:brw}. Finally, since $\tilde{L}$ intersects the foliation $\mathcal{F}$ on $\Sigma_2(\hat b)$ transversely (cf. \cite[Lemma 4.4]{HKM2}), $\mathcal{F}$ lifts to a foliation $\mathcal{F}_n$ on $\Sigma_{2n}(\hat{b})$ which is easily seen to be co-oriented and taut. Consequently, $\Sigma_{2n}(\hat{b})$ is not an L-space for any $n > 1$, which completes the proof.
\end{proof}
An identical argument yields the following more general statement. We omit the proof.
\begin{theorem}
Let $b\in B_m$ be a pseudo-Anosov braid whose fractional Dehn twist coefficient satisfies $|c(b)|\geq N$. Then the $nk$-fold cyclic branched cover of the closed braid $\hat{b}$ admits a co-oriented taut foliation and has a left-orderable fundamental group for any $n$ with $2\leq n\leq N$, $(m,n)=1$ and $k\geq1$.
\qed
\label{thm:taut foliation in cyclic covers of a braid}
\end{theorem}
\subsection{Dehornoy's braid ordering and cyclic branched covers of closed braids}
\label{subsec:order of the braid group}
There is a special left order $<_D$ on the braid group $B_m$, due to Dehornoy, characterised by the condition that a braid $b$ is positive if and only if there is a $j \geq 1$ such that $b$ can be written in the standard braid generators (Figure \ref{fig:fig34}) as a word containing $\sigma_j$, but no $\sigma_j^{-1}$, and not containing $\sigma_i^{\pm 1}$ for $i < j$. Set
$\Delta_m = (\sigma_1 \sigma_2 \ldots \sigma_{m-1})(\sigma_1 \sigma_2 \ldots \sigma_{m-2}) \ldots (\sigma_1 \sigma_2)(\sigma_1)$ in $B_m$.
The centre of $B_m$ is generated by $\Delta_m^2$ and for each $b \in B_m$ there is an integer $d > 0$ such that
$$\Delta_m^{-2d} <_D b <_D \Delta_m^{2d}.$$
In other words, the subgroup of $B_m$ generated by $\Delta_m$ is cofinal in $B_m$ with respect to $<_D$.
\begin{definition}
Given an element $b\in B_m$, the Dehornoy floor $\lfloor b \rfloor_D$ is the nonnegative integer defined to be
\begin{displaymath}
\lfloor b\rfloor_D=\min\{k\in \mathbb{Z}_{\geq 0}\mid \Delta_m^{-2k-2}<_D b <_D \Delta_m^{2k+2}\}.
\end{displaymath}
\label{def:Dehornoy floor}
\end{definition}
Although the Dehornoy floor fails to be a topological invariant of the closed braid, it has proven to be a useful concept when studying links via closed braids \cite{Ito1,Ito2}.
Malyutin discovered a fundamental relationship between $c(b)$ and $\lfloor b\rfloor_D$. Though the Dehornoy floor defined by Malyutin \cite[Definition 7.3]{Mal} is slightly different from the one given above, it is easy to check that the two definitions agree for $b>_D 1$ in $B_m$.
\begin{proposition}{\rm (cf. \cite[Lemma 7.4]{Mal})} \label{prop: malyutin}
For each $b \in B_m$, $\lfloor b\rfloor_D \leq |c(b)| \leq \lfloor b\rfloor_D + 1$.
\label{prop:Dehornoy floor FDTC}
\end{proposition}
\begin{proof}
Let $d= \lfloor b\rfloor_D$.
By Definition \ref{def:Dehornoy floor}, this means either $\Delta_m^{2d} \leq_D b <_D \Delta_m^{2d+2}$ or $\Delta_m^{-2d-2} <_D b \leq_D \Delta_m^{-2d}$.
In the first case the claim follows directly from \cite[Lemma 7.4]{Mal}. In the second, the fact that $\Delta_m^2$ lies in the center of $B_m$ implies that $\Delta_m^{2d} \leq_D b^{-1} <_D \Delta_m^{2d + 2}$, so by the first case, $\lfloor b\rfloor_D \leq c(b^{-1})\leq \lfloor b \rfloor_D+1$. Since $c(b^{-1})=-c(b)=|c(b)|$, this completes the proof.
\end{proof}
\begin{remark}\label{rmk:FDTC braid ordering}
The proof of Proposition \ref{prop:Dehornoy floor FDTC} shows that $c(b) \geq 0$ when $b>_D 1$ and $c(b) \leq 0$ when $b<_D 1$.
\end{remark}
Given Proposition \ref{prop:Dehornoy floor FDTC}, Theorem \ref{thm:conjecture cyclic braids} has the following consequence.
\begin{theorem}
Let $b\in B_{m}$ be an odd-strand pseudo-Anosov braid. Suppose that $\lfloor b \rfloor_D>1$. Then all even order cyclic branched covers of $\hat{b}$ admit co-oriented taut foliations and have left-orderable fundamental groups.
\qed
\label{thm:conjecture cyclic braids braid ordering}
\end{theorem}
\section{The L-space conjecture and genus one open books}
\label{sec:L-space conjecture genus one open books}
The goal of this section is to prove Theorem \ref{thm:lspace genus one open book decomposition} and Corollary \ref{cor: branched cover genus 1}.
A simple Euler characteristic calculation shows that the $2$-fold branched cover of a disk branched over three points is a genus $1$ surface with one boundary component (see Figure \ref{fig:double cover 3-punctured disk}). We denote this surface by $T_1$ and let $\theta$ be its covering involution.
\begin{figure}
\centering
\begin{tikzpicture}
\draw [thick,->] (11.6,2.5) [out=90,in=0] to (10.5,3.6);
\draw [thick] (10.5,3.6) [out=180, in=90] to (9.4,2.5) [out=270,in=180] to (10.5,1.4) [out=0,in=270] to (11.6, 2.5);
\draw [red,thick] (9.83,2.5) -- (10.47,2.5);
\draw [blue,thick] (11.17,2.5) -- (10.53,2.5);
\filldraw (10.5,2.5) circle (0.03);
\filldraw (9.8,2.5) circle (0.03);
\filldraw (11.2,2.5) circle (0.03);
\draw (10.5,2.5) circle (0.03);
\draw (9.8,2.5) circle (0.03);
\draw (11.2,2.5) circle (0.03);
\draw [->, thick] (6.6,2.5) -- (8.2, 2.5);
\draw [dashed, gray] (0.5,2.5) -- (6,2.5);
\draw [thick] (1.5,3.4) [out=10,in=180] to (3.4,3.8) [out=0,in=90] to (5,2.5);
\draw [thick] (5,2.5) [out=270, in=0] to (3.4, 1.2) [out=180, in=-10] to (1.5,1.6);
\draw [thick, blue] (3.8,2.5) [out=-45, in=225] to (5,2.5);
\draw [thick, dashed, blue] (5,2.5) [out=85, in=105] to (3.8,2.5);
\node [below] at (4.5,2.2) {\tiny \color{blue} $c_2$};
\node [below] at (3.25, 2.2) {{\tiny \color{red}$c_1$}};
\draw [thick, red] (2.7,2.5) [out=-60, in=240] to (3.8,2.5);
\draw [thick, red] (2.7,2.5) [out=60, in=120] to (3.8,2.5);
\draw [gray,thick](1.9,2.1) [out=0, in=280] to (2.7,2.5);
\draw [dashed,gray, thick] (2.7,2.5) [out=115, in=0] to (1.1,2.95);
\draw [thick] (1.5,1.6) [out=0,in=260] to (1.9,2.1) [out=80, in=-80] to (1.9, 2.9) [out=100, in=0] to (1.5, 3.4);
\draw [thick, ->] (1.94,2.5) [out=270, in=90] to (1.94,2.55);
\draw [thick] (1.5, 3.4) [out=195, in=80] to (1.1,2.9) [out=260, in=100] to (1.1, 2.1) [out=-80, in=160] to (1.5,1.6);
\filldraw (3.8,2.5) circle (0.03);
\filldraw (2.7,2.5) circle (0.03);
\filldraw (5,2.5) circle (0.03);
\draw (3.8,2.5) circle (0.03);
\draw (2.7,2.5) circle (0.03);
\draw (5,2.5) circle (0.03);
\draw [->] (5.45,2.35) [out=-60,in=180] to (5.6,2.2) [out=0, in=270] to (5.75,2.5) [out=90, in=0] to (5.6, 2.8) [out=180,in=60] to (5.45,2.65);
\draw [white] (0,4.5) circle (0.1);
\draw [white] (0,0.9) circle (0.1);
\draw [white] (12.7,2.5) circle (0.1);
\node at (5.9,2.9) {$\theta$};
\end{tikzpicture}
\caption{$2$-fold cover of $3$-punctured disk}
\label{fig:double cover 3-punctured disk}
\end{figure}
As in \S \ref{subsec:lift open book}, every element in $B_3 = \hbox{Mod}(D_3)$ admits a unique lift to ${\rm Mod}(T_1)$ which defines an embedding of groups $B_3 \to \hbox{Mod}(T_1)$. The Artin generators $\sigma_1$ and $\sigma_2$ of $B_3$ lift to the right-handed Dehn twists $T_{c_1}$ and $T_{c_2}$ respectively, where $c_i$ is the preimage of the segment connecting the $i^{th}$ and the $(i+1)^{st}$ punctures of the disk for $i=1,2$, as in Figure \ref{fig:double cover 3-punctured disk}. Since the Dehn twists $T_{c_1}$ and $T_{c_2}$ generate ${\rm Mod}(T_1)$, the embedding $B_3 \to \hbox{Mod}(T_1)$ constructed above is an isomorphism.
The identification between $B_3$ and the mapping class group ${\rm Mod}(T_1)$ can be used to show that any $3$-manifold which admits an open book decomposition with $T_1$-pages is the $2$-fold cyclic branched cover of a closed $3$-braid. Hence, the following conjugacy classification of $3$-braids leads to a complete list of diffeomorphism classes of genus $1$ open books with connected binding.
Set $C=\Delta_3^2=(\sigma_1\sigma_2)^3$.
\begin{theorem} {\rm (\cite[Proposition 2.1]{Mur}, \cite[Theorem 2.2]{Bal})}
\label{thm:classification of 3 braids}
Up to conjugation, any braid $b$ in $B_3$ is equal to one of the following:
\begin{enumerate}
\item $C^d\cdot\sigma_1\sigma_2^{-a_1}\cdots\sigma_1\sigma_2^{-a_n}$, where $d\in \mathbb{Z}$, $a_i\geq 0$ and at least one of the $a_i$ is nonzero;
\item $C^d\cdot \sigma_2^m$ for some $d\in \mathbb{Z}$ and $m\in \mathbb{Z}$;
\item $C^d\cdot \sigma_1^m\sigma_2^{-1}$, with $d\in \mathbb{Z}$ and $m\in \{-1,-2,-3\}$. \qed
\end{enumerate}
\end{theorem}
\begin{remark}
The conjugation classification of $3$-braids detailed in Theorem \ref{thm:classification of 3 braids} corresponds to the Nielsen-Thurston classification of mapping classes in ${\rm Mod}(D_3)=B_3$ (\S \ref{subsec:nielsen-thurson classification}): braids of type (1) are pseudo-Anosov; those of type (2) are reducible; those of type (3) are periodic. This can be verified by considering their lifts in ${\rm Mod}(T_1)$, where the Nielsen-Thurston type of an element $h\in {\rm Mod}(T_1)$ is determined by the trace of the linear map $h_*: H_1(T_1)\rightarrow H_1(T_1)$ (cf. \cite[\S 13.1]{FM})\footnote{The Nielsen-Thurston type of an element $h\in {\rm Mod}(T_1)$ is, by definition, the same as that of its projection in the mapping class group of the once-punctured torus, which is isomorphic to $SL(2,\mathbb{Z})$.}.
\end{remark}
Baldwin listed all closed $3$-braids whose $2$-fold branched covers are L-spaces.
\begin{theorem}{\rm (Theorem 4.1 of \cite{Bal})} \label{thm: baldwin}
The $2$-fold cyclic branched cover $\Sigma_2(\hat{b})$ of a closed braid $\hat{b}$ for $b\in B_3$ is an L-space if and only if $b$ belongs to one of the following lists:
\begin{enumerate}
\item $C^d\cdot\sigma_1\sigma_2^{-a_1}\cdots\sigma_1\sigma_2^{-a_n}$, where $a_i\geq 0$ and at least one of the $a_i$ is nonzero, and $d\in\{-1,0,1\}$.
\item $C^d\cdot \sigma_2^m$, for some $m\in \mathbb{Z}$ and $d=\pm 1$.
\item $C^d\cdot \sigma_1^m\sigma_2^{-1}$, with $m\in \{-1,-2,-3\}$ and $d\in\{-1,0,1,2\}$.
\end{enumerate}
\end{theorem}
Conjecture \ref{conj: lspace} holds for the manifolds listed in Theorem \ref{thm: baldwin} since they admit no co-oriented taut foliations (\cite{OS1,Bn,KR2}) and it was shown by Li and Watson (\cite{LW}) that they have non-left-orderable fundamental groups.
\begin{proof}[Proof of Theorem \ref{thm:lspace genus one open book decomposition}]
Let $M$ denote an irreducible $3$-manifold which admits a genus $1$ open book decomposition with connected binding. Then $M$ is diffeomorphic to the $2$-fold cyclic branched cover of a closed $3$-braid $\hat{b}$, where $b\in B_3$ falls into one of the three families listed in Theorem \ref{thm:classification of 3 braids}.
Suppose first that $b$ is in family (3). Then $b=C^d\sigma_1^m\sigma_2^{-1}$ with $m\in\{-1,-2,-3\}$. We noted above that this implies that $b$ is periodic, so the mapping torus $N_b \cong D_3\times [0,1]/(x,1)\sim (b(x),0)$ of $b$ is Seifert fibred. The reader will verify that $b$ has period $3, 4$, or $6$ depending on whether $m$ is $-3, -2$, or $-1$. This implies that the fibre class on $\partial N_b$ intersects $\nu$ (Figure \ref{fig:branched_cover_braid_fig0}) more than once algebraically. In particular, $\nu$ is not the fibre class. Thus $N_b(\nu)$, the exterior of $\hat b$ in $M$, is a Seifert manifold and therefore so is the irreducible manifold $\Sigma_2(\hat b)$. (The reader will verify that $\hat b$ is prime and so the equivariant sphere theorem implies that $\Sigma_2(\hat b)$ is irreducible.) Hence the theorem holds in this case by \cite{BRW, BGW, LS}.
If $b$ is in family (2), then $b = C^d \cdot \sigma_2^m$ with $m\in \mathbb{Z}$. Note that $d \ne 0$ as otherwise $\hat b$ is a split link and therefore $\Sigma_2(\hat b)$ is reducible, contrary to our hypotheses. Hence, given the paragraph immediately after Theorem \ref{thm: baldwin}, we can assume that $|d| \geq 2$. We can also suppose that $m \ne 0$ as otherwise $\hat b$ is a $(3, 3d)$ torus link so that $\Sigma_2(\hat b)$ is Seifert fibred and the theorem's conclusion follows from \cite{BRW, BGW, LS}.
There is a circle $O$ in $\hbox{int}(D_3)$ which contains the second and third punctures of $D_3$ in its interior, but not the first, and is invariant under $b$. If $T$ denotes the torus obtained from $O$ in the mapping torus of $b$, the reader will verify that there is a genus $1$ Heegaard splitting $V_1 \cup_T V_2$ of $S^3$ where $V_1 \cap \hat b$ is a $(2, m+2d)$ torus link standardly embedded in the interior of $V_1$ and $V_2 \cap \hat b$ is a $(d,1)$ torus knot standardly embedded in the interior of $V_2$.
Then $\Sigma_2(\hat b)$ is the union of a $2$-fold branched cover $\Sigma_2(V_1, V_1 \cap \hat b)$ of $V_1$ branched over $V_1 \cap \hat b$ and a $2$-fold branched cover $\Sigma_2(V_2, V_2 \cap \hat b)$ of $V_2$ branched over $V_2 \cap \hat b$. Since $V_1 \setminus \hat b$ fibres over the circle with fibre a twice-punctured disc, $\Sigma_2(V_1, V_1 \cap \hat b)$ is homeomorphic to the product of a torus and an interval. On the other hand, $V_2$ admits a Seifert structure for which $V_2 \cap \hat b$ is a regular Seifert fibre, and therefore $\Sigma_2(V_2, V_2 \cap \hat b)$ admits a Seifert structure. Further, as $|d| \geq 2$, $\Sigma_2(V_2, V_2 \cap \hat b)$ is not a solid torus and so has an incompressible boundary. Thus $\Sigma_2(\hat b)$ is a graph manifold. In this case, Conjecture \ref{conj: lspace} has been confirmed in \cite{BC,HRRW}.
Finally suppose that $b$ is in family (1). Then $b = C^d\cdot \sigma_1 \sigma_2^{-a_1} \cdots \sigma_1 \sigma_2^{-a_n}$ is pseudo-Anosov, where $a_i\geq 0$ and $a_i\neq 0$ for some $i$. As some of the $a_i$ may be zero, we can write $b = C^d \cdot \sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}$ where each $b_i$ and each $c_i$ is positive. Further, $k \geq 1$. We claim that the fractional Dehn twist coefficient $c(\sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k})$ is zero.
To see this, first observe that conjugation by $\Delta_3=\sigma_1 \sigma_2 \sigma_1$ in $B_3$ exchanges $\sigma_1$ and $\sigma_2$. Then by Lemma \ref{lem:poincare translation number}
\begin{equation}
c(\sigma_2^{b_1} \sigma_1^{-c_1} \cdots \sigma_2^{b_k} \sigma_1^{-c_k}) = c(\Delta_3 (\sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}) \Delta_3^{-1} ) = c(\sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}).
\label{equ:FDTC zero}
\end{equation}
But by the definition of the Dehornoy order, $\sigma_2^{b_1} \sigma_1^{-c_1} \cdots \sigma_2^{b_k} \sigma_1^{-c_k}<_D 1 <_D \sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}$. Hence according to Remark \ref{rmk:FDTC braid ordering}, Equality (\ref{equ:FDTC zero}) shows
\begin{displaymath}
0 \leq c(\sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}) = c(\sigma_2^{b_1} \sigma_1^{-c_1} \cdots \sigma_2^{b_k} \sigma_1^{-c_k}) \leq 0.
\end{displaymath}
Thus $c(\sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}) = 0$. But then as $C^d$ commutes with $\sigma_1^{b_1} \sigma_2^{-c_1} \cdots \sigma_1^{b_k} \sigma_2^{-c_k}$,
$$c(b) = c(C^d) +c(\sigma_1\sigma_2^{-a_1}\cdots \sigma_1\sigma_2^{-a_n}) = d+c(\sigma_1\sigma_2^{-a_1}\cdots \sigma_1\sigma_2^{-a_n}) = d$$
by Lemma \ref{lem:poincare translation number}.
By the discussion immediately preceding the statement of the theorem we can suppose that $|c(b)| = |d| \geq 2$. Theorem \ref{thm:conjecture cyclic braids} then implies that $\Sigma_2(\hat{b})$ is not an L-space, admits a co-orientable taut foliation, and has left-orderable fundamental group.
\end{proof}
Let $b(h)\in B_3$ be the image of $h$ under the isomorphism from ${\rm Mod}(T_1)$ to $B_3$ described above. Note that $b(\delta)=C$, where $\delta=(T_{c_1}T_{c_2})^3$ as in Corollary \ref{cor: branched cover genus 1}.
\begin{proof}[Proof of Corollary \ref{cor: branched cover genus 1}]
Suppose that $K$ is a genus one fibred knot in a closed, connected, orientable, irreducible $3$-manifold $M$, with fibre $T_1$ and monodromy $h$. Since $K$ has genus one, it is prime and therefore each $\Sigma_n(K)$ is irreducible. Further, each $\Sigma_n(K)$ contains a genus one fibred knot so Theorem \ref{thm:lspace genus one open book decomposition} implies that $\Sigma_n(K)$ is excellent, respectively a total L-space, for some $n \geq 2$ if and only if it is not an L-space, respectively is an L-space.
Up to conjugation, we can suppose that the braid $b(h) \in B_3$ is one of the forms listed in Theorem \ref{thm:classification of 3 braids}.
First suppose that $h$ is pseudo-Anosov. If $c(h) \ne 0$, Theorem \ref{thm:conjecture fibre knots}(1) implies that $\Sigma_n(K)$ is excellent for all $n \geq 2$. Suppose then that $c(h) = 0$ and let $n \geq 2$. Since $h$ is pseudo-Anosov, $b(h)=C^d\cdot\sigma_1\sigma_2^{-a_1}\cdots\sigma_1\sigma_2^{-a_l}$, where $d \in \mathbb{Z}$, $a_i\geq 0$ and at least one of the $a_i$ is nonzero. The proof of Theorem \ref{thm:lspace genus one open book decomposition} and Lemma \ref{lem:FDTC of lifting monodromy} show that
\begin{equation} \label{eqn: c(f) via c(b(f))}
0 = 2c(h) = c(b(h)) = d,
\end{equation}
and therefore, $b(h^n)=b(h)^n=(\sigma_1\sigma_2^{-a_1}\cdots\sigma_1\sigma_2^{-a_l})^n$. Theorem \ref{thm: baldwin}(1) now implies that $\Sigma_n(K)$ is an L-space. This is case (1) of Corollary \ref{cor: branched cover genus 1}.
Next suppose that $h$ is reducible. Then $b(h) = C^d\cdot \sigma_2^m$ for some $d \in \mathbb{Z}$ and $m\in \mathbb{Z}$ (Theorem \ref{thm:classification of 3 braids}(2)). It follows that $b(h^n)=b(h)^n=C^{nd}\cdot \sigma_2^{nm}$ and since $nd \ne \pm 1$ for $n \geq 2$, Theorem \ref{thm: baldwin} shows that $\Sigma_n(K)$ is not an L-space. Thus it is excellent.
Finally suppose that $h$ is periodic. Then $b(h) = C^d\cdot \sigma_1^m\sigma_2^{-1}$ where $d \in \mathbb{Z}$ and $m\in \{-1,-2,-3\}$ (Theorem \ref{thm:classification of 3 braids}(3)). The reader will verify that if $w_m = \sigma_1^{-m} \sigma_2^{-1}$, then
$$w_m^r = \left\{ \begin{array}{ll} C^{-1} & \hbox{ if $m = 1$ and $r = 3$} \\
C^{-1} & \hbox{ if $m = 2$ and $r = 2$} \\
C^{-2} & \hbox{ if $m = 3$ and $r = 3$}
\end{array} \right.$$
In particular,
$$w_m^2 = \left\{ \begin{array}{ll} C^{-1}w_{1}^{-1} & \hbox{ if $m = 1$} \\
C^{-2}w_{3}^{-1} & \hbox{ if $m = 3$}
\end{array} \right.$$
We consider the cases $m = 1, 2, 3$ separately.
Suppose that $m = 1$ and let $n = 3k+r \geq 2$ where $r \in \{0,1,2\}$. Then $k \geq 1$ if $r$ is $0$ or $1$ and $k \geq 0$ otherwise. The identities above imply that
$$b^n = (C^{d}w_{1})^{n} = \left\{ \begin{array}{ll} C^{nd - k} & \hbox{ if } r = 0 \\
C^{nd - k} w_{1} & \hbox{ if } r = 1 \\
(C^{k + 1 - nd} w_{1})^{-1}& \hbox{ if } r = 2
\end{array} \right.$$
Theorem \ref{thm: baldwin} then shows
$$\hbox{\rm $\Sigma_n(K)$ is an L-space if and only if } \left\{ \begin{array}{cl}
nd = k \pm 1 & \hbox{ {\rm if }} r = 0 \\
k-1 \leq nd \leq k+2 & \hbox{ {\rm if }} r = 1, 2
\end{array} \right. $$
Thus,
$$\hbox{\rm $\Sigma_n(K)$ is an L-space if and only if } b = \left\{ \begin{array}{r}
w_{1} \hbox{ {\rm and }} n \leq 5 \\
Cw_{1} \hbox{ {\rm and }} n = 2
\end{array} \right. $$
Next suppose that $m = 2$ and let $n = 2k+r \geq 2$ where $r \in \{0,1\}$. Then $k \geq 1$ and
$$b^n = (C^{d}w_{2})^{n} = \left\{ \begin{array}{ll} C^{nd - k} & \hbox{ if } r = 0 \\
C^{nd - k} w_{2} & \hbox{ if } r = 1
\end{array} \right.$$
Theorem \ref{thm: baldwin} then shows
$$\hbox{\rm $\Sigma_n(K)$ is an L-space if and only if } \left\{ \begin{array}{cl}
nd = k \pm 1 & \hbox{ {\rm if }} r = 0 \\
k-1 \leq nd \leq k+2 & \hbox{ {\rm if }} r = 1
\end{array} \right. $$
Thus,
$$\hbox{\rm $\Sigma_n(K)$ is an L-space if and only if } b = \left\{ \begin{array}{r}
w_{2} \hbox{ {\rm and }} n \leq 3 \\
Cw_{2} \hbox{ {\rm and }} n \leq 3
\end{array} \right. $$
Finally suppose that $m = 3$ and let $n = 3k+r \geq 2$ where $r \in \{0,1,2\}$. Then $k \geq 1$ if $r$ is $0$ or $1$ and $k \geq 0$ otherwise. We have,
$$b^n = (C^{d}w_{3})^{n} = \left\{ \begin{array}{ll} C^{nd - 2k} & \hbox{ if } r = 0 \\
C^{nd - 2k} w_{3} & \hbox{ if } r = 1 \\
(C^{2(k+1) - nd} w_{3})^{-1} & \hbox{ if } r = 2
\end{array} \right.$$
Theorem \ref{thm: baldwin} then shows
$$\hbox{\rm $\Sigma_n(K)$ is an L-space if and only if } \left\{ \begin{array}{cl}
nd = 2k \pm 1 & \hbox{ {\rm if }} r = 0 \\
2k-1 \leq nd \leq 2k+2 & \hbox{ {\rm if }} r = 1 \\
2k \leq nd \leq 2k+3 & \hbox{ {\rm if }} r = 2
\end{array} \right. $$
Thus,
$$\hbox{\rm $\Sigma_n(K)$ is an L-space if and only if } b = \left\{ \begin{array}{r}
w_{3} \hbox{ {\rm and }} n = 2\\
Cw_{3} \hbox{ {\rm and }} n \leq 5
\end{array} \right. $$
This completes the proof.
\end{proof}
\section{The left-orderability of the fundamental groups of cyclic branched covers of satellite links}
\label{sec:LO cyclic cover satellite knots}
Let $L$ be an oriented untwisted satellite link in an oriented integer homology $3$-sphere $M$ with pattern $P$, a link in the solid torus $N$, and companion $C$, a knot in $M$. The $n$-fold cyclic branched cover of $L$ is obtained in the usual way from the regular cover of the exterior of $L$ determined by the homomorphism which sends the oriented meridians of the components of $L$ to $1$ (mod $n$). It can be obtained by gluing copies of the cyclic covers of the knot exterior $X(C)$ to an $n$-fold cyclic cover of the solid torus $N$ branched over $P$.
In what follows we assume that $L, P$ and $C$ are as above and that $P$ is the closure of an $m$-strand pseudo-Anosov braid $b$. The reader will verify that $H_1(N \setminus P) \cong \mathbb Z^{|P| + 1}$, where $|P|$ is the number of components of $P$, is freely generated by the meridian classes of the components of $P$ and the class $\nu$ carried by a longitudinal loop on $\partial N = \partial X(C)$ (see Figure \ref{fig:branched_cover_braid_fig0}). We use $N_n(P)$ to denote the $n$-fold cyclic branched cover of $P$ in $N$ determined by the homomorphism $H_1(N \setminus P) \to \mathbb Z/n$ which sends the meridians of the components of $P$ to $1$ (mod $n$) and $\nu$ to $0$ (mod $n$).
We follow the notation developed in Section \ref{subsec:lift open book}. Our discussion there shows that $N_n(P)$ is homeomorphic to the mapping torus $\bar{S}_n\times [0,1]/(x,1)\sim (\bar\psi(x),0)$, where $\bar{S}_n$ is the $n$-fold cyclic branched cover of the disk branched at $m$ points and $\bar{\psi}$ is the unique lift of $b$ such that $\bar{\psi}|_{\partial \bar{S}_n}$ is the identity. (See \S \ref{subsec:lift open book} for the details.) The boundary of $\bar{S}_n$ is connected when $\gcd(m,n)=1$, so $\partial N_n(P)$ is a torus. It is clear that each of the curves $\partial \bar{S}_n\times t_0$ carries the longitudinal slope of $N_n(P)$. Let $\mu$ be the class in $H_1(\partial N_n(P))$ carried by $(p_0 \times [0,1])/\sim$, $p_0\in \partial \bar{S}_n$.
\begin{proof}[Proof of Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative}]
By assumption, $(S, h^n)$ is an open book decomposition of $X_n(C)$. As in \S \ref{sec: fdtc and lo} we use $\mu_n$ and $\lambda_n$ to denote the meridional and longitudinal slopes on $\partial X_n(C)$. Since $\gcd(m,n)=1$, we have
$$\Sigma_n(L)=N_n(P)\cup_{\varphi} X_n(C)$$
where the homeomorphism $\varphi: \partial N_n(P)\rightarrow \partial X_n(C)$ sends $\mu$ to $\lambda_n$ and $\lambda$ to $\mu_n$. We will show that $\Sigma_n(L)$ admits a co-orientable taut foliation and has a left-orderable fundamental group (and is therefore excellent) by showing that the Dehn fillings $N_n(P)(\mu-\lambda)$ and $X_n(C)(\mu_n-\lambda_n)$ have the same properties.
Consider $X_n(C)$ first. The Dehn filling $X_n(C)(\mu_n-\lambda_n)$ has an open book decomposition $(S, T_\partial \circ h)$, where $T_\partial$ denotes the right-handed Dehn twist along $\partial S$. The assumption that $c(h)\geq 0$ implies $nc(h)+1\geq 1$. By Theorem \ref{thm:conjecture fibre knots}(1) we know that $X_n(C)(\mu_n-\lambda_n)$ is excellent and hence has left-orderable fundamental group. However, to obtain a foliation on $\Sigma_n(L)$, we need to use the fact that Theorem \ref{thm:cgeq1} guarantees a co-oriented taut foliation on $X_n(C)(\mu_n-\lambda_n)$ which is transverse to the binding of the open book $(S, T_\partial \circ h)$. In our case, the binding is the core of the filling torus and consequently there is a co-oriented taut foliation on $X_n(C)$ which restricts to a linear foliation on $\partial X_n(C)$ of slope $\mu_n-\lambda_n$.
Next consider $N_n(P) \cong \bar{S}_n\times [0,1]/(x,1)\sim (\bar\psi(x),0)$. (See the discussion just prior to the proof of Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative}.) By Lemma \ref{lem:FDTC of lifting monodromy}, the assumption $c(b)\geq 0$ implies that $c(\bar{\psi})=\frac{c(b)}{n}\geq 0$. Since $N_n(P)(\mu-\lambda)$ is homeomorphic to the open book $(\bar{S}_n,T_\partial\circ\bar{\psi})$, an argument analogous to that used in the previous paragraph shows that there is a co-orientable taut foliation $\mathcal{F}_0$ on $N_n(P)$ which intersects $\partial N_n(P)$ transversely in a linear foliation of slope $\mu-\lambda$.
We need to show that $\pi_1(N_n (P)(\mu-\lambda))$ is left-orderable. Let $\mathcal{F}$ denote the co-orientable taut foliation on $N_n(P)(\mu-\lambda)$ induced by $\mathcal{F}_0$. The existence of $\mathcal{F}$ implies that $N_n(P)(\mu-\lambda)$ is irreducible (\cite{Nov}). Hence, if $b_1(N_n(P)(\mu-\lambda)) > 0$, this is true by Corollary \ref{cor:b1 is not zero lo}. Assume then that $b_1(N_n(P)(\mu-\lambda)) = 0$. In other words, $N_n(P)(\mu-\lambda)$ is a rational homology $3$-sphere. We show that $e(T\mathcal{F})=0$.
It is clear from our argument that the tangent plane field $T\mathcal{F}$ is homotopic to the contact structure supported by the open book $(\bar{S}_n,T_\partial\circ\bar{\psi})$ (cf. Theorem \ref{thm:cgeq1}). We observe that the monodromy $T_\partial\circ\bar{\psi}: \bar{S}_n\rightarrow \bar{S}_n$ is the lift of the braid $\Delta_m^{2n}b:D_m\rightarrow D_m$ (see \S \ref{subsec:lift open book}). Hence by Lemma \ref{lem:Euler class vanishes links}, the contact structure supported by $(\bar{S}_n,T_\partial\circ\bar{\psi})$ has zero Euler class and therefore so does $e(T\mathcal{F})$. As a result, $\pi_1(N_n(P)(\mu-\lambda))$ is left-orderable by Theorem \ref{prop:taut foliation left orderability}.
Finally, by piecing together the foliations on $N_n(P)$ and $X_n(C)$ constructed above we obtain a co-orientable taut foliation on $\Sigma_n(L)$. Further, an application of \cite[Theorem 2.7]{CLW} implies that $\pi_1(\Sigma_n(L))$ is left-orderable. Thus $\Sigma_n(L)$ is excellent.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:satellite c(h)>0 n>>0}]
The proof of this theorem is similar to the one used to prove Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative}. As such, we only point out a few of the key points.
Note that when $c(h)\neq 0$ and $n\geq \frac{2}{|c(h)|}$, the fractional Dehn twist coefficient of the monodromy of the $n$-fold cyclic cover $X_n(C)$ satisfies $|c(h^n)|=n|c(h)|\geq 2$. Then the argument used in the proof of Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative} to analyse $X_n(C)(\mu_n - \lambda_n)$ can be used to show that:
\begin{itemize}
\item $\pi_1(X_n(C)(\mu_n+ \lambda_n))$ and $\pi_1(X_n(C)(\mu_n- \lambda_n))$ are left-orderable;
\item there is a co-orientable taut foliation on $X_n(C)$ which induces a linear foliation on $\partial X_n(C)$ with leaves of slope $\mu_n + \lambda_n$;
\item there is a co-orientable taut foliation on $X_n(C)$ which induces a linear foliation on $\partial X_n(C)$ with leaves of slope $\mu_n - \lambda_n$.
\end{itemize}
When $c(h) = 0$, it is simple to see that $X_n(C)$ possesses the same properties.
Based on these observations, the rest of the proof follows exactly as in that of Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative} when $c(b)\geq 0$. In the case that $c(b)< 0$, one can apply the arguments of Theorem \ref{thm: satellite knot c(b) and c(h) nonnegative} but replacing $\mu_n - \lambda_n$ by $\mu_n+\lambda_n$ and $\mu - \lambda$ by $\mu+\lambda$ to complete the proof.
\end{proof}
\begin{thebibliography}{HRRWS}
{\small
\bibitem[Al]{Al} J.~W.~Alexander, {\it A Lemma on System of Knotted Curves}, Proc. Nat. Acad. Sci. USA {\bf 9} (1923), 93--95.
\bibitem[BM]{BM} K.~Baker and K.~Motegi, {\it Seifert vs slice genera of knots in twist families and a characterization of braid axes}, preprint (2017), arXiv:1705.10373.
\bibitem[Bal]{Bal} J.~Baldwin, {\it Heegaard Floer homology and genus one, one-boundary component open books}, J. Topology {\bf 4} (2008), 963--992.
\bibitem[BBG]{BBG} M.~Boileau, S.~Boyer and C.~McA.~Gordon, {\it Branched covers of quasipositive links and L-spaces}, preprint (2017), arXiv:1710.07658.
\bibitem[BC]{BC} S.~Boyer and A.~Clay, {\it Foliations, orders, representations, L-spaces and graph manifolds}, Adv. Math. {\bf 310} (2017), 159--234.
\bibitem[BGW]{BGW} S.~Boyer, C.~McA.~Gordon and L.~Watson, {\it On L-spaces and left-orderable fundamental groups}, Math. Ann. {\bf 356} (2013), 1213--1245.
\bibitem[BPH]{BPH} M.~Boileau, J.~Porti and M.~Heusener, {\bf Geometrization of 3-orbifolds of cyclic type}, Ast\'erisque {\bf 272}, Soc. Math. France, Paris, 2001.
\bibitem[BRW]{BRW} S.~Boyer, D.~Rolfsen and B.~Wiest, {\it Orderable 3-manifold groups}, Ann. Inst. Fourier {\bf 55} (2005), 243--288.
\bibitem[Bn]{Bn} J.~Bowden, {\it Approximating $C^0$-foliations by contact structures}, Geo. Func. Anal. {\bf 26} (2016), 1255--1296.
\bibitem[BZ]{BZ} G.~Burde and H.~Zieschang, {\bf Knots}, Second edition. De Gruyter Studies in Mathematics, 5. Walter de Gruyter \& Co., Berlin, 2003.
\bibitem[Cal1]{Cal1} D.~Calegari, {\it Leafwise smoothing laminations}, Algebr. Geom. Topol. {\bf 1} (2001), 579--585.
\bibitem[Cal2]{Cal2} \bysame, {\bf Foliations and the geometry of 3-manifolds}, Oxford Mathematical Monographs, Oxford University Press, Oxford, 2007.
\bibitem[Can]{Can} A.~Candel, {\it Uniformization of surface laminations}, Ann. Sci. Ec. Norm. Sup. {\bf 26} (1993), 489--516.
\bibitem[CC]{CC} A.~Candel and L.~Conlon, {\bf Foliations II}, Graduate Studies in Mathematics 60, Amer. Math. Soc., 2003.
\bibitem[CD]{CD} D.~Calegari and N.~Dunfield, {\it Laminations and groups of homeomorphisms of the circle}, Inv. Math. {\bf 152} (2003), 149--204.
\bibitem[CLW]{CLW} A.~Clay, T.~Lidman and L.~Watson, {\it Graph manifolds, left-orderability and amalgamation}, Algebr. Geom. Topol. {\bf 13} (2013), 2347--2368.
\bibitem[DPT]{DPT} M.K.~Dabkowski, J.H.~Przytycki, and A.A.~Togha, {\em Non-left-orderable 3-manifold groups}, Canad. Math. Bull. {\bf 48}(1) (2005), 32--40.
\bibitem[Dun]{Dun} W.~Dunbar, {\em Geometric orbifolds}, Rev. Mat. Univ. Complut. Madrid {\bf 1}(1) (1988), 67--99.
\bibitem[FLP]{FLP} A.~Fathi, F.~Laudenbach and V.~Po\'enaru, {\bf Thurston's work on surfaces}, Translated from the 1979 French original by Djun M. Kim and Dan Margalit. Mathematical Notes, 48. Princeton University Press, Princeton, NJ, 2012.
\bibitem[FM]{FM} B.~Farb and D.~Margalit, {\bf A primer on mapping class groups}, Princeton University Press, 2011.
\bibitem[Ga1]{Ga1} D.~Gabai, {\it Foliations and the topology of 3-manifolds}, J. Differ. Geom. {\bf 18}(3) (1983), 445--503.
\bibitem[Ga2]{Ga2} \bysame, {\it Foliations and $3$-manifolds}, in {\bf Proceedings of the {I}nternational {C}ongress of {M}athematicians}, {V}ol.\ {I}, {II} ({K}yoto, 1990), pages 609--619. Math. Soc. Japan, Tokyo, 1991.
\bibitem[GO]{GO} D.~Gabai and U.~Oertel, {\it Essential laminations in $3$-manifolds}, Ann. of Math. {\bf 130} (1989), 41--73.
\bibitem[Gei]{Gei} H.~Geiges, {\bf An introduction to contact topology}, Volume {\bf 109}, Cambridge University Press, 2008.
\bibitem[Ghy]{Ghy} E.~Ghys, {\it Groups acting on the circle}, Ens. Math. {\bf 47} (2001), 329--408.
\bibitem[Ghi]{Ghi} P.~Ghiggini, {\it Knot Floer homology detects genus-one fibred knots}, Amer. J. Math. {\bf 130}(5) (2008), 1151--1169.
\bibitem[Gor]{Gor} C.~McA.~Gordon, {\it Riley's conjecture on $SL(2,\mathbb R)$ representations of $2$-bridge knots}, J. Knot Theory Ramifications {\bf 26} (2017), 6 pp.
\bibitem[GLid1]{GLid1}
C.~McA.~Gordon and T.~Lidman, {\em Taut foliations, left-orderability, and cyclic branched covers}, Acta Math. Vietnam {\bf 39} (2014), no.4, 599--635.
\bibitem[GLid2]{GLid2}
\bysame, {\em Corrigendum to ``Taut foliations, left-orderability, and cyclic branched covers''}, Acta Math. Vietnam {\bf 42} (2017), 775--776.
\bibitem[HRW]{HRW} J.~Hanselman, J.~Rasmussen and L.~Watson, {\it Bordered Floer homology for manifolds with torus boundary via immersed curves}, preprint (2016), arXiv:1604.03466.
\bibitem[HRRW]{HRRW} J.~Hanselman, J.~Rasmussen, S.~Rasmussen, and L.~Watson, {\it Taut foliations on graph manifolds}, preprint (2015), arXiv:1508.05911v1.
\bibitem[HKP]{HKP} S.~Harvey, K.~Kawamuro, and O.~Plamenevskaya, {\it On transverse knots and branched covers}, Int. Math. Res. Notices {\bf 3} (2009), 512--546.
\betaibitem[Hed]{Hed} M.~Hedden, {\it Notions of positivity and the {O}zsv\'ath-{S}zab\'o concordance invariant}, J. Knot Theory Ramifications {\betaf 19} (2010), 617--629.
\betaibitem[HKM1]{HKM1} K.~Honda, W.~Kazez, and G.~Mati{\'{c}}, {\it Rightveering diffeomorphisms of compact surfaces with boundary}, Invent. Math. {\betaf 169} (2007), 427--449.
\betaibitem[HKM2]{HKM2} \betaysame, {\it Rightveering diffeomorphisms of compact surfaces with boundary II}, Geom. \& Top. {\betaf 12} (2008), 2057--2094.
\betaibitem[Hom]{Hom} J.~Hom, {\it Satellite knots and L-space surgeries}, Bull. London Math. Soc. {\betaf 48} (2016), 771--778.
\betaibitem[HS]{HS} J.~Howie and H.~Short, {\it The band-sum problem}, J. Lond. Math. Soc. {\betaf 2} (1985), 571--576.
\betaibitem[Hu]{Hu} Y.~Hu, {\it Left-orderability and cyclic branched coverings}, Algebr. Geom. Topol.{\betaf 15} (2015), 399--413.
\bibitem[Ito1]{Ito1} T.~Ito, {\it Braid ordering and knot genus}, J. Knot Theory Ramifications, {\bf 9} (2011), 1311--1323.
\bibitem[Ito2]{Ito2} \bysame, {\it Braid ordering and the geometry of closed braid}, Geom. \& Top. {\bf 15} (2011), 473--498.
\bibitem[IK]{IK} T.~Ito and K.~Kawamuro, {\it Essential open book foliations and fractional Dehn twist coefficient}, Geom. Dedicata {\bf 187} (2017), 17--67.
\bibitem[Ju]{Ju} A.~Juh\'asz, {\it A survey of Heegaard Floer homology}, in {\bf New ideas in low dimensional topology}, 237--296. World Scientific, 2015.
\bibitem[KR1]{KR1} W.~Kazez and R.~Roberts, {\it Fractional Dehn twists in knot theory and contact topology}, Algebr. Geom. Topol. {\bf 13} (2013), 3603--3637.
\bibitem[KR2]{KR2} \bysame, {\it Approximating $C^{1,0}$-foliations}, in {\bf Interactions between low-dimensional topology and mapping class groups}, 21--72, Geom. Topol. Monogr., {\bf 19}, Geom. Topol. Publ., Coventry, 2015.
\bibitem[LS]{LS} P.~Lisca and A.~Stipsicz, {\it Ozsv\'ath-Szab\'o invariants and tight contact 3-manifolds, {III}}, J. Symplectic Geom. {\bf 5} (2007), 357--384.
\bibitem[LW]{LW} Y.~Li and L.~Watson, {\it Genus one open books with non-left-orderable fundamental group}, Proc. Amer. Math. Soc. {\bf 142} (2014), 1425--1435.
\bibitem[Mal]{Mal} A.~Malyutin, {\it Twist number of (closed) braids}, St. Petersburg Math. J. {\bf 16} (2005), 791--813.
\bibitem[MB]{MB} J.~W.~Morgan and H.~Bass, {\bf The Smith Conjecture}, Pure and Applied Math. {\bf 112}, Academic Press, 1984.
\bibitem[Mi]{Mil} J.~Milnor, {\it On the existence of a connection with curvature zero}, Comm. Math. Helv. {\bf 32} (1958), 215--223.
\bibitem[MM]{MM} S.~Matsumoto and S.~Morita, {\it Bounded cohomology of certain groups of homeomorphisms}, Proc. Amer. Math. Soc. {\bf 94} (1985), 539--544.
\bibitem[Mor]{Mor} S.~Morita, {\bf Geometry of Differential Forms}, Translations of Mathematical Monographs, {\bf 201}, American Mathematical Society, 2001.
\bibitem[MS]{MS} J.~Milnor, J.~Stasheff, {\bf Characteristic classes}, Annals of Mathematics Studies, No. 76. Princeton University Press, 1974.
\bibitem[MSY]{MSY} W.~Meeks, L.~Simon, and S.~T.~Yau, {\it Embedded minimal surfaces, exotic spheres, and manifolds with positive Ricci curvature}, Ann. Math. {\bf 116} (1982), 621--659.
\bibitem[Mur]{Mur} K.~Murasugi, {\bf On closed 3-braids}, Mem. Amer. Math. Soc. {\bf 151}, American Mathematical Society, Providence, R.I., 1974.
\bibitem[Ni]{Ni} Y.~Ni, {\it Knot Floer homology detects fibred knots}, Invent. Math. {\bf 170} (2007), 577--608.
\bibitem[Nov]{Nov} S.~P.~Novikov, {\it The topology of foliations}, Tru. Mosk. Mat. Obsc. {\bf 14} (1965), 248--278.
\bibitem[OS1]{OS1} P.~Ozsv\'ath and Z.~Szab\'o, {\it Holomorphic disks and genus bounds}, Geom. \& Top. {\bf 8} (2004), 311--334.
\bibitem[OS2]{OS2} \bysame, \emph{On knot Floer homology and lens space surgeries}, Topology {\bf 44} (2005), 1281--1300.
\bibitem[Pe]{Pe} T.~Peters, {\em On L-spaces and non-left-orderable 3-manifold groups}, preprint (2009), arXiv:0903.4495.
\bibitem[Pla]{Pla} J.~Plante, {\it Foliations with measure preserving holonomy}, Ann. Math. {\bf 102} (1975), 327--361.
\bibitem[Rob]{Rob} R.~Roberts, {\it Taut foliations in punctured surface bundles, II}, Proc. London Math. Soc. {\bf 83} (2001), 443--471.
\bibitem[Rol]{Rol} D.~Rolfsen, {\bf Knots and links}, American Mathematical Soc. {\bf 346}, 2003.
\bibitem[Ros]{Ros} H.~Rosenberg, {\it Foliations by planes}, Topology {\bf 7} (1968), 131--138.
\bibitem[Spa]{Spa} E.~Spanier, {\bf Algebraic topology}, Springer-Verlag, 1995.
\bibitem[Thu1]{Thu1} W.~Thurston, {\it Hyperbolic structures on $3$-manifolds, II: Surface groups and $3$-manifolds which fiber over the circle}, preprint (1986), arXiv:math.GT/9801045.
\bibitem[Thu2]{Thu2} \bysame, {\it On the geometry and dynamics of diffeomorphisms of surfaces}, Bull. Amer. Math. Soc. {\bf 19} (1988), 417--431.
}
\end{thebibliography}
\end{document}
\begin{document}
\title{High-visibility two-photon interference at a telecom wavelength\\ using picosecond regime separated sources}
\author{Pierre Aboussouan}
\affiliation{Laboratoire de Physique de la Matière Condensée, CNRS UMR 6622, Université de Nice -- Sophia Antipolis, Parc Valrose, 06108 Nice Cedex 2, France.
}
\author{Olivier Alibart}
\affiliation{Laboratoire de Physique de la Matière Condensée, CNRS UMR 6622, Université de Nice -- Sophia Antipolis, Parc Valrose, 06108 Nice Cedex 2, France.
}
\author{Daniel B. Ostrowsky}
\affiliation{Laboratoire de Physique de la Matière Condensée, CNRS UMR 6622, Université de Nice -- Sophia Antipolis, Parc Valrose, 06108 Nice Cedex 2, France.
}
\author{Pascal Baldi}
\affiliation{Laboratoire de Physique de la Matière Condensée, CNRS UMR 6622, Université de Nice -- Sophia Antipolis, Parc Valrose, 06108 Nice Cedex 2, France.
}
\author{Sébastien Tanzilli}
\email{[email protected]}
\affiliation{Laboratoire de Physique de la Matière Condensée, CNRS UMR 6622, Université de Nice -- Sophia Antipolis, Parc Valrose, 06108 Nice Cedex 2, France.
}
\date{\today}
\begin{abstract}
We report on a two-photon interference experiment in a quantum relay configuration using two picosecond regime PPLN waveguide based sources emitting paired photons at 1550\,nm. The results show that the picosecond regime associated with a guided-wave scheme should have important repercussions for quantum relay implementations in real conditions, essential for improving both the working distance and the efficiency of quantum cryptography and networking systems. In contrast to already reported regimes, namely femtosecond and CW, it allows achieving a 99\% net visibility two-photon interference while maintaining a high effective photon pair rate using only standard telecom components and detectors.
\end{abstract}
\pacs{03.67.-a, 03.67.Bg, 03.67.Dd, 42.50.Dv, 42.50.Ex, 42.65.Lm, 42.65.Wi}
\keywords{Quantum Communication, Two-photon interference, Guided-wave Optics}
\maketitle
For the realization of quantum networks, interference between photons produced by independent sources is necessary. Photon coalescence (or two-photon interference) lies at the heart of quantum operations and is seen as a first step towards achieving both teleportation~\cite{Landry_Teleswisscom_07} and entanglement swapping~\cite{DeRiedmatten_swapping_05,Yang_Syncro_Indep_06,Kaltenbaek_Inter_Indep_09,Halder_Ent_Indep_07,Takesue_Ent_swap_09,Jang_inter_CW_indep_09}.
This effect has been extensively studied theoretically~\cite{Rbook,LegeroRempe_SPcharac_coalecence_03} and experimentally, initially based on two photons coming from a single down conversion source and therefore sharing a common past~\cite{HOM1987,Rarity_dip_89,Halder_2times25km_05}. However, experiments involving truly independent photons represent an important challenge for achieving longer quantum links by means of quantum relays~\cite{Collins_QRelays_05}. In this frame, it has been demonstrated theoretically that a two-photon interference net visibility of at least 95\% is required for practical implementations using currently available photon pair sources and multimode quantum memories~\cite{Simon07}. Reaching such a high visibility therefore appears to be a hard task since a perfect synchronization between independent sources is necessary to prevent any kind of distinguishability between the interfering photons. Several papers focusing on the synchronization issue have demonstrated that entanglement swapping with fully independent sources is in principle feasible in the femtosecond~\cite{Yang_Syncro_Indep_06,Kaltenbaek_Inter_Indep_09} and CW~\cite{Halder_Ent_Indep_07,Jang_inter_CW_indep_09} regimes. Unfortunately, beyond the fundamental interest, the reported interference visibilities remain either far from 95\% or show very low overall photon pair rate.
For instance, the best visibility reported so far in the femtosecond regime has been obtained by compensating the synchronization-induced temporal distinguishability by dramatically increasing the photons coherence time up to a few picoseconds at the expense of the overall brightness~\cite{Kaltenbaek_Inter_Indep_09}. Since laser cavities can easily be synchronized to subpicosecond accuracy~\cite{Hudson_synchro_06,Kim_synchro_08}, the study of the picosecond regime~\cite{Takesue_Ent_swap_09,Fulconis_TwoPhotInterf_07}, its associated filter bandwidths, and the type of photon pair generators, becomes of prime interest to ensure simultaneously a high degree of indistinguishability and a high overall brightness.
Here, we demonstrate that to achieve this, the picosecond regime should provide an efficient trade-off enabling near-perfect two-photon interference and high effective photon pair rates, when associated with standard components available from the telecommunications industry. More precisely, we realized an experiment based on a single picosecond pump laser and two periodically poled lithium niobate (PPLN) waveguides emitting paired photons around 1550 nm to approximate independent sources. We report the highest two-photon interference net visibility, i.e. of 99\%, ever demonstrated in a configuration extendable to quantum relays. Such a proof-of-principle emphasizes why guided-wave technology, in the picosecond regime, should lead to realistic quantum relay schemes, namely by offering a reduced-constraint solution for the synchronization issue when two completely independent lasers are employed. In the following, we briefly introduce the entanglement swapping based quantum relay scheme and discuss why the picosecond regime is a valuable trade-off. We then focus on our experimental demonstration. Finally, we detail a comparative study of performance with similar reported experiments.
A basic scheme of a quantum relay based on entanglement swapping is given in~\figurename{\ref{Fig_basics_synchro_filter}} (see caption for details). For long distance quantum communication links, the preferred qubit carriers are photons at 1550\,nm allowing the users, namely Alice and Bob, to take advantage of standard optical fibers for distribution purposes.
\begin{figure}
\caption{Schematics of a quantum relay involving two pairs of entangled photons emitted by two synchronized sources (EPR). The two inner photons are sent to a 50/50 BS where a Bell state measurement (BSM), based on two-photon interference, is performed. Using a dedicated detection, entanglement can be swapped from these photons to the outer ones making Alice and Bob connected by entanglement, as if they had each received one photon from an entangled pair directly. The BSM serves as a trigger for Alice and Bob's detectors. It therefore enables reducing the SNR of the overall quantum link and further increasing the maximum achievable distance.}
\label{Fig_basics_synchro_filter}
\end{figure}
Theoretically, the two inner photons can come from any type of source, provided they are identical at the beam-splitter (BS) (pre-selection) or at the detectors (post-selection).
From the experimental side, we have to compare their coherence time, $\tau_{\mbox{\footnotesize coh}}$, to the time uncertainty, $t_{\mbox{\footnotesize uncert}}$, within which they are created (i.e. the pulse duration of the pump laser(s)) or are detected (detector's timing jitters), which can be written as:
\begin{equation}
\tau_{\mbox{\footnotesize coh}}\geq t_{\mbox{\footnotesize uncert}}\label{temp}
\end{equation}
Suitable bandpass filters are therefore employed to achieve optimal interference visibilities. Up to now, this issue has been addressed using different approaches based on pulsed or CW lasers (see also Table~\ref{comparison} for comparison).
On the one hand, the first experiments reported two independent crystals pumped by femtosecond lasers, since this regime allows working with broadband filters, i.e. on the order of a few nm~\cite{DeRiedmatten_swapping_05}. However, the large photon bandwidths makes them more prone to chromatic and polarization dispersions in optical fibers and leads to reported visibilities below 85\%~\cite{DeRiedmatten_Qint_SpatialSeparated_03}. Moreover when two independent lasers are used, the two related laser cavities have to be properly synchronized so as to be identical within the pump pulse duration ($\sim$100\,fs)~\cite{Yang_Syncro_Indep_06,Kaltenbaek_Inter_Indep_09}. This can be achieved using phase-locked loops or atomic clocks and dedicated electronics~\cite{Shelton01,dossierlaser}. Any remaining jitter between the two lasers has to be compensated by narrowing the filters which implies a substantial reduction of the pair production rate for equivalent powers~\cite{Kaltenbaek_Inter_Indep_09}.
On the other hand, the development of narrow-bandwidth fiber Bragg grating (FBG) filters allows using two independent CW lasers stabilized against an atomic transition~\cite{Halder_Ent_Indep_07,Takesue_Ent_swap_09}. Since this regime does not provide any reference clock, the timing function is transferred to the coincidence detection after the BS. To ensure a high quality interference, the photons coherence time has to be longer than the jitter of the detectors. Experimentally, this is made possible using a suitable combination of low-jitter detectors (based on superconducting or up-conversion technologies), on the order of 70\,ps, associated with ultra-narrow FBG filters, on the order of 10\,pm~\cite{Halder_Ent_Indep_07}. In such a situation, one gets rid of the synchronization but the price to pay is rather high in terms of sensitive fluctuations of the filters central wavelengths, low experimental rates due to the narrow filters coupled to the randomness of entangling photons by coincidence detection, and high statistical fluctuations due to low count rates. All these issues prevent reaching high visibilities.
Between these two extremes, the picosecond regime appears to be an efficient compromise. First, the timing condition~(\ref{temp}) is easily met by using FBG filters showing much larger bandwidths than in CW, i.e. on the order of 100\,pm~\cite{Fulconis_TwoPhotInterf_07}. Second, this allows taking advantage of off-the-shelf InGaAs avalanche photodiodes (APD). Last, when compared to the femtosecond regime, narrower bandwidths loosen the constraints on both path-length stabilization for the interfering photons and laser synchronization to subpicosecond accuracy~\cite{Hudson_synchro_06,Kim_synchro_08} to obtain high-visibility Hong-Ou-Mandel interference. All these key points motivate investigating this regime.
Assuming synchronization is fairly feasible, we therefore focused our efforts at increasing the overall spectral brightness by employing a picosecond pump laser associated with state-of-the-art waveguide sources emitting at telecom wavelengths. As shown in \figurename{\ref{Fig_HOM_setup}}, the pump laser (Coherent MIRA 900-D) provides 1.2\,ps-duration, time-bandwidth limited ($\Delta \lambda_{p}=$ 0.25\,nm) pulses, at the wavelength of $\lambda_{p}=$ 768\,nm and at a repetition rate of 76\,MHz. The pulses are sent to a BS whose outputs are directed towards two 10\,mm-long PPLN waveguides fabricated in our laboratory using the soft-proton exchange technique~\cite{Tanzilli_PPLNW_02}. These devices are single mode at telecom wavelengths enabling only one nonlinear process to occur avoiding any additional background noise within the bandwidth of interest which is a key advantage compared to four-wave mixing sources~\cite{Fulconis_TwoPhotInterf_07,Takesue_Ent_swap_09} limited by Raman background photons. Both samples' opto-geometrical parameters were designed to produce identical degenerate paired photons at 1536\,nm within a bandwidth of 50\,nm when pumped at 768\,nm in the picosecond regime. Since the filter bandwidths are much narrower than the down conversion bandwidth ($\frac{\Delta\lambda_{spdc}}{\Delta\lambda_{filter}}\geq 10^{2}$), experimentally, the two sources are simply independently stabilized at the temperature of 343\,K to within 0.1\,K.
\begin{figure}
\caption{Coalescence experiment based on two PPLN waveguides pumped by a single picosecond laser. A very weak part of this beam is sent to a silicon APD (not represented) employed as a laser clock random divider. Its electrical output is used to trigger four InGaAs-APDs. $\Lambda$: poling period of the PPLN waveguides; I: isolator; R: retro-reflector; C: circulator; \&: AND-gate. The InGaAs-APDs feature 10\% quantum efficiency and a dark count probability of about $10^{-5}$ per ns.}
\label{Fig_HOM_setup}
\end{figure}
From each source, we select pairs of photons meeting the Fourier transform criterion using narrowband demultiplexers made of two optical circulators and a pair of FBG filters. These are set to reflect energy-matched pairs of wavelength at 1537.4\,nm and 1534.6\,nm, i.e. around degeneracy, associated with bandwidths of 800\,pm and 250\,pm, respectively. A slightly wider (800\,pm) filter is used on the long wavelength (1537\,nm) photons mainly to minimize overall losses. This filtering solution based on standard telecom components provides a clever way to separate deterministically the photons at the output of the waveguides and makes them each available in a single mode fiber~\cite{Halder_Ent_Indep_07}. The 250\,pm filters bandwidth has been chosen for two reasons: (a) it is narrow enough to accommodate the 1.2\,ps-duration of the laser pulses and any possible jitter up to 4\,ps due to laser synchronization and/or dispersion in the fiber; (b) it is large enough to stabilize their central wavelength using two independent proportional-integral-derivative temperature controllers.
To observe the two-photon interference, four-fold coincidences are detected thanks to four InGaAs-APDs (idQuantique 201) triggered by a Peltier cooled Si-APD (idQuantique 100) placed on the path of an attenuated fraction of the pump beam. Since these InGaAs-APDs are designed to handle a maximum triggering rate of 1\,MHz, this technique allows working at an average clock of 600\,kHz randomly picked from the 76\,MHz laser repetition rate. In our case, faster InGaAs-APD would highly improve the overall coincidence rate as shown in Ref.~\cite{Takesue_Ent_swap_09}. The evolution of the effective four-fold coincidence rate is given in \figurename{\ref{Fig_HOM_dip}} in which a remarkable dip is obtained when the delay between the two inner photons is zero.
\begin{figure}
\caption{Four-fold coincidence rate as a function of the relative delay, $\delta t$, between the interfering photons ($\sim0.04$ photon/pulse). We clearly observe a dip for $\delta t = 0$ that reaches the noise level. The Gaussian fit of the interference pattern shows a net visibility of $99\%\pm3\%$. We also plotted one of the two-fold coincidences related to one of the sources after the BS to verify this figure is constant.}
\label{Fig_HOM_dip}
\end{figure}
\begin{table*}[htbp]
\caption{\label{comparison}Compared two-photon interference visibilities, source brightness, and overall experimental coincidence rates, between reported works in continuous wave (CW), picosecond (ps), and femtosecond (fs) regimes. Regarding time uncertainties, note that the first number is always associated with the pulse duration while the $\pm$ sign corresponds to the synchronisation jitter if two pulsed lasers are involved. In contrast, the time uncertainty for the CW case is associated with the detector's timing jitters. The coherence time is calculated for Gaussian filters using the standard relation $\tau_c=0.44\frac{\lambda^2}{c\Delta\lambda}$.}
\begin{ruledtabular}
\begin{tabular}{c|ccc|ccc|ccc}
Reference & Source & Wavelength & Brightness & Regime\footnotemark[1] & Filter bandwidth & Rate & Raw & Net \\
& & $nm$ & $pair/s/pm/mW$ & (time uncertainty) & (coherence time) & $pair/s$ & visibility & visibility \\
\hline
Geneva~\cite{Halder_Ent_Indep_07} & PPLN/w & 1550 & $0.9\times10^{3}$ & $2\times$ CW (70\,ps) & 0.01\,nm (350\,ps) & $3\times10^{-3}$ & NA & 77\%\\
\textbf{Nice} & \textbf{PPLN/w} & \textbf{1550} & $1.6\times10^{3}$ & \textbf{$1\times$ ps} (\textbf{1.2}\,ps) & \textbf{0.25}\,nm (\textbf{14}\,ps) & $3\times10^{-1}$ & \textbf{93\%} & \textbf{99\%}\\
Atsugi~\cite{Takesue_Ent_swap_09} & Fiber & 1550 & NA & $1\times$ ps (19\,ps) & 0.2\,nm (18\,ps) & $2$\footnotemark[2] & 64\% & NA\\
Bristol~\cite{Fulconis_TwoPhotInterf_07} & Fiber & 600 & NA & $1\times$ ps (1.5\,ps) & 0.3\,nm (1.8\,ps) & $4\times10^{-1}$ & 88\% & NA\\
Geneva~\cite{DeRiedmatten_Qint_SpatialSeparated_03} & Bulk LBO & 1310 & NA & $1\times$ fs (200\,fs) & 5\,nm (500\,fs) & $7\times10^{-1}$ & 77\% & 84\%\\
Beijing~\cite{Yang_Syncro_Indep_06} & Bulk BBO & 800 & $1.2\times10^{-2}$ & $2\times$ fs\footnotemark[3]
(60$\pm$ 2\,fs) & 2.8\,nm (335\,fs) & $3\times10^{-2}$ & 82\% & NA\\
Vienna~\cite{Kaltenbaek_Inter_Indep_09} & Bulk BBO & 800 & NA & $2\times$ fs (50$\pm$ 260\,fs ) & 0.4\,nm (2.3\,ps) & $1\times10^{-2}$ & 96\% & NA\\
\end{tabular}
\end{ruledtabular}
\begin{minipage}{20cm}
\footnotetext[1]{In this column, the figures associated with the pump regimes correspond to the number of lasers employed.}
\footnotetext[2]{The experiment operates at 500\,MHz repetition rate therefore increasing the available photon pair rate.}
\footnotetext[3]{The two lasers are not truly independent since they share the same Ti:Sapphire crystal Kerr medium for accurate synchronization.}
\end{minipage}
\end{table*}
Using only 1\,mW of mean pump power per source, we obtained $4\times10^{3}$ two-fold coincidences per hour and per source which corresponds to about 0.05 photon per pulse. In terms of normalized brightness it means that our sources reach $1.6\times10^{3}$\,pairs$\cdot$s$^{-1}\cdot$pm$^{-1}\cdot$mW$^{-1}$ after the filtering stage. This brightness corresponds to the state-of-the-art for PPLN waveguides and is basically five orders of magnitude larger than that of bulk crystals~\cite{Tanzilli_PPLNW_02,Halder_HighCoh_pairSource_08}. When the two photons are made indistinguishable in time ($\delta t$) thanks to the adjustment of the retro-reflector (R) placed in front of one sample, a $93\%\pm3\%$ reduction in the raw four-fold coincidence count is obtained. By correctly recording the accidental coincidences, we are able to demonstrate that the net visibility reaches $99\%\pm3\%$. The full width at half maximum of the dip is approximately $\Delta\tau\approx11\,$ps, which is in good agreement with the coherence time of 14\,ps expected from the filter bandwidths. This interference visibility is also extremely close to the maximum value of $99.9\%$ calculated from the theory taking into account our filtering bandwidths, as properly outlined in Refs.~\cite{LegeroRempe_SPcharac_coalecence_03,DeRiedmatten_Qint_SpatialSeparated_03,Rbook}. Note that the very high stability offered by our guided-wave scheme allowed us to perform a 48h-long measurement leading to more than 30 points evenly spread over the dip.
Table~\ref{comparison} presents the compared results reported in the literature for similar configurations which can be differentiated by four key points: (i) the type of nonlinear generators, (ii) the emitted photon wavelengths, (iii) the pumping regime and its associated time uncertainty, (iv) and, finally, the applied filtering bandwidths and their associated coherence times. From a comparison between the reported results, we conclude that the combination of the picosecond regime and the single mode properties of the employed telecommunication components is the most performant scheme. It allows matching efficiently the inequality~(\ref{temp}) and obtaining near-perfect two-photon interference. For practical long distance quantum communication, it is also interesting to compare the effective number of pairs available per second at Alice and Bob's locations. This figure of merit is calculated by normalizing the four-fold coincidence rates with respect to Alice and Bob's detector efficiencies. A comparison with similar configurations at telecom wavelengths indicates that the picosecond regime allows maintaining a high effective pair production rate equivalent to that of the femtosecond regime but shows a much higher visibility. One should also note the two orders of magnitude rate difference for equivalent source technologies and brightness between the CW and picosecond regimes. In addition, the best visibility reported in Ref.~\cite{Kaltenbaek_Inter_Indep_09} with photons in the visible comes from the fact that the filtering bandwidths are on the order of those necessary for the picosecond regime. Consequently, the overall brightness is very low considering the use of efficient Si-APDs. Finally, bright fiber sources are contaminated by intrinsic spontaneous Raman scattering noise, leading to accidental coincidences and to reduced visibilities~\cite{Takesue_Ent_swap_09,Fulconis_TwoPhotInterf_07}.
The results demonstrated in this experiment are of broad interest since the obtained net visibility is the best value reported to date for comparable configurations. According to reference~\cite{Simon07}, this allows, for the first time, considering the possibility of using quantum relays in actual quantum cryptography networks. In this context, where two synchronized picosecond lasers must be used, we not only expect results of the same order, thanks to the high experimental stability and versatility demonstrated, but also because of the reduced-constraint on the synchronization issue~\cite{Hudson_synchro_06,Kim_synchro_08}.
The authors thank M. P. de Micheli and S. Tascu for the realization of the PPLN wave\-guides, G. Sauder for technical support, and A. Beveratos, H. Zbinden, and K. Thyagarajan for fruitful discussions. This work has been supported by the CNRS, the University of Nice -- Sophia Antipolis, and the French Ministry of Research.
\end{document} |
\begin{document}
\title{Sesqui-regular graphs with fixed smallest eigenvalue}
\author{Jack H. Koolen$^{1,2}$, Brhane Gebremichel$^1$, Jae Young Yang$^3$,\\
Qianqian Yang$^{1}$\footnote{Q. Yang is the corresponding author.}
\\ \\
\small $^1$ School of Mathematical Sciences,\\
\small University of Science and Technology of China, \\
\small 96 Jinzhai Road, Hefei, 230026, Anhui, PR China\\
\small $^2$ Wen-Tsun Wu Key Laboratory of CAS,\\
\small 96 Jinzhai Road, Hefei, 230026, Anhui, PR China\\
\small ${}^3$ Samsung SDS,\\
\small Olympic-ro 35-gil 125, Songpa-gu, Seoul, 05510, Republic of Korea\\
\small {\tt e-mail: [email protected], [email protected],}\\
\small {\tt [email protected], [email protected]}
}
\date{}
\maketitle
\begin{abstract}
Let $\lambda\geq2$ be an integer. For strongly regular graphs with parameters $(v, k, a,c)$ and smallest eigenvalue $-\lambda$, Neumaier gave two bounds on $c$ by using algebraic property of strongly regular graphs. In this paper, we will study a new class of regular graphs called sesqui-regular graphs, which contains strongly regular graphs as a subclass, and prove that for a sesqui-regular graph with parameters $(v,k,c)$ and smallest eigenvalue at least $-\lambda$, if $k$ is very large, then either $c \leq \lambda^2(\lambda -1)$ or $v-k-1 \leq \frac{(\lambda-1)^2}{4} + 1$ holds.
\end{abstract}
\textbf{Keywords}: sesqui-regular graph, smallest eigenvalue, Hoffman graphs, Alon-Boppana Theorem
\textbf{AMS classification}: 05C50, 05C75, 05C62
\section{Introduction}
A {\em strongly regular graph} with parameters $(v, k, a, c)$ is a $k$-regular graph with $v$ vertices such that the number of common neighbors of any two adjacent vertices is exactly $a$ and the number of common neighbors of any two distinct non-adjacent vertices is exactly $c$.
Neumaier \cite{-m} proved the following two theorems on strongly regular graphs.
\begin{theorem}[{\cite[Theorem 3.1]{-m}}]\label{-m1}
Let $\lambda\geq 2$ be an integer. For any connected strongly regular graph $G$ with parameters $(v, k, a,c)$, if the smallest eigenvalue of $G$ is $-\lambda$, then either $G$ is a complete multipartite graph, or $c \leq \lambda^3 (2\lambda -3)$.
\end{theorem}
\begin{theorem}[{\cite[Theorem 5.1]{-m}}]\label{-m2} Let $\lambda\geq2$ be an integer. Except for finitely many exceptions, any strongly regular graph with smallest eigenvalue $-\lambda$ is a Steiner graph, a Latin square graph, or a complete multipartite graph.
\end{theorem}
To prove Theorem \ref{-m1} and Theorem \ref{-m2}, Neumaier used the Krein parameters of the underlying association scheme. In \cite{YK18}, Yang and Koolen used a new method and showed the following result on co-edge regular graphs. A {\em co-edge regular graph} with parameters $(v, k, c)$ is a $k$-regular graph with $v$ vertices such that the number of common neighbors of any two distinct non-adjacent vertices is exactly $c$.
\begin{theorem}[{\cite[Theorem 7.1]{YK18}}]\label{YK1}
Let $\lambda \geq 2$ be a real number. There exists a real number $M_1(\lambda)$ such that, for any connected co-edge regular graph $G$ with parameters $(v, k, c)$, if the smallest eigenvalue of $G$ is at least $-\lambda $, then either $c\leq M_1(\lambda)$ or $v-k-1 \leq \frac{(\lambda -1)^2}{4}+1$ holds.
\end{theorem}
This result can be regarded as a generalization of Theorem \ref{-m1}.
Before we state our main results, we need to define a larger class of regular graphs, which contains both the class of strongly regular graphs and the class of co-edge regular graphs, that is, the class of sesqui-regular graphs. A {\em sesqui-regular graph} with parameters $(v, k, c)$ is a $k$-regular graph with $v$ vertices such that the number of common neighbors of any two vertices at distance 2 is exactly $c$.
The first result is a slight generalization of Theorem \ref{YK1} on sesqui-regular graphs.
\begin{theorem}\label{YK2}
Let $\lambda \geq 2$ be a real number. There exists a real number $M_2(\lambda)$ such that, for any connected sesqui-regular graph $G$ with parameters $(v, k, c)$, if the smallest eigenvalue of $G$ is at least $-\lambda $, then either $c\leq M_2(\lambda)$ or $v-k-1 \leq \frac{(\lambda -1)^2}{4}+1$ holds.
\end{theorem}
We omit the proof of Theorem \ref{YK2}, since it can be proven by exactly the same method as in the proof of Theorem \ref{YK1}, for which we refer to \cite{YK18}. Our main result in the present paper is the following.
\begin{theorem}\label{main sesqui}
Let $\lambda \geq 2$ be an integer. There exists a real number $C(\lambda)$ such that, for any connected sesqui-regular graph $G$ with parameters $(v, k, c)$, if the smallest eigenvalue of $G$ is at least $-\lambda$ and $k \geq C(\lambda)$, then either $c\leq \lambda^2(\lambda-1)$ or $v-k-1\leq \frac{(\lambda -1)^2}{4}+1$ holds.
\end{theorem}
\begin{remark}
\begin{enumerate}[\rm(i)]
\item If we replace the condition that $\lambda\geq2$ is an integer in Theorem \ref{main sesqui} by that $\lambda\geq2$ is a real number with $\lambda-\lfloor\lambda\rfloor-\frac{1}{\lambda-1}<0$, then we can replace $c\leq\lambda^2(\lambda-1)$ in the conclusion by $c\leq\lfloor\lambda\rfloor\lfloor\lambda(\lambda-1)\rfloor$.
\item We wonder whether we can replace $c\leq\lambda^2(\lambda-1)$ in Theorem \ref{main sesqui} by $c\leq\lambda^2$. Then the result will be sharp, as a Steiner graph with smallest eigenvalue $-\lambda$ has $c=\lambda^2$.
\item Also, Theorem \ref{main sesqui} can be regarded as a combinatorial generalization of Theorem \ref{-m2}, since Theorem \ref{-m2} states that the only strongly regular graphs with smallest eigenvalue $-\lambda$ and large valency are the Steiner graphs with $c=\lambda^2$, the Latin square graphs with $c= \lambda(\lambda -1)$ and the complete multipartite graphs $K_{t\times \lambda}$.
\end{enumerate}
\end{remark}
This paper is organized as follows. In Section \ref{sec:Hoffman}, we introduce the basic definitions and properties of Hoffman graphs, quasi-cliques, and associated Hoffman graphs which are main tools of this paper. In Section \ref{sec:few Hoffman graphs}, we give some Hoffman graphs with smallest eigenvalue less than $-\lambda$, which play a key role in our proof. In Section \ref{sec:proof}, we show a proof of Theorem \ref{main sesqui}.
\section{Preliminaries}\label{sec:Hoffman}
\subsection{Definitions and properties related to Hoffman graphs}
In this section, we introduce the definitions and basic properties of Hoffman graphs and associated Hoffman graphs. For more details or proofs, see \cite{Jang, KKY, KCY, Woo}.
\begin{definition}
A Hoffman graph $\mathfrak{h}$ is a pair $(H, \ell)$ with a labeling map $\ell : V(H) \rightarrow \{{ \textbf{\textit{f,s}}}\}$ satisfying two conditions:
\begin{enumerate}[\rm(i)]
\item the vertices with label \textbf{\textit{f}} are pairwise non-adjacent,
\item every vertex with label \textbf{\textit{f}} has at least one neighbor with label \textbf{\textit{s}}.
\end{enumerate}
\end{definition}
The vertices with label \textbf{\textit{f}} are called {\it fat} vertices, and the set of fat vertices of $\mathfrak{h}$ is denoted by $V_{\textbf{\textit{f}}}(\mathfrak{h})$. The vertices with label \textbf{\textit{s}} are called {\it slim} vertices, and the set of slim vertices is denoted by $V_{\textbf{\textit{s}}}(\mathfrak{h})$.
For a vertex $x$ of $\mathfrak{h}$, we define $N_{\mathfrak{h}}^{\textbf{\textit{s}}}(x)$ (resp. $N_{\mathfrak{h}}^{\textbf{\textit{f}}}(x)$) to be the set of slim (resp. fat) neighbors of $x$ in $\mathfrak{h}$. If every slim vertex of $\mathfrak{h}$ has a fat neighbor, then we call $\mathfrak{h}$ \emph{fat}. In a similar fashion, we define $N^{\textbf{\textit{s}}}_\mathfrak{h}(x_1,x_2)$ (resp. $N^{\textbf{\textit{f}}}_\mathfrak{h}(x_1,x_2)$) to be the set of common slim (resp. fat) neighbors of $x_1$ and $x_2$ in $\mathfrak{h}$.
The \emph{slim graph} of the Hoffman graph $\mathfrak{h}$ is the subgraph of $H$ induced on $V_{\textbf{\textit{s}}}(\mathfrak{h})$.
For a fat vertex $F$ of $\mathfrak{h}$, a \emph{quasi-clique} (with respect to $F$) is a subgraph of the slim graph of $\mathfrak{h}$ induced on the slim vertices adjacent to $F$ in $\mathfrak{h}$, and we denote it by $Q_{\mathfrak{h}}(F)$. Now, we give more basic definitions as follows.
\begin{definition} \label{def: induced sub}
A Hoffman graph $\mathfrak{h}_1 = (H_1, \ell_1)$ is called an {\it induced Hoffman subgraph} of a Hoffman graph $\mathfrak{h}=(H,\ell)$, if $H_1$ is an induced subgraph of $H$ and $\ell(x) = \ell_1 (x)$ for all vertices $x$ of $H_1$.
\end{definition}
\begin{definition}
Two Hoffman graphs $\mathfrak{h}=(H, \ell)$ and $\mathfrak{h}'=(H', \ell')$ are called {\it isomorphic} if there exists a graph isomorphism from $H$ to $H'$ which preserves the labeling.
\end{definition}
\begin{definition}For a Hoffman graph $\mathfrak{h}=(H,\ell)$, there exists a matrix $C$ such that the adjacency matrix $A$ of $H$ satisfies
\begin{eqnarray*}
A=\left(
\begin{array}{cc}
A_{\textbf{\textit{s}}} & C\\
C^{T} & O
\end{array}
\right),
\end{eqnarray*}
where $A_{\textbf{\textit{s}}}$ is the adjacency matrix of the slim graph of $\mathfrak{h}$, and $O$ is a zero matrix. The special matrix $Sp(\mathfrak{h})$ of $\mathfrak{h}$ is the real symmetric matrix $A_{\textbf{\textit{s}}}-CC^{T}.$
\end{definition}
The \emph{eigenvalues} of $\mathfrak{h}$ are the eigenvalues of its special matrix $Sp(\mathfrak{h})$, and the smallest eigenvalue of $\mathfrak{h}$ is always denoted by $\lambda_{\min}(\mathfrak{h})$.
Now, we discuss some spectral properties of the smallest eigenvalue of a Hoffman graph and its induced Hoffman subgraph.
\begin{lemma}[{\cite[Corollary 3.3]{Woo}}] If $\mathfrak{h}'$ is an induced Hoffman subgraph of $\mathfrak{h}$, then $\lambda_{\min}(\mathfrak{h}') \geq \lambda_{\min}(\mathfrak{h})$ holds.
\end{lemma}
Let $\mathfrak{h}$ be a Hoffman graph with $V_{\textbf{\textit{f}}}(\mathfrak{h}) = \{F_1, F_2, \ldots,F_t\}$. For a positive integer $p$, let $G(\mathfrak{h}, p)$ be the graph obtained from $\mathfrak{h}$ by replacing every fat vertex of $\mathfrak{h}$ by a complete graph $K_p$ of $p$ slim vertices, and connecting all vertices of the $K_p$ to all neighbors of the corresponding fat vertex by edges. For the smallest eigenvalue of $\mathfrak{h}$ and $G(\mathfrak{h}, p)$, we have the following:
\begin{theorem}[Hoffman and Ostrowski]\label{OH}
Let $\mathfrak{h}$ be a Hoffman graph and $p$ a positive integer. Then
$$ \lambda_{\min}(G(\mathfrak{h}, p)) \geq \lambda_{\min}(\mathfrak{h}), $$
and
$$ \lim_{p\rightarrow \infty} \lambda_{\min}(G(\mathfrak{h}, p)) = \lambda_{\min}(\mathfrak{h}). $$
\end{theorem}
For a proof, we refer to \cite[Theorem 2.14]{Jang} and \cite[Theorem 3.2]{KYY3}.
\subsection{Quasi-cliques and associated Hoffman graphs}
For a positive integer $m$, let $\widetilde{K}_{2m}$ be the graph on $2m +1$ vertices consisting of a complete graph $K_{2m}$ and a vertex which is adjacent to exactly half of the vertices of $K_{2m}$.
\begin{lemma}[{cf.~\cite[Lemma 3.2]{YK18}}]\label{min2} Let $\lambda\geq 1$ be a real number. There exist minimum positive integers $t(\lambda):=\lfloor\lambda^2\rfloor+1$ and $m(\lambda)$ such that, for any integers $t\geq t(\lambda)$ and $m\geq m(\lambda)$, the smallest eigenvalue of the graph $K_{1,t}$ and the smallest eigenvalue of the graph $\widetilde{K}_{2m}$ both are less than $-\lambda$.\end{lemma}
Let $G$ be a graph that does not contain $\widetilde{K}_{2m}$ as an induced subgraph. For a positive integer $n \geq (m+1)^2$, let $\mathcal{C}(n)$ be the set of maximal cliques of $G$ with at least $n$ vertices. Define the relation $\equiv_n^m$ on $\mathcal{C}(n)$ by $C_1 \equiv_n^m C_2$, if each vertex $x \in C_1$ has at most $m-1$ non-neighbors in $C_2$ and each vertex $y \in C_2$ has at most $m-1$ non-neighbors in $C_1$ for $C_1, C_2 \in \mathcal{C}(n)$. Note that $\equiv_n^m$ on $\mathcal{C}(n)$ is an equivalence relation if $n\geq(m+1)^2$ (see \cite [Lemma 3.1]{KKY}).
For a maximal clique $C \in \mathcal{C}(n)$, let $[C]_n^m$ denote the equivalence class containing $C$ under the equivalence relation $\equiv_n^m$. We can define the term {\it quasi-clique} as follows:
\begin{definition}\label{def: quasi-clique}
Let $m\geq2$ and $n\geq2$ be two integers where $n \geq (m+1)^2$, and let $G$ be a graph that does not contain $\widetilde{K}_{2m}$ as an induced subgraph. For a maximal clique $C \in \mathcal{C}(n)$, the quasi-clique $Q([C]_n^m)$, with respect to the pair $(m,n)$, is the induced subgraph of $G$ on the vertices which have at most $m-1$ non-neighbors in $C$.
\end{definition}
To check the well-definedness of the quasi-clique $Q([C]_n^m)$ for $C \in \mathcal{C}(n)$, we refer the readers to {\rm \cite[Lemma 3.2 and Lemma 3.3]{KKY}}. The reason to define a quasi-clique in Definition \ref{def: quasi-clique} is to construct a Hoffman graph in Definition \ref{def: asso hoff} which is highly related to a given graph. After Definition \ref{def: asso hoff}, we will explain that the quasi-clique with respect to a pair $(m,n)$ in Definition \ref{def: quasi-clique} essentially coincides with the quasi-clique with respect to a fat vertex defined before Definition \ref{def: induced sub}.
\begin{definition}\label{def: asso hoff}
Let $m\geq2$ and $n\geq2$ be two integers where $n \geq (m+1)^2$ and let $G$ be a graph which does not contain $\widetilde{K}_{2m}$ as an induced subgraph. Let $[C_1]_n^m, [C_2]_n^m, \ldots, [C_t]_n^m$ be all the equivalence classes of $G$ under $\equiv_n^m$. The associated Hoffman graph $\mathfrak{g} = \mathfrak{g}(G, m, n)$ is the Hoffman graph with the following properties:
\begin{enumerate}[\rm(i)]
\item $V_{\textbf{\textit{s}}}(\mathfrak{g}) = V(G)$ and $V_{\textbf{\textit{f}}}(\mathfrak{g}) = \{F_1, \dots, F_t\}$, where $t$ is the number of the equivalence classes of $G$ under $\equiv_n^m$,
\item the slim graph of $\mathfrak{g}$ is isomorphic to $G$,
\item the fat vertex $F_i$ is adjacent to every vertex of the quasi-clique $Q([C_i]_n^m)$ for $i=1,2,\dots,t$.
\end{enumerate}
\end{definition}
From the above definition of associated Hoffman graphs, we find that for each $i=1,\dots,t$, the quasi-clique $Q([C_i]_n^m)$ with respect to the pair $(m, n)$ is exactly the quasi-clique $Q_{\mathfrak{g}}(F_i)$ in $\mathfrak{g} = \mathfrak{g}(G, m, n)$ with respect to the fat vertex $F_i$.
The following proposition shows an important property of the associated Hoffman graph.
\begin{proposition}[{\cite[Proposition 4.1]{KKY}}]\label{asso}
Let $m \geq 2, \phi,\sigma,p \geq 1$ be integers.
There exists a positive integer $n = n(m, \phi, \sigma, p) \geq (m+1)^2$ such that, for any graph $G$, any integer $n' \geq n$ and any Hoffman graph $\mathfrak{h}$ with at most $\phi$ fat vertices and at most $\sigma$ slim vertices, the graph $G(\mathfrak{h}, p)$ is an induced subgraph of $G$, provided that the graph $G$ satisfies the following conditions:
\begin{enumerate}[\rm(i)]
\item the graph $G$ does not contain $\widetilde{K}_{2m}$ as an induced subgraph,
\item its associated Hoffman graph $\mathfrak{g} = \mathfrak{g}(G, m, n')$ contains $\mathfrak{h}$ as an induced Hoffman subgraph.
\end{enumerate}
\end{proposition}
\section{Some Hoffman graphs}\label{sec:few Hoffman graphs}
Let $\lambda\geq2$ be a real number. In this section, we study some Hoffman graphs with smallest eigenvalue less than $-\lambda$, which will be used in the proof of Theorem \ref{main sesqui} later.
For a graph $H$, let $\mathfrak{q}(H)$ be the Hoffman graph obtained by attaching one fat vertex to all vertices of $H$. Then it is easily checked that $\lambda_{\min}(\mathfrak{q}(H)) = -\lambda_{\max}(\overline{H})-1$ holds, where $\lambda_{\max}(\overline{H})$ is the largest eigenvalue of the complement $\overline{H}$ of $H$.
\begin{lemma}[{\cite[Lemma 3.1]{YK18}}]\label{l+2}
Let $\lambda \geq 2$ be a real number. Let $H$ be a graph with $\lfloor (\lambda-1)^2\rfloor +2$ vertices which has at least one isolated vertex. Then $\lambda_{\min}(\mathfrak{q}(H))< -\lambda$.
\end{lemma}
\begin{lemma}\label{pp}
Let $\lambda \geq 2$ be a real number. Let $\{H(\lambda)_i\}_{i=1}^{r(\lambda)}$ be the set of all graphs with $\lfloor (\lambda-1)^2\rfloor+2$ vertices which have at least one isolated vertex. There exists a positive integer $p'(\lambda)$ such that, for every integer $p'\geq p'(\lambda)$, the inequality $\lambda_{\min}(G(\mathfrak{q}(H(\lambda)_i), p'))< -\lambda$ holds for all $i= 1, \dots, r(\lambda)$.
\end{lemma}
\begin{proof}
For each $i$, we have $\lambda_{\min}(\mathfrak{q}(H(\lambda)_i)) < -\lambda$ by Lemma \ref{l+2}. Then for
$i=1, \dots, r(\lambda)$, there exist positive integers $p'_i(\lambda)$'s such that for every integer $p_i'\geq p'_i(\lambda)$, \[\lambda_{\min}(G(\mathfrak{q}(H(\lambda)_i), p'_i) )< -\lambda\]
holds by Theorem \ref{OH}. By taking $p'(\lambda) = \max\limits_i p'_i(\lambda)$, we complete the proof.\end{proof}
\begin{remark}
Let $H$ be the graph $K_{\lfloor (\lambda-1)^2\rfloor +1} \cup K_1$ and let $H'$ be any graph with ${\lfloor (\lambda-1)^2\rfloor+2}$ vertices which has at least one isolated vertex. It can be shown that $\lambda_{\min}(G(\mathfrak{q}(H), p')) \geq \lambda_{\min}(G(\mathfrak{q}(H'), p'))$ holds for every integer $p'\geq p'(\lambda)$.
\end{remark}
Now, we introduce three Hoffman graphs $\mathfrak{h}^{(t)}$, $\mathfrak{h}^{(t,1)}$ and $\mathfrak{c}_t$. The Hoffman graph $\mathfrak{h}^{(t)}$ is the Hoffman graph with one slim vertex adjacent to $t$ fat vertices. The Hoffman graph $\mathfrak{h}^{(t,1)}$ is the Hoffman graph with two adjacent slim vertices $s_1$ and $s_t$ such that $s_t$ is adjacent to $t$ fat vertices and $s_1$ is adjacent to one fat vertex different from the $t$ fat neighbors of $s_t$ (see Figure \ref{fig:cherry}). Note that $\lambda_{\min}(\mathfrak{h}^{(t)}) = -t$ and $\lambda_{\min}(\mathfrak{h}^{(t,1)}) = \frac{-t-1-\sqrt{t^2-2t+5}}{2}$.
\begin{figure}
\caption{The Hoffman graphs $\mathfrak{h}
\label{fig:cherry}
\end{figure}
\noindent The Hoffman graph $\mathfrak{c}_t$ is the Hoffman graph obtained by attaching one fat vertex to $t$ vertices of a $K_{t+1}$ (see Figure \ref{fig:kbb}). Then $\lambda_{\min}(\mathfrak{c}_t) = \frac{-1-\sqrt{1+4t}}{2}$.
\begin{figure}
\caption{The Hoffman graph $\mathfrak{c}
\label{fig:kbb}
\end{figure}
For the smallest eigenvalue of $\mathfrak{h}^{(t)}$, $\mathfrak{h}^{(t,1)}$ and $\mathfrak{c}_t$, we have the following.
\begin{lemma}\label{ppp}
Let $\lambda \geq 2$ be a real number with $\lambda-\lfloor\lambda\rfloor-\frac{1}{\lambda-1}<0$. There exists a positive integer $p''(\lambda)$ such that for every integer $p''\geq p''(\lambda) $, the following three inequalities hold.
\begin{enumerate}[\rm(i)]
\item $\lambda_{\min}(G(\mathfrak{h}^{(\lfloor\lambda\rfloor+1)}, p''))< -\lambda$,
\item $\lambda_{\min}(G(\mathfrak{h}^{(\lfloor\lambda\rfloor,1)}, p''))< -\lambda$,
\item $\lambda_{\min}(G(\mathfrak{c}_{\lfloor\lambda^2 -\lambda\rfloor+1}, p''))<-\lambda$.
\end{enumerate}
\end{lemma}
\begin{proof}
It is straightforward to obtain \[\lambda_{\min}(\mathfrak{h}^{(\lfloor\lambda\rfloor+1)}) < -\lambda,~ \lambda_{\min}(\mathfrak{h}^{(\lfloor\lambda\rfloor,1)}) < -\lambda,\text{ and }\lambda_{\min}(\mathfrak{c}_{\lfloor\lambda^2 -\lambda\rfloor+1}) <-\lambda.\] Theorem \ref{OH} implies that there exist positive integers $p''_1(\lambda)$, $p''_2(\lambda)$ and $p''_3(\lambda)$ such that, for any integers
$p''_1\geq p''_1(\lambda)$, $p''_2\geq p''_2(\lambda)$ and $p''_3\geq p''_3(\lambda)$,
\[\lambda_{\min}(G(\mathfrak{h}^{(\lfloor\lambda\rfloor+1)},p''_1))< -\lambda,~\lambda_{\min}(G(\mathfrak{h}^{(\lfloor\lambda\rfloor,1)}, p''_2))< -\lambda,\]
and \[\lambda_{\min}(G(\mathfrak{c}_{\lfloor\lambda^2 -\lambda\rfloor+1}, p''_3))<-\lambda\]
hold.
By setting $p''(\lambda) = \max\limits_i p''_i(\lambda)$, we complete the proof.
\end{proof}
\section{Proof of the main theorem}\label{sec:proof}
In this section, we will complete the proof of Theorem \ref{main sesqui}. Before that, the following theorem is necessary.
\begin{theorem}\label{thm:pre} Let $\lambda\geq2$ be an integer and $m(\lambda)$ the minimum positive integer such that for any integer $m\geq m(\lambda)$, the graph $\widetilde{K}_{2m}$ has smallest eigenvalue less than $-\lambda$. There exists a positive integer $n'\geq (m(\lambda)+1)^2$ such that for any integer $q\geq n'$, a real number $C'(\lambda,q)$ satisfying the following exists.
For any connected sesqui-regular graph $G$ with parameters $(v,k,c)$, where $v-k-1>\frac{(\lambda-1)^2}{4}+1$, and with smallest eigenvalue at least $-\lambda$, if $k\geq C'(\lambda,q)$, then the associated Hoffman graph $\mathfrak{g}:=\mathfrak{g}(G,m(\lambda),q)$ is fat and has the following properties:
\begin{enumerate}[\rm(i)]
\item $\mathfrak{g}$ has $G$ as its slim graph.
\item $|N_\mathfrak{g}^{\textbf{\textit{f}}}(x)|\leq \lambda$ for every $x\in V_{\textbf{\textit{s}}}(\mathfrak{g})$, and if the equality holds, then for each slim vertex $y$ adjacent to $x$, $|N_\mathfrak{g}^{\textbf{\textit{f}}}(x,y)|\geq1$.
\item For $x\in V_{\textbf{\textit{s}}}(\mathfrak{g})$ and $F\in V_{\textbf{\textit{f}}}(\mathfrak{g})$, if $x$ is adjacent to $F$, then $x$ has at most $(\lambda-1)^2$ non-neighbors in the quasi-clique $Q_\mathfrak{g}(F)$.
\item For every $F\in V_{\textbf{\textit{f}}}(\mathfrak{g})$, the quasi-clique $Q_\mathfrak{g}(F)$ is a clique.
\item For $x\in V_{\textbf{\textit{s}}}(\mathfrak{g})$ and $F\in V_{\textbf{\textit{f}}}(\mathfrak{g})$, if $x$ is not adjacent to $F$, then $x$ has at most $\lambda^2-\lambda$ neighbors in the quasi-clique $Q_\mathfrak{g}(F)$.
\item For $x\in V_{\textbf{\textit{s}}}(\mathfrak{g})$, there exist a slim vertex $y\in V_{\textbf{\textit{s}}}(\mathfrak{g})$ at distance $2$ from $x$ in $G$ and a fat vertex $F'\in N_\mathfrak{g}(y)$ such that for any slim vertex $z$ in $N_\mathfrak{g}^{\textbf{\textit{s}}}(x,y)$, either $|N_\mathfrak{g}^{\textbf{\textit{f}}}(x,z)|\geq1$ or $z$ is adjacent to $F'$.
\end{enumerate}
\end{theorem}
\begin{proof}
By Theorem \ref{YK2}, we obtain $c\leq M_2(\lambda)$ immediately, where $M_2(\lambda)$ is such that Theorem \ref{YK2} holds.
Let $p'(\lambda)$ and $p''(\lambda)$ be the integers in Lemma \ref{pp} and Lemma \ref{ppp} respectively, and let $p =\max\{p'(\lambda), p''(\lambda)\}$. Let $t:=t(\lambda)=\lambda^2+1$, where $t(\lambda)$ is the integer in Lemma \ref{min2}. Let $n=n(m(\lambda), \lambda+1, \lambda^2 -\lambda+2, p)$, where $n(m(\lambda), \lambda+1, \lambda^2 -\lambda+2, p)$ is the integer in Proposition \ref{asso}. Let $n' = \max\{n, M_2(\lambda) + 2(\lambda-1)^2 + 3\}$. For an integer $q\geq n'$, let $C'(\lambda,q)=R((\lambda^2-\lambda) R(q-1,t), t)$, where $R(\cdot,\cdot)$ denotes the Ramsey number of two positive integers.
Suppose $k \geq C'(\lambda,q)$. Note that $G$ contains neither $\widetilde{K}_{2m(\lambda)}$ nor $K_{1,t}$ as an induced subgraph, since
$G$ has smallest eigenvalue at least $-\lambda$, but both $\widetilde{K}_{2m(\lambda)}$ and $K_{1,t}$ have smallest eigenvalue less than $-\lambda$ by Lemma \ref{min2}. By the property of Ramsey numbers, every vertex of $G$ lies in a clique of size $(\lambda^2-\lambda)R(q-1,t)+1~(\geq q)$. This means that in the associated Hoffman graph $\mathfrak{g}:=\mathfrak{g}(G,m(\lambda),q)$, every slim vertex has a fat neighbor, and lies in at least one quasi-clique with size at least $ (\lambda^2-\lambda) R(q-1,t)+1$. By Proposition \ref{asso}, Lemma \ref{pp} and Lemma \ref{ppp}, the associated Hoffman graph $\mathfrak{g}$ does not contain Hoffman graphs in the set $\{\mathfrak{h}^{(\lambda+1)}, \mathfrak{h}^{(\lambda, 1)}, \mathfrak{c}_{\lambda^2 -\lambda+1}\}\cup \{\mathfrak{q}(H(\lambda)_i)\}_{i=1}^{r(\lambda)}$ as induced Hoffman subgraphs. We will show that $\mathfrak{g}$ satisfies {\rm (i)}--{\rm (vi)}.
{\rm (i)} This follows from the definition of associated Hoffman graphs directly.
{\rm (ii)} If $|N_\mathfrak{g}^{\textbf{\textit{f}}}(x')|>\lambda$ for some $x'\in V_{\textbf{\textit{s}}}(\mathfrak{g})$, then $\mathfrak{g}$ will contain $\mathfrak{h}^{(\lambda+1)}$ as an induced Hoffman subgraph. This leads to a contradiction. Suppose that a vertex $x$ with exactly $\lambda$ fat neighbors has a slim neighbor $y$ which satisfies $|N_\mathfrak{g}^{\textbf{\textit{f}}}(x,y)|=0$. Then $\mathfrak{g}$ will contain $\mathfrak{h}^{(\lambda, 1)}$ as an induced Hoffman subgraph, as $\mathfrak{g}$ is fat. This leads to a contradiction again.
{\rm (iii)} If $x$ has $(\lambda-1)^2+ 1$ non-neighbors in $Q_\mathfrak{g}(F)$, then $\mathfrak{g}$ will contain one of $\mathfrak{q}(H(\lambda)_i)$'s as an induced Hoffman subgraph. This gives a contradiction.
{\rm (iv)} By the definition of $\mathfrak{g}$, we have that the size $|N_\mathfrak{g}^{\textbf{\textit{s}}}(F)|$ of the quasi-clique $Q_\mathfrak{g}(F)$ satisfies $|N_\mathfrak{g}^{\textbf{\textit{s}}}(F)|\geq q\geq n'\geq M_2(\lambda) + 2(\lambda-1)^2+3$. Suppose that there are two non-adjacent vertices $x$ and $y$ in $Q_\mathfrak{g}(F)$. Then both $x$ and $y$ have at most $(\lambda-1)^2$ non-neighbors in $Q_\mathfrak{g}(F)$ by {\rm (iii)}. This implies that they have more than $M_2(\lambda)$ common neighbors in $Q_\mathfrak{g}(F)$, which contradicts the assumption $c \leq M_2(\lambda)$.
{\rm (v)} By {\rm (iv)}, the quasi-clique $Q_\mathfrak{g}(F)$ is a clique. If $x$ has $\lambda^2 -\lambda+ 1$ neighbors in $Q_\mathfrak{g}(F)$, then $\mathfrak{g}$ will contain $\mathfrak{c}_{\lambda^2 -\lambda+1}$ as an induced Hoffman subgraph. This gives a contradiction.
{\rm (vi)} Assume $N_\mathfrak{g}^{\textbf{\textit{f}}}(x)=\{F_1,F_2,\ldots, F_s\}$. Let $T=N_\mathfrak{g}^{\textbf{\textit{s}}}(x)-\{w\mid w$ is adjacent to one of $ F_i$ for $i=1,\ldots, s\}$. If $T=\emptyset$, then {\rm(vi)} follows immediately. So we may assume $|T|\geq1$. If $|T|\geq R(q-1, t)$, then $T$ contains a clique of size at least $q-1$. By the definition of associated Hoffman graphs, there will be one more new fat vertex with respect to a quasi-clique containing $x$ and a clique of size at least $q-1$ in $T$ which can be attached to the vertex $x$. This contradicts the fact that $x$ has exactly $s$ fat neighbors. So we obtain $1\leq |T| \leq R(q-1,t)-1$. Let $w'$ be a vertex in $T$. We have concluded that $w'$ lies in a quasi-clique with size at least $(\lambda^2-\lambda)R(q-1,t)+1$ before. Let $F'$ be the fat vertex with respect to this quasi-clique. Note that for each vertex $w\in(T-N_\mathfrak{g}^{\textbf{\textit{s}}}(F'))\cup\{x\}$, $w$ has at most $\lambda^2 -\lambda$ neighbors in the quasi-clique $Q_\mathfrak{g}(F')$ by {\rm (v)}. Thus there exists at least one vertex in $Q_\mathfrak{g}(F')$ which has no neighbors in $(T-N_\mathfrak{g}^{\textbf{\textit{s}}}(F'))\cup\{x\}$, as $|N_\mathfrak{g}^{\textbf{\textit{s}}}(F')|>(\lambda^2 - \lambda)(R(q-1, t)-1)+(\lambda^2 - \lambda)\geq(\lambda^2 - \lambda)|(T-N_\mathfrak{g}^{\textbf{\textit{s}}}(F'))\cup\{x\}|$. Let $y$ be such a vertex in $Q_\mathfrak{g}(F')$. This shows the existence.
\end{proof}
Now, we are in the position to prove Theorem \ref{main sesqui}.
\noindent{\it Proof of Theorem \ref{main sesqui}.} Let $C(\lambda):=C'(\lambda,n')$, where $n'$ and $C'(\lambda,n')$ are such that Theorem \ref{thm:pre} holds. Let $G$ be a connected sesqui-regular graph with parameters $(v,k,c)$, where $k\geq C(\lambda)$, and with smallest eigenvalue at least $-\lambda$. We may assume $v-k-1>\frac{(\lambda-1)^2}{4}+1$. By Theorem \ref{thm:pre}, there exists a fat Hoffman graph $\mathfrak{g}$ which satisfies properties {\rm (i)--(vi)} in Theorem \ref{thm:pre}.
Let $x$ be a slim vertex of $\mathfrak{g}$. Suppose $N_\mathfrak{g}^{\textbf{\textit{f}}}(x)=\{F_1,F_2,\ldots,F_s\}$, where $s \leq \lambda$ by Theorem \ref{thm:pre} {\rm (ii)}. Let $y$ be a vertex at distance $2$ from $x$ in $G$ and $F'$ a fat neighbor of $y$ such that Theorem \ref{thm:pre} {\rm (vi)} holds. By Theorem \ref{thm:pre} {\rm (iv)}, $y$ does not lie in any of the quasi-cliques $Q_\mathfrak{g}(F_i)$'s. Now we look at the set $N_\mathfrak{g}^{\textbf{\textit{s}}}(x,y)$.
First, consider the case where every slim neighbor of $x$ lies in one of the quasi-clique $Q_\mathfrak{g}(F_i)$. By Theorem \ref{thm:pre} {\rm (v)}, $y$ has at most $\lambda^2 - \lambda$ neighbors in each $Q_\mathfrak{g}(F_i)$. Therefore, $c=|N_\mathfrak{g}^{\textbf{\textit{s}}}(x,y)|\leq s(\lambda^2-\lambda)\leq\lambda(\lambda^2-\lambda)=\lambda^2(\lambda-1)$.
Now suppose that there are slim neighbors of $x$ which do not lie in any of $Q_\mathfrak{g}(F_i)$'s. Then $s \leq \lambda-1$ by Theorem \ref{thm:pre} {\rm (ii)}. Theorem \ref{thm:pre} {\rm (vi)} says that every vertex $z\in N_\mathfrak{g}^{\textbf{\textit{s}}}(x,y)$ lies in either one of $Q_\mathfrak{g}(F_i)$'s or $Q_\mathfrak{g}(F')$. Note that $x$ has at most $\lambda^2 - \lambda$ neighbors in $Q_\mathfrak{g}(F')$, and $y$ has at most $\lambda^2 - \lambda$ neighbors in each $Q_\mathfrak{g}(F_i)$, thus $c=|N_\mathfrak{g}^{\textbf{\textit{s}}}(x,y)|\leq (\lambda^2-\lambda)+s(\lambda^2-\lambda)\leq\lambda(\lambda^2-\lambda)=\lambda^2(\lambda-1)$.
This completes the proof.
\qed
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Preparation of Schr\"{o}dinger cat states with cold ions in a cavity beyond the
Lamb-Dicke limit}
\author[dfreitas]{Dagoberto S. Freitas}
\ead{[email protected]} \ead[url]{http://www.uefs.br}
\author[jrocha]{Jairo R. de Oliveira}
\ead{[email protected]} \ead[url]{http://www.ufrpe.br}
\address[dfreitas]{Departamento de Fisica, Universidade Estadual de Feira de Santana,
44036-900 Feira de Santana, BA, Brazil}
\address[jrocha]{Departamento de Fisica, Universidade Federal Rural de Pernambuco,
52171-900 Recife, PE, Brazil}
\begin{abstract}
We investigate the dynamics of a cold trapped ion coupled to the
quantized field inside a high-finesse cavity. We use an
approach for preparing {\bf SC} states of motion of the ion. This
approach, based on a unitary transformation of the Hamiltonian, allows
its exact diagonalization without performing the Lamb-Dicke
approximation. We show that it is possible to generate {\bf SC} states
from a rather simple initial state preparation, e.g., the vacuum
state for both the cavity field and the ion motion.
\end{abstract}
\begin{keyword}
cat states \sep cold ions \sep beyond Lamb-Dicke limit
\PACS 42.50.Vk \sep 03.65.-w \sep 32.80.Pi
\end{keyword}
\end{frontmatter}
The manipulation of simple quantum systems such as trapped ions
\cite{wine98} has opened new possibilities regarding not only the
investigation of foundations of quantum mechanics, but also
applications on quantum information. In such a system, the internal
degrees of freedom of an atomic ion may be coupled to the
electromagnetic field as well as to the motional degrees of freedom
of the ion's center of mass. This system allows the preparation of
nonclassical states of the vibrational motion of the ion
\cite{cira1,vogel}. In fact, the generation of Fock, coherent,
squeezed, \cite{meek} and Schr\"{o}dinger-cat states \cite{monr} has
been already accomplished. Under certain circumstances, in which
full quantization of the three sub-systems becomes necessary, it is
reasonable to consider a single trapped ion interacting with a
quantized cavity field in such a way that the quantized field is
coupled to the atom.
new possibilities. Within that realm, it has been already
investigated the influence of the field statistics on the ion
dynamics \cite{zeng94,knight98}, as well as the transfer of
coherence between the motional states and the field
\cite{parkins99}, a scheme for generation of matter-field Bell-type
states \cite{ours01}, and even propositions of quantum logic gates
\cite{gates}.
In this paper we explore further the consequences of having the
trapped ion in interaction with a quantized field. Here we adopt an
approach to this problem. We depart from the full ion-field cavity
Hamiltonian, and perform a unitary transformation that allows us to
obtain a diagonalized Hamiltonian without the rotating-wave
approximation (RWA)\cite{ours99}. We aim to prepare {\bf SC}
states beyond the Lamb-Dicke limit (LDL). A {\bf SC} state is a superposition
of macroscopically distinguishable states. Experimentally,
\textbf{SC} states have been obtained by the NIST group with a single cold
$^{9}Be^{+}$ ion\cite{gerry97}.
We consider a single trapped ion, within a Paul trap, placed inside
a high finesse cavity, and having a cavity mode coupled to the
atomic ion \cite{semiao02}. The vibrational motion is also coupled
to the field as well as to the ionic internal degrees of freedom, in
such a way that the Hamiltonian reads \cite{knight98,ours01}
\begin{equation}
\hat{H}=\hbar\nu \hat{a}^{\dagger}\hat{a} +
\hbar\omega\hat{b}^{\dagger}\hat{b} +\hbar\frac{\omega_0}{2}\sigma_z
+\hbar g(\sigma_+ + \sigma_-)(\hat{b}^{\dagger}+
\hat{b})\cos\eta(\hat{a}^{\dagger}+\hat{a}), \label{H}
\end{equation}
where $\hat{a}^{\dagger}(\hat{a})$ denote the creation
(annihilation) operators of the center-of-mass vibrational motion of
the ion (frequency $\nu$), $\hat{b}^{\dagger}(\hat{b})$ are the
creation (annihilation) operators of photons in the field mode
(frequency $\omega$), $\omega_0$ is the atomic frequency transition,
$g$ is the ion-field coupling constant, and $\eta=2\pi a_0/\lambda$
is the Lamb-Dicke parameter, being $a_0$ the amplitude of the
harmonic motion and $\lambda$ the wavelength of light. Typically the
ion is well localized, confined in a region much smaller than
light's wavelength, i.e., $\eta\ll 1$ (Lamb-Dicke regime). Usually
expansions up to first order in $\eta$ are made in order to
simplify Hamiltonians involving trapped ions, which results in
Jaynes-Cummings-like Hamiltonians. However, even for small values of
the Lamb-Dicke parameter, with an expansion up to second order in
$\eta$, interesting effects, such as long time scale revivals, are
observed\cite{semiao02}.
We first apply the unitary transformation\cite{ours99}
\begin{equation}
\hat{T} = {1\over \sqrt{2}} \left\{ {1\over 2} \left[
\hat{D}^{\dagger}\left(\beta \right) + \hat{D}\left(\beta \right)
\right]\hat{I} + {1\over 2} \left[
\hat{D}^{\dagger}\left(\beta\right) -\hat{D}\left(\beta \right)
\right] \hat{\sigma}_z +\hat{D}\left(\beta \right)\hat{\sigma}_{+}
-\hat{D}^{\dagger}\left(\beta \right) \hat{\sigma}_{-}
\right\},\label{T}
\end{equation}
to the Hamiltonian in Eq. (\ref{H}), where
$\hat{D}(\beta)=\exp(\beta\hat{a}^\dagger- \beta^*\hat{a})$ is
Glauber's displacement operator, with $\beta=i\eta/2$, we obtain the
following transformed Hamiltonian
\begin{equation}
\hat{\cal H} \equiv \hat{T}\hat{H}\hat{T}^{\dagger} = \hbar \nu
\hat{a}^{\dagger}\hat{a}+ \hbar \omega \hat{b}^{\dagger}\hat{b} +
\hbar g(\hat{b}^{\dagger}+\hat{b})\hat{\sigma}_z
-i\hbar{\eta\nu\over 2} \left[ (\hat{a}^{\dagger}-\hat{a})
-i\frac{\omega_o}{\eta\nu}\right]
\left(\hat{\sigma}_{-}+\hat{\sigma}_{+}\right)+ \hbar \nu {\eta^2
\over 4}.\label{HT0}
\end{equation}
This result holds for any value of the Lamb-Dicke parameter $\eta$.
We consider the regime $\eta\nu\gg g$, i.e., when the ion-field coupling
constant is much smaller than the frequency of the ion in the trap
($\nu$). In this regime the Hamiltonian in Eq.~(\ref{HT0}) becomes
\begin{equation}
\hat{\cal H} = \hbar \nu \hat{a}^{\dagger}\hat{a}+ \hbar \omega
\hat{b}^{\dagger}\hat{b} -i\hbar{\eta\nu\over 2} \left[
(\hat{a}^{\dagger}-\hat{a}) -i\frac{\omega_o}{\eta\nu}\right]
\left(\hat{\sigma}_{-}+\hat{\sigma}_{+}\right)+ \hbar \nu {\eta^2
\over 4}.\label{HT1}
\end{equation}
The time evolution of the state vector, for an initial state
$|\psi(0)\rangle$ is
\begin{eqnarray}
|\psi(t) \rangle & = & \hat{T}^\dagger\hat{U}_{T}(t)\hat{T}|\psi(0)\rangle \nonumber
\\
\\
& = & \hat{U}(t)|\psi(0)\rangle ,\nonumber
\end{eqnarray}
where $\hat{U}_{T}(t)=\exp{(-i\hat{\cal H}t/\hbar)}$ is the
evolution operator in the transformation representation and
$\hat{U}(t)= \hat{T}^\dagger\hat{U}_{T}(t)\hat{T}$ is the time
evolution operator in the original representation. After some
algebra, the time evolution operator in the original representation
is
\begin{eqnarray}
\hat{U}(t)&=& \frac{e^{-i\omega\hat{b}^\dagger \hat{b}t}
e^{-i\nu\hat{a}^\dagger \hat{a}t}}{2} \left\{
\left[D^\dagger(\beta)+D(\beta)\right]-
\left[D^\dagger(\beta)-D(\beta)\right]\sigma_{z}\right\} \nonumber \\
&&\left[\cos\left(\frac{\omega_{o}t}{2}\right)+
i\sigma_{x}\sin\left(\frac{\omega_{o}t}{2}\right)\right]
\end{eqnarray}
Consider the state vector with the following initial
condition for the ion-field state
\begin{equation}
|\psi(0) \rangle=|0\rangle_{\nu}|0\rangle_{\omega}\left[{1\over
\sqrt{2}}\left(|e\rangle + |g\rangle \right)\right],
\end{equation}
i.e., the field prepared in a vacuum state $|0\rangle_{\omega}$ and the
ion's center-of-mass motion prepared in a vacuum state
$|0\rangle_{\nu}$, and the ion's internal levels prepared in a
superposition of atomic states. The time evolution of the state
vector is given by
\begin{equation}
|\psi(t) \rangle = \frac{e^{-\frac{i\omega_{o}t}{2}}}{\sqrt{2}}
\left[|e^{-i\nu t}\beta\rangle|e\rangle + |-e^{-i\nu
t}\beta\rangle|g\rangle\right]|0\rangle_{\omega}.
\end{equation}
Applying a pulse\cite{feng01}
\begin{equation}
\hat{V} = {1\over \sqrt{2}}\left(
\begin{array}{cc}
1 & 1 \\
-1 & 1 \\
\end{array}
\right)
\end{equation}
on the ion, a superposition of coherent states is obtained
\begin{equation}
\hat{V}|\psi(t) \rangle =
\frac{e^{-\frac{i\omega_{o}t}{2}}}{\sqrt{2}} \left(\Phi_{+}|e\rangle
+ \Phi_{-}|g\rangle\right)|0\rangle_{\omega}
\end{equation}
with the {\bf SC}
\begin{equation}
\Phi_{\pm} = {1\over \sqrt{2}}\left(|e^{-i\nu t}\beta\rangle \pm
|-e^{-i\nu t}\beta\rangle\right).
\end{equation}
The resulting state above is an entangled state involving the ion's
internal (electronic) degrees of freedom, the vibrational motion and
the cavity field. If one measures the internal state of the ion
(either in $|g\rangle$ or $|e\rangle$) and the cavity field in the
vacuum state, that action will collapse $\hat{V}|\psi(t)
\rangle$ into one of the {\bf SC} states $\Phi_{\pm}$.
In summary, we have presented an approach for preparing the {\bf SC}
states of motion of a cold trapped ion placed inside a high-Q cavity.
This approach, based on a unitary transformation of the Hamiltonian,
allows its exact diagonalization without performing the Lamb-Dicke
approximation. We have shown that it is possible to generate {\bf
SC} states from a rather simple initial state preparation, e.g., the
vacuum state for both the cavity field and the ion motion.
\end{document} |
\begin{document}
\begin{abstract}
We investigate the problem of quantifying contraction coefficients of Markov transition kernels in Kantorovich ($L^1$ Wasserstein) distances. For diffusion processes, relatively precise quantitative bounds on contraction rates have recently been derived by combining appropriate couplings with carefully designed Kantorovich distances.
In this paper, we
partially carry over this approach from diffusions to Markov chains.
We derive quantitative lower bounds on contraction rates for Markov chains on general state spaces that are
powerful if the dynamics is dominated by small local moves. For Markov chains
on $\mathbb R^d$ with isotropic transition kernels, the general bounds can be used efficiently together with a coupling that combines maximal and reflection coupling.
The results are
applied to Euler discretizations of stochastic differential equations with non-globally contractive drifts, and to the Metropolis adjusted Langevin algorithm for sampling
from a class of probability measures on high dimensional state spaces that are not globally log-concave.
\end{abstract}
\maketitle
\section{Introduction}
In recent years, convergence bounds for Markov processes in
Kantorovich ($L^1$ Wasserstein) distances have emerged as a powerful alternative to more traditional approaches based on the total variation distance \cite{MT}, spectral gaps and $L^2$ bounds
\cite{JSTV, DSC, DLP}, or entropy estimates \cite{JSTV,DSC,BGL}. In particular, Hairer, Mattingly and Scheutzow have developed an analogue to Harris' Theorem assuming only
local strict contractivity in a Kantorovich distance on the \enquote{small} set
and a Lyapunov condition combined with non-strict contractivity outside, cf.\ \cite{HM,HMS}. Meanwhile there have been numerous extensions and applications of their result \cite{HSV, CH, Butkovsky, DFM}.
In \cite{JO}, Joulin and Ollivier have shown that
strict Kantorovich contractivity of the transition kernel implies bounds
for the variance and concentration estimates for ergodic averages of
a Markov chain.
Their results have since been extended to cover more general frameworks by Paulin \cite{Paulin}.
More recently, Pillai and Smith \cite{PS} as well as Rudolf and Schweizer \cite{RS} have developed a
perturbation theory for Markov chains that are contractive in a
Kantorovich distance, cf.\ also Huggins and Zou \cite{HugginsZou} as well as Johndrow and Mattingly \cite{JM} for related results. These works show that variants of the results in \cite{JO} carry over to perturbations
of the original chain, thus paving the way for a much broader range
of applications.
All the works mentioned above assume that, at least locally, strict contractivity
holds w.r.t.\ an $L^1$ Wasserstein distance based on some
underlying distance function on the state space of the Markov chain.
The contraction rate is the key quantity in the resulting bounds, and
it is hence important to develop applicable methods for quantifying
contraction rates.
Contractivity with respect to the $L^1$ Wasserstein distance based
on the Euclidean distance in $\mathbb{R}^d$ is sometimes interpreted as non-negative
Ricci curvature of the Markov chain w.r.t.\ this metric \cite{RenesseSturm,JO,Ollivier}.
This is a strong condition that is often not satisfied in applications.
However, in many cases it is still possible to obtain contractivity with respect to a Kantorovich distance in which the underlying distance function has been modified accordingly. This allows for applying the results from \cite{JO} to a significantly broader class of examples. For diffusion processes, a corresponding approach to
quantitative contraction rates in appropriately designed metrics has been developed systematically in recent years in a series of papers \cite{EberleCR,Eberle2015,Zimmer,EGZ,EZ}, see also \cite{CWa,CW,WangNeumann} for previous results. The approach has been extended
to L\'evy driven SDEs in \cite{Majka17, Majka}, see also \cite{LuoWang,JianWangBernoulli}.
Below we propose a corresponding approach for Markov chains on general metric state spaces. The approach is powerful in situations where the dynamics is dominated by small, local moves. This will be demonstrated below for Euler schemes for non-globally contractive stochastic differential equations, as well as for the Metropolis-adjusted Langevin Algorithm (MALA). In these cases, the Ricci curvature condition required in \cite{JO} is not satisfied in the standard $L^1$ Wasserstein distance and hence the construction of an alternative metric is required. For dynamics dominated by large or global moves, our approach does not apply in the form presented here. Sometimes, related approaches
can be used nevertheless, see e.g.\ \cite{BEZ} for the construction of a contractive distance for Hamiltonian Monte Carlo.
\section{Main results}
Let $p(x, dy)$ be a Markov transition kernel on a separable metric
space $(S,d)$. To study contraction properties of $p$ we
construct distance functions $\rho :S\times S\to [0,\infty )$ by transforming the metric $d$ in an appropriate way. Note that if $f_0: [0,\infty) \to [0,\infty)$ is a concave, increasing function with $f_0(0) = 0$ and $f_0(r) > 0$ for $r \neq 0$, then $\rho(x,y) = f_0(d(x,y))$ is a metric on $S$. More generally,
let $a,\delta \ge 0$ be non-negative constants, and let $V\colon S\to [0,\infty )$ be a non-negative measurable function on $S$. We set
\[
f:=f_0+a\1_{(0,\infty )} ,
\]
and we consider distance functions of the form
\begin{equation}
\label{eq:1}
\begin{split}
\rho (x,y) &= f(d(x,y))+\delta\, (V(x)+V(y))\1_{x\neq y}\\
&= f_0(d(x,y))+(a+\delta V(x)+\delta V(y))\1_{x\neq y} \,.
\end{split}
\end{equation}
We assume that $f_0$ is continuous and in some of our results we will choose $a=0$ and $V\equiv 0$. {Similar but slightly different classes of distance functions have been used e.g.\ in \cite{HM2,HMS,Butkovsky,EGZ} to study properties of Markov chains and diffusion processes.}
For probability measures
$\mu$ and $\nu$ on $S$, the Kantorovich distance ($L^1$
Wasserstein distance) $\mathcal W_\rho (\mu ,\nu )$ based on the
underlying distance function $\rho$ is defined as
\begin{equation}
\label{eq:2}
\mathcal W_\rho (\mu ,\nu )\ =\inf_{X\sim\mu ,Y\sim\nu} \mathbb{E}[\rho (X,Y)] .
\end{equation}
Here the infimum is over all couplings of $\mu$ and $\nu$, i.e.,
over all random variables $X,Y$ defined on a common probability
space $(\Omega ,\mathcal A,\mathbb{P} )$ such that $\mathbb{P}\circ X^{-1}=\mu$
and $\mathbb{P}\circ Y^{-1}=\nu $.
For $f_0\equiv 0$, $a=1$ and $V\equiv 0$, $\mathcal W_\rho$ coincides with the total variation distance $d_{\text{TV}}(\mu,\nu)$ (or with $d_{\text{TV}}(\mu,\nu)/2$, depending on the convention used in the definition of the total variation distance),
whereas for $f_0(r)=r$, $a=0$ and $V\equiv 0$, $\mathcal W_\rho$ is the
standard $L^1$ Wasserstein distance $\mathcal W_d$ on $(S,d)$.
The distance functions we consider are in between these two extremes. Notice, however, that if $a>0$ then
\begin{equation}
\label{eq:3}
d_{\mathrm{TV}}(\mu ,\nu )\le\ a^{-1} \mathcal W_\rho (\mu ,\nu ),
\end{equation}
and if $f(r)\ge br$ for some constant $b>0$ then
\begin{equation}
\label{eq:4}
\mathcal W_d (\mu ,\nu )\le\ b^{-1} \mathcal W_\rho (\mu ,\nu ).
\end{equation}
Therefore, in these cases, contraction properties w.r.t.\ $\mathcal W_\rho$ directly
imply upper bounds for the total variation and $L^1$ Wasserstein
distances w.r.t.\ the metric $d$.
We now assume that we are given a {\em Markovian coupling} of the
transition probabilities $p(x,\wc )$ ($x\in S$) in the form of
measurable maps $X',Y':\Omega\to S$, defined on a measurable space $(\Omega ,\mathcal A)$, and a probability kernel $(x,y,A)\mapsto \mathbb{P}_{x,y}(A)$ from $S\times
S\times\Omega $ to $[0,1]$ such that for any $x,y\in S$,
\begin{equation}
\label{eq:4a}
X' \sim p(x,\wc )\qquad \mbox{and}\qquad
Y' \sim p(y,\wc )\qquad \mbox{under }\mathbb{P}_{x,y}.
\end{equation}
For probability measures $\mu$ on $S$ and $\gamma$ on $S\times S$ let
$(\mu p)(B) = \int \mu(d x) p(x,B)$ for $B\in\mathcal B(S)$, and $\mathbb{P}_\gamma (A ) = \int\gamma(d x\, d y)
\mathbb{P}_{x,y}(A )$ for $A\in\mathcal A$. Note that if $\gamma$ is a
coupling of two probability measures $\mu$ and $\nu$ on $S$, then
under $\mathbb{P}_\gamma$ the joint law of $(X',Y')$ is a coupling of the probability measures $\mu p$ and $\nu p$, i.e.,
\begin{equation}\label{eq:4b}
X'\sim \mu p\qquad\mbox{and}\qquad Y'\sim\nu p\qquad\mbox{under }\mathbb{P}_\gamma .
\end{equation}
Our goal is to derive explicit bounds of the form
\begin{equation}
\label{eq6}
\mathbb{E}_{x,y}[\rho (X',Y')] \le (1-c) \rho (x,y)\qquad\mbox{for any }
x,y\in S,
\end{equation}
where $c$ is a strictly positive constant. Here the choice of the metric $\rho$ is adapted in order to maximize the value of $c$ in
our bounds. If \eqref{eq6} holds, then the transition kernel $p$ is
a strict contraction w.r.t.\ the distance $\mathcal W_\rho$.
\begin{lemma}\label{lem:a}
Suppose that \eqref{eq6} holds for all $x,y\in S$. Then
\begin{equation}
\label{eq7}
\mathcal W_\rho (\mu p,\nu p)\le (1-c) \mathcal W_\rho (\mu ,\nu )\qquad\mbox{for all }\mu ,\nu\in {\mathcal P}(S).
\end{equation}
\end{lemma}
\begin{proof}
Let $\mu$ and $\nu$ be probability measures on $S$ and suppose that $\gamma $ is a coupling of $\mu$ and $\nu$. Then, under
$\mathbb{P}_\gamma $, the joint law of $(X',Y')$ is a coupling of
$\mu p$ and $\nu p$. Therefore by \eqref{eq6},
\begin{eqnarray*}
\mathcal W_\rho (\mu p ,\nu p) &\le &\mathbb{E}_\gamma [\rho (X',Y')]
=\int \mathbb{E}_{x,y}[\rho (X',Y')] \gamma (\dd x\, \dd y)\\
&\le & (1-c) \int\rho (x,y) \gamma (\dd x\, \dd y).
\end{eqnarray*}
The assertion follows by taking the infimum over all couplings of
$\mu $ and $\nu$.
\end{proof}
In the terminology of Joulin and Ollivier \cite{JO}, \eqref{eq7} says
that the Markov chain has a Ricci curvature lower bound $c$ on the
metric space $(S,\rho )$. By general results, such a bound has many important consequences including quantitative convergence to a
unique equilibrium \cite{EberleMP}, upper bounds on biases and variances as well as concentration inequalities for ergodic averages
\cite{JO,Paulin}, a central limit theorem for ergodic averages \cite{KW},
robustness under perturbations \cite{PS,RS,HugginsZou,JM}, etc.
{However, in applications, it is usually not clear how to choose a distance function $\rho$ such that we have good bounds for $c$.}
This is the problem addressed in this paper
for the case of a ``local dynamics'' where the Markov chain is mainly making
``small'' moves. Depending on whether or not the probability measures $p(x,\cdot )$ and $p(y,\cdot )$ have a significant overlap for $x$ close to $y$, we suggest
two different approaches.
\subsection{Contractivity with positive coupling probability}
\label{sec:firstresult}
Our first two general results apply in situations where the probability measures $p(x,\wc )$ and $p(y,\wc )$ have a significant overlap if
$x$ and $y$ are sufficiently close. In this case we can always consider a coupling $((X',Y'),\mathbb{P}_{x,y})$ of the transition probabilities such that
$\mathbb{P}_{x,y}[X'=Y']>0$ for $x$ close to $y$. This enables us to obtain strict
contractivity in metrics that have a total variation part, i.e., the function
$f$ defining the underlying distance has a discontinuity $a>0$ at $0$.
To state the results, we fix a positive constant $\varepsilon >0$ and couplings
$((X',Y'),\mathbb{P}_{x,y})$ as above. For $x,y\in S$ we set
\begin{align}\label{eq:5}
r&=d(x,y),& R'&=d(X',Y'),&\Delta R&=R'-r,
\end{align}
and we define
\begin{align}
\label{eq:8}\beta (x,y) &= \mathbb{E}_{x,y}[\Delta R] ,\\
\label{eq:9}\alpha (x,y) &= \mathbb{E}_{x,y}\left[ \lvert(\Delta R)^-\wedge \varepsilon \rvert^2\right] ,\\
\label{eq:10}\pi (x,y) &= \mathbb{P}_{x,y}[R'=0] ,
\end{align}
{where $(\Delta R)^-=\max (-\Delta R,0)$. In particular,}
\begin{equation}\label{eq:alphalb}
\alpha (x,y)\ \ge \ \mathbb{E}_{x,y}[ (\Delta R)^2 \1_{\{ R' \in (r - \varepsilon, r ) \} } ].
\end{equation}
{One can think of $\beta (x,y)$ as a drift for the coupling distance, whereas $\alpha (x,y)$ provides a lower bound for fluctuations that decrease the distance and $\pi (x,y)$ is the probability of coupling successfully in the next step.}
Suppose that there exist functions $\overline{\beta }:(0,\infty )\to\mathbb{R} $ and $\underline{\alpha},\underline{\pi}:(0,\infty )\to [0,\infty )$ such that
for any $r>0$ and $x,y\in S$ with $d(x,y)=r$,
\begin{equation}\label{eq:11}
\beta (x,y)\ \le\ \overline{\beta} (r),\quad \alpha (x,y)\ \ge \ \underline{\alpha} (r),\quad \mbox{and}\quad
\pi (x,y)\ge \underline{\pi} (r).
\end{equation}
Hence $\overline{\beta} (r)$ is an upper bound for the expectation of the increase
$\Delta R$ of the distance during a single transition step of coupled Markov
chains with initial states $x$ and $y$ such that $d(x,y)=r$. Similarly,
$\underline{\alpha} (r)$ is a lower bound for distance decreasing fluctuations of $\Delta R$,
and $\underline{\pi} (r)$ is a lower bound for the coupling probability. We make the following assumptions on $\underline{\alpha} ,\overline{\beta} $ and $\underline{\pi}$:
\begin{enumerate}[label=(A\arabic*)]
\item\label{enum:a1} There exists a positive constant $r_0\in (0,\infty )$ such that
\begin{enumerate}
\item\label{enum:a1i} $\inf_{r\in (0,r_0]}\underline{\pi} (r)\ >\ 0$,\qquad {and}
\item\label{enum:a1ii} $\inf_{r\in (r_0,s)}\underline{\alpha} (r)\ >\ 0$\quad for any $s\in (r_0,\infty )$.
\end{enumerate}
\item\label{enum:a2} $\sup_{r\in (0,s)}\overline{\beta} (r)<\infty$\quad for any $s\in (0,\infty )$.
\item\label{enum:a3} $\limsup_{r\to\infty}r^{-1}\overline{\beta} (r)\ <\ 0$.
\end{enumerate}
\begin{thm}\label{thm:1}
Suppose that \ref{enum:a1}, \ref{enum:a2} and \ref{enum:a3} are satisfied, and let
\begin{equation}\label{eq:13}
\rho (x,y)=f(d(x,y)) \,,
\end{equation}
where $f:[0,\infty )\to [0,\infty )$ is the concave increasing function defined in
\eqref{eq:5000} below. Then for any $x,y\in S$,
\begin{equation}
\label{eq:14}
\mathbb{E}_{x,y}[\rho (X',Y')]\le(1-c) \rho (x,y) \,,
\end{equation}
where $c$ is an explicit strictly positive constant defined in \eqref{eq:5f} below.
\end{thm}
The proof is given in Section \ref{sec:proofs12}. Explicit
expressions for the function $f$ and the contraction rate $c$ depending only on $\underline{\alpha} ,\overline{\beta} ,\underline{\pi} $ and $\varepsilon$ are given in Subsection \ref{subsec:choiceOfMetricThm1}. Although these
expressions are somehow involved, they can be applied to derive
quantitative bounds in concrete models. In particular, the
asymptotic dependence of the contraction rate on parameters of
the model can often be made explicit. This will be demonstrated for the Euler scheme in Section \ref{sec:euler}.
By Lemma \ref{lem:a}, Theorem \ref{thm:1} implies that the transition kernel $p$
is contractive with rate $c$ w.r.t.\ the $\mathcal W_\rho$ distance on probability
measures on $S$. Since the function $f$ defined in \eqref{eq:5000} is bounded from below by a multiple of both $\1_{(0,\infty )}$ and of the identity, the theorem yields
quantitative bounds for convergence to equilibrium both w.r.t.\ the total
variation and the standard $L^1$ Wasserstein distance.
The assumption \ref{enum:a3} imposed in Theorem \ref{thm:1} is sometimes too restrictive. By a modification of
the metric, it can be replaced by the following Lyapunov condition:
\begin{enumerate}[resume, label=(A\arabic*)]
\item\label{enum:a4} There exist a measurable function $V:S\to [0,\infty )$ and
$C$, $\lambda\in (0,\infty )$ s.t.
\begin{enumerate}
\item \label{enum:a4i}$pV\ \le\ (1-\lambda ) V + C,\qquad\mbox{and}$
\item \label{enum:a4ii}$\inf\nolimits_{d(x,y)=r}\frac{V(x)+V(y)}{\overline{\beta} (r)^+}\ \longrightarrow\
\infty\qquad\mbox{as }r\to\infty $.
\end{enumerate}
\end{enumerate}
In \eqref{enum:a4ii} we use the convention that the value of the fraction is $+\infty$ if $\overline{\beta} (r)\le 0$.
\begin{thm}\label{thm:2}
Suppose that \ref{enum:a1}, \ref{enum:a2} and \ref{enum:a4} are satisfied, and let
\begin{equation}\label{eq:15}
\rho (x,y)=f(d(x,y)) + \frac{M}{2C} (V(x)+V(y)) \1_{x\neq y} \,,
\end{equation}
where $f\colon [0,\infty )\to [0,\infty )$ is the concave increasing function defined in
\eqref{eq:6000} below, and the constant $M\in\mathbb{R}_+$ is defined in
\eqref{eq:6b}. Then for any $x,y\in S$,
\begin{equation}
\label{eq:16}
\mathbb{E}_{x,y}[\rho (X',Y')]\le(1-c) \rho (x,y) \,,
\end{equation}
where $c$ is an explicit strictly positive constant defined in \eqref{eq:6f} below.
\end{thm}
The proof of the theorem is given in Section \ref{sec:proofs12} and explicit expressions for the function $f$ and the constants $M$ and $c$
in terms of $\underline{\alpha} ,\overline{\beta} ,\underline{\pi} ,\varepsilon ,V,C$ and
$\lambda$ are provided in Subsection \ref{subsec:choiceOfMetricThm2}.
The idea of adding a Lyapunov function to the metric appears for example
in \cite{HairerConvergenceMP} and has been further worked out in the diffusion case in \cite{EGZ}. Theorem \ref{thm:2} can be seen as a more quantitative
version of Theorem 4.8 in \cite{HMS}, which is an extension of the classical Harris' Theorem.
Note, however, that contractivity in our result is expressed in an additive metric $\rho$, as opposed to the multiplicative semimetric used in \cite{HMS}; see also \cite{EGZ} for a more detailed discussion on these two types of metrics.
An application of Theorem \ref{thm:2} to the Euler scheme is given in Theorem \ref{thm:8b} below.
\subsection{Contractivity without positive coupling probability}
\label{sec:secondresult}
The assumption that there is a significant overlap between the measures $p(x,\wc )$ and $p(y,\wc )$ for $x$ close to $y$ is sometimes too restrictive. For example, it may cause a bad dimension dependence of the resulting bounds in high dimensional applications. Therefore,
we now state an alternative contraction result that applies even
when $\pi (x,y)=0$ for all $x$ and $y$.
For any $r\in (0,\infty )$ we consider an interval near $r$ given by
\begin{equation}
\label{eq:17}
I_r=(r-l(r),r+u(r))
\end{equation}
where $l(r),u(r)\ge 0$ and $l(r)\le r$. Similarly as in \eqref{eq:8} and \eqref{eq:9}, we define
\begin{align}
\label{eq:18a}
\beta (x,y) &= \mathbb{E}_{x,y}[\Delta R],\\
\label{eq:19a}
\alpha (x,y) &= \mathbb{E}_{x,y}\left[\lvert(\Delta R\wedge u(r))\vee (-l(r))\rvert^2\right] ,
\end{align}
where $r,R'$ and $\Delta R$ are defined by \eqref{eq:5}.
{In particular,}
\begin{equation}\label{eq:alphalb2}
\alpha (x,y)\ \ge \ \mathbb{E}_{x,y}[ (\Delta R)^2 \1_{\{ R' \in I_r \} } ].
\end{equation}
In Subsection \ref{sec:firstresult}, we have chosen $l(r)=\varepsilon$ and
$u(r)=0$, i.e., $I_r=(r-\varepsilon ,r)$. Now, we will assume instead that
there is a finite constant $r_0>0$ such that
\begin{equation}
\label{eq:18}
u(r)\ =\ 0\quad \mbox{for }r\ge r_0,\qquad \mbox{and}\qquad
u(r)=r_0\quad\mbox{for }r<r_0.
\end{equation}
As above, we assume that there exist functions
$\overline{\beta} :(0,\infty )\to\mathbb{R} $ and $\underline{\alpha} :(0,\infty )\to (0,\infty )$ such that
for any $r>0$ and $x,y\in S$ with $d(x,y)=r$,
\begin{equation}\label{eq:19}
\beta (x,y)\ \le\ \overline{\beta} (r)\quad\mbox{and}\quad \alpha (x,y)\ \ge \ \underline{\alpha} (r).
\end{equation}
We now impose the following conditions on $\underline{\alpha}$ and $\overline{\beta}$:
\begin{enumerate}[label=(B\arabic*)]
\item\label{enum:b1} $\inf\limits_{r\in (0,s)}\frac{\underline{\alpha} (r)}{r}\ >\ 0$\quad for any $s\in (0,\infty )$,
\item\label{enum:b2} $\sup\limits_{r\in (0,s)}\frac{\overline{\beta} (r)}{\underline{\alpha} (r)}\ <\ \infty$\quad for any $s\in (0,\infty )$,
\item\label{enum:b3} $\limsup_{r\to\infty}r^{-1}\overline{\beta} (r)\ <\ 0$.
\end{enumerate}
Thus we no longer assume a positive coupling probability for $r<r_0$.
Instead, we require in \ref{enum:b1} and \ref{enum:b2} that $\underline{\alpha} (r)=\Omega (r)$ and
$\nicefrac{\overline{\beta} (r)}{\underline{\alpha} (r)}=O(1)$ as $r\downarrow 0$. These assumptions can be verified for
example for Euler schemes if the coupling is constructed carefully. We will do this
in Section
\ref{sec:euler} for Euler discretizations of SDEs with contractive drifts, whereas for more general drifts we
will follow a slightly different approach.
\begin{thm}\label{thm:3}
Suppose that \ref{enum:b1}, \ref{enum:b2} and \ref{enum:b3} are satisfied, and let
\begin{equation}\label{eq:20}
\rho (x,y)=f(d(x,y)) \,,
\end{equation}
where $f\colon [0,\infty )\to [0,\infty )$ is a continuous concave increasing function satisfying $f(0)=0$ which is defined explicitly in
\eqref{eq:8b} below. Then for any $x,y\in S$,
\begin{equation}
\label{eq:21}
\mathbb{E}_{x,y}[\rho (X',Y')]\le(1-c) \rho (x,y) \,,
\end{equation}
where $c$ is an explicit strictly positive constant defined in \eqref{eq:8f} below.
\end{thm}
The proof is given in Section \ref{sec:proof3}. Notice that in contrast to Theorem \ref{thm:1} and Theorem \ref{thm:2}, the function $f$
in Theorem \ref{thm:3} does not have a jump at $0$, i.e., the
Kantorovich metric $\mathcal W_\rho$ does not contain a total
variation part. This corresponds to the fact that under Assumptions
\ref{enum:b1}, \ref{enum:b2} and \ref{enum:b3}, it cannot be expected in general that the coupled
Markov chains meet in finite time.
\subsection{Stability under perturbations}\label{sec:perturbations}
Contractions in Kantorovich distances can sometimes be carried over
to small perturbations of a given Markov chain. For instance,
in Subsection \ref{sec:MALA} we will deduce contractivity for the
Metropolis adjusted Langevin algorithm from corresponding
properties of the Euler proposal chain. Suppose as above that
$((X',Y'),\mathbb{P}_{x,y})$ is a Markovian coupling of the transition
probabilities $p(x,\wc )$ and $p(y, \wc )$. Moreover, let
$((\widetilde X,\widetilde Y),{\mathbb{P}}_{x,y})$ be a corresponding coupling
of $\widetilde p(x,\wc )$ and $\widetilde p(y,\wc )$ for another (perturbed)
Markov transition kernel $\widetilde p$ on $S$. Here we assume that for
given $x,y\in S$, $(X',Y')$ and $(\widetilde X,\widetilde Y)$ are defined on a
common probability space. We start with a simple observation.
If there exists a metric $\rho$ on $S$ and a constant $c\in (0,\infty )$
such that for $x,y\in S$,
\begin{eqnarray}
\label{eq:22}
\mathbb{E}_{x,y}[\rho (X',Y')] &\le & (1-c) \rho (x,y),\qquad\qquad\mbox{and}\\
\label{eq:23}
{\mathbb{E}}_{x,y}[\rho (\widetilde X,\widetilde Y)] &\le &\mathbb{E}_{x,y}[\rho (X',Y')] +
\frac c2 \rho (x,y),\qquad\text{then}\\
\label{eq:24}
{\mathbb{E}}_{x,y}[\rho (\widetilde X,\widetilde Y)] &\le & \left(1-c/2\right) \rho (x,y).
\end{eqnarray}
In applications it is often difficult or even impossible to verify Condition \eqref{eq:23} for $x$ very close to $y$. If $\mathbb{P}_{x,y}
[\widetilde X=\widetilde Y]>0$ for $x$ close to $y$, then this condition can be
relaxed.
\begin{thm}\label{thm:4}
Suppose that $\rho (x,y)=f(d(x,y))$ for a concave increasing contraction $f\colon [0,\infty )\to [0,\infty )$ satisfying $f(0)=0$. Suppose
that there exist constants $c,b,p,r_0\in [0,\infty )$ such that for
all $x,y\in S$,
\begin{align}
\label{eq:27}
\mathbb{E}_{x,y}[\rho (X',Y')] &\le (1-c) \rho (x,y),\\
\label{eq:28}
{\mathbb{E}}_{x,y}\left[ ( d(\widetilde X,\widetilde Y)-d(X',Y'))^+\right] &\le b + \frac c2 \rho (x,y) ,\qquad\mbox{and}\\
\label{eq:29}
\mathbb{P}_{x,y}[\widetilde X=\widetilde Y] &\ge p\qquad\mbox{if }d(x,y)<r_0.
\end{align}
Assume that $p>0$, $b\le \nicefrac{cf(r_0)}{4}$, and let $\widetilde\rho$ be the
metric defined by
\begin{equation}
\label{eq:26}
\widetilde\rho (x,y)=\rho (x,y) + \frac{2b}{p} \1_{x\neq y} .
\end{equation}
Then
\begin{equation}
\label{eq:30}
{\mathbb{E}}_{x,y}\left[\widetilde\rho (\widetilde X,\widetilde Y)\right] \ \le\ \left(1-\frac 18\min (c,2p)\right) \widetilde\rho (x,y)\qquad\mbox{for all }x,y\in S.
\end{equation}
\end{thm}
The proof is given in Section \ref{sec:proof4}. In Section \ref{sec:proofofMALA} we will apply Theorem \ref{thm:4} to our results for the Euler scheme in order to obtain contractivity for the Metropolis adjusted Langevin algorithm (MALA).
{Note that Theorem \ref{thm:4} is related to the perturbation results in \cite{PS,RS,JM}. In all these papers, a Kantorovich contraction in some metric is assumed for the initially given unperturbed Markov chain. Then, in \cite{PS}, the authors obtain bounds on the distance to equilibrium of a perturbed Markov chain in the same Kantorovich metric. In \cite{RS, JM}, the metric also remains unchanged, but the object of interest is a bound on the distance between a perturbed and the unperturbed chain. A related result in continuous time, giving bounds on the distance between invariant measures of a perturbed and an unperturbed diffusion, has been obtained in \cite{HugginsZou}. In contrast to these results, we consider a perturbed metric in Theorem \ref{thm:4}, but we obtain a stronger result showing that the perturbed Markov chain is again contractive w.r.t.\ the modified metric.}
\subsection{Application to Euler schemes}\label{sec:euler}
We now show how to apply the general methods developed above to Euler
discretizations of stochastic differential equations of the form
\begin{equation}
\label{eqSDE}
\dd X_t=b(X_t) \dd t + \dd B_t \,,
\end{equation}
where $(B_t)_{t\ge 0}$ is a Brownian motion in $\mathbb{R}^d$, and $b:\mathbb{R}^d\to\mathbb{R}^d$ is
a Lipschitz continuous vector field.
Quantifying contraction rates for Euler discretizations is important in connection with
the derivation of error bounds for the unadjusted Langevin algorithm (ULA), cf.\ \cite{Dalalyan,DurmusMoulinesULA0,DurmusMoulinesULA1,DurmusMoulinesULA2,DalalyanKaragulyan}
for corresponding results. Such applications of the techniques presented below will be discussed in detail in the upcoming paper \cite{MajkaMijatovicSzpruch} by the second author.
The transitions of the Markov chain for the Euler scheme with step size $h>0$ are given by
\begin{equation}
\label{eqES1}
x\mapsto\hat x + \sqrt h Z,\quad \text{where}\quad \hat x\coloneqq x + hb(x)\quad\text{and}\quad
Z\sim N(0,I_d).
\end{equation}
The corresponding transition probabilities are given by
\begin{equation}
\label{eqES1a}
p(x,\wc)\ =\ N(\hat x,hI_d)\qquad\mbox{for any }x\in\mathbb{R}^d,
\end{equation}
i.e., the transition density from $x$ is
\begin{equation}
\label{eqES1b}
p(x,x')\ =\ \phi_{\hat x,hI_d}(x')\ =\ (2\pi h)^{-\nicefrac{d}{2}}
\exp \left( -\frac{1}{2h}|x'-\hat x|^2\right) .
\end{equation}
In the case of $b\equiv 0$, the Markov chain is a Gaussian random walk
with transitions $x\mapsto x+\sqrt hZ$.
\subsubsection*{The coupling}
For $x,y\in\mathbb{R}^d$ let
\begin{equation}
\label{equnit}
\hat e\ =\ \frac{\hat x-\hat y}{|\hat x-\hat y|}\qquad\mbox{if }\hat x\neq
\hat y,\qquad \hat e=
0 \quad\text{otherwise. }
\end{equation}
We consider the coupling of two transitions of the
Euler chain from $x$ and $y$ respectively given by
\begin{equation}
\label{eq:0c}
X' \ =\ \hat x + \sqrt hZ,\qquad Y'\ =\
\begin{cases}
X' &\mbox{if }U\le\nicefrac{\phi_{\hat y,hI}(X')}{\phi_{\hat x,hI}(X')},\\
Y_{\mathrm{refl}}^\prime &\mbox{otherwise,}
\end{cases}
\end{equation}
where $Z\sim N(0,I_d)$ and $U\sim \Unif (0,1)$ are independent
random variables, and
\begin{equation}
\label{eqRC}
Y_{\mathrm{refl}}^\prime\ =\ \hat y + \sqrt h (I_d-2\hat e\hat e^T)Z
\end{equation}
is obtained by adding to $\hat y$ the increment $\sqrt hZ$ added
to $\hat x$, reflected at the hyperplane between $\hat x$ and
$\hat y$.
\begin{figure}
\caption{Construction of the coupling of $p(x,\cdot )$ and $p(y,\cdot )$: Given
the value of $X'$, we set $Y'=X'$ with the maximal probability $\min (1,\nicefrac{p(y,X')}{p(x,X')})$.}
\label{fig:coupling}
\end{figure}
Both $(X',Y_{\mathrm{refl}}^\prime )$ and $(X',Y')$ are couplings of the probability measures $p(x,\wc)$ and $p(y,\wc )$.
For the coupling \eqref{eq:0c}, $Y'=X'$ with the maximal probability
$\min (1,\nicefrac{p(y,X')}{p(x,X')})$. Furthermore, in the case where $Y'\neq
X'$, the coupling coincides with the reflection coupling, i.e.,
$Y'=Y_{\mathrm{refl}}^\prime$. The resulting combination of reflection
coupling and maximal coupling is an optimal coupling of the Gaussian measures
$p(x,\wc )$ and $p(y,\wc )$ w.r.t.\ any Kantorovich distance
based on a metric $\rho (x,y)=f(\lvert x-y\rvert)$ with $f$ concave, cf.\
\cite{McCann} for the one-dimensional case.
We will not use the optimality here, but it shows that \eqref{eq:0c} is an appropriate coupling to consider if we
are interested in contraction properties for single transition steps of the
Markov chain.
\begin{remark}[Relation to reflection coupling of diffusion processes]
A reflection coupling of two copies of a diffusion process satisfying a stochastic differential equation of the form \eqref{eqSDE} is given by
\begin{equation}\label{eq:diffusionCoupling}
\begin{split}
\dd X_t &= b(X_t) \dd t + \dd B_t,\\
\dd Y_t &= b(Y_t) \dd t + (I-2e_te_t^T) \dd B_t\quad\mbox{for }t<T,
\quad X_t=Y_t\quad\mbox{for }t\ge T,
\end{split}
\end{equation}
where $e_t=\nicefrac{(X_t-Y_t)}{\lvert X_t-Y_t\rvert}$ and $T=\inf\{ t\ge 0:X_t=Y_t\}$ is the coupling time. Hence the noise increment is reflected up to the
coupling time, whereas after time $T$, $X_t$ and $Y_t$ move
synchronously. Our coupling in discrete time has a similar effect. If
$\hat x$ and $\hat y$ are far apart then the transition densities
$\phi_{\hat x,hI} $ and $\phi_{\hat y,hI}$ have little overlap,
and hence reflection coupling is applied with very high probability.
If, on the other hand, $\hat x$ and $\hat y$ are sufficiently close, then with a non-negligible probability, $X'=Y'$. Once both Markov
chains have reached the same position, they stick together since their transition densities coincide subsequently. In this sense, the coupling \eqref{eq:0c} is a natural discretization
of reflection coupling. Indeed, we would expect that as $h\downarrow 0$, the coupled Markov chains with
time rescaled by a factor $h$ converge in law to the reflection coupling \eqref{eq:diffusionCoupling} of the diffusion processes. {On the other hand, a coupling of Markov chains in which jumps are always reflected (i.e., a coupling without the positive probability of jumping to the same point) would converge as $h\downarrow 0$ to a reflection coupling of diffusions in which the coupled processes do not follow the same path after the coupling time.}
\end{remark}
We assume that under the probability measure $\mathbb{P}_{x,y}$, $(X',Y')$ is the coupling of $p(x,\wc)$
and $p(y,\wc)$ introduced above. We set
\begin{equation}
\label{eqchoicer0}
r_0 \coloneqq \sqrt h ,
\end{equation}
and we consider the intervals
\begin{equation}
\label{eq:int}
I_r\ =\ \begin{cases}
(0,r+\sqrt h )&\mbox{for }r<r_0,\\
(r-\sqrt h, r) &\mbox{for }r\ge r_0.\end{cases}
\end{equation}
Thus in the notation from Section \ref{sec:secondresult}, we set
\begin{equation}
\label{equl}
u(r)\coloneqq \sqrt h \1_{r<r_0},\qquad l(r) \coloneqq \sqrt h \1_{r\ge r_0}.
\end{equation}
For given $x,y\in\mathbb{R}^d$ let $
r = \lvert x-y\rvert$, $ \hat r = \lvert \hat x-\hat y\rvert$, $R'\ =\ \lvert X'-Y'\rvert$,
\begin{eqnarray}
\label{eqhatbeta}
\hat\beta (x,y) &=& \mathbb{E}_{x,y}[R'-\hat r],\\
\label{eqhatalpha}
\hat\alpha (x,y) &=& \mathbb{E}_{x,y}\left[ \lvert((R'-\hat r)\wedge u(\hat r))\vee (-l(\hat r))\rvert^2\right],\qquad\mbox{and}\\
\label{eqhatpi}
\pi (x,y) &=& \mathbb{P}_{x,y}[R'=0].
\end{eqnarray}
In particular,
\begin{equation}\label{eqhatalpha2}
\hat\alpha (x,y) \ \ge\ \mathbb{E}_{x,y}[(R' - \hat{r})^2 \1_{\{ R' \in I_{\hat{r}} \}}] \,.
\end{equation}
Notice that the definitions of $\hat\beta$ and $\hat\alpha$ differ from those of $\beta$ and $\alpha$ given in \eqref{eq:18a} and \eqref{eq:19a},
since $\hat\beta$ and $\hat\alpha$ take into account only the coupled
random walk transition step from $(\hat x,\hat y)$ to $(X',Y')$, but not
the deterministic transition from $(x,y)$ to $(\hat x,\hat y)$. We also
consider
\begin{eqnarray}
\label{eq:beta}
\beta (x,y)& =& \mathbb{E}_{x,y}[R'-r]\ =\ \hat\beta (x,y)+\hat r-r,\qquad\text{and}\\
\alpha (x,y)& =& \mathbb{E}_{x,y}\left[ \lvert((R'-r)\wedge u( r))\vee (-l( r))\rvert^2\right] .
\end{eqnarray}
\subsubsection*{Assumptions}
In our main result for the Euler scheme we assume that there exist
constants $J\in [0,\infty )$ and $K,L,\mathcal R\in (0,\infty )$
such that the following conditions hold:
\begin{enumerate}[label=(C\arabic*)]
\item\label{enum:c1} {\em One-sided Lipschitz condition:}
\[
(x-y)\cdot (b(x)-b(y))\le J \lvert x-y\rvert^2\qquad \mbox{for any }x,y\in\mathbb{R}^d.
\]
\item\label{enum:c2} {\em Strict contractivity outside a ball:}
\[
(x-y)\cdot (b(x)-b(y))\le -K \lvert x-y\rvert^2\qquad \mbox{if }\lvert x-y\rvert\ge\mathcal R.
\]
\item\label{enum:c3} {\em Global Lipschitz condition:}
\[
\lvert b(x)-b(y)\rvert\le L \lvert x-y\rvert\qquad \mbox{for any }x,y\in\mathbb{R}^d.
\]
\end{enumerate}
Notice that by \ref{enum:c2} and \ref{enum:c3}, $L\ge K$. Of course, \ref{enum:c3} implies \ref{enum:c1} with $J=L$. Note, however, that we can often choose $J$ much smaller than $L$, e.g., we can even
choose $J=0$ if $b=-\nabla U$ for a convex function $U\in C^2(\mathbb{R}^d)$.
The global Lipschitz condition is required for the stability of the Euler
scheme, but the constant $L$ will affect our lower bound for the contraction rate only in
a marginal way. On the other hand, our bound on the contraction rate will depend in an essential way on the one-sided Lipschitz constant
$J$.
The bounds provided in the next lemma are crucial to apply the techniques developed above to the Euler scheme.
\begin{lemma}\label{lem:6}
Let $x,y\in\mathbb{R}^d$, and let $r=\lvert x-y\rvert$ and $\hat r=\lvert\hat x-\hat y\rvert$. Then
\begin{enumerate}
\item \label{enum:lem6i} $\hat\beta (x,y)= 0$,
\item \label{enum:lem6ii} $\hat\alpha (x,y)\ge c_0 \min (\hat r,\sqrt h) \sqrt h,$
\qquad{and}
\item \label{enum:lem6iii} $\pi (x,y)\ge p_0 \1_{\hat r\le 2 \sqrt h}$,
\end{enumerate}
where $c_0,p_0\in (0,1 )$ are explicit universal constants
($c_0\ge 0.007$, $p_0\ge 0.15$). Furthermore, if the assumptions
\ref{enum:c1}, \ref{enum:c2} and \ref{enum:c3} hold true, then
\begin{enumerate}[resume]
\item\label{enum:lem6iv} $\beta (x,y)\ \le\ \min (L,J+\nicefrac{L^2h}{2}) hr$,
\item\label{enum:lem6v} $\beta (x,y)\ \le\ -(K-\nicefrac{L^2h}{2}) hr$\qquad if $r\ge\mathcal R$,
\item\label{enum:lem6vi} $\alpha (x,y)\ \ge\ \widetilde c_0h\1_{ r\ge \sqrt h}$\qquad if $ r\le
1/(4L\sqrt h)$,\qquad and
\item\label{enum:lem6vii} $\pi (x,y)\ \ge\ p_0 \1_{ r\le \sqrt h}$\qquad if $h\le 1/L$.
\end{enumerate}
Here $\widetilde c_0$ is an explicit universal constant
($\widetilde c_0\ge 0.0005$).
\end{lemma}
The proof of the lemma is contained in Section \ref{sec:proofsEuler}.
\subsubsection*{Contractive case}
At first, we consider the case where the deterministic part of the Euler
transition is a contraction, i.e.,
\begin{equation}
\label{eq:30*}
\hat r=\lvert\hat x-\hat y\rvert\le\lvert x-y\rvert=r\qquad\mbox{for any }x,y\in\mathbb{R}^d.
\end{equation}
In this simple case, we can prove a rather sharp result.
We choose a metric $\rho_a\colon \mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}_+$ of type
\begin{equation}
\label{eq:31}
\rho_a (x,y)\ =\ a\1_{x\neq y}\, +\, f_a(|x-y|),\qquad f_a(r)\ =\ \int_0^rg_a(s\wedge\mathcal R ) \dd s.
\end{equation}
Here $a$ is a non-negative constant, $\mathcal R$ is chosen as in Assumption \ref{enum:c2}, and $g_a\colon [0,\mathcal R]\to\mathbb{R}$ is an appropriately chosen decreasing function (see \eqref{eq:7**f} for $a=0$ and \eqref{eq:7**f2} for $a\neq 0$) satisfying
\begin{equation}
\label{eq:32}
g_a(0)=1 \qquad\mbox{and}\qquad g_a(s)\ \in\ [\nicefrac{1}{2},1]\quad
\mbox{for any }s\in [0,\mathcal R ].
\end{equation}
Hence $f_a$ is a concave increasing function satisfying $\nicefrac{r}{2}\le f_a(r)\le r$, and thus
\begin{equation}
\label{eq:33}
a\1_{x\neq y}+\nicefrac{\lvert x-y\rvert}{2}\le\rho_a (x,y)\le a\1_{x\neq y}+\lvert x-y\rvert\qquad\mbox{for any }x,y\in\mathbb{R}^d.
\end{equation}
In particular, the distance $\rho_0$ is equivalent to the Euclidean distance.
\begin{thm}[Euler scheme, contractive case]\label{thm:7}
Suppose that Conditions \ref{enum:c1}, \ref{enum:c2}, \ref{enum:c3} and \eqref{eq:30*} are
satisfied, and let $h_0=\frac 1L\min \left(\frac{K}{L},\frac 12\right)$. Suppose that $a=0$ or $a\ge \sqrt h$, and let
$\rho_a$ be defined by \eqref{eq:31} with $g_a$
specified in \eqref{eq:7**f}, \eqref{eq:7**f2}, respectively. Let
\begin{eqnarray}
\label{eq:c1h0}
c_1(0)& =& \frac{1}{4}\min \left( K,\, \frac{2c_0 }{\mathcal R^2+2\sqrt h\mathcal R+
12\, h}\right) \qquad\text{ and}\\
\label{eq:c1h0a}
c_1(a)& =& \frac{1}{4}\min \left( \frac{K}{1+a/\mathcal R},\, \frac{2c_0 }{\mathcal R^2+2(a+\sqrt h)\mathcal R},\, \frac{2p_0}{h}\right)\quad\text{for }a>0,
\end{eqnarray}
where $c_0$ is the explicit constant in Lemma \ref{lem:6}. Then
\eqref{eq:32} and \eqref{eq:33} hold, and if $h\in (0,h_0)$, then
\begin{equation}
\label{eq:34}
\mathbb{E}_{x,y}[\rho_a (X',Y')]\le\left(1-c_1(a)h\right) \rho_a (x,y)\quad
\mbox{for all }x,y\in\mathbb{R}^d.
\end{equation}
\end{thm}
The proof, based on Theorem \ref{thm:1} for $a > 0$ and
Theorem \ref{thm:3} for $a = 0$, is given in Section \ref{sec:proofsEuler}.
\begin{remark}[Dependence on parameters and dimension]\label{remark:thm1}
The lower bound for the contraction rate in
\eqref{eq:34} is of the correct order $\Omega (h\min (\mathcal R^{-2},K))$. This corresponds to the optimal contraction rate $\Theta (\min (\mathcal R^{-2},K))$ for the corresponding diffusion process, see \cite[Lemma 1 and Remark 5]{Eberle2015}. Note also that the lower bound for the contraction rate does not depend on the dimension $d$ provided the parameters $\mathcal R,K$ and $L$ can be chosen independent of $d$.
\end{remark}
\subsubsection*{General case}
We now turn to the general, not globally contractive case.
Here it is no longer possible
to obtain contractivity w.r.t.\ a metric satisfying \eqref{eq:33}, but we can still choose a metric that is comparable to the Euclidean distance, and apply the theorems above. We illustrate this at first by applying Theorem \ref{thm:1}. Let
\begin{equation}
\label{Lambda} \Lambda \ =\ \min(L, J+L^2h/2).
\end{equation}
We now choose a metric $\rho_a\colon \mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}_+$ of type
\begin{equation}
\label{eq:31x}
\rho_a (x,y)\ =\ a\1_{x\neq y}\, +\, f_a(|x-y|),\qquad f_a(r)\ =\ \int_0^rg_a(s\wedge r_2 )\varphi (s\wedge r_2 ) \dd s.
\end{equation}
Here $a$ is a non-negative constant,
\begin{equation}
\label{eq:31y}
\varphi (r)\ =\ \exp \left( -\widetilde c_0^{-1}\Lambda \left( (r\wedge\mathcal R)^2+2\sqrt h\, r\wedge \mathcal R\right)\right)
\end{equation}
with $\mathcal R$ and $\widetilde c_0$ chosen as in Assumption \ref{enum:c2} and Lemma \ref{lem:6}, respectively,
\begin{equation}
\label{eq:31ya}
r_2\ =\ \mathcal R +\sqrt{2\widetilde c_0/K},
\end{equation}
and $g_a\colon [0,r_2 ]\to\mathbb{R}$ is an appropriately chosen decreasing function (see \eqref{eq:7**f2}) satisfying
\begin{equation}
\label{eq:32x}
g_a(0)=1 \qquad\mbox{and}\qquad g_a(s)\ \in\ [\nicefrac{1}{2},1]\quad
\mbox{for any }s\in [0,r_2 ].
\end{equation}
\begin{thm}[Euler scheme, general case I]\label{thm:8a}
Suppose that Conditions \ref{enum:c1}, \ref{enum:c2} and \ref{enum:c3} are satisfied, and let $h_0 = \frac{1}{L}\min (\frac{p_0}{2}, \frac{K}{L}, \frac{1}{64\, L r_2^2})$ with $r_2$
specified in \eqref{eq:31ya}. Let $a \in [2\sqrt{h}, \Phi (\mathcal R)]$ where $\Phi (\mathcal R ):=\int_0^\mathcal R\varphi (r)\, dr$, let $\rho_a$ be defined by \eqref{eq:31x} with $\varphi$ and $g_a$
specified in \eqref{eq:31y} and \eqref{eq:7**f2}, respectively, and let
\begin{equation}\label{eq:34x}
c_2(a)\ =\ \frac{1}{8}\min \left( \frac{K\, \varphi (\mathcal R )}{1+(a+\sqrt h)\sqrt{2K/\widetilde c_0}},\, \frac{2\, \widetilde c_0\, \varphi (\mathcal R ) }{\mathcal R^2+2(a+\sqrt h)\mathcal R},\, \frac{4p_0}{h}\right) .
\end{equation}
Then \eqref{eq:32x} holds, and if $h \in (0, h_0)$, then
\begin{equation*}
\mathbb{E}_{x,y}[\rho_a (X',Y')]\le\left(1-c_2(a)h\right) \rho_a (x,y)\quad
\mbox{for all }x,y\in\mathbb{R}^d.
\end{equation*}
\end{thm}
Note that except for the
additional factor $\varphi (\mathcal R)=\exp ( -\widetilde c_0^{-1}\Lambda (\mathcal R^2+2\sqrt h\mathcal R))$, the expression for the contraction rate $c_2(a)$
is similar to the one for the rate $c_1(a)$ in the contractive case.
The proof based on Theorem \ref{thm:1} is given in Section \ref{sec:proofsEuler}. If the interval $[2\sqrt{h}, \Phi (\mathcal R)]$ is empty, the theorem can still be applied with $\mathcal R$ replaced by a slightly larger value.
It is also possible to replace Condition \ref{enum:c2} by a Lyapunov condition and apply Theorem \ref{thm:2} instead of Theorem \ref{thm:1}. A corresponding result for the Euler scheme is
given in Section \ref{sec:proofsEuler}, cf.\ Theorem \ref{thm:8b}.
\begin{remark}[Dependence on parameters and dimension]\label{remark:thm2}
The lower bound for the contraction rate in
\eqref{eq:34x} does not depend on the dimension $d$ provided the parameters $\mathcal R,K$ and $\Lambda$ can be chosen independent of $d$. Moreover, by choosing $h$ sufficiently small, we can ensure that $\Lambda$ is close to the one-sided Lipschitz constant $J$. Hence the global Lipschitz constant $L$ is only required for controlling the step size $h$, whereas the contraction properties for sufficiently small $h$ can be controlled essentially by one-sided Lipschitz bounds. This is important since in many applications, only a one-sided Lipschitz condition is satisfied globally. In this case, our approach can still be
applied on a large ball if the step size is chosen sufficiently small depending on the radius of the ball and the growth of the local Lipschitz constant.
\end{remark}
The explicit expression for the metric in Theorem \ref{thm:8a} is a bit complicated. As an alternative, we can use
a simplified metric without a discontinuity that is sufficient to derive bounds of similar order as for the
metric used above, whenever condition \ref{enum:c2} is satisfied.
We assume $hL\le 1/6$,
and we set
\begin{equation}
\label{eq:35}
r_1\ :=\ (1+hL)\mathcal R\ \le\ \frac 76\mathcal R
\end{equation}
with $\mathcal R$ and $L$ as in Assumptions \ref{enum:c2} and \ref{enum:c3}. The choice of $r_1$ ensures that
\begin{equation}
\label{eq:36}
\hat r\ =\ |\hat x-\hat y|\ \le\ (1+hL)r\ \le\ r_1\qquad\text{whenever }r=|x-y|\le\mathcal R.
\end{equation}
Let $c_0$ denote the explicit constant in Lemma \ref{lem:6}, and let
\begin{equation}
\label{eq:37}
q\ =\ 7c_0^{-1}\Lambda\mathcal R.
\end{equation}
We now consider a simplified metric of the form
\begin{align}
\label{eq:39}
\rho (x,y)&=f(\lvert x-y\rvert),& f(r)&=\int_0^r\exp (-q(s\wedge r_1))
\,d s.
\end{align}
\begin{thm}[Euler scheme, general case II]\label{thm:8}
Suppose that Conditions \ref{enum:c1}, \ref{enum:c2} and \ref{enum:c3} are
satisfied, and let $\rho$ be defined by \eqref{eq:39} with $q$
specified in \eqref{eq:37}. Let
\begin{eqnarray}\label{eq:c2}
c_2& =& \min \left( \frac K2,\, \frac{245}{24 c_0} {\Lambda^2\mathcal R^2} \right)\, \exp\left({-\frac{49}{6 c_0}{\Lambda \mathcal R^2}}\right) \qquad \text{ and }\\
h_0&=&\frac 1L\min \left(\frac 16,\, \frac KL,\, \frac{1}{3L\mathcal R^2},\,\frac{c_0^2}{970}\frac 1{L\mathcal R^2}\right), \label{eq:h0general}
\end{eqnarray}
where $c_0$ is chosen as in Lemma \ref{lem:6}. Then
\begin{equation}
\label{eq:45}
\mathbb{E}_{x,y}[\rho (X',Y')]\le(1-c_2h) \rho (x,y)\quad
\mbox{for any }x,y\in\mathbb{R}^d\mbox{ and }h\in (0,h_0].
\end{equation}
\end{thm}
The proof of the theorem is contained in Section \ref{sec:proofsEuler}.
\begin{remark}\label{remark:thm3}
Again, the lower bound $c_2$ for the contraction rate only depends on
$\mathcal R$, $K$ and $\Lambda$. Furthermore, note that $r \exp (-qr_1) \leq f(r) \leq r$ for all $r \geq 0$, and hence the metric $\rho$ is comparable to the Euclidean distance. As a consequence, Theorem \ref{thm:8} implies weak contractivity in the standard $L^1$ Wasserstein distance. Note also that the function $f$ depends on the discretization parameter $h$ via $q$ and $r_1$. It is, however, possible to modify the definition of $f$ so that it no longer depends on $h$, at the cost of getting a worse constant $c_2$. We refer the interested reader to \cite{MajkaMijatovicSzpruch}, where similar bounds are used with a metric independent of $h$.
\end{remark}
Theorem \ref{thm:8} can be extended to cover pseudo metrics based on functions that are strictly convex at infinity. This allows for obtaining upper $L^2$ bounds for Euler schemes under similar assumptions as above. Such bounds are applied to the analysis of Multi-level Monte Carlo algorithms in the upcoming paper \cite{MajkaMijatovicSzpruch}.
\subsection{Application to MALA}\label{sec:\MALA}
The Metropolis-adjusted Langevin Algorithm is a
Metropolis-Hastings method for approximate sampling from a given
probability measure $\mu$ where the proposals are obtained by
an Euler discretization of an overdamped Langevin SDE. In
\cite{EberleAAP}, the dimension dependence of contraction rates
of MALA chains w.r.t.\ standard Kantorovich distances has been
studied for a class of strictly log-concave probability measures that
have a density w.r.t.\ a Gaussian reference measure. Our goal is
to provide a partial extension of these results to non log-concave
measures. By considering the MALA transition step as a
perturbation of the Euler proposals, we obtain contraction rates
w.r.t.\ a modified Kantorovich distance provided the discretization
time step is of order $h=O(d^{-1})$.
We consider a similar setup as in \cite{EberleAAP}: $\mu$ is a
probability measure on $\mathbb R^d$ given by
\begin{equation}
\label{eq:m1}
\mu (d x)=\mathcal Z^{-1} \exp (-U(x)) \dd x=
(2\pi )^{\nicefrac{d}{2}} \mathcal Z^{-1} \exp (-V(x)) \gamma^d(d x) \,,
\end{equation}
where $V$ is a function in $C^4(\mathbb R^d)$,
\begin{equation}
\label{eq:m1a}
U(x)=\frac 12\lvert x\rvert^2 + V(x),
\end{equation}
$\gamma^d$ denotes the $d$-dimensional standard normal
distribution, and
\[
\mathcal Z=\int\exp (-U(x)) \dd x.
\]
We assume
that we are given a norm $\|\cdot\|_-$ on $\mathbb R^d$ such that
\begin{equation}
\label{eq:m2}
\| x\|_-\ \le\ |x|\ \le\ d \| x\|_-\qquad\mbox{for any }x\in \mathbb R^d,
\end{equation}
as well as finite constants $C_n\in [0,\infty )$, $p_n\in \{ 0,1,2,\ldots
\}$, and $K_c,\mathcal R_c\in (0,\infty )$ such that the following conditions hold for any $n \in \{1, \dots, 4\}$:
\begin{eqnarray}
\label{eq:d1} \quad\lvert \partial_{\xi_1, \dots, \xi_n}^n U(x) \rvert &\le& C_n \max (1, \| x \|_-^{p_n}) \| \xi_1 \|_- \cdots \| \xi_n \|_-\, \forall\, x, \xi_1, \dots, \xi_n \in \R^d. \\
\label{eq:d2} (\partial_{\xi \xi} U)(x) &\ge & K_c \lvert \xi \rvert^2 \qquad\forall\ x, \xi \in \R^d: \lvert x \rvert \ge \mathcal R_c .
\end{eqnarray}
Here \eqref{eq:d2} can be interpreted as strict convexity of $U$ outside a Euclidean ball.
\begin{remark}
\begin{enumerate}
\item
For discretizations of infinite-dimensional models, $\| \wc \|_-$ is typically a finite-dimensional approximation of a norm that is almost surely finite w.r.t. the limit measure in infinite dimensions, see for instance \cite[Example 1.6]{EberleAAP}. Correspondingly, we may assume that the measure concentrates on a ball of a fixed radius w.r.t. $\|\wc\|_-$. This will be relevant for the application of Theorem \ref{thm:10} below, which states uniform contractivity on such balls.
\item
Condition \eqref{eq:d1} is the same condition that has been assumed in the strictly convex case in \cite{EberleAAP}.
\item
In \eqref{eq:d2}, we assume strict convexity outside a ball of fixed radius w.r.t.\ the Euclidean norm and not w.r.t.\ $\| \wc \|_-$. Such a bound can be expected to hold with $\mathcal R_c$ independent of the dimension if, for example, the non-convexity occurs only in finitely many directions. The application of a coupling approach in situations where \eqref{eq:d2} does not hold requires more advanced techniques, see e.g.\! \cite{Zimmer}.
\end{enumerate}
\end{remark}
The transition step of a Metropolis-Hastings chain with proposal density $p(x,y)$ and target distribution $\mu(d x) = \mu(x) \dd x$ is given by
\begin{equation}
\label{eq:m4} \widetilde{x} = \begin{cases}X' &\text{if } \widetilde{U} \le \alpha(x,X') \\ x & \text{otherwise,} \end{cases}
\end{equation}
where $x$ is the previous position, $X'$ is the proposed move,
\[
\alpha (x,y) = \min \left(1, \frac{\mu(y) p(y,x) }{\mu(x) p(x,y)}\right)
\]
is the Metropolis-Hastings acceptance probability, and $\widetilde{U} \sim \Unif (0,1)$ is a uniform random variable that is independent of $X'$. We consider the proposal
\begin{equation}
\label{eq:m6} X' = x-\frac{h}{2} x - \frac{h}{2} \nabla V(x) + \sqrt{h - \frac{h^2}{4}} Z, \quad Z \sim N(0, I_d) \,,
\end{equation}
where $h \in (0,2)$ is the step size of the time discretization. The corresponding proposal kernel is $p_h(x,\wc) = N(x-\frac{h}{2} x - \frac{h}{2} \nabla V(x), (h - \nicefrac{h^2}{4}) I_d)$. Substituting $h = \frac{\varepsilon}{1+\nicefrac{\varepsilon}{4}}$, we see that the proposal is a transition step of the semi-implicit Euler discretization
\begin{equation}
\label{eq:m7} X' = x - \frac{\varepsilon}{2} \frac{X'+x}{2} - \frac{\varepsilon}{2} \nabla V(x) + \sqrt{\varepsilon} Z
\end{equation}
for the Langevin SDE $\dd X_t = -\frac{1}{2} X_t\dd t - \frac{1}{2} \nabla V(X_t) \dd t + \dd B_t$ with invariant measure $\mu$. The reason for considering the semi-implicit instead of the explicit Euler approximation is that under appropriate conditions, the acceptance probability
\begin{equation}
\label{eq:m8} \alpha_h (x,y) = \min \left(1, \frac{\mu(y) p_h(y,x)}{\mu(x) p_h(x,y)}\right)
\end{equation}
for the corresponding Metropolis-Hastings scheme has a better dimension dependence. Indeed, if $V$ vanishes, then $\alpha_h(x,y) = 1$. More generally, if \eqref{eq:d1} holds, then the average rejection probability is of order $O(h^{\frac{3}{2}})$.
\begin{lemma}[Upper bounds for rejection probability]\label{lem:3}
Suppose that \eqref{eq:d1} holds and let $k \in \N$. Then there exists an explicit polynomial $P_k \colon \R^2 \rightarrow \R_+$ of degree $\max(p_3 + 3, 3p_2 + 2)$ such that for any $x \in \R^d$ and $h \in (0,2)$,
\[
\mathbb{E}[ (1-\alpha_h(x,X'))^k]^{\frac{1}{k}} \le P_k (\| x \|_-, \| x + \mathbb{N}abla V(x) \|_-)h^{\frac{3}{2}} \,.
\]
\end{lemma}
The proof of the lemma is given in \cite[Proposition 1.7]{EberleAAP}. The polynomials $P_k$ are explicit. Their coefficients depend only on the constants $C_2, C_3, p_2$ and $p_3$ in \eqref{eq:d1} and on the moments
\[
m_n = \mathbb{E}[\| Z\|_-^n], \, n \le k \max (p_3 + 3, 2p_2 + 2) \,.
\]
Apart from replacing $\sqrt{h}$ by $\sqrt{h - \frac{h^2}{4}}$, \eqref{eq:m6} coincides with the explicit Euler discretization of the SDE $\dd X_t = b(X_t) \dd t + \dd B_t$, where $b(x) = -\frac{1}{2} x - \frac{1}{2} \nabla V(x)$.
Therefore, the results in the last section apply to the proposal chain, thus yielding a contraction rate of order $\Omega (h)$. Since the rejection probability is of higher order, we can then apply the perturbation result in Theorem \ref{thm:4} to prove a corresponding contractivity for the \MALA chain. To this end, we consider the coupling $(\widetilde{X}, \widetilde{Y})$ of transition steps of the \MALA chain from positions $x,y \in \R^d$ given by \eqref{eq:m4} and
\begin{equation}
\label{eq:m9} \widetilde{Y} = \begin{cases}
Y' & \text{if } \widetilde{U} \le \alpha(y,Y') \\ y &\text{otherwise.}
\end{cases}
\end{equation}
where $(X', Y')$ is the (optimal) coupling for the proposal steps considered in \eqref{eq:0c}, and $\widetilde{U} \sim \Unif(0,1)$ is independent of both $X'$ and $Y'$. Hence, the proposals are coupled optimally and the same uniform random variable $\widetilde{U}$ is used to decide about acceptance or rejection for each of the steps. Nevertheless, in general $(\widetilde{X}, \widetilde{Y})$ is not an optimal coupling of the corresponding \MALA transition probabilities.
\begin{theorem}[Contraction rates for MALA] \label{thm:10}
Suppose that conditions \eqref{eq:d1} and \eqref{eq:d2} hold and fix $ R \in (0,\infty)$. Then there exists a concave strictly increasing function $\widetilde f \colon [0,\infty) \rightarrow [0,\infty)$ with $\widetilde f(0) = 0$ and constants $c_3, h_0 \in (0,\infty)$ such that
for any $h \in (0,h_0d^{-1})$ and for any $x,y \in \R^d$ with $\| x \|_- \le R$, $\|y\|_- \le R$,
\begin{equation}
\label{eq:m10} \mathbb{E}_{x,y} [\widetilde f(|\widetilde{X}- \widetilde{Y}|) ] \le (1-c_3 h) \widetilde f(|x-y|) \,.
\end{equation}
The function $\widetilde f$ and the constants $c_3$ and $h_0$ depend only on $R$ and on the values of the constants $C_n$, $p_n$, $K_c$, $\mathcal R_c$ in assumptions \eqref{eq:d1}, \eqref{eq:d2}.
\end{theorem}
The proof of Theorem \ref{thm:10} is given in Section \ref{sec:proofof\MALA}.
\begin{remark}
The theorem shows that by choosing the step size of order $\Theta (d)$, a contraction rate of the same order holds on balls w.r.t. $\| \wc \|_-$ provided conditions \eqref{eq:d1} and \eqref{eq:d2} are satisfied. In the strictly convex case, it has been shown in \cite{EberleAAP} by a synchronous coupling that a corresponding result holds even for step sizes of order $\Theta (1)$ if the Euclidean norm in \eqref{eq:m10} is replaced by $\| \wc \|_-$. One could hope for a similar result in the not globally convex case, but the combination of reflection coupling with a different norm leads to further difficulties. A possibility to overcome these difficulties might be the two-scale approach developed in \cite{Zimmer}.
\end{remark}
\section{Proofs of Theorems \ref{thm:1} and \ref{thm:2}}\label{sec:proofs12}
In this section, we prove the first two theorems. We first specify the explicit choice of the metric and the explicit values of the contraction rate $c$. The reason for choosing the metric this way will become clear by the subsequent proofs of the theorems.
For $r, s > 0$, we consider the intervals
\begin{equation}
\label{eq:501} I_r = ((r-\varepsilon)^+,r)
\end{equation}
and the dual intervals
\begin{equation}
\label{eq:502} \hat{I}_s = \{ r > r_0 : s \in I_r\} = (s,s+\varepsilon) \cap (r_0, \infty) \,.
\end{equation}
For $r \in (r_0,\infty)$ we set
\begin{equation}
\label{eq:5stara} \overline{\gamma} (r) = \nicefrac{2\overline{\beta} (r)}{\underline{\alpha} (r)} \,.
\end{equation}
Let $\widetilde{\gamma} \colon [0,\infty) \rightarrow [0,\infty)$ be a function satisfying
\begin{eqnarray}
\label{eq:5star} \sup_{r \in \hat{I}_s} \overline{\gamma} (r) &\le & \widetilde{\gamma} (s) \quad\text{ for any } s \in [0,\infty),\qquad\text{i.e.,}\\
\label{eq:503} \overline{\gamma}(r) &\le & \widetilde{\gamma} (s) \tforall r > r_0, \, s \in I_r \,.
\end{eqnarray}
By assumptions \ref{enum:a1} and \ref{enum:a2}, such a function exists. If \ref{enum:a3} holds, then we may assume w.l.o.g. that $\widetilde{\gamma} (s) = 0$ for large $s$.
\subsection{Choice of the metric in Theorem \ref{thm:1}}\label{subsec:choiceOfMetricThm1}
Suppose conditions \ref{enum:a1}, \ref{enum:a2} and \ref{enum:a3} hold. We set
\begin{equation}\label{eq:choice r1}
r_1 \coloneqq \sup \{ r > 0 : \widetilde{\gamma} (r) > 0\},
\end{equation}
where $\sup \emptyset = 0$. By Assumption \ref{enum:a3} we can choose $\widetilde{\gamma}$ such that $r_1$ is finite. We have
\begin{align}
\label{eq:50} \widetilde{\gamma}(r) &= 0 \,,& \overline{\gamma} (r) &\le 0 \,,&\overline{\beta}(r) &\le 0 \, \tforall r \ge r_1 \,.
\end{align}
We also fix a constant $r_2 \in (r_1, \infty)$. The value of $r_2$ will be determined in condition \eqref{eq:5d} below. The underlying metric we consider is given by \eqref{eq:13}, where $f \colon [0,\infty) \rightarrow [0,\infty)$ is a concave increasing function defined by
\begin{equation}
\label{eq:5000} f(r) = a \1_{r > 0} + \int_0^r \varphi (s \wedge r_2) g(s\wedge r_2) \dd s
\end{equation}
with decreasing differentiable functions $\varphi$ and $g$ such that $\varphi(0) = g(0) = 1$ and a constant $a \in (0,\infty)$ that are all specified below. Hence, $f$ is twice differentiable except at $0$, $f(0+) -f(0) = a$, $f' = \varphi g$ on $(0,r_2)$, and $f'$ is constant on $[r_2, \infty)$.
The function $\varphi$ and the constant $a$ are chosen such that
\begin{align}
\label{eq:5a} \varphi(r) &= \exp\left(-\int_0^r \widetilde{\gamma} (s) \dd s \right),\qquad\text{and}\\
\label{eq:5c} a &\ge r_0 + 2 \sup_{\lvert x-y \rvert \le r_0} \frac{\beta(x,y)}{\pi(x,y)} .
\end{align}
Notice that by \eqref{eq:50}, the function $\varphi (r)$ is constant for $r\ge r_1$.
Setting
\begin{equation}
\label{eq:5e} \Phi(r) = \int_0^r \varphi(s) \dd s \,,
\end{equation}
the constant $r_2$ is chosen such that
\begin{equation}
\label{eq:5d}-\frac{\overline{\beta}(r)}{a+\Phi(r)} \ge \frac{1}{2} \left(\int_{r_1}^{r_2} \frac{\Phi(s)}{\underline{\alpha}(s)} \dd s \right)^{-1} \, \text{ for all } r \ge r_2 \,.
\end{equation}
Assumption \ref{enum:a3} ensures that such a constant exists. Indeed, for $r \ge r_1$, $\widetilde{\gamma}$ vanishes, whence $\varphi$ is constant and $\Phi$ is linear. By definition of $\alpha$, {we see that} $\underline{\alpha}$ is uniformly bounded by $\varepsilon^2$. Therefore, the value on the right hand side of \eqref{eq:5d} goes to zero as $r_2 \rightarrow \infty$, and \eqref{eq:5d} holds for large $r_2$ by \ref{enum:a3}.
The contraction rate is now given by
\begin{equation}
\label{eq:5f} c = \min\left( \frac{1}{2} \inf_{r \leq r_0} \underline{\pi}(r), \frac{1}{4} \left( \int_0^{r_2} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a + \Phi (u)}{\underline{\alpha} (u)} \dd s \right)^{-1}\right)
\end{equation}
and the function $g$ is defined as
\begin{equation}
\label{eq:5g} g(r) = 1-2c \int_0^r \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a + \Phi (u)}{\underline{\alpha} (u)} \dd s \,.
\end{equation}
Note that \eqref{eq:5f} guarantees that $g(r) \ge \frac{1}{2}$ for $r \le r_2$.
\subsection{Choice of the metric in Theorem \ref{thm:2}}\label{subsec:choiceOfMetricThm2}
Now suppose that \ref{enum:a1}, \ref{enum:a2} and \ref{enum:a4} hold. In this case we set
\begin{equation}
\label{eq:6r1} r_1 \coloneqq \sup \{ d(x,y) : x,y \in S \,, \, V(x) + V(y) < \nicefrac{4C}{\lambda}\} \,.
\end{equation}
By \ref{enum:a4ii} and \ref{enum:a2}, $r_1$ is finite. Moreover, by \ref{enum:a4i},
\begin{equation}
\label{eq:60} \mathbb{E}_{x,y} [V(X') + V(Y') ] \le \left(1-\frac{\lambda}{2}\right) (V(x) + V(y) )\qquad \text{if } d(x,y) \ge r_1 \,.
\end{equation}
We also fix a constant $r_2 \in (r_1, \infty)$. The value of $r_2$ will be determined by condition \eqref{eq:6d} below.
The function $f \colon [0,\infty) \rightarrow [0,\infty)$ determining the metric in \eqref{eq:15} is now defined by
\begin{equation}
\label{eq:6000} f(r) = a \1_{r>0} + \int_0^r \varphi(s \wedge r_2) g(s\wedge r_2) \dd s
\end{equation}
with decreasing differentiable functions $\varphi$ and $g$ such that $\varphi(0) = g(0) = 1$, and a constant $a \in (0,\infty)$ that are all specified below. Hence, $f$ is twice differentiable except at $0$, $f(0+) - f(0) = a$, $f' = \varphi g$ on $(0,r_2)$ and $f'$ is constant on $[r_2, \infty)$.
The function $\varphi$ and the constants $a$ and $M$ in \eqref{eq:15} are chosen such that
\begin{align}
\label{eq:6a} \varphi(r) &= \exp \left(-\int_0^r \widetilde{\gamma} (s) \dd s \right) \,, \\
\label{eq:6b} M &\le \frac{1}{4} \left(\int_0^{r_1} \frac{1}{\varphi(s) } \sup_{u \in \hat{I}_s} \frac{1}{\underline{\alpha} (u)} \dd s \right)^{-1} \,, \\
\label{eq:6c} a &\ge r_0 + 2 \sup_{\lvert x-y \rvert \le r_0} \frac{\beta(x,y) + M}{\pi (x,y)} \,, \\
\label{eq:6d} \overline{\beta}(r) \varphi(r) &\le \frac{\lambda M}{16\, C} (V(x) + V(y))\ \text{ if } \lvert x-y \rvert \ge r_2 \,.
\end{align}
By \ref{enum:a4ii} and since $\varphi \le 1$, there always exists a finite $r_2$ such that \eqref{eq:6d} holds. To optimize the estimates, we choose $r_2$ as small as possible, i.e., we set
\begin{equation}
\label{eq:6e} r_2 = r_1 \vee \sup \left\{ d(x,y) = r : x,y \in S, {V(x) + V(y)} < \frac{16\, C}{\lambda M} {\overline{\beta} (r)}\varphi(r) \right\} \,.
\end{equation}
Setting
\begin{equation}
\Phi (r) = \int_0^r \varphi(s) \dd s \,,
\end{equation}
the contraction rate $c$ is given by
\begin{equation}
\label{eq:6f} c\ =\ \min \left( \frac{1}{2} \inf_{r \leq r_0} \underline{\pi}(r) ,\, \frac{\lambda}{4} ,\, \frac{\lambda M}{16\, C}\inf_{r\ge r_2}\frac{V(x)+V(y)}{\Phi (r)} ,\, \frac{1}{8} \left( \int_0^{r_2} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a + \Phi (u)}{\underline{\alpha} (u)} \dd s \right)^{-1}\right) \,,
\end{equation}
and the function $g$ is defined as
\begin{equation}
\label{eq:6g} g(r) = 1-2c \int_0^r \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a + \Phi (u)}{\underline{\alpha} (u)} \dd s - M \int_0^{r \wedge r_1} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{1}{\underline{\alpha} (u)} \dd s \,.
\end{equation}
Note that \eqref{eq:6f} and \eqref{eq:6b} guarantee that $g(r) \ge \frac{1}{2}$ for $r \le r_2$. In the minimum defining $c$, the first term guarantees contractivity for $r \le r_0$, the second term is used for all $r$, the third term guarantees contractivity for $r \ge r_2$ and the last term ensures contractivity with rate $c$ for $r_0 < r \le r_1$.
\subsection{Proof of Theorem \ref{thm:1} and Theorem \ref{thm:2}}
Since the arguments are similar, we prove both theorems simultaneously, distinguishing cases where needed. In the situation of Theorem \ref{thm:1}, we set {$M=0$}. Let $x,y \in \R^d$ such that $r = \lvert x-y \rvert > 0$. Since $f''(t) \le 0$ for all $t>0$,
\begin{eqnarray*}
f(R')-f( r)&=&-a \1_{R' = 0}\, +\, \int_{ r}^{R'}f'(s)\,ds\\
&=& -a \1_{R' = 0}\, +\, (R'- r)f'( r)\, +\, \int_{ r}^{R'}\int_{ r}^sf''(t)\, dt\, ds\\
&\le & -a \1_{R' = 0}\, +\, (R' - r) f' (r)\, +\, \frac{1}{2} ((R'-r)^- \wedge \varepsilon)^2 \sup_{u \in {I}_r} f''(u),
\end{eqnarray*}
where $I_r = ((r-\varepsilon)^+, r)$. By taking expectations, we conclude that
\begin{equation}
\label{eq:7*}\mathbb{E}_{x,y} [f(R' ) -f(r)] \le -a \pi (x,y) + \beta (x,y) f'(r) + \frac{1}{2} \alpha(x,y)\sup_{u \in {I}_r} f''(u).
\end{equation}
Our goal is to compensate the second term by the first term for $r \le r_0$ and by the last term for $r_0 < r \le r_2$ (and possibly by a Lyapunov part for $r \ge r_2$). In order to verify \eqref{eq:14} and \eqref{eq:16}, we now distinguish three cases.
\\
\emph{Case $r \in (r_0, r_2)$.} Since $f' = g\varphi$ on $(0,r_2)$, we have
\begin{equation}
\label{eq:71} \sup_{ {I}_r} f''\ \le\ \sup_{ {I}_r} (g' \varphi) + \sup_{ {I}_r} (g\varphi') \,.
\end{equation}
Note that both summands are negative since $g$ and $\varphi$ are decreasing.
Now we note first that our choice of $\varphi$ guarantees that
\begin{equation}
\label{eq:72} \frac{1}{2} \alpha(x,y) \sup_{ {I}_r} (g\varphi') + \beta(x,y) f' (r) \le 0 \,.
\end{equation}
Indeed, \eqref{eq:72} is satisfied provided
\begin{equation}
\label{eq:72'} \sup_{ {I}_r} (g\varphi') \le - \overline{\gamma}(r) g(r) \varphi(r) \,.
\end{equation}
Since $\varphi' \le 0$ and $g$ is decreasing, we have
\[
\sup_{ {I}_r} (g\varphi') \le \inf_{ {I}_r} g\, \sup_{ {I}_r}\varphi' \le g(r) \sup_{ {I}_r}\varphi' .
\]
Hence, \eqref{eq:72'} is satisfied if
\begin{equation}
\label{eq:72''} \sup_{s \in {I}_r}\varphi'(s) \le -\overline{\gamma} (r) \varphi(r).
\end{equation}
But indeed, by definition of $\varphi$ and $\widetilde{\gamma}$, we have for $s \in I_r$
\[
\varphi'(s) = -\widetilde{\gamma} (s) \varphi(s) \le -\overline{\gamma} (r) \varphi(r) \,.
\]
Next, we observe that our choice of $g$ (in particular, $g \leq 1$) guarantees that
\begin{equation}
\label{eq:73} \frac{1}{2} \alpha(x,y) \sup_{{I}_r}(g'\varphi ) + M \1_{r < r_1} \le -c f(r) \,.
\end{equation}
Indeed, since $f(r) \le a + \Phi (r)$ and $f'(r) \le 1$, it is sufficient to show
\begin{equation}
\label{eq:73'} \sup_{s \in {I}_r}(g'(s)\varphi(s)) \le -2c \frac{a+\Phi (r)}{\underline{\alpha} (r)} - \frac{M\1_{r<r_1}}{\underline{\alpha} (r)} \quad\text{ if } r_0 < r < r_2,
\end{equation}
or
\begin{equation}
\label{eq:73''} g'(s) \varphi(s) \le -2c \sup_{r \in \hat{I}_s} \frac{a +\Phi (r)}{\underline{\alpha} (r)} - \sup_{r \in \hat{I}_s} \frac{M \1_{r < r_1}}{\underline{\alpha} (r)}\quad \text{ if } 0 < s \le r_2 \,.
\end{equation}
In \eqref{eq:5g}, \eqref{eq:6g} respectively, the function $g$ has been defined in such a way that this condition is satisfied.
Now, by combining \eqref{eq:7*}, \eqref{eq:72} and \eqref{eq:73}, and bounding the term $-a \pi(x,y)$ in \eqref{eq:7*} by zero, we obtain for $r \in (r_0, r_2)$:
\begin{equation}
\label{eq:73a} \mathbb{E}_{x,y} [ f(R') - f(r) ] \le - M \1_{r < r_1} - cf(r) \,.
\end{equation}
In the setup of Theorem \ref{thm:1}, we have chosen $M=0$ and $\rho(x,y) = f(d(x,y))$. Hence, \eqref{eq:73a} implies the assertion
\begin{equation}
\label{eq:7ba} \mathbb{E}_{x,y} [\rho(X',Y') ] \le (1-c) \rho(x,y) \qquad\text{for } r = d(x,y) \in (r_0, r_2) \,.
\end{equation}
In the setup of Theorem \ref{thm:2}, by Assumption \ref{enum:a4},
\begin{equation}
\label{eq:7bb} \mathbb{E}_{x,y} [V(X') + V(Y')] \le (1-\lambda) (V(x) + V(y)) + 2C \,.
\end{equation}
Since $\rho(x,y) = f(d(x,y)) + \frac{M}{2 C} (V(x) + V(y)) \1_{x \neq y} $, we obtain for $r\in (r_0,r_1)$:
\begin{eqnarray}
\label{eq:73b} \lefteqn{\mathbb{E}_{x,y} [\rho(X',Y')]}\\ \nonumber
&\le &-M + (1-c) f(d(x,y)) + \frac{M}{2C} (1-\lambda )(V(x) + V(y))+ M \\ & \le &(1-c) \rho(x,y).\nonumber
\end{eqnarray}
Here the last inequality holds since $\lambda \ge c $.
On the other hand, for $r \in [r_1,r_2)$, we have $V(x) + V(y) \ge {4C}/{\lambda}$ by \eqref{eq:6r1}. Hence in this case, by \eqref{eq:7bb},
\begin{equation}
\label{eq:7bba} \mathbb{E}_{x,y}[V(X') + V(Y')]\ \le\ \left(1-{\lambda}/{2}\right) (V(x) + V(y)) \,.
\end{equation}
Since $c \le {\lambda}/{2}$, \eqref{eq:73a} and \eqref{eq:7bba} then again imply
\begin{eqnarray}
\label{eq:7bc} {\mathbb{E}_{x,y} [\rho (X',Y')]} &\le &(1-c) f(d(x,y)) + \left(1-\frac{\lambda}{2}\right) \frac{M}{2C} (V(x) + V(y)) \\ \nonumber & \le & (1-c) \rho (x,y) \,.
\end{eqnarray}
\emph{Case $r \le r_0$.} Noting that $f'' \le 0$ and $f' \le 1$ and applying \ref{enum:a2}, we see that for $r \in (0, r_0)$, \eqref{eq:7*} implies
\begin{equation}
\label{eq:74a} \mathbb{E}_{x,y}[f(R') - f(r)] \le -a \pi (x,y) + \beta(x,y) \le - M - c(a+ r_0) \le -M - cf(r)
\end{equation}
provided $c \le \frac{1}{2} \pi (x,y)$ and
\begin{equation}
\label{eq:74'} \frac{a}{2} \pi (x,y) \ge \frac{r_0}{2} \pi (x,y) + M + \beta(x,y) \,.
\end{equation}
This condition is satisfied by our choice of $a$, cf.\ \eqref{eq:6c}. Again, using \eqref{eq:7bb}, and since $c \le {\lambda}/{2}$, we obtain
\begin{multline*}
\mathbb{E}_{x,y} [\rho(X',Y') ] \\\le -M + (1-c) f(r) + (1-\lambda) \frac{M}{2C} (V(x) + V(y)) + 2C \frac{M}{2C} \le (1-c) \rho(x,y) \,.
\end{multline*}
\emph{Case $r \ge r_2$.} Here, we use the bound
$
f(R') - f(r) \le (R' - r) f'(r)
$
yielding
\begin{equation}
\label{eq:7**} \mathbb{E}_{x,y} [f(R') - f(r)] \le \beta(x,y) f'(r).
\end{equation}
Now, we consider first the setup of Theorem \ref{thm:1}. Here, for $r \ge r_2$, we have
\[
\mathbb{E}_{x,y} [f(R') - f(r) ] \le \beta(r) f'(r) \le \beta(r) \frac{\varphi(r_1)}{2} \,,
\]
where we have used that by \eqref{eq:50}, $\beta(r) \le 0$, $f' \ge \nicefrac{\varphi}{2}$ and $\varphi$ is constant on $[r_1, \infty)$. To prove that the right hand side is bounded from above by $-cf(r)$, it is sufficient to show
\[
c(a+\Phi (r)) \le -\beta(r) \frac{\varphi(r_1)}{2} \tforall r \ge r_2 \,.
\]
We claim that this holds by the definition of $r_2$. Indeed, by the definition of $c$,
\[
c^{-1} \ge 4 \int_0^{r_2} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a +\Phi (u)}{\underline{\alpha} (u)} \dd s \ \ge\ 4 \int_{r_1}^{r_2}\frac{\Phi (s)}{\varphi(s)\underline{\alpha}(s)} \dd s = \frac{4}{\varphi(r_1)} \int_{r_1}^{r_2} \frac{\Phi (s)}{\underline{\alpha} (s)} \dd s \,.
\]
Hence, by \eqref{eq:5d},
\[
c(a+\Phi (r)) \le \frac{1}{4} \left(\int_{r_1}^{r_2} \frac{\Phi(s) }{\underline{\alpha} (s)} \dd s \right)^{-1} (a+ \Phi (r)) \varphi(r_1) \le -\beta(r) \frac{\varphi(r_1)}{2},
\]
and thus
\[
\mathbb{E}_{x,y} [\rho (X', Y') - \rho (x,y) ] \le -c (a+\Phi (r)) \le -c \rho(x,y) \,.
\]
Finally, we now show contractivity for $r \ge r_2$ under the conditions in Theorem \ref{thm:2}. Here, by \eqref{eq:7bba} and \eqref{eq:7*},
\begin{eqnarray}
\mathbb{E}_{x,y} [\rho(X',Y')] &=& \mathbb{E}_{x,y} [f(R')] + \frac{M}{2C} \mathbb{E}_{x,y} [V(X') + V(Y')]\\
\nonumber &\le & f(r) + \overline{\beta} (r) f'(r) + \frac{M}{2C} \left(1-\frac{\lambda}{2}\right) (V(x) + V(y)) \,.
\end{eqnarray}
Since $c \le {\lambda}/{4}$ by its definition, we obtain
\begin{equation}
\label{eq:75} \mathbb{E}_{x,y}[\rho(X',Y')] \le (1-c) \left(f(r) + \frac{M}{2C} (V(x) + V(y))\right) = (1-c) \rho(x,y)
\end{equation}
provided
\begin{equation}
\label{eq:75i} cf(r) + \overline{\beta} (r)f'(r) \le \frac{M}{2C} \frac{\lambda}{4} (V(x) + V(y)) \,.
\end{equation}
However, due to our choice of $r_2$ in \eqref{eq:6e} and since $f' \leq \varphi$, we have
\[
\overline{\beta} (r) f'(r) \le \frac{\lambda M}{16C} (V(x) + V(y))\quad \text{ if } \lvert x-y \rvert \ge r_2 \,.
\]
Moreover, due to our choice of $c$ in \eqref{eq:6f} and since $f \leq \Phi$, we get
\begin{equation*}
cf(r) \leq \frac{\lambda M}{16C} (V(x) + V(y))\quad \text{ if } \lvert x-y \rvert \ge r_2 \,.
\end{equation*}
Hence \eqref{eq:75i} is indeed satisfied for $r \geq r_2$ and the proof is complete.
\section{Proof of Theorem \ref{thm:3}}\label{sec:proof3}
For proving Theorem \ref{thm:3}, we proceed in a similar way as in the proofs of Theorem \ref{thm:1} and Theorem \ref{thm:2} above. Suppose that conditions \ref{enum:b1}, \ref{enum:b2} and \ref{enum:b3} hold. Now, the intervals $I_r$, $r \in (0,\infty)$ are given by \eqref{eq:17} and we consider the dual intervals $\hat{I}_s$, $s \in (0,\infty)$ defined by
\begin{equation}
\label{eq:80} \hat{I}_s = \{ r \in (0,\infty) : s \in I_r\}.
\end{equation}
By \eqref{eq:18}, $I_r = (r-\ell (r), r)$ for $r \ge r_0$ and $I_r \subseteq (0,2r_0)$ for $r < r_0$. Therefore
\begin{align}
\label{eq:8.} \hat{I}_s &= \{ r > s : r-\ell(r) < s \}\qquad\text{ for } s \ge 2r_0,\ \text{ and} \\
\label{eq:8..} \hat{I}_s &\subseteq \{ r > s : r-\ell(r) < 2r_0 \}\qquad\text{ for } s< 2r_0 \,.
\end{align}
Let $\overline{\gamma} (r) = {2 \overline{\beta} (r)}/{\underline{\alpha} (r)}$ as in \eqref{eq:5stara}. Similarly as in the proof of Theorem \ref{thm:1} and Theorem \ref{thm:2}, we assume that $\widetilde{\gamma} \colon [0,\infty) \rightarrow [0,\infty)$ is a function satisfying
\begin{align}
\label{eq:8**i} \sup_{\hat{I}_s} \overline{\gamma} &\le \widetilde{\gamma} (s) \tforall s \in (0,\infty) \,, \\
\label{eq:8**ii} 4\sup\limits_{\hat{I}_s} \overline{\gamma} &\le \widetilde{\gamma} (s) \tforall s \in (0,2r_0) \,, \quad\text{ and}\\
\label{eq:8**iii} \int_0^{2r_0} \widetilde{\gamma} (s) \dd s &\le \log 2 \, .
\end{align}
Note the additional factor $4$ that has been introduced for technical reasons for $s < 2r_0$. In applications, this will usually not affect the bounds too much, as typically $r_0$ is a small constant. Condition \eqref{eq:8**iii} can always be satisfied by choosing $r_0$ small enough. As in \eqref{eq:choice r1}, we set
\begin{equation}
\label{eq:8a}
r_1 \coloneqq \sup \{ r > 0 : \widetilde{\gamma} (r) > 0\},
\end{equation}
where $\sup \emptyset = 0$. Similarly as below \eqref{eq:choice r1}, by Assumption \ref{enum:b3}, we can choose $\widetilde{\gamma}$ such that $r_1$ is finite.
The metric is chosen similarly as in the proof of Theorem \ref{thm:1} above, where now $a=0$.
We define
\begin{equation}
\label{eq:8b} f(r) = \int_0^r \varphi(s \wedge r_2) g (s \wedge r_2) \dd s \,.
\end{equation}
Here
\begin{equation}
\label{eq:8c} \varphi(r) = \exp \left(-\int_0^r \widetilde{\gamma} (s) \dd s \right),\qquad
\Phi (r)= \int_0^r \varphi(s) \dd s,
\end{equation}
the constant $r_2$ is chosen such that
\begin{equation}
\label{eq:8e} \frac{-\overline{\beta}(r)}{\Phi (r)} \ge \frac{1}{8} \left(\int_{r_1}^{r_2} \frac{\Phi (s)}{\underline{\alpha}(s)} \dd s \right)^{-1} \text{ for } r \ge r_2,
\end{equation}
and
\begin{equation}
\label{eq:8g} g(r) = 1-2c \int_0^r \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{\Phi (u)}{\underline{\alpha} (u)} \dd s,
\end{equation}
where the contraction rate $c$ is given by
\begin{equation}
\label{eq:8f} c = \frac{1}{4} \left(\int_0^{r_2} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{\Phi (u)}{\underline{\alpha}(u)} \dd s\right)^{-1} \,.
\end{equation}
\begin{proof}[Proof of Theorem \ref{thm:3}]
Let $x,y \in \R^d$ and $r = d(x,y)$.
\\
For $r \ge r_2$, \eqref{eq:21} follows in the same way as in the proof of Theorem \ref{thm:1} (with $a=0$). The crucial assumption for this is \eqref{eq:8e}, which holds due to \ref{enum:b3}, by analogy to \eqref{eq:5d} in the proof of Theorem \ref{thm:1}, which holds due to \ref{enum:a3}.
\\
Now assume that $r < r_2$. To prove \eqref{eq:21}, we show that
\begin{equation}
\label{eq:8**} \mathbb{E}_{x,y} [f(R')-f(r)] \le \overline{\beta} (r) f'(r) + \frac{1}{2} \underline{\alpha} (r) \sup_{ I_r} f''\ \le\ -c f(r) \,.
\end{equation}
The first inequality follows similarly as in the proof of Theorem \ref{thm:1}, cf.\ \eqref{eq:7*}. To prove the second inequality, note that on $(0,r_2)$,
\begin{align}
\label{eq:8***} f' &= g \varphi \,, & f'' &= g\varphi' + g' \varphi & \text{ and }& & f &\le \Phi \,.
\end{align}
By \eqref{eq:8***} it is sufficient to show that $\varphi$, $g$ and $c$ have been chosen in such a way that
\begin{align}
\label{eq:8b1} \sup_{ I_r} (g \varphi') &\le -2 \frac{\overline{\beta} (r)}{\underline{\alpha} (r)} g(r) \varphi(r)\\
\label{eq:8b2} \sup_{ I_r} (g' \varphi ) &\le -2 c\frac{\Phi (r)}{\underline{\alpha} (r)} \,.
\end{align}
Then, by \eqref{eq:8**} we can conclude that
\[
\mathbb{E}_{x,y} [\rho(X',Y') - \rho(x,y) ] = \mathbb{E}_{x,y} [f(R') - f(r)] \le - c\Phi (r) \le -c f(r) = -c \rho(x,y) \,.
\]
We first verify \eqref{eq:8b1}. This condition is satisfied provided
\begin{equation}
\label{eq:8b3} g(s) \varphi'(s) \le - \sup_{ \hat{I}_s} (\overline{\gamma} g \varphi ) \tforall s \le r_2.
\end{equation}
For $s \ge 2r_0$ we have
\[
\sup_{\hat{I}_s} (\overline{\gamma} g \varphi) \le \left(\sup_{\hat{I}_s} \overline{\gamma} \right)\left(\sup_{\hat{I}_s} g \varphi\right) \le \widetilde{\gamma} (s) g(s) \varphi(s) \,,
\]
because $\hat{I}_s \subseteq (s,\infty)$ by \eqref{eq:8.} and since $g\varphi$ is decreasing. Hence, \eqref{eq:8b3} holds by definition of $\varphi$.
For $s < 2r_0$ we have to argue differently, since, in general, {$\hat{I}_s$ is not contained in $(s,\infty)$} in this case. Observe first that if $\sup_{\hat{I}_s} (\overline{\gamma} g \varphi) \leq 0$, then \eqref{eq:8b3} holds trivially since $\varphi$ is decreasing. Hence it is sufficient to consider the case of $\sup_{\hat{I}_s} (\overline{\gamma} g \varphi) > 0$. Noting that $g \varphi \le 1$, we have by \eqref{eq:8**ii}
\[
\sup_{\hat{I}_s} (\overline{\gamma} g \varphi) \le \sup_{\hat{I}_s} \overline{\gamma}^+\le \frac{1}{4} \widetilde{\gamma} (s)
\]
and hence, since $g \ge \frac{1}{2}$,
\[
g(s)\varphi'(s) \le \frac{1}{2} \varphi'(s) = - \frac{1}{2} \widetilde{\gamma} (s) \varphi(s) \le -2 \varphi(s) \sup_{\hat{I}_s} (\overline{\gamma} g \varphi) \,.
\]
Thus, \eqref{eq:8b3} holds for $s < 2r_0$ since by \eqref{eq:8**iii},
\[
\varphi(s) = \exp \left(-\int_0^s \widetilde{\gamma} (u) \dd u \right) \ge \frac{1}{2} \,.
\]
We thus have shown that \eqref{eq:8b3} and hence \eqref{eq:8b1} are satisfied. It remains to verify \eqref{eq:8b2}. This condition holds provided
\begin{equation}
\label{eq:8b4} g'(s) \varphi(s) \le \inf_{\hat{I}_s} \frac{-2c\Phi }{\underline{\alpha}}, \tforall s \le r_2
\end{equation}
or, equivalently,
\begin{equation}
\label{eq:8b5} g'(s) \le -2c \frac{1}{\varphi(s)} \sup_{\hat{I}_s} \frac{\Phi }{\underline{\alpha}}, \tforall s \le r_2 \,.
\end{equation}
The function $g$ has been chosen in \eqref{eq:8g} in such a way that this condition is satisfied.
\end{proof}
\section{Proof of perturbation result}\label{sec:proof4}
We now prove the perturbation result in Theorem \ref{thm:4}. Let $x,y \in S$, $x \neq y$. By \eqref{eq:26}, \eqref{eq:27}, \eqref{eq:28} and \eqref{eq:29},
\begin{eqnarray*}
\lefteqn{\mathbb{E}_{x,y} [\widetilde{\rho}(\widetilde{X}, \widetilde{Y}) - \widetilde{\rho}(x,y)] \ \le\ \mathbb{E}_{x,y} [\rho(\widetilde{X}, \widetilde{Y})-\rho(x,y)] - \frac{2b}{p} \mathbb{P}_{x,y} [\widetilde X=\widetilde Y]} \\
&\le &\mathbb{E}_{x,y}[(d(\widetilde{X}, \widetilde{Y}) - d(X', Y'))^+]
+ \mathbb{E}_{x,y} [\rho(X',Y') - \rho(x,y)] -\frac{2b}{p} \mathbb{P}_{x,y} [\widetilde X=\widetilde Y] \\
&\le &b -\frac{c}{2} \rho(x,y) - 2b \1_{d(x,y) < r_0} \,.
\end{eqnarray*}
{Note that in the second inequality we have used that $f$ is a contraction.} For $d(x,y) < r_0$ we obtain
\begin{equation}
\label{eq:31*} \mathbb{E}_{x,y} [\widetilde{\rho}(\widetilde{X}, \widetilde{Y}) - \widetilde{\rho}(x,y)] \le - \frac{p}{2} \frac{2b}{p} - \frac{c}{2} \rho(x,y) \le -\frac{1}{2} \min (c,p) \widetilde{\rho}(x,y) \,.
\end{equation}
For $d(x,y) \ge r_0$, we use the fact that $b = \nicefrac{cf(r_0)}{4}$. Hence,
\[
\widetilde{\rho} (x,y) = \rho(x,y) + \frac{2b}{p} \le \left(1+\frac{c}{2p}\right) \rho(x,y) \le \max \left(2, \frac{c}{p}\right) \rho(x,y),\qquad\text{and}
\]
\begin{equation}
\label{eq:32*} \mathbb{E}_{x,y} [\widetilde{\rho} (\widetilde{X}, \widetilde{Y}) - \widetilde{\rho}(x,y)] \le b-\frac{c}{2} \rho(x,y) \le -\frac{c}{4} \rho(x,y) \le -\frac{1}{8} \min (c,2p) \widetilde{\rho} (x,y).
\end{equation}
The assertion of Theorem \ref{thm:4} follows from \eqref{eq:31*} and \eqref{eq:32*}.
\section{Proof of results for the Euler scheme}\label{sec:proofsEuler}
In this section, we prove the contraction results for the Euler scheme.
\begin{proof}[Proof of Lemma \ref{lem:6} \ref{enum:lem6i}, \ref{enum:lem6ii} and \ref{enum:lem6iii}]
We start with reduction steps. At first, we observe that the definitions of $\hat{\beta} (x,y), \hat{\alpha} (x,y)$ and $\hat{\pi} (x,y)$ only depend on $\hat{r} = \lvert \hat{x} - \hat{y}\rvert$ and $R' = \lvert X' -Y'\rvert$. Thus, the assertions \ref{enum:lem6i}, \ref{enum:lem6ii}, \ref{enum:lem6iii} are statements about the coupled random walk transition step $(\hat{x}, \hat{y}) \rightarrow (X', Y')$ defined by \eqref{eq:0c}, and we may assume w.l.o.g.\ that $(\hat{x}, \hat{y}) = (x,y)$. Furthermore, $\hat{r}$ and the law of $R'$ under $\mathbb{P}_{x,y}$ are invariant under translations and rotations of the underlying state space $\R^d$. Therefore, we may even assume w.l.o.g.\ that $\hat{x} = x = 0$ and $\hat{y} = y = r e_1$, where $r = \hat{r} $ and $e_1, \dots, e_d$ denotes the canonical basis of $\R^d$. Then
\begin{eqnarray}\label{eq:65a}
X' \ =\ \sqrt{h} Z, \qquad Y_\text{refl}'& =& re_1 + \sqrt{h} (I_d - 2e_1e_1^T)Z, \qquad\text{and}\\
\label{eq:200} \nicefrac{\phi_{\hat{y},hI} (X')}{\phi_{\hat{x}, hI} (X')} &=& \nicefrac{\phi_{r,h} (X_1')}{\phi_{0, h} (X_1')} \,,
\end{eqnarray}
{where $X_i' = e_i^TX'$.} Thus, by \eqref{eq:0c}, $Y_i' = X_i'$ for $i \ge 2$, and
\begin{align}
\label{eq:201} Y_1' &= \begin{cases} X_1' & \text{ if } U\le {\phi_{r,h} (X_1') }/{\phi_{0,h} (X_1')}, \\ r -X_1' &\text{ otherwise.}\end{cases}
\end{align}
In particular,
$ R' = \lvert X'-Y'\rvert = \lvert (X'-Y')\cdot e_1\rvert = \lvert X_1' - Y_1'\rvert $.
Since this is distributed as in the one-dimensional case, we may assume w.l.o.g.\ $d=1$.
We are now left with a simple one-dimensional problem where $x = 0$, $y = r$, and $\hat{r} = r = \lvert x-y \rvert$. The coupling is given by
\begin{eqnarray}
\label{eq:201b} X'&=&\sqrt hZ,\qquad Y' \ =\ \begin{cases} X' & \text{ if } U\le {\phi_{r,h} (X') }/{\phi_{0,h} (X')}, \\ r -X' &\text{ otherwise,}\end{cases}
\end{eqnarray}
where $Z \sim N(0,1)$ and $U \sim \Unif (0,1)$ are independent.
Hence $X'\sim N(0,h)$, the conditional probability given $Z$ that $Y' = X'$ is $\min (1,{\phi_{r,h}}(X')/{\phi_{0,h} (X')})$, and if $Y' \neq X'$, then $R' = \lvert X' -Y' \rvert = \lvert r - 2X' \rvert$. Since $ \phi_{r,h}(t)\le\phi_{0,h}(t)$ if and only if $t\le \nicefrac{r}{2}$, we obtain
\begin{eqnarray*}
\mathbb{E}_{x,y} [R'] &=& \int_{-\infty}^{\infty } |r-2t |\, (1-{\phi_{r,h}(t)}/{\phi_{0,h} (t)})^+\, \phi_{0,h} (t) \, d t \\
&=& \int_{-\infty}^{{r}/{2}} (r-2t ) ({\phi_{0,h}(t)}-{\phi_{r,h} (t)}) \, d t \\
&=& \frac 12\int_{-\infty}^{\infty} (r-2t ) ({\phi_{0,h}(t)}-{\phi_{r,h} (t)}) \, d t
\ =\ r.
\end{eqnarray*}
Here we have used in the third step that the integrand is symmetric w.r.t.\ $t=r/2$, i.e., invariant under the
transformation $t\mapsto r-t$.
Thus $\hat\beta (x,y)=\mathbb{E}_{x,y} [R'-r]=0$, which proves Assertion (i).
Next,
we are going to prove the lower bound for $\hat{\alpha} (x,y)$. Recall from \eqref{eqchoicer0}
and \eqref{eq:int} that $I_r = (0, r+ \sqrt{h})$ for $r < \sqrt{h}$ and $I_r = (r -\sqrt{h}, r)$ for $r \ge \sqrt{h}$. We first consider the case $r \ge \sqrt{h}$.
Similarly as above, we obtain
\begin{eqnarray}
\nonumber
\hat{\alpha} (x,y) &\ge & \mathbb{E}_{0,r}[(R'-r)^2 ; R' \in I_r] \\
\nonumber &\ge &\int_{-\infty}^{{r}/{2}} (r-2t -r)^2 \1_{ I_r}(r-2t)\, ({\phi_{0,h}(t)}-{\phi_{r,h} (t)})\, d t \\
\label{eq:204}&= &4 \int_{0}^{{\sqrt{h}}/{2}} t^2\, (1-e^{\nicefrac{(rt - {r^2}/{2})}{h}})\, \phi_{0,h} (t) \, d t \\
\nonumber &=& 4h \int_0^{{1}/{2}} u^2\, (1-e^{\frac{r}{\sqrt{h}} (u-\frac{r}{2\sqrt{h}})})\, \phi_{0,1}(u) \,d u \\
\nonumber &\ge & 4h \int_0^{{1}/{2}}u^2\, (1-e^{u-1/2})\, \phi_{0,1} (u) \,d u.
\end{eqnarray}
Here we have used in the last step that $s \mapsto s (u-{s}/{2})$ is decreasing for $s\ge u$, and $r / \sqrt{h} \ge 1\ge u$ for $u\in [0,1/2]$.
Note that in the second step we only use the reflection behaviour of the coupling. This is due to the fact that the contribution from jumping to the same point would be of negligible order in $h$.
Now assume $r < \sqrt{h}$. Then $r-2t \in I_r$ if and only if $t \in (-\frac{\sqrt{h}}{2},\frac{r}{2})$. Thus,
\begin{eqnarray}
\nonumber
\hat{\alpha} (x,y) &\ge &\int_{-{\sqrt{h}}/{2}}^0 (r-2t-r)^2\, ({\phi_{0,h}(t)} -\phi_{r,h} (t)) \,d t \\
\label{eq:205}&= &4h \int_{-{1}/{2}}^0 u^2\, (1-e^{\frac{r}{\sqrt{h}} (u-\frac{r}{2\sqrt{h}})})\, \phi_{0,1}(u) \,d u \\
\nonumber &\ge &4(1-e^{-1} )\, h \frac{r}{\sqrt{h}} \int_0^{{1}/{2}} u^3\, \phi_{0,1} (u) \,d u.
\end{eqnarray}
Here, we have used in the last step that for $r < \sqrt{h}$ and $u \in [-\nicefrac{1}{2},0]$, we have $s \coloneqq \frac{r}{\sqrt{h}} (u - \frac{r}{2\sqrt{h}}) \in [-1,0]$ and hence
$
e^s - 1\le (1-e^{-1}) s
$.
By combining \eqref{eq:204} and \eqref{eq:205}, we obtain
$
\hat{\alpha} (x,y) \ge c_0 \min (r,\sqrt{h}) \sqrt{h}
$,
where
\[
c_0 = 4\min \left(\int_0^{{1}/{2}} u^2 (1-e^{u-1/2}) \phi_{0,1} (u) \,d u ,\, (1-e^{-1}) \int_0^{{1}/{2}} u^3 \phi_{0,1} (u)\,d u \right) \ge 0.007.
\]
This proves Assertion (ii).
Finally, for $X' \ge {r}/{2}$, we have $\phi_{r,h} (X') \ge \phi_{0,h} (X')$,
and hence $Y'=X'$. Thus,
\begin{eqnarray*}
\pi (x,y) &=& \mathbb{P}_{x,y} [R' = 0] \ =\ \mathbb{P}_{x,y} [X' = Y']\ \ge\ \mathbb{P}_{x,y} [X' \ge {r}/{2}] \\
&=&\int_{{r}/{2}}^\infty \phi_{0,h}(t) \,d t\ =\ \int_{\frac{r}{2\sqrt{h}}}^\infty \phi_{0,1} (t) \, d t\ \ge\ \int_1^\infty \phi_{0,1}(t) \,d t \
\ge\ 0.15
\end{eqnarray*}
provided $\hat{r} = r \le 2 \sqrt{h}$. Therefore, Assertion (iii) holds as well.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:6} \ref{enum:lem6iv}, \ref{enum:lem6v},
\ref{enum:lem6vi} and \ref{enum:lem6vii}] Note that unlike in the proof of assertions \ref{enum:lem6i}-\ref{enum:lem6iii}, here it is important to consider $\hat{r} \neq r$. Assertions \ref{enum:lem6iv} and \ref{enum:lem6v} are straightforward consequences of Assertion \ref{enum:lem6i}. Indeed, by \eqref{eq:beta} and Lemma \ref{lem:6}\ref{enum:lem6i},
\begin{equation}
\label{eq:406} \beta(x,y)\ =\ \hat{\beta} (x,y) + \hat{r}-r \ =\ \hat{r}-r.
\end{equation}
Assuming \ref{enum:c1} and \ref{enum:c3}, this implies \ref{enum:lem6iv}, because
\begin{eqnarray}
\label{eq:407a} |\hat{r}-r|& \le& \lvert (\hat{x} - \hat{y})-(x-y) \rvert \ =\ \lvert h(b(x) - b(y))\rvert\ \le\ hL r, \quad\text{and}
\\
\label{eq:407} \hat{r} &=& \sqrt{\lvert x-y \rvert^2 + 2h(x-y)\cdot (b(x) - b(y)) + h^2 \lvert b(x) - b(y) \rvert^2}\\
&\le &r \sqrt{1+2hJ + h^2 L^2 }\ \le\ r(1+hJ+\nicefrac{h^2L^2}{2}),\nonumber
\end{eqnarray}
where we use $\sqrt{1+x} \leq 1 + x/2$ for $x \ge -1$. Similarly, assuming \ref{enum:c2} and \ref{enum:c3}, \eqref{eq:406} implies \ref{enum:lem6v}, since $K\le L$ and thus $-2hK + h^2L^2 \geq - 1$ and
\begin{equation}
\label{eq:e1}\hat{r}\ \le\ r \sqrt{1-2hK + h^2L^2}\ \le\ r \left(1-hK + \nicefrac{h^2L^2}{2}\right)\text{\quad for }r \ge \mathcal{R}.
\end{equation}
In order to prove \ref{enum:lem6vi} we assume $\sqrt h\le r\le 1/(4L\sqrt h)$. Then by
\eqref{eq:407a}, $ |\hat{r}-r|\le \sqrt h/4$. Therefore, by a similar computation as in \eqref{eq:204},
\begin{eqnarray*}
\alpha (x,y) &=& \mathbb{E}_{0,r}[(R'-r)^2 ; R' \in (r-\sqrt h,r)]\
\ge \ \frac{h}{16}\, \mathbb{P}_{0,r}[ R' \in (r-\sqrt h,r-\frac{\sqrt h}4)]\\
&\ge & \frac{h}{16}\, \mathbb{P}_{0,r}[ R' \in (\hat r-\frac{3\sqrt h}4,\hat r-\frac{\sqrt h}2)]\
\ge \ \frac{h}{16}\, \int_{{\sqrt{h}}/{4}}^{{3\sqrt{h}}/{8}} ({\phi_{0,h}(t)} -\phi_{\hat{r},h} (t)) \,d t \\
&= &\frac h{16} \int_{{1}/{4}}^{3/8} (1-e^{\frac{\hat{r}}{\sqrt{h}} (u-\frac{\hat{r}}{2\sqrt{h}})})\, \phi_{0,1}(u) \,d u \
\ge \ \frac h{16}\int_{{1}/{4}}^{3/8} (1-e^{ u-\frac{1}{2}})\, \phi_{0,1}(u) \,d u.
\end{eqnarray*}
This shows that \ref{enum:lem6vi} holds with $\widetilde c_0:=\frac{1}{16}\int_{1/4}^{3/8}(1-e^{u-1/2})\phi_{0,1}(u)\, du\ge 0.0005$.
\\
Finally, Assertion \ref{enum:lem6vii} is a direct consequence of Assertion \ref{enum:lem6iii}, since by
\eqref{eq:407a}, $|\hat r-r|\le \sqrt h$ if $r\le\sqrt h$ and $h\le 1/L$.
\end{proof}
The following proof of Theorem \ref{thm:7} follows the argumentation in the proofs of Theorems \ref{thm:1} and \ref{thm:3} in the case $r\le\mathcal R$. For $r>\mathcal R$, the contractivity is shown by a direct argument based on Lemma \ref{lem:6} (v).
\begin{proof}[Proof of Theorem \ref{thm:7}]
Let $x,y \in \R^d$, $h\in (0,h_0]$ and $a\in \{ 0\}\cup [\sqrt h,\infty )$.
\emph{(i)}. We first consider the case where $r = \lvert x-y\rvert > \mathcal R$. By the choice of $h_0$ in the statement of the theorem, $h \le \nicefrac{K}{L^2}$. Therefore, by Lemma \ref{lem:6},
\[
\mathbb{E}_{x,y} [R'-r]\ =\ \beta(x,y) \ \le\ -\left(K-\nicefrac{L^2h}{2}\right)hr\ \le\ -\nicefrac{Khr}{2}.
\]
Since $f_a$ is concave with $f'_a \ge \nicefrac{1}{2}$, we immediately obtain
\begin{equation}\label{eq:a130}
\mathbb{E}_{x,y} [f_a(R') -f_a(r) ]\ \le\ f'_a(r) \mathbb{E}_{x,y} [R'-r]\ \le\ -\nicefrac{Khr}{4} ,
\end{equation}
and hence, as $f_a(r)\le r$ and $r>\mathcal R$,
\begin{equation}\label{eq:a130b}
\mathbb{E}_{x,y} [ \rho_a(X',Y') - \rho_a(x,y) ] \ \le\ \frac{-Khr/4}{a+f_a(r)}\rho_a(x,y)\ \le\ \frac{-Kh/4}{1+a/\mathcal R}\rho_a(x,y).
\end{equation}
\emph{(ii)}. Now suppose $r \le \mathcal R$. Since $\hat{r} \le r$ by \eqref{eq:30*}, we have
\begin{equation}
\label{eq:a13} \mathbb{E}_{x,y}[\mathbb{R}ho_a(X',Y') - \mathbb{R}ho_a(x,y) ]\ \le\ \mathbb{E}_{x,y} [\mathbb{R}ho_a(X',Y') - \mathbb{R}ho_a(\hat x,\hat y)] \,.
\end{equation}
We can now apply the arguments in the proofs of Theorems \mathbb{R}ef{thm:1} and \mathbb{R}ef{thm:3} with $\alpha$ and $\beta$ replaced by the corresponding quantities $\hat{\alpha}$ and $\hat{\beta}$ for the coupled random walk transition $(\hat{x}, \hat{y}) \mapsto (X',Y')$, with $r_2$ and $r_1$ replaced by $\mathcal R$. Indeed,
note that since the case of $r>\mathcal R$ has already been considered above, we only need to use the parts of the proofs of Theorems \mathbb{R}ef{thm:1} and \mathbb{R}ef{thm:3} concerned with the case of $r \leq \mathcal{R}$ and thus Assumptions \mathbb{R}ef{enum:a3} and \mathbb{R}ef{enum:b3} are not required.
We consider first $a=0$. In this case, we can proceed as in the proof of Theorem \mathbb{R}ef{thm:3} with $r_0=\sqrt h$. By Lemma \mathbb{R}ef{lem:6}, we can choose
$\underline{\alpha} (\hat{r}) = c_0 \min (\hat{r}, \sqrt{h}) \sqrt{h}$,
$\overline{\beta} \equiv 0$, $ \overline{\gamma} \equiv 0$, $ \widetilde{\gamma} \equiv 0$,
$ \mathop{\mathrm{Var}}\nolimitsphi \equiv 1$,
\begin{equation}\label{eq:7**f}
\mathbb{P}hi (u)=u,\quad g_0(u) = 1-2c \int_0^u \sup_{\hat{I}_s} \frac{\mathbb{P}hi}{\underline{\alpha}}\, ds,\quad\text{and}\quad
c = \frac{1}{4} \left(\int_0^{\mathcal{R}} \sup_{\hat{I}_s} \frac{\mathbb{P}hi}{\underline{\alpha}} \dd s \mathbb{R}ight)^{-1}
\end{equation}
in order to satisfy \eqref{eq:8**i}, \eqref{eq:8**ii}, \eqref{eq:8**iii}, \eqref{eq:8c}, \eqref{eq:8g} and \eqref{eq:8f}. Here $\hat I_s$ is defined by \eqref{eq:80}. With these choices we obtain as in the proof of Theorem \mathbb{R}ef{thm:3}
\begin{equation}
\label{eq:a14} \mathbb{E}_{x,y}[f_0(R')-f_0(\hat{r}) ] \le -c f_0(\hat{r}) \qquad\text{for } \hat{r} \le \mathcal R,
\end{equation}
where $f_0$ is defined by \eqref{eq:31}. Noting that $\hat{r} \le r$ by \eqref{eq:30*},
the bounds in \eqref{eq:a13} and \eqref{eq:a14} now imply that for $r \le \mathcal R$,
\begin{equation}
\label{eq:a15} \mathbb{E}_{x,y} [\mathbb{R}ho_0(X',Y')]=
\mathbb{E}_{x,y}[f_0(R')] \le (1-c) f_0(\hat{r}) \le (1-c) f_0(r) = (1-c)\mathbb{R}ho_0(x,y).
\end{equation}
It only remains to show $c\ge c_1(0)h$.
Suppose first that $s < 2\sqrt{h}=2r_0 $. Then $\hat{I}_s \subseteq (0,3\sqrt{h})$. Since $\Phi(u) =u$ and $\underline{\alpha} (u) = c_0 \min (u,\sqrt{h}) \sqrt{h} \ge c_0 u \sqrt{h}/3$ for $u < 3 \sqrt{h}$, we obtain
\begin{equation}
\label{eq:a16} \sup_{\hat{I}_s} \frac{\Phi}{\underline{\alpha}}\ \le\ \sup_{u < 3 \sqrt{h}} \frac{u}{\underline{\alpha} (u)} \ \le\ 3 c_0^{-1} h^{-\nicefrac{1}{2}}\tforall s < 2 \sqrt{h}.
\end{equation}
For $s \ge 2\sqrt{h}$, $\hat{I}_s = (s,s+\sqrt{h})$. Hence $\underline{\alpha} \equiv c_0 h$ on $\hat{I}_s$, and
\begin{equation}
\label{eq:a17}\sup_{\hat{I}_s} \frac{\Phi}{\underline{\alpha}} = c_0^{-1} h^{-1} (s+\sqrt{h}) \tforall s \ge 2 \sqrt{h}.
\end{equation}
By \eqref{eq:7**f}, \eqref{eq:a16}, \eqref{eq:a17} we see that
\begin{equation}
\label{eq:a18} c^{-1}
\le 24 c_0^{-1} + 2c_0^{-1} h^{-1} \mathcal{R}^2 + 4c_0^{-1}h^{-\nicefrac{1}{2}} \mathcal{R} = 2c_0^{-1} h^{-1} (\mathcal{R}^2 + 2h^{\nicefrac{1}{2}} \mathcal{R} + 12 h).
\end{equation}
The assertion for $a=0$ now follows by \eqref{eq:a130}, \eqref{eq:a15} and \eqref{eq:a18}.
Now consider the case $a\ge \sqrt h$. Here we can proceed as in the proof of Theorem \ref{thm:1} with $r_0=\varepsilon =\sqrt h$. We now choose the intervals $I_r$ and the dual intervals $\hat I_s$ according to \eqref{eq:501} and \eqref{eq:502}, i.e., $I_r=((r-\sqrt h)^+,r)$ and $\hat I_s=(\max (s,\sqrt h),s+\sqrt h)$. By Lemma \ref{lem:6}, we can choose
$\underline{\alpha} $,
$\overline{\beta} $, $ \overline{\gamma} $, $ \widetilde{\gamma} $,
$ \varphi$ and $\Phi $ as above so that conditions \eqref{eq:5stara}, \eqref{eq:5star}, \eqref{eq:503}, \eqref{eq:50}, \eqref{eq:5a}, \eqref{eq:5c}, \eqref{eq:5e}, \eqref{eq:5f} and \eqref{eq:5g} are satisfied. In particular, choosing $a\ge\sqrt h =r_0$ guarantees that
\eqref{eq:5c} is satisfied since $\beta(x,y) \leq 0$ for all $x$, $y \in \mathbb{R}^d$.
Note that for $u\in \hat I_s$ we have $\underline{\alpha} (u)\ge c_0h$, because $u\ge \sqrt h$. Setting
\begin{equation}\label{eq:7**f2}
g_a(u) = 1-2c \int_0^u \sup_{\hat{I}_s} \frac{a+\Phi}{\underline{\alpha}}\, ds,\quad
c = \min\left( \frac{p_0}{2}\, ,\,\frac{1}{4} \left(\int_0^{\mathcal{R}} \sup_{\hat{I}_s} \frac{a+\Phi}{\underline{\alpha}} \, ds \right)^{-1} \right),
\end{equation}
we obtain
\begin{equation}
\label{eq:a15a} \mathbb{E}_{x,y} [\rho_a(X',Y')]\ \le\ (1-c) \rho_a(\hat x,\hat y)\ \le\ (1-c) \rho_a(x,y),
\end{equation}
where $\rho_a$ is defined by \eqref{eq:31}. The bound $c \geq c_1(a)$ follows as in (\ref{eq:a17}) and (\ref{eq:a18}).
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:8a}]
In order to apply Theorem \ref{thm:1}, we set $\varepsilon = r_0 = \sqrt{h}$, and hence $I_r = ((r-\sqrt{h})^{+},r)$ and $\hat{I}_s = (s \vee \sqrt{h}, s + \sqrt{h})$ for all $r$, $s > 0$.
By Lemma \ref{lem:6}, condition \eqref{eq:11} is satisfied for $h\le h_0$ with
\begin{equation}\label{eq:alphapib}
\underline{\alpha} (r) = \widetilde c_0 h\, \1_{\sqrt h\le r\le 1/(4L\sqrt h)}, \quad \underline{\pi} (r)=p_0\,\1_{r\le\sqrt h} , \quad \text{and}
\end{equation}
\begin{equation}\label{eq:betab}
\overline{\beta} (r) = \begin{cases} \Lambda h r &\text{for } r < \mathcal{R}, \\ -Khr/2 &\text{for } r \geq \mathcal{R}. \end{cases}
\end{equation}
Here we have used that by the assumptions, $h_0L\le 1$ and $h_0L^2\le K$.
Moreover, the assumption on $h_0$ implies $1/(4L\sqrt h_0)\ge\mathcal R$ since $r_2 > \mathcal{R}$. Hence by \eqref{eq:5stara},
$$
\overline{\gamma}(r) = 2\overline{\beta} (r)/\underline{\alpha} (r) =2\widetilde c_0^{-1}\Lambda r\qquad
\text{for }\sqrt h\le r <\mathcal{R}, $$
$\overline{\gamma}(r)\le 0$ for $r\ge\mathcal R$, and thus
\eqref{eq:5star}, \eqref{eq:choice r1} and \eqref{eq:5a} are satisfied with
\begin{eqnarray}\nonumber
\widetilde{\gamma}(r)& =& 2\widetilde c_0^{-1} \Lambda (r + \sqrt{h})\, \1_{r<\mathcal R},\quad r_1=\mathcal R,\quad\text{ and}\\
\label{eq:EulerGeneralThm1DefVarphi}
\varphi(r) &=& \exp\left( -\widetilde c_0^{-1}\Lambda \left( (r \wedge \mathcal{R})^2 + 2 \sqrt{h} (r \wedge \mathcal{R}) \right)\right) \,.
\end{eqnarray}
For $a\ge 2\sqrt h$, condition \eqref{eq:5c} is satisfied by \eqref{eq:alphapib},
\eqref{eq:betab}, and since by assumption, $ \sqrt{h} + 2 \Lambda h^{3/2} / p_0\le 2\sqrt h\le a$ for $h\le h_0$.
In order to verify \eqref{eq:5d} we need to choose $r_2 \geq r_1 = \mathcal{R}$ such that
\begin{equation}\label{eq:proofEulerLyapunov1}
2 \int_{\mathcal{R}}^{r_2} \frac{\Phi(s)}{\underline{\alpha}(s)} ds \geq \frac{a + \Phi(r)}{- \overline{\beta}(r)} \qquad\text{for all } r \geq r_2 \,.
\end{equation}
To this end, note that for $r \geq \mathcal{R}$, we have $ \Phi(r) = \Phi(\mathcal{R}) + (r - \mathcal{R})\varphi(\mathcal{R})$. Furthermore, since $r_2 \leq 1/(4L\sqrt{h})$ by assumption, on $[\mathcal{R}, r_2]$ we can use the formula for $\underline{\alpha}$ given in \eqref{eq:alphapib}. Hence \eqref{eq:proofEulerLyapunov1} is satisfied if
\begin{equation}\label{eq:proofEulerLyapunov2}
\frac{2\Phi(\mathcal{R})}{\widetilde c_0} (r_2 - \mathcal{R}) + \frac{\varphi(\mathcal{R})(r_2 - \mathcal{R})^2}{\widetilde c_0} \geq \frac{a + \Phi (\mathcal R) +r\varphi (\mathcal R) }{Kr/2} \quad\text{ for } r \geq r_2 .
\end{equation}
Since we assume that $a \leq \Phi (\mathcal R)$, this condition holds if we choose
\begin{equation}\label{eq:EulerGeneralThm1Defr2}
r_2 \ =\ \mathcal{R}+\sqrt{2\widetilde c_0/K} \,.
\end{equation}
Hence from Theorem \ref{thm:1} we obtain $\mathbb{E}_{x,y}[\rho_a(X',Y')] \leq (1-c)\rho_a(x,y)$ with $c$ given by \eqref{eq:5f}, for $\rho_a(x,y) = \1_{x \neq y} + f_a(|x-y|)$, where
\begin{equation}\label{eq:EulerGeneralThm1Deffa}
f_a(r) = \int_0^r \varphi(s \wedge r_2) g_a(s \wedge r_2) ds
\end{equation}
with $\varphi$ given by \eqref{eq:EulerGeneralThm1DefVarphi} and $g_a = g$ given by \eqref{eq:5g}.
Moreover, we can easily bound the second quantity appearing in the definition \eqref{eq:5f} of $c$. Indeed, for $s<r_2$ and $u\in\hat I_s$ we have $\sqrt h<u<r_2+\sqrt h\le 1/(4L\sqrt h)$ for $h<h_0$. Therefore, $\underline{\alpha} (u)\ge \widetilde c_0h$ by \eqref{eq:alphapib}.
Since $\varphi (s)\ge \varphi (\mathcal R )$ and $\Phi (u)\le u$, we obtain
\begin{equation*}
\int_0^{r_2} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a + \Phi(u)}{\underline{\alpha} (u)} ds \
\leq\ \frac{1}{\widetilde c_0 h \varphi(\mathcal{R})} \left( a r_2 + \int_0^{r_2} (s + \sqrt{h}) ds \right),
\end{equation*}
and hence
\begin{equation*}
c \geq \min \left( \frac{1}{2}p_0 , \frac{\widetilde c_0 h \varphi(\mathcal{R})}{4r_2(a + \sqrt{h}) + 2r_2^2} \right) \,.
\end{equation*}
This implies the assertion, since by \eqref{eq:EulerGeneralThm1Defr2},
$$r_2^2+2r_2(a+\sqrt h)\le 2\max (\mathcal R^2+2(a+\sqrt h)\mathcal R\, ,\, 2\widetilde c_0K^{-1}+2(a+\sqrt h )\sqrt{2\widetilde c_0/K}).$$
\end{proof}
In the following variation of Theorem \ref{thm:8a}, Condition \ref{enum:c2} is replaced by a Lyapunov condition:
\begin{thm}[Euler scheme, general case with Lyapunov condition]\label{thm:8b}
\ \\Suppose that Conditions \ref{enum:c1} and \ref{enum:c3} are satisfied and that the transition kernel $p$ of the Euler scheme satisfies Assumption \ref{enum:a4i} with a Lyapunov function $V$, i.e., there exist constants $C$, $\lambda > 0$ such that $pV \leq (1- \lambda)V + C$. Moreover, assume that $\lim_{r \to \infty} \frac{V(x) + V(y)}{r} = \infty$. Let $h_0 = \min \left( \left( \frac{2L}{p_0} + \frac{\widetilde c_0 \varphi(r_1)}{4(r_1 + 1)} \right)^{-2}, (16L^2r_2^2)^{-1} \right)$, where $r_1$, $r_2 > 0$ are constants specified in \eqref{eq:proofEulerLyapunovDefr1} and \eqref{eq:proofEulerLyapunovDefr2}. Suppose further that $a \in (2\sqrt{h}, r_2)$ and let $\rho_a(x,y) = (a + \frac{M}{2C}(V(x) + V(y))) \1_{x \neq y} + f_a(|x-y|)$ with $M$ given by \eqref{eq:EulerLyapunovDefM} and $f_a$ defined in \eqref{eq:proofEulerLyapunovDeffa}. Let
\begin{equation*}
c_2(a) = \frac{1}{4} \min \left( \frac{2p_0}{h} , \frac{\lambda}{h} , 4\varphi(r_1) \Lambda, \frac{\widetilde c_0 \varphi(r_2)}{2r_2(a + \sqrt{h}) + r_2^2} \right)
\end{equation*}
with $\varphi$ given by \eqref{eq:proofEulerLyapunovDefVarphi}.
Then for all $h \in (0, h_0)$ we have
\begin{equation*}
\mathbb{E}_{x,y}[\rho_a (X',Y')]\le\left(1-c_2(a)h\right) \rho_a (x,y)\quad
\mbox{for all }x,y\in\mathbb{R}^d.
\end{equation*}
\end{thm}
\begin{example}
It is easy to see that if the drift $b$ satisfies a linear growth condition $|b(x)|^2 \leq L_0(1 + |x|^2)$ for all $x \in \mathbb{R}^d$ with a constant $L_0 > 0$ (which is implied by \ref{enum:c3} with $L_0 = 2 \max (L^2, |b(0)|^2)$) and a dissipativity condition
\begin{equation}\label{eq:dissipativity}
\langle b(x) , x \rangle \leq M_1 - M_2 |x|^2 \text{ for all } x \in \mathbb{R}^d
\end{equation}
with constants $M_1$, $M_2 > 0$, then the transition kernel $p$ of the Euler scheme satisfies the Lyapunov condition $pV \leq (1-\lambda)V + C$ with the Lyapunov function $V(x) = |x|^2$ and constants $\lambda = 2hM_2 - h^2 L_0$ and $C = h^2 L_0 + 2hM_1 + hd$, whenever $h < 2M_2/L_0$. Since the quadratic function satisfies the growth condition required in Theorem \ref{thm:8b} and the dissipativity condition \eqref{eq:dissipativity} is significantly weaker than Assumption \ref{enum:c2}, we can apply this result to more general cases than the ones covered by Theorems \ref{thm:8a} and \ref{thm:8}.
\end{example}
\begin{proof}[Proof of Theorem \ref{thm:8b}]
Here we want to apply Theorem \ref{thm:2} and hence we need to verify the conditions listed in Subsection \ref{subsec:choiceOfMetricThm2}. Exactly as in the proof of Theorem \ref{thm:8a}, we choose $\varepsilon = r_0 = \sqrt{h}$ and we have the intervals $I_r = ((r-\sqrt{h})^{+},r)$ and $\hat{I}_s = (s \vee \sqrt{h}, s + \sqrt{h})$ for all $r$, $s > 0$. By Lemma \ref{lem:3} we get
\begin{equation*}
\underline{\alpha} (r) = \widetilde c_0 h, \qquad \overline{\beta} (r) = \Lambda h r, \qquad \text{and} \qquad \overline{\gamma}(r) = \frac{2}{\widetilde c_0}\Lambda r .
\end{equation*}
Similarly as in the previous proof, the formula for $\underline{\alpha}(r)$ is valid for all $r \in (\sqrt{h}, r_2)$ since $h_0 \leq (16L^2r_2^2)^{-1}$, although here $r_2$ is given by \eqref{eq:proofEulerLyapunovDefr2}. Moreover, we have
\begin{equation}\label{eq:proofEulerLyapunovDefVarphi}
\widetilde{\gamma}(s) = \frac{2}{\widetilde c_0} \Lambda (s + \sqrt{h}), \qquad \text{and} \qquad \varphi(r) = \exp\left( - \frac{\Lambda}{\widetilde c_0}\left( r^2 + 2 \sqrt{h} r \right) \right) \,.
\end{equation}
Now we choose $r_1$ as in \eqref{eq:6r1}, based on Assumption \ref{enum:a4i}. Namely,
\begin{equation}\label{eq:proofEulerLyapunovDefr1}
r_1 := \sup \left\{ |x-y| = r : x, y \in \mathbb{R}^d \,, V(x) + V(y) < 4C/\lambda \right\} \,.
\end{equation}
In order for \eqref{eq:6b} to be satisfied, it is sufficient to choose $M$ such that
\begin{equation*}
M \leq \frac{1}{4} \left( \int_0^{r_1} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{1}{\widetilde c_0 h} ds \right)^{-1} = \frac{h \widetilde c_0}{4} \left( \int_0^{r_1} \frac{1}{\varphi(s)} ds \right)^{-1} \,.
\end{equation*}
Note, however, that $\varphi(s) \geq \varphi(r_1)$ for all $s > 0$ and hence
\begin{equation*}
\frac{h \widetilde c_0}{4} \left( \int_0^{r_1} \frac{1}{\varphi(s)} ds \right)^{-1} \geq \frac{h \widetilde c_0}{4} \left( \int_0^{r_1} \frac{1}{\varphi(r_1)} ds \right)^{-1} = \frac{h \widetilde c_0}{4} \frac{\varphi(r_1)}{r_1} \geq \frac{h \widetilde c_0 \varphi(r_1)}{4(r_1 + 1)} \,.
\end{equation*}
Thus if we choose
\begin{equation}\label{eq:EulerLyapunovDefM}
M = \frac{h \widetilde c_0 \varphi(r_1)}{4(r_1 + 1)} \,,
\end{equation}
then condition \eqref{eq:6b} is indeed satisfied. Note that $r_1 + 1$ is chosen here instead of $r_1$ in order to prevent the value of $M$ from being too large when $r_1$ is very small (or even zero). Now condition \eqref{eq:6c} reads as
\begin{equation}\label{eq:proofEulerLyapunov3}
a \geq \sqrt{h} + \frac{2}{p_0} \left( \Lambda h^{3/2} + \frac{h \widetilde c_0 \varphi(r_1)}{4(r_1 + 1)} \right) \,.
\end{equation}
However, since we choose $h \leq h_0 \leq \left( \frac{2L}{p_0} + \frac{\widetilde c_0 \varphi(r_1)}{4(r_1 + 1)} \right)^{-2}$, we see that \eqref{eq:proofEulerLyapunov3} holds for all $a \geq 2 \sqrt{h}$. It remains to verify condition \eqref{eq:6d}, for which we need
\begin{equation*}
V(x) + V(y) \geq \frac{16C}{\lambda M} \varphi(r) \Lambda h r
\end{equation*}
to hold for all $r \geq r_2$. Since $\varphi$ is decreasing, using the choice of $M$ in \eqref{eq:EulerLyapunovDefM}, we see that it is sufficient to have
\begin{equation}\label{eq:proofEulerLyapunov4}
V(x) + V(y) \geq \frac{16C}{\lambda} \frac{4(r_1 + 1)}{\widetilde c_0} \Lambda r \text{ for all } r \geq r_2 \,.
\end{equation}
Since we assume that $\limsup_{r \to \infty} \frac{V(x) + V(y)}{r} = \infty$, we can indeed choose $r_2$ large enough so that \eqref{eq:proofEulerLyapunov4} and hence \eqref{eq:6d} holds. More precisely, we can choose
\begin{equation}\label{eq:proofEulerLyapunovDefr2}
r_2 :=\sup \left\{ |x-y| = r : x, y \in \mathbb{R}^d \,, \frac{V(x) + V(y)}{r} < \frac{64C(r_1 + 1)\Lambda}{\lambda \widetilde c_0} \right\} \,.
\end{equation}
As a consequence, from Theorem \ref{thm:2} we get $\mathbb{E}_{x,y}[\rho_a(X',Y')] \leq (1-c)\rho_a(x,y)$ with $\rho_a(x,y) = (a + \frac{M}{2C}(V(x) + V(y)))\1_{x \neq y} + f_a(|x-y|)$, where
$c$ is given by \eqref{eq:6f} and
\begin{equation}\label{eq:proofEulerLyapunovDeffa}
f_a(r) = \int_0^r \varphi(s \wedge r_2) g(s \wedge r_2) ds
\end{equation}
with $\varphi$ given by \eqref{eq:proofEulerLyapunovDefVarphi} and $g$ given by \eqref{eq:6g}.
Now it only remains to prove the lower bound on the constant $c$. Similarly as in the proof of Theorem \ref{thm:8a}, we have
\begin{equation*}
\frac{1}{8} \left( \int_0^{r_2} \frac{1}{\varphi(s)} \sup_{u \in \hat{I}_s} \frac{a + \Phi(u)}{\underline{\alpha} (u)} ds \right)^{-1} \geq \frac{\widetilde c_0 h \varphi(r_2)}{8r_2(a + \sqrt{h}) + 4r_2^2} \,,
\end{equation*}
since $\varphi(s) \geq \varphi(r_2)$ for $s \leq r_2$. Moreover, due to our choice of $M$ in \eqref{eq:EulerLyapunovDefM}, using $\Phi(r) \leq r$ and \eqref{eq:proofEulerLyapunov4}, we have
\begin{equation*}
\frac{\lambda M}{16 C} \frac{V(x) + V(y)}{\Phi(r)} \geq \varphi(r_1) h \Lambda \text{ for all } r \geq r_2 \,.
\end{equation*}
This finishes the proof.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:8}]
Let $x,y \in \R^d$ and set $r = \lvert x-y \rvert$ and $\hat{r} = \lvert \hat{x} - \hat{y} \rvert$. We assume $h \in (0,h_0]$ where $h_0$ is given by
\eqref{eq:h0general}.
We consider at first the case
where $r\ge\mathcal R$. By the choice of $h_0$, we have $L^2h\le K$ for $h\le h_0$. Therefore, for $r\ge \mathcal R$, the concavity of $f$ and Lemma \ref{lem:6} (v) imply
\begin{equation}
\label{eq:93} \mathbb{E}_{x,y} [f(R') - f(r)]\, \le\, \mathbb{E}_{x,y} [R' - r]f'(r)\, \le\, -\frac K2hr f'(r)
\, \le\, -\frac K2e^{-qr_1}hf(r).
\end{equation}
Here, we have used in the last step that $f(r)\le r$ and $f'(r)\ge \exp (-qr_1)$.
The assertion \eqref{eq:45} now follows by the choice of $r_1$ and $q$ in \eqref{eq:35} and \eqref{eq:37}.
From now on, we assume $r<\mathcal R$. Recall that
\begin{equation}\label{IrhatEuler}
I_{\hat{r}} = \begin{cases}
(0, \hat{r} + \sqrt{h}) & \text{ if } \hat{r} \leq \sqrt{h}, \\
(\hat{r} - \sqrt{h}, \hat{r}) & \text{ if } \hat{r} > \sqrt{h},
\end{cases}
\end{equation}
cf.\ \eqref{eq:int}, i.e., $u(\hat r)= \sqrt h \1_{\hat r<\sqrt h}$, $l(\hat r)= \sqrt h \1_{\hat r\ge \sqrt h}$. Since by Taylor's formula and by concavity of $f$,
\begin{eqnarray*}
f(R')-f(\hat r)&=&\int_{\hat r}^{R'}f'(s)\,ds\ =\ (R'-\hat r)f'(\hat r)\, +\, \int_{\hat r}^{R'}\int_{\hat r}^sf''(t)\, dt\, ds\\
&\le &(R'-\hat r)f'(\hat r)\, +\,\frac 12\left[ ((R'-\hat r)\wedge u(\hat r))\vee (-l(\hat r))\right]^2\, \sup_{I_{\hat r}}f'',
\end{eqnarray*}
we can conclude by Lemma \ref{lem:6} \ref{enum:lem6i} and \ref{enum:lem6ii} that
\begin{eqnarray}
\nonumber \mathbb{E}_{x,y} [f(R') - f(r)] &=& f(\hat{r} ) - f(r)\, +\, \mathbb{E}_{x,y} [f(R') - f(\hat{r})] \\
&\le &(\hat r -r)f'(r)\, +\, \frac{1}{2} c_0\min (\hat r\sqrt h,h) \sup_{I_{\hat{r}}} f''.\label{eq:9*}
\end{eqnarray}
We are going to show that the expression on the right hand side of \eqref{eq:9*}
is bounded from above by $-c_2hf(r)$. Note first that by \eqref{eq:407a} and \eqref{eq:407},
\begin{equation}
\label{eq:hatrr} \hat r-r \le \min (L,J+L^2h/2)\, hr\ =\ \Lambda hr.
\end{equation}
By the choice of $q$ and $h_0$ in \eqref{eq:37} and \eqref{eq:h0general}, $e^{qh_0L\mathcal R}\le e^{c_0/28}\le e^{1/28}\le 3/2$. Therefore,
\begin{eqnarray}
\nonumber f'(r)\ =\ e^{-qr}\ = \ e^{q(\hat r-r)}f'(\hat r)& \le & e^{qLhr}f'(\hat r)\ \le\ \frac 32f'(\hat r),\quad\text{and thus}\\
\label{eq:Q1} (\hat r-r)f'(r) &\le &\frac 32\Lambda h\hat rf'(\hat r).
\end{eqnarray}
Here we have used that $\hat{r} - r \leq \Lambda h \hat{r}$ by \eqref{eq:hatrr} if $r \leq \hat{r}$, whereas for $r > \hat{r}$ \eqref{eq:Q1} is automatically satisfied and hence it holds for all $r < \mathcal{R}$.
Furthermore, by \eqref{eq:h0general}, $hL\le h_0L\le 1/6$. Therefore,
$\hat r -r \le Lhr\le r/6$, and thus
\begin{equation}
\label{eq:Q2}
f(\hat r) \ \ge \ f\left(\frac 56r\right)\ \ge\ \frac 56 f(r),
\end{equation}
because $f$ is increasing and concave with $f(0)=0$. {Note that our choice of the bound $h_0L \leq 1/6$ is to some extent arbitrary and a different choice would lead to $5/6$ above being replaced by a different factor.} By \eqref{eq:9*}, \eqref{eq:Q1} and
\eqref{eq:Q2}, we see that the contractivity condition \eqref{eq:45} holds
provided
\begin{equation}
\label{Q0a}\frac 32\Lambda\, h\hat rf'(\hat r)\, +\, \frac{1}{2} c_0\min (\hat r\sqrt h,h) \sup_{I_{\hat{r}}} f''\ \le\ -\frac 65c_2hf(\hat r).
\end{equation}
Furthermore, by \eqref{eq:hatrr}, $\hat r\le (1+Lh)\mathcal R=r_1$.
Since
$f''(r)=-qe^{-qr}\1_{r\le r_1}$ is increasing, \eqref{IrhatEuler} implies
\begin{equation}\label{eq:sup}
\sup_{I_{\hat{r}}} f'' = \begin{cases}
-qe^{-q(\hat{r} + \sqrt{h})} & \text{ if } \hat{r} \leq \sqrt{h}, \\
-qe^{-q\hat{r}} & \text{ if } \hat{r} > \sqrt{h}.
\end{cases}
\end{equation}
We now consider these two cases separately:
\emph{(i) $\hat r>\sqrt h$}. Noting that $f'(\hat r)=e^{-q\hat r}$ and
$f(\hat r)=(1-e^{-q\hat r})/q\le 1/q$, we see that \eqref{Q0a} is satisfied in this case provided
\begin{equation}
\label{Q0b} 3\Lambda\, \hat r\, -\, c_0q\ \le\ -\frac {12c_2}{5q}e^{q\hat r}\qquad\text{for }\hat r< r_1.
\end{equation}
We have chosen $q$ in \eqref{eq:37} such that
$$c_0q\ =\ 7\Lambda\, \mathcal R\ \ge\ 6\Lambda\,r_1.$$
Therefore, the left hand side in \eqref{Q0b} is bounded from above by $-c_0q/2$, and thus \eqref{Q0b} and \eqref{Q0a} are satisfied if
\begin{equation}
\label{Q0c} c_2\ \le \ \frac 5{24}c_0q^2e^{-qr_1}.
\end{equation}
By \eqref{eq:35} and \eqref{eq:37},
we see that
the constant $c_2$ has been defined in \eqref{eq:c2} in such a way that \eqref{Q0c} holds true, and thus the assertion \eqref{eq:45} is indeed satisfied.
\emph{(ii) $\hat r\le\sqrt h$}. Noting that $f'(\hat r)\le 1$ and
$f(\hat{r}) \leq \hat{r}$, we see by \eqref{eq:sup} that \eqref{Q0a} is satisfied for $\hat r\le\sqrt h$ provided
\begin{equation}
\label{Q0d} 3\Lambda h\, +\, \frac {12}{5}c_2h\ \le\ c_0q\sqrt he^{-2q\sqrt h}.
\end{equation}
This condition holds if both
\begin{equation}
\label{Q0e} 2q\sqrt h\le 1/2\qquad\text{and}\qquad
3e^{1/2}(\Lambda +c_2) \sqrt h\le c_0q.
\end{equation}
It can now be easily verified that our choice of $h_0$ in \eqref{eq:h0general}
ensures that \eqref{Q0e} holds for $h\le h_0$. Indeed, since $q$ is given by \eqref{eq:37}, we have $2q\sqrt h= 14 c_0^{-1}\Lambda\mathcal R\sqrt h\le 1/2$. Moreover, since {$c_2\le 11c_0^{-1}\Lambda^2\mathcal R^2$}, we obtain
$$\frac{c_0q}{3e^{1/2}(\Lambda +c_2)}= \frac{7\Lambda\mathcal R}{3e^{1/2}(\Lambda +c_2)}
\ge \frac{7}{3e^{1/2}}\frac{\mathcal R}{1+11c_0^{-1}\Lambda\mathcal R^2}\ge
\frac{7}{6e^{1/2}}\min\left(\mathcal R,\frac{c_0}{22\Lambda\mathcal R}\right).$$
Hence \eqref{Q0a} holds true, and thus the assertion \eqref{eq:45} is satisfied in this case as well.
\end{proof}
\section{Proof of results for \MALA}\label{sec:proofof\MALA}
\begin{proof}[Proof of Theorem \ref{thm:10}]
By \eqref{eq:m6}
\begin{equation}
\label{eq:100} X'\ =\ x+ h\, b(x) + \sqrt{h - {h^2}/{4}}\, Z
\end{equation}
where $b(x) = - \frac{1}{2} \nabla U(x)$. By \eqref{eq:d1} and \eqref{eq:m2}, $b$ is Lipschitz continuous on $B_R^{-} \coloneqq \{ x \in \R^d : \| x \|_{-} \le R\}$.
Therefore conditions \ref{enum:c1} and \ref{enum:c3} in Section \ref{sec:euler} hold when we restrict to $B_R^-$. Moreover, by \eqref{eq:d2}, condition \ref{enum:c2} is satisfied as well for appropriate values of $K$ and $R$ depending on $K_c$ and $\mathcal{R}_c$.
Noting that $h<2$ implies $ h - \frac{h^2}{4} > \frac{h}{2}$, it is not difficult to see that the proof of Theorem \ref{thm:8} carries over to our slightly modified setup.
Therefore, similarly to Theorem \ref{thm:8}, we can find for any fixed $R \in (0,\infty)$ a concave strictly increasing function $f$ with $f(0)=0$ and constants $c_2 > 0$, $h_0 > 0$ such that for $h \in (0,h_0)$,
\begin{equation}
\label{eq:101} \mathbb{E}_{x,y} [f(\lvert X' - Y' \rvert)] \le (1-c_2 h) f(\lvert x-y\rvert), \tforall x,y \in B_{R}^{-} \,.
\end{equation}
We now want to apply the perturbation result in Theorem \ref{thm:4}. Setting $d(x,y) = \lvert x-y \rvert$ and $\rho(x,y) = f(\lvert x-y \rvert)$, we see that condition \eqref{eq:27} holds with $c = c_2 h$. Moreover, by Lemma \ref{lem:10b} below, there exists a constant $p > 0$ depending only on $R$, such that for $h_0$ sufficiently small and $h \in (0,h_0)$, condition \eqref{eq:29} is satisfied for any $x,y \in B_R^-$ with $r_0 = \sqrt{h}$. Thus, to apply Theorem \ref{thm:4}, it remains to show that \eqref{eq:28} holds with a constant $b \ge 0$ satisfying
\begin{equation}
\label{eq:102} b \le {c f(r_0)}/{4} = c_2 {h f(\sqrt{h})}/{4} \,.
\end{equation}
To this end notice that for $x,y \in B_R^{-}$,
\begin{equation}
\label{eq:103} \mathbb{E}_{x,y} [(\lvert \widetilde{X} - \widetilde{Y} \rvert - \lvert X' - Y' \rvert)^+] \le \mathbb{E}_{x,y} [\lvert \widetilde{X} - X' \rvert] + \mathbb{E}_{x,y} [ \lvert \widetilde{Y} - Y' \rvert] \,.
\end{equation}
Furthermore, since $\widetilde{X} = X'$ if the proposal is accepted and $\widetilde{X} = x$ otherwise, we obtain by \eqref{eq:100} and {Lemma \ref{lem:3}}, that for any $x \in B_R^-$ and $h \in (0,2)$,
\begin{align*}
\mathbb{E}_{x,y} [\lvert \widetilde{X} - X' \rvert ] &= \mathbb{E}_{x,y}[ \lvert X' -x \rvert ; \widetilde{U} > \alpha_h (x,X') ] \\
&= \mathbb{E}_{x,y} [\lvert X' -x \rvert (1-\alpha_h (x,X'))] \\
&\le h\lvert b(x) \rvert \mathbb{E}_{x,y}[1-\alpha_h (x,X')] + \sqrt{h} \mathbb{E}_{x,y} [\lvert Z \rvert (1-\alpha_h (x,X'))]\\
&\le c' h^{\frac{5}{2}} \lvert b(x) \rvert + c'' h^2 \mathbb{E}_{x,y} [\lvert Z \rvert^2]^{\frac{1}{2}},
\end{align*}
where $c'$ and $c''$ are finite constants. Noting that $\mathbb{E}_{x,y} [\lvert Z \rvert^2] = d$ and
\[
|b(x)| \le d \| b(x) \|_{-} = d {\| \nabla U(x) \|_-}/{2}
\]
we see that there exists a finite constant $c'''$ such that for $x \in B_R^-$ and $h \in (0,2)$
\[
\mathbb{E}_{x,y} [\lvert \widetilde{X} - X' \rvert ] \le c''' h^{\frac{3}{2}} (dh + d^{\frac{1}{2}} h^{\frac{1}{2}}) \,.
\]
A corresponding bound holds for $\mathbb{E}_{x,y} [\lvert \widetilde{Y} - Y'\rvert]$ with $y \in B_R^-$. Hence, by \eqref{eq:103}, condition \eqref{eq:28} is satisfied with
\[
b = 2c''' h^{\frac{3}{2}} (dh + d^{\frac{1}{2}} h^{\frac{1}{2}}) \,.
\]
Since the right hand side in \eqref{eq:102} is of order $\Omega(h^{{3}/{2}})$, we conclude that \eqref{eq:102} holds for $dh < h_1$ provided $h_1 \in (0,\infty)$ is chosen sufficiently small. Hence, Theorem \ref{thm:4} applies and by \eqref{eq:30} we obtain
\[
\mathbb{E}_{x,y}[\widetilde{f} (\lvert \widetilde{X} - \widetilde{Y} \rvert)] \le (1-c_3 h) \widetilde{f} (\lvert x -y \rvert)
\]
for any $x,y \in B_R^-$ and $h < h_1 d^{-1}$, where $c_3 = \min (\nicefrac{c_2}{8}, \nicefrac{p}{4h})$ and $\widetilde{f} (r) = f(r) + 2bp^{-1} \1_{r>0}$.
\end{proof}
\begin{lemma}\label{lem:10b}
For any fixed $R \in (0,\infty)$ there exist constants $p, h_0 \in (0,\infty)$ such that
\[
\mathbb{P}_{x,y}[\widetilde{X} = \widetilde{Y} ] \ge p
\]
for any $h \in (0,h_0)$ and $x,y \in B_R^-$ with $\lvert x-y \rvert \le \sqrt{h}$.
\end{lemma}
\begin{proof}
Let $\hat{x} = x + hb(x)$ where $b(x) = -\frac{1}{2} \nabla U(x)$. By \eqref{eq:d1} and \eqref{eq:m2}, $b$ is Lipschitz continuous on $B_R^{-}$. For $x,y \in B_R^-$ and $h \in (0, L_R^{-1})$
\[
\lvert \hat{x} - \hat{y}\rvert \le (1+hL_R) \lvert x-y \rvert \le 2\lvert x-y\rvert \,,
\]
where $L_R \in (0,\infty)$ is the Lipschitz constant of $b$ on $B_R^-$. Hence, by Lemma \ref{lem:6}, there exists a constant $p_0 \in (0,\infty)$ such that for any $x,y \in B_R^-$ and $h \in (0, L_R^{-1})$
\begin{equation}
\label{eq:10b1} \mathbb{P}_{x,y} [X' = Y'] \ge p_0 \1_{\lvert \hat{x} - \hat{y} \rvert \le 2 \sqrt{h}} \ge p_0 \1_{\lvert x-y \rvert \le \sqrt{h}} \,.
\end{equation}
Furthermore,
\begin{equation}
\label{eq:10b2} \mathbb{P}_{x,y} [\widetilde{X} \neq \widetilde{Y}] \le \mathbb{P}_{x,y} [X' \neq Y'] + \mathbb{P}_{x,y}[ \widetilde{X} \neq X'] + \mathbb{P}_{x,y}[\widetilde{Y} \neq Y'] \,.
\end{equation}
By \eqref{eq:10b1}, the first probability on the right hand side is bounded by $1-p_0$ for $\lvert x-y \rvert \le \sqrt{h}$. Moreover, by Lemma \ref{lem:3}, there exists a finite constant $c' \in (0,\infty)$ such that for any $x,y \in B_R^-$ and $h \in (0,2)$
\begin{equation}
\label{eq:10b3} \mathbb{P}_{x,y}[\widetilde{X} \neq X'] = \mathbb{E}_{x,y}[1-\alpha_h(x,X')] \le c' h^{\frac{3}{2}} \,.
\end{equation}
A corresponding upper bound holds for $\mathbb{P}_{x,y} [\widetilde{Y} \neq Y']$. Hence, by combining \eqref{eq:10b1}, \eqref{eq:10b2} and \eqref{eq:10b3}, we conclude that there exist constants $h_0> 0$ and $p = \frac{p_0}{2}> 0$ such that
\[
\mathbb{P}_{x,y}[\widetilde{X} \neq \widetilde{Y}] \le 1-p
\]
for any $h \in (0,h_0)$ and $x,y \in B_R^{-}$ with $\lvert x-y \rvert \le \sqrt{h}$.
\end{proof}
\end{document} |
\begin{document}
\title{Primitive stability and the Bowditch conditions revisited}
\author{Caroline Series}
\address{\begin{flushleft} {\rm \texttt{[email protected] \\http://www.maths.warwick.ac.uk/$\sim$masbb/} }\\ Mathematics Institute,
University of Warwick \\
Coventry CV4 7AL, UK
\end{flushleft}}
\begin{abstract}
The equivalence of two conditions on the primitive elements in an $SL(2,\mathbb C)$ representation of the free group $F_2 = <a,b>$, namely Minsky's condition of primitive stability and the $BQ$-conditions introduced by Bowditch and generalised by Tan, Wong and Zhang, has been proved by Lee and Xu and independently by the author in arXiv:1901.01396. This note is a revised version of our original proof, which is greatly simplified by incorporating some of the ideas introduced by Lee and Xu, combined with the language of the Bowditch tree.
{\bf Keywords: Free group on two generators, Kleinian group, non-discrete representation, palindromic generator, primitive stable} \\
\centering{ \emph{ To Ser Peow Tan on his 60th birthday.} }
\end{abstract}
\
\date{\today}
\maketitle
\noindent {\bf MSC classification:} {30F40 (primary), 57M50 (secondary).}
\section{Introduction}
In this note we show the equivalence of two conditions on the primitive elements in an $SL(2,\mathbb C)$ representation $\rho$ of the free group
$F_2 = <a,b>$ on two generators, which may hold even when the image $ \rho(F_2)$ is not discrete. One is the condition of primitive stability $PS$ introduced by Minsky~\cite{Minsky} and the other is the so-called $BQ$-conditions introduced by Bowditch~\cite{bow_mar} and generalised by Tan, Wong and Zhang~\cite{tan_gen}.
This result was proved in~\cite{LX} and independently in~\cite{serPS}. This note is a revised version of
~\cite{serPS}, which can be greatly simplified by incorporating the elegant estimates and ideas in~\cite{LX}. The reason for writing it is to give a concise presentation using the language of the Bowditch tree developed in~\cite{bow_mar} and~\cite{tan_gen} and used in~\cite{serPS}.
Both~\cite{LX} and~\cite{serPS} introduced a third condition which we call the \emph{bounded intersection property} $BIP$, which they showed was implied by but does not imply the other two. We also explain this condition and prove the implication here.
We begin by explaining these three conditions one by one.
Recall that an element $ u \in F_2$ is called \emph{primitive} if it forms one of a generating pair $(u,v)$ for $F_2$. Let $\mathcal{P}$ denote the set of primitive elements in $F_2$. It is well known that up to inverse and conjugacy, the primitive elements are enumerated by the rational numbers $\hat {\mathbb Q}= \mathbb Q \cup \infty$, see Section~\ref{farey} for details.
\sigmaubsection{The primitive stable condition $PS$}
The notion of primitive stability was introduced by Minsky in~\gammaammaite{Minsky} in order to construct an $Out(F_2)$-invariant subset of the ${\mathcal S}L$ character variety $\gammaammahi(F_2)$ strictly larger than the set of discrete free representations.
Let $d(P,Q)$ denote the hyperbolic distance between points $P, Q$
in hyperbolic $3$-space ${m}athbb H^3$.
Recall that a path $t {m}apsto \gammaamma(t) \sigmaubset {m}athbb H^3$ for $t {\bf i}n I$ (where $I$ is a possibly infinite interval in $ {m}athbb R$) is called a $(K, \epsilonsilonpsilon)$-quasigeodesic if there exist constants $K, \epsilonsilonpsilon >0 $ such that
\betaegin{equation} {\lambda}abel{qgeod} K^{-1}|s-t| - \epsilonsilonpsilon {\lambda}eq d(\gammaamma(s), \gammaamma(t)) {\lambda}eq K|s-t| + \epsilonsilonpsilon \ \ {m}box{sm {for all}} \ \ s, t {\bf i}n I. \epsilonsilonnd{equation}
For a representation $\rho \colon F_2 \to SL(2,\mathbb C)$, in general we will denote elements in $F_2$ by lower case letters and their images under $\rho$ by the corresponding upper case, thus $X = \rho(x)$ for $x \in F_2$. In particular if $(u,v)$ is a generating pair for $F_2$ we write $U = \rho(u), V = \rho(v)$.
Fix once and for all a basepoint $O \in \mathbb H^3$ and suppose that $w = e_{1} \dots e_{n}$, $e_{k} \in \{u^{\pm}, v^{\pm}\}$, $k = 1, \dots, n$, is a cyclically shortest word in the generators $(u,v)$.
The \emph{broken geodesic} $\mathrm{br}_{\rho}(w; (u,v))$ of $w$ with respect to $(u,v)$ is the infinite path of geodesic segments joining vertices
\[ \dots, E_n^{-1}E_{n-1}^{-1}E_{n-2}^{-1}O, \ E_n^{-1}E_{n-1}^{-1}O, \ E_n^{-1}O, \ O, \ E_1O, \ E_1E_2O, \ \dots, \ E_1E_2\dots E_nO, \ E_1E_2\dots E_nE_1O, \dots \]
where $E_i = \rho(e_i)$.
\begin{definition} \label{definePS}
Let $(u,v)$ be a fixed generating pair for $F_2$. A representation $\rho \colon F_2 \to SL(2,\mathbb C)$ is \emph{primitive stable}, denoted $PS$, if the broken geodesics $\mathrm{br}_{\rho}(w; (u,v))$ for all words $w = e_{1} \dots e_{n} \in {\cal P}$, $e_{k} \in \{u^{\pm}, v^{\pm}\}$, $k = 1, \dots, n$, are uniformly $(K,\epsilon)$-quasigeodesic for some fixed constants $(K,\epsilon)$.
\end{definition}
Notice that this definition is independent of the choice of basepoint $O$ and makes sense since the change
from
$\betar_{sho} (w; (u,v))$ to $\betar_{sho} (w; (u',v'))$ for some other generator pair $(u',v')$
changes all the constants for all the quasigeodesics uniformly.
For $g \in F_2$ write $||g||$ or more precisely $||g||_{u,v}$ for the word length of $g$, that is the shortest representation of $g$ as a product of generators $(u,v)$.
It is easy to see that for fixed generators, the condition $PS$ is equivalent to the existence of $K, \epsilon > 0$ such that
\begin{equation} \label{qgeod1}
K^{-1}||g'|| - \epsilon \leq d(O, \rho(g')O) \leq K||g'|| + \epsilon \end{equation}
for all finite subwords $g'$ of the infinite reduced word
$\dots e_{1} \dots e_{n} \dots e_{1} \dots e_{n} \dots$
Recall that an irreducible representation $\rho \colon F_2 \to SL(2,\mathbb C)$ is determined up to conjugation by the traces of $U = \rho(u)$, $V = \rho(v)$ and $UV = \rho(uv)$, where $(u,v)$ is a generator pair for $F_2$. More generally, if we take the GIT quotient of all (not necessarily irreducible) representations, then the resulting $SL(2,\mathbb C)$ character variety of $F_2$ can be identified with $\mathbb C^3$ via these traces, see for example \cite{goldman2} and the references therein. (The only elementary (hence reducible) representations occur when
$\mathrm{Tr}\,[U,V] = 2$. We exclude this from the discussion, see for example~\cite{sty} Remark 2.1.)
\begin{proposition}[\cite{Minsky} Lemma 3.2] \label{prop:psopen} The set of primitive stable $\rho \colon F_2 \to SL(2,\mathbb C)$ is open in the $SL(2,\mathbb C)$ character variety of $F_2$. \end{proposition}
Minsky showed that not all $PS$ representations are discrete.
\subsection{The Bowditch $BQ$-conditions}
The $BQ$-conditions were introduced by Bowditch in~\cite{bow_mar} in order to give a purely combinatorial proof of McShane's identity.
Again let $(u,v)$ be a generator pair for $F_2$ and let $\rho \colon F_2 \to SL(2,\mathbb C)$.
\begin{definition} \label{defineBQ}
Following~\cite{tan_gen}, an irreducible representation $\rho \colon F_2 \to SL(2,\mathbb C)$ is said to satisfy the $BQ$-conditions if
\begin{equation} \label{eqn:B2}
\begin{split} & \mathrm{Tr}\,\rho(g) \notin [-2,2] \ \ \forall g \in {\cal P} \ \ \mbox{and} \\
& \{ g \in {\cal P} : |\mathrm{Tr}\,\rho(g)| \leq 2 \} \ \mbox{is finite}.\end{split}\end{equation}
\end{definition}
We denote the set of all representations satisfying the $BQ$-conditions by $\mathcal B$.
\begin{proposition}[\cite{bow_mar} Theorem 3.16, \cite{tan_gen} Theorem 3.2] \label{prop:bqopen} The set $\mathcal B$ is open in the $SL(2,\mathbb C)$ character variety of $F_2$. \end{proposition}
Bowditch's original work~\cite{bow_mar} was on the case in which the commutator $[X,Y] = XYX^{-1}Y^{-1}$
is parabolic and $\mathrm{Tr}\,[X,Y] = -2$. He conjectured that all representations in $\mathcal B$ of this type are quasifuchsian and hence discrete. While this question remains open, it is shown in~\cite{sty} that without this restriction, there are definitely representations in $\mathcal B$ which are not discrete.
\sigmaubsection{The bounded intersection property BIP}{\lambda}abel{sec:BIP}
Recall that a word $w= e_1e_2 {\lambda}dots e_n$ in generators $(u,v)$ of $F_2$ is \epsilonsilonmph{palindromic} if it reads the same forwards and backwards, that is, if $ e_1e_2 {\lambda}dots e_n = e_ne_{n-1} {\lambda}dots e_1$. Palindromic words have been studied by
Gilman and Keen in~\gammaammaite{gilmankeen1, gilmankeen2}.
Suppose that $sho \gammaammao F_2 \tauo {\mathcal S}L$ and let $(u,v)$ be a generating pair. Denote the extended common perpendicular of the axes of $U = sho(u), V = sho(v)$ by ${\mathcal E}(U,V)$.
By applying the $\pi$ rotation about ${\mathcal E}(U,V)$, it is not hard to see that if a word $w$ is palindromic in a generator pair $(u,v)$ then the axis of $W = sho(w)$ intersects ${\mathcal E}(U,V)$ perpendicularly, see for example~\gammaammaite{BSeries}. (See~\gammaammaite{LX} for an interesting remark on the failure of the converse.)
Fix generators $(a,b)$ for $F_2$. We call the pairs $(a,b), (a,ab)$ and $(b, ab)$ the \epsilonsilonmph{basic generator pairs}.
Now given $sho \gammaammao F_2 \tauo {\mathcal S}L$ let $A = sho(a), B = sho(b)$ and consider the three common perpendiculars ${\mathcal E}(A,B), {\mathcal E}(A,AB)$ and ${\mathcal E}(B,AB)$.
(We could equally well chose to use $BA$ in place of $AB$; the main point is that the choice is fixed once and for all.)
We call these lines the \epsilonsilonmph{special hyperelliptic axes}.
\begin{definition} \label{defineBIP}
Fix a basepoint $O \in \mathbb H^3$. A representation
$\rho \colon F_2 \to SL(2,\mathbb C)$ satisfies the \emph{bounded intersection property} $BIP$ if there exists $D > 0$ so that
if a generator $w$ is palindromic with respect to one of the three basic generator pairs, then its axis intersects the corresponding special hyperelliptic axis in a point at distance at most $D$ from $O$. Equivalently, the axes of all palindromic primitive elements intersect the appropriate hyperelliptic axes in bounded intervals.
\end{definition}
Clearly this definition is independent of the choices of $(a,b)$ and $O$.
A similar condition but related to \epsilonsilonmph{all} palindromic axes was used in~\gammaammaite{gilmankeen2} to give a condition for discreteness of geometrically finite groups.
In Section~sef{BIP} we show that every generator is conjugate to one which is palindromic with respect to one of the three basic generator pairs. In fact each primitive element can be conjugated (in different ways) to be palindromic with respect to two out of the three possible basic pairs. For a more precise statement see Proposition~sef{uniquepalindromes}.
\sigmaubsection{The main result}
The main results of this paper are:
\begin{introthm} \label{introthmA} The conditions $BQ$ and $PS$ are equivalent. \end{introthm}
\begin{introthm} \label{introthmB} The conditions $BQ$ and $PS$ both imply, but are not implied by, the condition $BIP$. \end{introthm}
In the case of real representations, Damiano Lupi~\gammaammaite{lupi} showed by case by case analysis following \gammaammaite{goldman} that the conditions $BQ$ and $PS$ are equivalent.
To see that $BIP$ does not imply the other conditions, first note that conditions $PS$ and $BQ$ both imply that no element in $sho({\cal P})$ is elliptic or parabolic. The condition $BIP$ rules out parabolicity (consider the fixed point of a palindromic parabolic element to be a degenerate axis which clearly meets the relevant hyperelliptic axis at infinity). However the condition does not obviously rule out elliptic elements in $sho({\cal P})$.
In particular, consider any $SO(3)$ representation, discrete or otherwise. Here all axes are elliptic and all pass through a central fixed point which is also at the intersection of all three hyperelliptic axes. Such a representation clearly satisfies $BIP$.
\betaegin{remark} {\lambda}abel{binbin} {sm We remark that the second statement of Theorem II in~\gammaammaite{LX} is false: there are $F_2$ representations with discrete image which have property $BIP$ but which are not in ${ \mathcal B}$, for example the finite orthogonal group consisting of order two rotations round three mutually perpendicular axes. This group clearly satisfies $BIP$ but all its elements are elliptic. (The error stems from an oversight about accumulation points in their proof.) }
\epsilonsilonnd{remark}
\betaegin{comment}
\epsilonsilonnd{comment}
The plan of the paper is as follows. The hardest part of the work is to prove Theorem~sef{BQimpliesPS}, that if $sho$ satisfies the $BQ$-conditions then $sho$ is primitive stable. In~\gammaammaite{serPS} this was done by first showing that if $sho$ satisfies the $BQ$-conditions then $sho$ has the bounded intersection property, and using this to deduce $PS$. However, as explained in Section~sef{geometry}, this is shown to be unnecessarily complicated by the improved estimates and methods of~\gammaammaite{LX}.
In Section~sef{farey} we present background on the Farey tree and also introduce Bowditch's condition of Fibonacci growth. In Section~sef{Bowditchbackground}, we summarise Bowditch's method of assigning an orientation to the edges of the Farey tree ($T$-arrows) and, subject to the $BQ$-conditions, the existence of a finite attracting subtree. In~sef{Warrows} we introduce a second way of orientating edges based on word length ($W$-arrows), and show that for all but finitely many words these two orientations coincide.
In Section~sef{geometry} we collect the background and estimates used to prove Theorem~sef{introthmA}. This is based almost entirely on~\gammaammaite{LX}, in particular we need the amplitude of a right angle hexagon whose three alternate sides correspond to the axes of a generator triple $(u,v,uv)$. As we shall explain, this quantity defined in~\gammaammaite{Fen} is an invariant of the representation $sho$ and plays a crucial part what follows.
We then continue following~\gammaammaite{LX} to get the crucial result Proposition~sef{longwordsqgeod}.
Theorem~sef{introthmA} is proved in Section~sef{sec:BQimpliesBIP}. That $PS$ implies $BQ$ follows easily from the condition of Fibonacci growth (see Definition~sef{fibonaccidefn}). This was proved in~\gammaammaite{lupi}.
Proposition~sef{longwordsqgeod} and the results of Section~sef{Bowditchbackground} then lead to the proof of Theorem~sef{BQimpliesPS}, that $BQ$ implies $PS$.
In Section~sef{BIP} we discuss the condition $BIP$. We begin with a result which may be of independent interest on the palindromic representation of primitive elements, Proposition~sef{uniquepalindromes}. Theorem~sef{introthmB}, that $BQ$ implies $BIP$, is then easily deduced from Theorem~sef{BQimpliesPS}. In Theorem~sef{direct} we give an alternative direct proof using Equation~\epsilonsilonqref{distbound}, which uses the invariance of the amplitude of $sho$ to give an improved version of the estimates in~\gammaammaite{serPS}.
We would like to thank Tan Ser Peow and Yasushi Yamashita for initial discussions about the original version~\gammaammaite{serPS} of this paper. The work involved in Lupi's thesis~\gammaammaite{lupi} also made a significant contribution. We also thank Tan for pointing us to the work of Lee and Xu, and for a careful reading of this paper. The idea of introducing the condition $BIP$ arose while trying to interpret some very interesting computer graphics involving non-discrete groups made by Yamashita. We hope to return to this topic elsewhere.
As we hope we have made clear above, there is little in this revised version of~\gammaammaite{serPS} which is not essentially contained in~\gammaammaite{LX} and we wish to fully acknowledge the elegance and ingenuity of their method.
\sigmaection{Primitive elements, the Farey tree and Fibonacci growth} {\lambda}abel{farey} The Farey tessellation ${ \mathcal F}$ as shown in Figures~sef{fig:farey} and sef{fig:colouredtree} consists of the images of the ideal triangle with vertices at $1/0,0/1$ and $1/1$ under the action of $SL(2,{m}athbb Z)$ on the upper half plane, suitably conjugated to the position shown in the disk. The label $p/q$ in the disk is just the conjugated image of the actual point $p/q {\bf i}n {m}athbb R$.
\betaegin{figure}[ht]
{\bf i}ncludegraphics[width=5.5cm]{Figs/fareydiagram.pdf}
\hspace{1cm}
{\bf i}ncludegraphics[width=5.5cm]{Figs/fareywords.pdf}
\gammaammaaption{The Farey diagram, showing the arrangement of rational numbers on the left with the corresponding primitive words on the right. The dual graph shown on the left is the Farey tree ${\cal T}$. }{\lambda}abel{fig:farey}
\epsilonsilonnd{figure}
Since the rational points in $\hat {{m}athbb Q}= {m}athbb Q\gammaammaup {\bf i}nfty$ are precisely the images of ${\bf i}nfty$ under $SL(2,{m}athbb Z)$, they correspond bijectively to the vertices of ${ \mathcal F}$.
A pair $p/q , r/s {\bf i}n \hat {m}athbb Q$ are the endpoints of an edge if and only if $pr-qs = \pm 1$; such pairs are called \epsilonsilonmph{neighbours}.
A triple of points in $\hat {m}athbb Q$ are the vertices of a triangle precisely when they are the images of the vertices of the initial triangle $(1/0,0/1,1/1)$; such triples are always of the form
$(p/q , r/s,(p+r )/( q+s))$ where $p/q , r/s$ are neighbours.
In other words, if $p/q , r/s$ are the endpoints of an edge, then the vertex of the triangle on the side away from the centre of the disk is found by `Farey addition' to be $(p+r )/( q+s)$. Starting from $1/0 = -1/0= {\bf i}nfty$ and $0/1$, all points in $\hat {m}athbb Q$ are obtained recursively in this way. Note we need to start with $-1/0= {\bf i}nfty$ to get the negative fractions on the left side of the left hand diagram in Figure~sef{fig:farey}.
As noted in the introduction, up to inverse and conjugation, the equivalence classes of primitive elements in $F_2$ are enumerated by ${\cal Q}hat$.
Formally, we set $\overline {\cal P} $ to be the set of equivalence classes of cyclically shortest primitive elements under the relation $u \sigmaim v$ if and only if either $v = gug^{-1}$ or $v = gu^{-1}g^{-1}, g {\bf i}n F_2$. We call the equivalence classes, \epsilonsilonmph{extended conjugacy classes} and denote the equivalence class of $u {\bf i}n {\cal P}$ by $\betau$. In particular, the set of all cyclic permutations of a given word are in the same extended class. Since we are working in the free group, a word is \epsilonsilonmph{cyclically shortest} if it, together with all its cyclic permutations, is reduced, that is, contains no occurrences of $x$ followed by $x^{-1}, x {\bf i}n \{a^{\pm}, b^{\pm}\}$.
The right hand picture in Figure~sef{fig:farey}
shows an enumeration of representative elements from $\overline {\cal P} $, starting with initial triple $(a,b,ab)$. Each vertex is labelled by a certain cyclically shortest generator $w_{p/q}$. Corresponding to the process of Farey addition, the words $w_{p/q}$ can be found by juxtaposition as indicated on the diagram. Note that for this to work it is important to preserve the order: if $u,v$ are the endpoints of an edge with $u$ before $v$ in the anti-clockwise order round the circle, the correct concatenation is $uv$, see Figure~sef{fig:regionboundary}. Note also that the words on the left side of the diagram involve $b^{-1}$ and $a$, rather than $b$ and $a$, corresponding to starting with ${\bf i}nfty = -1/0 $.
It is not hard to see that pairs of primitive elements form a generating pair if and only if they are at the two endpoints of an edge of the Farey tessellation, while the words at the vertices of a triangle correspond to a generator triple of the form $(u,v,uv)$.
The word $w_{p/q}$ is a representative of the extended conjugacy class identified with $p/q {\bf i}n \hat {m}athbb Q $. It is almost but not exactly the same as the \epsilonsilonmph{Christoffel word} as described~\gammaammaite{LX}. We denote this class by $ [p/q]$ and call $w_{p/q}$ the \epsilonsilonmph{standard representative} of $ [p/q]$. Likewise if $p/q, r/s {\bf i}n {\cal Q}hat$ are neighbours we call $(w_{p/q},w_{r/s})$ the standard (unordered) generator pair.
It is easy to see that $e_a(w_{p/q}) / e_b(w_{p/q}) = p/q$, where $e_a(w_{p/q}), e_b(w_{p/q})$ are the sum of the exponents in $w_{p/q}$ of $a,b$ respectively. All other words in $ [p/q]$ are cyclic permutations of $w_{p/q}$ or its inverse. For more details on primitive words in $F_2$, see for example~\gammaammaite{serInt} or~\gammaammaite{CMZ}.
Later it will be essential to distinguish between a word $w_{p/q}$ and its inverse, while for an arbitrary generator pair $(u,v)$ we need to distinguish between $uv$ (or its cyclic conjugate $vu$), and $uv^{-1}$ (or its cyclic conjugate $v^{-1}u$). We do this using:
\betaegin{definition} {\lambda}abel{signchoice}
The word $w {\bf i}n F_2$ is \epsilonsilonmph{positive} if it is cyclically shortest and if all exponents of $a$ in $w$ are positive. A generator pair $(u,v)$ is positive if both $u$ and $v$ are positive.
\epsilonsilonnd{definition}
We remark that if $(u,v)$ is positive then $||uv||_{a,b} = ||u||_{a,b} + || v||_{a,b}$.
In particular, the standard word $w_{p/q}$ constructed as indicated in Figure~sef{fig:farey} is positive, as is the standard generator pair $(w_{p/q},w_{r/s})$ whenever $p/q, r/s {\bf i}n {\cal Q}hat$ are neighbours, see also Figure~sef{fig:regionboundary}.
\sigmaubsection{Fibonacci growth}
Since all words in an extended conjugacy class have the same length, and since $w_{p/q}$ can found by concatenation starting from the initial generators $(a, b)$, it follows that $||w||_{(a,b)}= p+q$ for all $w {\bf i}n [p/q]$.
This leads to the following definition from~\gammaammaite{bow_mar}:
\begin{definition} \label{fibonaccidefn} A representation $\rho \colon F_2 \to SL(2,\mathbb C)$ has \emph{Fibonacci growth} if there exists $c > 0$ such that for all cyclically reduced words $w \in {\cal P}$ we have $\log^+|\mathrm{Tr}\,\rho(w)| < c\,||w||_{(a,b)}$, and $\log^+|\mathrm{Tr}\,\rho(w)| > ||w||_{(a,b)}/c$
for all but finitely many cyclically reduced $w \in {\cal P}$, where $\log^+ x = \max\{0, \log|x|\}$. \end{definition}
Notice that although the definition is made relative to a fixed pair of generators for $F_2$, it is in fact independent of this choice.
The following result is fundamental. It is proved using the technology described in the next section.
\begin{proposition}[\cite{bow_mar} Proof of Theorem 2, \cite{tan_gen} Theorem 3.3] \label{fibonacci} If $\rho \colon F_2 \to SL(2,\mathbb C)$ satisfies the $BQ$-conditions then $\rho$ has Fibonacci growth. \end{proposition}
\sigmaection{More on the Bowditch condition}{\lambda}abel{Bowditchbackground}
In this section we explain some further background to the $BQ$-conditions. For more detail see~\gammaammaite{bow_mar} and~\gammaammaite{tan_gen}, and for a quick summary~\gammaammaite{sty}. The \epsilonsilonmph{Farey tree} ${\cal T}$ is the trivalent dual tree to the tessellation ${ \mathcal F}$, shown superimposed on the left in Figure~sef{fig:farey}. As above, $\overline {\cal P}$ is identified ${\cal Q}hat$ and hence with the set ${\Omega}mega$ of complementary regions of ${\cal T}$. We label the region associated to a generator $u$ by $\betaf u$, thus $\betaf u' = \betaf u$ for all $u' \sigmaim u$. If $e$ is an edge of
${\cal T}$ we denote the adjacent regions by $\betau(e), \betav(e)$.
For a given representation $sho \gammaammao F_2 \tauo {\mathcal S}L$, note that ${\cal T}r [U,V]$ and hence ${m}u = {\cal T}r [A,B]+2$ is independent of the choice of generators of $F_2$, where as usual $U = {\cal T}r sho(u)$ and so on.
Since $ {\cal T}r U$ is constant on extended equivalence classes of generators, for $\betau {\bf i}n {\Omega}mega$ we can define $\phi(\betau) = \phi_{sho}(\betau)= {\cal T}r U$ for any $ u {\bf i}n \betau$.
For notational convenience we will sometimes write $\hat \betau$
in place of $\phi(\betau)$.
For matrices $X, Y \in SL(2,\mathbb C)$ set $x = \mathrm{Tr}\,X$, $y = \mathrm{Tr}\,Y$, $z = \mathrm{Tr}\,XY$. Recall the trace relations:
\begin{equation}\label{eqn:inverse} \mathrm{Tr}\,XY^{-1} = xy - z \end{equation} and
\begin{equation} \label{eqn:commreln} x^2 + y^2 + z^2 = xyz + \mathrm{Tr}\,[X,Y] + 2.
\end{equation}
Setting $\mu = \mathrm{Tr}\,[X,Y] + 2$, this last equation takes the form
\[ x^2 + y^2 + z^2 - xyz = \mu. \]
As is well known and can be proven by applying the above trace relations inductively, if $\betau,\betav,\betaw$ is a triple of regions round a vertex of ${\cal T}$, then $\hat \betau,\hat \betav,\hat \betaw$ satisfy~\epsilonsilonqref{eqn:commreln} with $x = \hat \betau$ and so on. Likewise if $e$ is an edge of
${\cal T}$ with adjacent regions $\betau,\betav$ and if $\betaw, \betaz$ are the third regions at either end of $e$, then $\hat \betau,\hat \betav,\hat \betaw, \hat \betaz$ satisfy~\epsilonsilonqref{eqn:inverse}, that is, $\hat \betaz = \hat \betau \hat \betav-\hat \betaw$. (A map $\phi: {\Omega}mega \tauo {m}athbb C$ with this property is called a \epsilonsilonmph{Markoff map} in~\gammaammaite{bow_mar}.)
Given $sho \gammaammao F_2 \tauo {\mathcal S}L$, let $e$ be an edge of $ {\cal T}$ and suppose that the regions meeting its two end vertices are $ \betaw, \betaz$.
Following Bowditch~\gammaammaite{bow_mar}, orient $e$ by putting an arrow from $ \betaz$ to $\betaw$ whenever $|\hat \betaz| > |\hat \betaw|$. If both moduli are equal, make either choice; if the inequality is strict, say that the edge is \epsilonsilonmph{oriented decisively}. We denote the oriented edge by $\vec e$ and refer to this oriented tree as the \epsilonsilonmph{Bowditch tree}, denoted $ {\cal T}_{sho}$. If $\vec e$ is a directed edge then its \epsilonsilonmph{head} and \epsilonsilonmph{tail} are its two ends, chosen so that the arrow on $\vec e$ points towards its head.
We say a path of oriented edges $\vec e_r, 1 {\lambda}eq r {\lambda}eq m$ is \epsilonsilonmph{descending to} $\vec e_m$ if the head of $\vec e_r$ is the tail of $\vec e_{r+1}$ for $r = 1, {\lambda}dots, m-1$. It is \epsilonsilonmph{strictly descending} if each arrow is oriented decisively. A vertex at which all three arrows are incoming is called a \epsilonsilonmph{sink}.
For any $m \gammaeq 0$ and $sho \gammaammao F_2 \tauo {\mathcal S}L$ define ${\Omega}mega_{sho}(m) = \{ \betau {\bf i}n {\Omega}mega : |\phi_{sho}(\betau)| {\lambda}eq m\}$. From the definition, if $sho {\bf i}n { \mathcal B}$ then ${\Omega}mega_{sho}(2)$ is finite and $\phi(\betau) \notin [-2,2]$ for $\betau {\bf i}n {\Omega}mega$.
These following two lemmas show that starting from any directed edge $\vec e_1$, there is a unique descending path to an edge $\vec e_{m}$ which is adjacent to a region in ${\Omega}mega(2)$.
\betaegin{lemma}[{\gammaammaite [Lemma 3.7]{tan_gen}}]{\lambda}abel{forkvertex}
Suppose $\betau,\betav,\betaw {\bf i}n {\Omega}mega$ meet at a vertex $q$ of ${\cal T}_{sho}$ with the arrows on both the edges adjacent to $\betau$ pointing away from $q$. Then either $|\phi(\betau)| {\lambda}eq 2$ or $\phi(\betav) = \phi(\betaw) = 0$. In particular, if $sho {\bf i}n { \mathcal B}$ then $|\phi(\betau)| {\lambda}eq 2$. \epsilonsilonnd{lemma}
\betaegin{lemma}[{\gammaammaite[Lemma 3.11]{tan_gen} and following comment}] {\lambda}abel{infiniteray}
Suppose $\betaeta$ is an infinite ray consisting of a sequence of edges of ${\cal T}_{sho}$ all of whose arrows point away from the initial vertex. Then $\betaeta$ meets at least one region $\betau {\bf i}n {\Omega}mega$ with $|\phi( \betau)| < 2$. \epsilonsilonnd{lemma}
\betaegin{lemma}{\lambda}abel{connected}
For any $m \gammae 2$, the set ${\Omega}mega_{sho}(m)$ is connected. Moreover if $sho {\bf i}n { \mathcal B}$ then $|{\Omega}mega_{sho}(m)|<{\bf i}nfty$.
\epsilonsilonnd{lemma}
\betaegin{proof} The first statement is~\gammaammaite{tan_gen} Theorem 3.1(2).
That ${\Omega}mega_{sho}(m)$ is finite follows from Proposition~sef{fibonacci}, see~\gammaammaite{tan_gen} P. 773. \epsilonsilonnd{proof}
The result which we mainly use is the following:
\betaegin{theorem}{\lambda}abel{sinktree} There is a constant $M_0 \gammaeq 2$ and a finite connected non-empty subtree tree $T_F$ of ${\cal T}_{sho}$ so that for every edge $\vec e$ not in $T_F$, there is a strictly descending path from $\vec e$ to an edge of $T_F$.
Moreover if regions $\betau,\betav$ are adjacent to an edge of ${\cal T}$, then $ |{\cal T}r U|, |{\cal T}r V| {\lambda}eq M_0$ implies $e {\bf i}n T_F$. For any $M\gammaeq M_0$, the tree $T_F = T_F(M_0)$ can be enlarged
to a larger tree $T_F(M)$ with similar properties, and in addition $T_F$ can be enlarged to include any finite set of edges. \epsilonsilonnd{theorem}
\betaegin{proof} Most of the assertions are proved on p. 782 of ~\gammaammaite{tan_gen}, see also Corollary 3.12 of~\gammaammaite{bow_mar}. To see that $T_F$ can always be enlarged to a tree $T_F(M)$ with similar properties, see the proofs of Theorem 3.2 of~\gammaammaite{tan_gen} and Theorem 3.16 of~\gammaammaite{bow_mar}. (In fact there is a precise condition to determine which edges are in $T_F$, see~\gammaammaite{tan_gen} Lemma 3.23.) Finally, let ${m}athcal K$ be any finite subset of ${\cal T}$ and let
$M = {m}ax \{ \phi(\betau), \phi(\betav) : \betau, \betav \ \ {m}box{sm{are adjacent to an edge in}} \ \ {m}athcal K \}$. Enlarging $T_F$ to $T_F(M)$ the result is clear.
\epsilonsilonnd{proof}
\betaegin{definition} {\lambda}abel{wake} Let $\vec e$ be a directed edge. The \epsilonsilonmph{wake} of $\vec e$, denoted ${\mathcal W}(\vec e)$, is the set of regions whose boundaries are contained in the component of ${\cal T} \sigmaetminus \{\vec e\}$ which contains the tail of $\vec e$, together with the two regions adjacent to $\vec e$. \epsilonsilonnd{definition}
We remark that the wake ${\mathcal W}(\vec e)$ is the subset of ${\Omega}mega$ denoted ${\Omega}mega^{0-}(\vec e)$ in~\gammaammaite{bow_mar} and \gammaammaite{tan_gen}.
Also denote by $ {\mathcal W}_{{\mathcal E}}(\vec e)$ the set of edges $\vec e$ which are adjacent to two regions in ${\mathcal W}(\vec e)$.
Theorem~sef{sinktree} says that if $\vec e \notin T_F$ then the arrow on $\vec e$ points towards $T_F$. We note the following slight variation:
\betaegin{lemma}{\lambda}abel{wake1} If $\vec e \notin T_F$ then every edge in ${\mathcal W}_{{\mathcal E}}(\vec e)$ is oriented towards $\vec e$. \epsilonsilonnd{lemma}
\betaegin{proof} This follows easily from the definitions. In detail, let $\deltad(T_F)$ be the boundary of $T_F$, that is, the set of edges in $T_F$ whose tails meet the head of an edge not in $T_F$. If $\vec e {\bf i}n \deltad(T_F)$ then by Theorem~sef{sinktree} the arrow on every edge in ${\mathcal W}_{{\mathcal E}}(\vec e)$ points towards $\vec e$. Now suppose that $\vec e \notin \deltad(T_F)$ and that $\vec f {\bf i}n {\mathcal W}_{{\mathcal E}}(\vec e)$. Suppose that the descending path $\beta(e) $ from $ \vec e$ lands on $\vec g {\bf i}n \deltad(T)$ while the descending path $\beta(f) $ from $ \vec f$ lands on $\vec h {\bf i}n \deltad(T)$. Then $\beta(e) \sigmaubset {\mathcal W}_{{\mathcal E}}(\vec g)$ while $\vec f {\bf i}n \beta(f) \sigmaubset {\mathcal W}_{{\mathcal E}}(\vec h)$. Since ${\mathcal W}_{{\mathcal E}}(\vec g)$ and ${\mathcal W}_{{\mathcal E}}(\vec h)$ are disjoint unless $g = h$ and $\vec f {\bf i}n {\mathcal W}_{{\mathcal E}}(\vec e) \sigmaubset {\mathcal W}_{{\mathcal E}}(\vec g)$ this gives the result.
\epsilonsilonnd{proof}
Finally, for the proof of Theorem~sef{direct} we need the following refinement of Theorem~sef{fibonacci}, which is a minor variation of Lemmas 3.17 and Lemma 3.19 of~\gammaammaite{tan_gen}.
For $\betau {\bf i}n {\mathcal W}(\vec e)$ let $d(\betau)$ be the number of edges in the shortest path from $\betau$ to the head of $\vec e$.
Following~\gammaammaite{tan_gen} P.777, define the \epsilonsilonmph{Fibonacci function} $F_{\vec e}$ on ${\mathcal W}(\vec e)$ as follows:
$F_{\vec e}(\betaw) = 1$ if $\betaw$ is adjacent to $\vec e$ and
$F_{\vec e}(\betau) = F_{\vec e}(\betav)+ F_{\vec e}(\betaw)$ otherwise, where $\betav,\betaw$ are the two regions meeting $\betau$ and closer to $\vec e$ than $\betau$, that is, with $d(\betav) < d(\betau),d(\betaw) < d(\betau)$.
\betaegin{lemma} {\lambda}abel{Increasing3} {\lambda}abel{fibonacciwake} Suppose that $sho {\bf i}n { \mathcal B}$
and that ${\mathcal V}ec e$ is a directed edge such at most one of the adjacent regions is in ${\Omega}mega(2)$. Suppose also that no edge in
${\mathcal W}_{{\mathcal E}}(\vec e)$ is adjacent to regions in ${\Omega}mega(2)$ on both sides.
Then there exist $c>0, n_0 {\bf i}n {m}athbb N$, independent of ${\mathcal V}ec e$ (but depending on $sho$), so that ${\lambda}og |\phi_{sho}(\betau)| \gammaeq c F_{\vec e}(\betau)$ for all but at most $n_0$ regions $\betau {\bf i}n {\mathcal W}(\vec e)$. \epsilonsilonnd{lemma}
\betaegin{proof} This essentially Lemmas 3.17 and 3.19 of~\gammaammaite{tan_gen}, see also Corollary 3.6 of~\gammaammaite{bow_mar}.
Since ${\Omega}mega(M)$ is finite for any $M>2$, the set $\{ {\lambda}og |\phi(\betau)|: \betau \notin {\Omega}mega(2) \} $ has a minimum $m > {\lambda}og 2$. By Lemma 3.17, if neither adjacent region to ${\mathcal V}ec e$ is in ${\Omega}mega(2)$, we can take $c = m - {\lambda}og 2$ and $n_0 = 0$.
Suppose then that exactly one of the adjacent regions $\betax_0$ to ${\mathcal V}ec e$ is in ${\Omega}mega(2)$. To apply Lemma 3.19, we need to verify that $ {\mathcal W}(\vec e) \gammaammaap {\Omega}mega(2) = \{\betax_0\}$. Note that no region which meets the boundary $\partial \betax_0$ of $\betax_0$ can be in ${\Omega}mega(2)$ by hypothesis. Let $\vec \epsilonsilon_n, n {\bf i}n {m}athbb N$ be the oriented edges whose heads meet $\partial \betax_0$ but which are not contained in $\partial \betax_0$, numbered so that $\vec \epsilonsilonpsilon_1$ is the edge not contained in $\partial \betax_0$
whose head meets $\vec e$.
Then neither of the two adjacent regions to $\vec \epsilonsilonpsilon_n$ are in ${\Omega}mega(2)$ for any $n$. It follows from Lemma 3.17 that ${\mathcal W}(\vec \epsilonsilonpsilon_n) \gammaammaap {\Omega}mega(2) = \epsilonsilonmptyset$ for $n {\bf i}n {m}athbb N$. Since clearly
$ {\mathcal W}(\vec e) = \{\betax_0\} \gammaammaup \betaigcup_{n{\bf i}n {m}athbb N} {\mathcal W}(\vec \epsilonsilonpsilon_n)$ the claim follows.
Now Lemma 3.19 gives $c>0$ and $n_0{\bf i}n {m}athbb N$, depending only on $\betax_0$, so that ${\lambda}og |\phi_{sho}(\betau)| \gammaeq c F_{\vec e}(\betau)$ for all but at most $n_0$ regions $\betau {\bf i}n {\mathcal W}(\vec e)$. Since ${\Omega}mega(2)$ is finite and
$\betax_0 {\bf i}n {\Omega}mega(2)$, we can adjust the constants so as to be uniform independent of $\vec e$. \epsilonsilonnd{proof}
\betaegin{comment}
Let $\betay_i, i {\bf i}n {m}athbb Z$ be the regions in order around the boundary $\deltad \betau$ of a single region $\betau {\bf i}n {\Omega}mega$. It is easy to see (see the proof of Proposition~sef{uniquepalindromes}) that the values $\phi (\betay_i)$ satisfy a simple recurrence relation and hence grow exponentially unless $\phi(\betau)$ is in the exceptional set $E = [-2,2] \gammaammaup \{\pm \sigmaqrt{{m}u}\} \sigmaubset {m}athbb C$. If $sho {\bf i}n { \mathcal B}$ then
by definition $\phi(\betau) \notin [-2,2]$, while if $\phi(\betau)= \pm \sigmaqrt{{m}u}$ the values approach zero in one direction round $\deltad \betau$ (see \gammaammaite{tan_gen} Lemma 3.10) and hence $sho \not{\bf i}n { \mathcal B}$ since condition \epsilonsilonqref{eqn:B2} is not satisfied.
Thus we find:
\betaegin{lemma}[{\gammaammaite[Lemma 3.20]{tan_gen}}] {\lambda}abel{finiteboundary}
Suppose that $sho {\bf i}n { \mathcal B}$ and $\betau {\bf i}n {\Omega}mega$ and consider the regions $\betay_i, i {\bf i}n {m}athbb Z$ adjacent to $\betau$ in order round $\deltad \betau$. Then away from a finite subset, the values $|\phi_{sho}(\betay_i)|$ are increasing and approach infinity as $ i \tauo {\bf i}nfty$ in both directions. Moreover there exists a finite segment of $\partial \betau$ such that the edges adjacent to $\betau$ and not in this segment are directed towards this segment.
\epsilonsilonnd{lemma}
\epsilonsilonnd{comment}
\subsection{The W-arrows}\label{Warrows}
There is another way to orient the edges of $\mathcal{T}$, this time in relation to word length. For $\mathbf{u} \in \Omega$, define $||\mathbf{u}|| = ||u||_{(a,b)}$ for any cyclically reduced positive word $u \in \mathbf{u}$; clearly this is independent of the choice of $u$. Provided $e$ is not the edge $e_0$ separating the regions $(\mathbf{a},\mathbf{b})$, then if $\mathbf{z},\mathbf{w}$ are the regions at the two ends of $e \in \mathcal{T}$, put an arrow pointing from $\mathbf{z}$ to $\mathbf{w}$ whenever $||\mathbf{z}||_{a,b} > ||\mathbf{w}||_{a,b}$.
We call these arrows $W$-arrows, while the previously assigned arrows defined by the condition $|\phi(\mathbf{z})| \geq |\phi(\mathbf{w})|$ we refer to as $T$-arrows (for word length and trace respectively). Clearly every edge is connected by a strictly descending path of $W$-arrows to one of the two vertices at the ends of the edge $e_0$. \emph{We retain the notation $\vec e$ exclusively to refer to the orientation of the $T$-arrow, likewise the terms head and tail.}
If $e$ is an edge of $\mathcal{T}$, as usual denote by $\mathbf{u}(e), \mathbf{v}(e)$ the regions adjacent to $e$. Notice that if $u \in \mathbf{u}(e), v \in \mathbf{v}(e)$ are a positive generator pair, then we have $||\mathbf{uv}|| > ||\mathbf{uv^{-1}}||$ so that the $W$-arrow points from $\mathbf{uv}$ to $\mathbf{uv^{-1}}$.
For $N \in \mathbb{N}$
let $B((a,b), N) = \{ e \in \mathcal{T} : \max \{||\mathbf{u}(e)||_{a,b}, ||\mathbf{v}(e)||_{a,b}\} \leq N \}$.
The next proposition shows that for all but finitely many arrows, the $W$- and $T$-arrows point in the same direction.
\begin{proposition} \label{wordsandtraces} There exists $N_0>0$ such that if $\vec e \notin B((a,b), N_0)$ is an oriented edge of $\mathcal{T}_{\rho}$ with regions $\mathbf{z},\mathbf{w}$ at its tail and head respectively, then $||\mathbf{z}|| > ||\mathbf{w}||$.
\end{proposition}
\begin{proof} This is a general result about attracting trees. Enlarge the finite sink tree $T_F$ of Theorem~\ref{sinktree} if necessary so that $e_0 \in \mathcal{T}_F$. Choose $N_0$ large enough that $T_F(M_0) \subset B = B((a,b), N_0)$. Then every edge not in $B$ is connected by a path of decreasing $T$-arrows to an edge of $T_F$.
If the result is false, there is an edge $\vec e$ not in $B$ with regions $\mathbf{z},\mathbf{w}$ at its tail and head respectively such that $||z||_{a,b} < ||w||_{a,b}$ for $z \in \mathbf{z}, w \in \mathbf{w}$. By Lemma~\ref{wake1}, every edge in $\mathcal{W}_{\mathcal{E}}(\vec e)$ is connected by a strictly descending path of $T$-arrows to the tail of $\vec e$.
On the other hand, $\vec e$ is connected by a strictly descending path of $W$-arrows to one of the two vertices at the ends of
$e_0$. But these $W$-arrows are contained in $\mathcal{W}(\vec e)$ and, following on from the initial edge $e$, must all point in the opposite direction to the $T$-arrows. Thus
one of the two vertices at the ends of
$e_0$ is outside $B$, which is impossible.
\end{proof}
\begin{corollary} \label{headsandtails} There exists $N_0 \in \mathbb{N}$ such that if $\vec e$ is an edge outside $B(N_0)$, then every edge $\vec f \in \mathcal{W}(\vec e)$ has head $\mathbf{uv}^{-1}$ and tail $\mathbf{uv}$ whenever $u \in \mathbf{u}(f), v \in \mathbf{v}(f)$ are a positive generator pair associated to $\vec f$.
\end{corollary}
\section{Results from~\cite{LX}}\label{geometry}
In this section we collect the main results from~\cite{LX} needed to prove Theorem~\ref{BQimpliesPS}.
\subsection{The double cone lemma}\label{cone}
Suppose that $H, H'$ are hyperbolic half planes and let $\hat H$ be one of the two closed half spaces defined by $H$. By an inward (resp.\ outward) pointing normal to $\hat H$ we mean a normal to $H$ which points into (resp.\ out of) $\hat H$. If $\hat H'$ is another half space such that $\hat H \supset \hat H'$ and $d(H, H')>0$ we say that $\hat H, \hat H'$ are \emph{properly nested}.
\begin{lemma}\label{conelemma} Suppose $0 < \alpha < \pi/2$. Then there exists $L_0>0$ with the following property.
Suppose that $H, H'$ are hyperbolic half planes defining half spaces $\hat H, \hat H'$. Let $\mathcal{M}$ be a line joining points $O \in H, P \in H'$ such that $\mathcal{M}$ is orthogonal to $\hat H'$ and makes an angle $0 \leq \theta < \alpha$ with the inward pointing normal to $\hat H$. Then $\hat H \supset \hat H'$ are properly nested whenever $d(O,P)> L_0$. \end{lemma}
\begin{proof}
If this is false, then $H'$ meets $H$ in a point $Q \in \mathbb{H}^3 \cup \partial \mathbb{H}^3$. Then $OPQ$ is a triangle with angle $\psi = \pi/2 - \theta$ at $O$ and $\pi/2$ at $P$. Let $L_0$ be the length of the finite side of a triangle with angles $\pi/2 - \alpha, \pi/2, 0$. Since
$\psi = \pi/2 - \theta > \pi/2 - \alpha$ then $d(O,P)< L_0$. Clearly from the directions of the normals, $\hat H \supset \hat H'$ and moreover $d(H, H')>0$.
\end{proof}
\begin{corollary}\label{doubleconelemma}(\cite[Lemma 3.5]{LX})
Suppose that $H, H'$ are hyperbolic half planes with corresponding half spaces $\hat H, \hat H'$ and let $\mathcal{M}$ be a line joining points $O \in H, P \in H'$ which makes
angles $0 \leq \theta, \theta' \leq \alpha$ with the inward pointing normal to $\hat H$ and the outward pointing normal to $\hat H'$ respectively. Then $\hat H \supset \hat H'$ are properly nested provided $d(O,P)> 2L_0$.
\end{corollary}
\begin{proof} Let $H''$ be the plane perpendicular to $\mathcal{M}$ through its mid-point and apply Lemma~\ref{conelemma} to
$H, H''$ and $H'', H'$.
\end{proof}
\subsection{Generators and the amplitudes of a right angled hexagon}\label{sec:amplitude}
Let $\mathcal{H}$ be a right angled hexagon with consistently oriented sides $s_1, \ldots, s_6$ and let $\sigma_i$ be the complex distance between sides $s_{i-1}, s_{i+1}$.
The amplitude $\operatorname{Am}(\sigma_{i-2}, \sigma_i, \sigma_{i+2})$ introduced in~\cite[VI.5]{Fen}, is, up to sign, an invariant of the triple of alternate sides $s_{i-2}, s_i, s_{i+2}$. Its importance is that if $\mathcal{H}$ is constructed as described below from a positive ordered generator pair $(u,v)$, then up to sign the amplitude relative to the three sides $\operatorname{Ax} U, \operatorname{Ax} V, \operatorname{Ax} U^{-1}V^{-1}$, is the trace of the square root of the commutator $[U,V]$ and hence independent of the choice of generators. This point was used crucially in~\cite{LX}.
\begin{definition} \label{amplitude} Let $\mathcal{H}$ be a consistently oriented right angled hexagon with oriented sides $s_1, \ldots, s_6$ and let $\sigma_i$ be the complex distance between sides $s_{i-1}, s_{i+1}$. Define the \emph{amplitude} $\operatorname{Am}(\sigma_1,\sigma_3, \sigma_5) = -i \sinh \sigma_2 \sinh \sigma_3 \sinh \sigma_4$.
\end{definition}
See for example~\cite{Fen} or \cite{serwolp} for a discussion of complex length and hyperbolic right-angled hexagons.
Let $\sigma_{14}$ be the complex distance between the oriented lines $s_1$ and $s_4$. Using the cosine formula in the oriented right angled pentagon with the sides $s_1, s_2, s_3, s_4, s_{14}$ (where $s_{14}$ is the common perpendicular of $s_1$ and $s_4$, oriented from $s_1$ to $s_4$), we find $\cosh \sigma_{14} = - \sinh \sigma_2 \sinh \sigma_3$. Thus we can alternatively write the amplitude as
$\operatorname{Am}(\sigma_1,\sigma_3, \sigma_5) = i \cosh \sigma_{14} \sinh \sigma_4$.
We now fix a choice of lift $R \in \mathrm{SL}$ of the order two rotation about an oriented line using line matrices as described in \cite[V.2]{Fen}. Denote the oriented line with endpoints $\zeta, \zeta' \in \hat{\mathbb{C}}$, oriented from $\zeta$ to $\zeta'$, by $[\zeta, \zeta']$.
The \emph{line matrix} $R([\zeta, \zeta']) \in \mathrm{SL}$ is a choice of matrix representing the $\pi$-rotation about $[\zeta, \zeta']$.
If $\zeta, \zeta' \in \mathbb{C}$ then
$$ R([\zeta, \zeta']) = \frac{i}{\zeta'-\zeta} \begin{pmatrix} \zeta + \zeta' & -2 \zeta \zeta' \\ 2 & -\zeta - \zeta' \end{pmatrix},$$
while $$ R([\zeta, \infty]) = i \begin{pmatrix} 1 & -2 \zeta \\ 0 & -1 \end{pmatrix}, \ \ R([\infty, \zeta']) = -i \begin{pmatrix} 1 & -2 \zeta' \\ 0 & -1 \end{pmatrix}.$$
As shown in~\cite{Fen}, this definition respects the orientation of lines and is invariant under conjugation in $\mathrm{SL}$.
If $R_i$ is the line matrix associated to the oriented side $s_i$ of $\mathcal{H}$ as above, then $R_i^2 = -\mathrm{id}$ and $R_iR_{i+1} = -R_{i+1} R_i$. Moreover $R_{i-1}R_{i+1}$ is a loxodromic which translates by complex distance $2 \sigma_i$ along an axis which extends $s_i$. By~\cite[V.3]{Fen}, $\operatorname{Tr} R_{i-1}R_{i+1} = -2 \cosh \sigma_i$ and
$\operatorname{Tr} R_{i-1}R_iR_{i+1} = -2 i \sinh \sigma_i$. These formulae can be easily checked by letting $\zeta = e^{\sigma_i}$ and arranging $s_{i-1}, s_i$ and $s_{i+1}$ to be the oriented lines joining $[-1,1], [0,\infty], [-\zeta, \zeta]$ respectively so that
$$ R_{i-1} = \begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}, \ R_{i} = \begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix}, \ R_{i+1} = \begin{pmatrix} 0 & i\zeta \\ i/\zeta & 0 \end{pmatrix}. $$
We now explain the invariance of the amplitude under change of generator.
Suppose that $(u,v)$ is a positive ordered generator pair.
Construct an oriented right angled hexagon $\mathcal{H} = \mathcal{H}(u,v)$ with the axes of $(U,V, U^{-1}V^{-1})$ oriented in their natural directions, i.e.\ pointing in their respective translation directions, forming three alternate sides. The orientations of the three remaining sides then follow. We call this the \emph{standard hexagon} associated to $(u,v)$.
\begin{proposition}\label{amplitudeinvt}
Let $\mathcal{H} = \mathcal{H}(u,v)$ be the standard hexagon associated to the image of a positive ordered generator pair $(u,v)$. Let $s_2 = \operatorname{Ax} U, s_4 = \operatorname{Ax} V, s_6 = \operatorname{Ax} U^{-1}V^{-1}$ and label the other sides accordingly. Then up to sign, $\operatorname{Am}(\sigma_1,\sigma_3, \sigma_5)$ is independent of the choice of $(u,v)$.
\end{proposition}
\begin{proof}
With $\mathcal{H} = \mathcal{H}(u,v)$ as defined in the statement, we have $R_{3} R_1 = U$ and $R_{5}R_3 = V$ so that $R_1 R_{5} = U^{-1}V^{-1}$. Hence
$$UVU^{-1}V^{-1} = R_{3} R_1 R_{5} R_3 R_1 R_3 R_3 R_5 = -(R_{3} R_1 R_{5})^2.$$
On the other hand,
$$ \operatorname{Tr}(R_{5} R_4 R_{3}) \operatorname{Tr}(R_4 R_1) = \operatorname{Tr}(R_{5} R_4 R_{3} R_4 R_1) + \operatorname{Tr}(R_{5} R_4 R_{3} R_1 R_4) = -2 \operatorname{Tr} R_{5} R_3 R_{1}.$$
By the above, $\operatorname{Tr}(R_{5} R_4 R_{3}) \operatorname{Tr}(R_4 R_1) = -4 i \sinh \sigma_4 \cosh \sigma_{14} = 4 \operatorname{Am}(\sigma_1,\sigma_3, \sigma_5)$.
Since as we have seen the trace of the commutator is an invariant of generator triples,
it follows that so is $\operatorname{Am}^2(\sigma_1,\sigma_3, \sigma_5)$ and hence, up to sign, so is $\operatorname{Am}(\sigma_1,\sigma_3, \sigma_5)$.
\end{proof}
We refer to $\operatorname{Am}(\sigma_1,\sigma_3, \sigma_5) = -i \sinh \delta_{UV} \sinh \lambda(U) \sinh \lambda(V)$ as the \emph{amplitude} of $\mathcal{H}(u,v)$.
\subsection{Some simple observations}\label{observations}
We need a few more simple observations.
\begin{lemma} \label{cxinequality} \textup{(See~\cite{bow_mar}.)} Suppose that $\mathbf{u},\mathbf{v} \in \Omega$ are adjacent to an oriented edge $\vec e$ of $\mathcal{T}$ with $\mathbf{w}, \mathbf{z}$ being the regions at the head and tail of $\vec e$ respectively. Then $\Re \bigl( \frac{\hat{\mathbf{z}}}{\hat{\mathbf{u}} \hat{\mathbf{v}}} \bigr) \geq 1/2$, where $\hat{\mathbf{z}} = \phi_{\rho}(\mathbf{z})$ and so on as in Section~\ref{Bowditchbackground}. \end{lemma}
\begin{proof}
It is easy to check that if $\xi,\eta \in \mathbb{C}$ and $\xi+\eta = 1, |\eta| \leq |\xi|$, then $\Re \xi \geq 1/2$.
With $\mathbf{u},\mathbf{v}, \mathbf{w}, \mathbf{z}$ as in the statement
we have $\hat{\mathbf{z}} + \hat{\mathbf{w}} = \hat{\mathbf{u}} \hat{\mathbf{v}}$ and $|\hat{\mathbf{z}}| \geq |\hat{\mathbf{w}}|$.
Now apply the above with $\xi = \frac{\hat{\mathbf{z}}}{\hat{\mathbf{u}} \hat{\mathbf{v}}}, \eta = \frac{\hat{\mathbf{w}}}{\hat{\mathbf{u}} \hat{\mathbf{v}}}$.
\end{proof}
\begin{lemma} \label{tanhinequality} If $\xi \in \mathbb{C}$ and $\Re \xi>0$ then $\Re (\tanh \xi) \geq 0$. \end{lemma}
\begin{proof} If $\xi = x+iy$ then $\Re (\tanh \xi) = \frac{\sinh x \cosh x}{\lvert\cosh x \cos y + i \sinh x \sin y\rvert^2}$.\end{proof}
We will also need a comparison of hyperbolic translation lengths and traces.
For a loxodromic element $X \in \mathrm{SL}$ let $\ell(X)>0$ denote the (real) translation length and let $\lambda(X) = (\ell(X) + i \theta(X))/2$ be \emph{half} the complex length, so that $\operatorname{Tr} X = \pm 2 \cosh \lambda(X)$.
\begin{lemma}\label{compare2} There exists $L_0>0$ so that if
$\xi + i \eta \in \mathbb{C}$ with $\xi >L_0$ then
$\xi - \log 3 \leq \log |\cosh (\xi + i \eta)| \leq \xi$. In particular, for $X \in \mathrm{SL}$ we have $e^{\ell(X)}/3 \leq |\operatorname{Tr} X|/2 \leq e^{\ell(X)}$ whenever $\ell(X) > L_0$.
\end{lemma}
\begin{proof}
For the right hand inequality, since $|\cosh (\xi + i \eta)| = e^{\xi} |(1+ e^{-2\xi -2i\eta})|/2$ we have
$$\log |\cosh (\xi + i \eta)| = \xi + \log |(1+ e^{-2\xi -2i\eta})|/2 \leq \xi$$ since $|(1+ e^{-2\xi -2i\eta})|/2 \leq 1$.
For the left hand inequality, since $\xi > L_0$ we have, choosing $L_0$ large enough, $|(1+ e^{-2\xi -2i\eta})|/2 \geq 1/3$ so that $\log |(1+ e^{-2\xi -2i\eta})|/2 \geq -\log 3$ and hence
$\log |\cosh (\xi + i \eta)| \geq \xi -\log 3$.
\end{proof}
\subsection{The key step}\label{keystep}
We now come to the key steps from~\cite{LX} used to prove Theorem~\ref{BQimpliesPS}.
\begin{proposition} \label{angleinequality} (\cite[Lemma 5.1]{LX})
Suppose that $\rho \in \mathcal{B}$ and that $0 < \alpha < \pi/2$ is given. Suppose also that as in Lemma~\ref{cxinequality}, $\mathbf{u},\mathbf{v} \in \Omega$ are adjacent to an oriented edge $\vec e$ of $\mathcal{T}$. With $N_0$ as in Corollary~\ref{headsandtails}, suppose $u \in \mathbf{u}, v \in \mathbf{v}$ are a positive generator pair and that $\max \{||u||, ||v||\} > N_0$.
Let $\delta_{UV}$ be the complex distance between the axes of $U = \rho(u), V = \rho(v)$, oriented in the direction of positive translation.
Then there exists $L_1>0$ depending only on $\alpha$ and $\rho$ such that $|\Im \delta_{UV}| \leq \alpha$ whenever $\max \{\ell(U), \ell(V)\} > L_1$.
\end{proposition}
\begin{proof} Without loss of generality, suppose that $\ell(U) \geq \ell(V)$. Let $\delta_{UV} = d + i \theta$. Since by assumption $\Omega(2)$ is finite and $\operatorname{Tr} \rho(g) \neq \pm 2$ for all $g \in \mathcal{P}$, there exists $c>0$ such that $|\operatorname{Tr} \rho(g) \pm 2| > c$ for all $g \in \mathcal{P}$. Hence $|\sinh \lambda(G)|$ is uniformly bounded away from $0$ for all $g \in \mathcal{P}$, where $G = \rho(g)$. By Proposition~\ref{amplitudeinvt}
the absolute value of the amplitude of $\mathcal{H}(u,v)$, that is, $|\sinh \delta_{UV} \sinh \lambda(U) \sinh \lambda(V)|$, is independent of $(u,v)$. Combined with Lemma~\ref{compare2}, it follows that provided that $\ell(U)> L_0$ we have
\begin{equation}\label{distbound}
|\sinh \delta_{UV}| \leq k e^{-\ell(U)}
\end{equation}
for a constant $k$ which depends only on the representation $\rho$.
Since $|\sinh \delta_{UV}|^2 = \cosh^2 d \sin^2 \theta + \sinh^2 d \cos^2 \theta$ we deduce that $d \to 0$ and either $\theta \to 0$ or $\theta \to \pi$ as $\ell(U) \to \infty$.
Now the cosine formula in $\mathcal{H}(u,v)$ gives
\begin{equation*}
\cosh \delta_{UV} = \frac{\cosh \lambda(U^{-1}V^{-1}) - \cosh \lambda(U) \cosh \lambda(V)}{\sinh \lambda(U) \sinh \lambda(V)}
\end{equation*}
and hence
\begin{equation*}
\cosh \delta_{UV} \tanh \lambda(U) \tanh \lambda(V) = \frac{\cosh \lambda(VU)}{\cosh \lambda(U) \cosh \lambda(V)} - 1
\end{equation*}
which gives
\begin{equation*}
1+ \Re ( \cosh \delta_{UV} \tanh \lambda(U) \tanh \lambda(V) ) = \Re \Bigl( \frac{\cosh \lambda(VU)}{\cosh \lambda(U) \cosh \lambda(V)} \Bigr).
\end{equation*}
By Corollary~\ref{headsandtails} the $T$- and $W$-arrows on $\vec e$ agree. Hence $\mathbf{u}\mathbf{v}$ is the region at the tail of $\vec e$ and $\mathbf{u}\mathbf{v}^{-1}$ the one at its head. Thus by Lemma~\ref{cxinequality} we have $\Re \Bigl( \frac{\cosh \lambda(VU)}{\cosh \lambda(U) \cosh \lambda(V)} \Bigr) \geq 1$ from which it follows that
$\Re ( \cosh \delta_{UV} \tanh \lambda(U) \tanh \lambda(V) ) \geq 0$. By Lemma~\ref{tanhinequality}, $\Re \tanh \lambda(U), \Re \tanh \lambda(V) \geq 0$ so that $\Re \cosh \delta_{UV} = \cosh d \cos \theta \geq 0$ from which we deduce that $\theta \to 0$.
This completes the proof. \end{proof}
\begin{proposition} \label{sephyperplanes} (\cite[Theorem 5.4]{LX})
Suppose that $\mathbf{u},\mathbf{v} \in \Omega$ are adjacent to an edge $e$ of $\mathcal{T}$. Then there is a half space $\hat H$ and $L_2>0$ so that if $\max \{\ell(U), \ell(V)\} \geq L_2$, then for any $X,Y \in \{U,V\}$, the half spaces $X^{-1} \hat H \supset \hat H \supset Y \hat H$ are properly nested.
\end{proposition}
\begin{proof}
Suppose for definiteness that $\ell(U) \geq \ell(V)$. Let $H$ be the hyperplane orthogonal to $\operatorname{Ax} V$ and containing the common perpendicular $D$ to $\operatorname{Ax} U, \operatorname{Ax} V$. Let $\hat H$ be the half space cut off by $H$ and containing the forward pointing unit tangent vector $\mathbf{t}_V$ to $\operatorname{Ax} V$ at $P = \operatorname{Ax} V \cap D$. Note that $V^{-1} \hat H \supset \hat H \supset V \hat H$ are properly nested since $V$ is loxodromic and translates $H$ disjointly from itself.
Now suppose $Y = U$. Note that for $L$ sufficiently large, by Proposition~\ref{fibonacci}, $\ell(U) > L$ implies that $||u||_{a,b}> N_0$ with $N_0$ as in Proposition~\ref{angleinequality}. Hence
by Proposition~\ref{angleinequality} we can choose $L = L_1(\pi/4)$ so that $|\Im \delta_{UV}| \leq \pi/4$ whenever $\ell(U) \geq L$. Let $Q$ be the intersection point of $\operatorname{Ax} U$ with $D$ and let $\mathbf{t}_U$ be the forward pointing unit tangent vector along $\operatorname{Ax} U$ at $Q$. Then $\mathbf{t}_V$ is translated by distance $\Re \delta_{UV}$ and rotated by angle $\Im \delta_{UV}$ along $D$ to coincide with $\mathbf{t}_U$ at $Q$. Thus $\mathbf{t}_U$
makes an angle at most $\pi/4$ with the inward pointing normal $\mathbf{n}_Q$ to $\hat H$ at $Q$. Likewise $U(\mathbf{t}_U)$ makes an angle at most $\pi/4$ with the inward pointing normal $U(\mathbf{n}_Q)$ to $U(\hat H)$.
It follows by Corollary~\ref{doubleconelemma} that for $\ell(U)$ sufficiently large, the half planes
$\hat H \supset U(\hat H)$ are properly nested and hence so are $U^{-1}(\hat H) \supset \hat H$.
This completes the proof.
\end{proof}
\begin{proposition}\label{longwordsqgeod} (\cite[Theorem 5.4]{LX})
Suppose that $(u,v)$ is a positive generator pair such that $\max \{\ell(U), \ell(V)\} > L_2$ with $L_2$ as in Proposition~\ref{sephyperplanes}.
Let $\mathcal{C}(u,v)$ denote the set of all cyclically shortest words which
are products of positive powers of $u$'s and $v$'s.
Then the collection of broken geodesics $\{\mathrm{br}_{\rho}(w; (u,v)), w \in \mathcal{C}(u,v)\}$ is uniformly quasigeodesic.
\end{proposition}
\begin{proof} With the notation of Proposition~\ref{sephyperplanes}, pick a basepoint $O$ in the hyperplane $H$
and let $d$ be the minimum distance between any pair of the planes $H, U(H), V(H)$.
Label the vertices of $\mathrm{br}_{\rho}(w; (u,v))$ in order as $P_n, n \in \mathbb{Z}$ with $O = P_0$ and denote the image of $H$ containing $P_n$ by $H_n$.
Any three successive vertices $P_n, P_{n+1}, P_{n+2}$ are of the form $ZX^{-1}O, ZO, ZYO$ for some $X,Y \in \{U = \rho(u), V = \rho(v)\}, Z \in \rho(F_2)$. Therefore by
Proposition~\ref{sephyperplanes} the corresponding half spaces $\hat H_n, \hat H_{n+1}, \hat H_{n+2}$ are properly nested.
It follows that each consecutive pair of half spaces in the sequence $\ldots, \hat H_n, \hat H_{n+1}, \hat H_{n+2}, \ldots$ are properly nested
and hence that $d(P_n, P_m) \geq d(\hat H_n, \hat H_m) = |n-m|d$ which proves the result.
\end{proof}
\section{The Bowditch condition implies primitive stable}\label{sec:BQimpliesBIP}
In this section we prove Theorem~\ref{introthmA}, that a representation $\rho \colon F_2 \to \mathrm{SL}$ satisfies the $BQ$-conditions if and only if $\rho$ is primitive stable.
The result in one direction is not hard, see for example~\cite{lupi}.
\begin{proposition}\label{PSimpliesBQ} The condition $PS$ implies the Bowditch $BQ$-conditions.
\end{proposition}
\begin{proof} Let $u \in \mathcal{P}$. If the broken geodesic $\mathrm{br}(u; (a,b))$ is quasigeodesic then it is neither elliptic nor parabolic, so the first condition $\operatorname{Tr} U \notin [-2,2]$ holds.
If the collection of broken geodesics $\mathrm{br}(u; (a,b)), u \in \mathcal{P}$ is uniformly quasigeodesic then $\mathrm{br}(u; (a,b))$ is at a uniformly bounded distance from $\operatorname{Ax} U$ for each $u \in \mathcal{P}$. We deduce that
$$c' ||u||_{a,b} - \epsilon \leq d_{\mathbb{H}}(O, UO) \leq c + \ell(U)$$ for uniform constants $c,c', \epsilon>0$.
Since only finitely many words have word length less than a given bound, this implies that only finitely many elements have hyperbolic translation lengths and therefore, by Lemma~\ref{compare2}, traces, less than a given bound. \end{proof}
It remains to prove the converse.
The following lemma is well known.
\begin{lemma} \label{singleqgeod}
Let $w$ be a cyclically shortest word in $F_2$ and let $\rho \colon F_2 \to \mathrm{SL}$. Suppose that the image $W = \rho(w)$ is loxodromic and that $(u,v)$ is a generator pair. Then the broken geodesic $\mathrm{br}_{\rho}(w; (u,v))$ is quasigeodesic with constants depending only on $\rho, w$, and $(u,v)$.
\end{lemma}
\begin{proof}
Suppose that $||w||_{(u,v)} = k$ and number the vertices $P = \rho(x)O, x \in F_2$ of $\mathrm{br}_{\rho}(w; (u,v))$ in order as $P_r, r \in \mathbb{Z}$ with $P_0 = O$. We have to show that there exist constants $K,\epsilon>0$ so that if $n<m$ then
$$(m-n)/K - \epsilon \leq d(P_n,P_m) \leq K(m-n) + \epsilon.$$
Pick $c>0$ so that $d(O,\rho(h)O) \leq c$ for $h \in \{u,v\}$. Clearly $d(P_n,P_m) \leq c(m-n)$.
For the lower bound, write $m-n = rk + k_1$ for $r \geq 0, 0 \leq k_1 < k$.
Then for some cyclic permutation of $w$, say $w'$, setting $W' = \rho(w')$ we have
$W'^r(P_n) = P_{n+rk}$ so that $d(P_n, P_{n+rk}) \geq r \ell(W)$.
Thus $$d(P_n,P_m) \geq d(P_n,P_{n+rk}) - d(P_{n+rk}, P_m) \geq (m-n) \ell(W)/k - kc - \ell(W)/k.$$
\end{proof}
\begin{theorem}\label{BQimpliesPS} The Bowditch $BQ$-conditions imply $PS$.
\end{theorem}
\begin{proof}
Choose a finite sink tree $T_F = T_F(M_0)$ as in Theorem~\ref{sinktree}.
Use Proposition~\ref{wordsandtraces} to enlarge $T_F = T_F(M_0)$ if necessary so that the $W$- and $T$-arrows coincide for every edge outside $T_F$.
By further increasing $M_0$ if necessary we can assume that
$|\operatorname{Tr} \rho(u)|> M_0$ implies $\ell(U)> \max \{L_0, L_2\}$ with $L_0, L_2$ as in Lemma~\ref{compare2} and Proposition~\ref{longwordsqgeod} respectively.
Suppose now that $e \notin T_F$. Then at least one of the regions $\mathbf{u}$ adjacent to $e$ has $\ell(U)> \max \{L_0, L_2\}$ and moreover
the $W$- and $T$-arrows on $e$ coincide. Let $\mathbf{v}$ be the other region adjacent to $e$ and suppose that $u \in \mathbf{u}, v \in \mathbf{v}$ are a positive pair, so that $||uv|| > ||uv^{-1}||$. Since the $W$-arrow on $e$ points the same direction as the $T$-arrow
it follows that $|\operatorname{Tr} UV| \geq |\operatorname{Tr} UV^{-1}|$.
For the same reason, every region in $\mathcal{W}(\vec e)$ corresponds to a word which is a product of positive powers of $u$'s and $v$'s.
Thus by
Proposition~\ref{longwordsqgeod} the collection of all broken geodesics corresponding to regions in $\mathcal{W}(\vec e)$ is uniformly quasigeodesic.
Since $T_F$ is finite, there are finitely many edges $\{\vec e_i, i=1, \ldots, k\}$ whose heads meet $T_F$. Moreover every region not adjacent to an edge in $T_F$ is in $\mathcal{W}(\vec e_i)$ for some $i$.
There are only finitely many regions $\mathbf{w}$ adjacent to some edge of $T_F$.
By Lemma~\ref{singleqgeod}, for each such $\mathbf{w}$ and $w \in \mathbf{w}$, the broken geodesic $\mathrm{br}_{\rho}(w; (a,b))$ is quasigeodesic with constants depending on $\mathbf{w}$.
It follows that there is a finite set of generator pairs $\mathcal{S}$, such that any $w \in F_2$ can be expressed as a word in some $(s,s') \in \mathcal{S}$ in such a way that $\mathrm{br}_{\rho}(w; (s,s'))$ is quasigeodesic with constants depending only on $(s,s')$. For fixed $(s,s')$ each quasigeodesic $\mathrm{br}_{\rho}(w; (s,s'))$ can be replaced by a broken geodesic $\mathrm{br}_{\rho}(w; (a,b))$
which is also quasigeodesic with a change of constants depending only on $(s,s')$ and not on $w$. The total number of replacements required involves only finitely many constants and the result follows.
\end{proof}
\section{Palindromicity and the Bounded Intersection Property}\label{BIP}
It is easy to prove Theorem~\ref{introthmB}, that $\rho \in \mathcal{B}$ implies that $\rho$ has the bounded intersection property, using Theorem~\ref{BQimpliesPS}.
\begin{proposition} \label{prop:PSImpliesbounded} If a representation $\rho \colon F_2 \to \mathrm{SL}$ is primitive stable then it satisfies $BIP$.
\end{proposition}
\begin{proof} The broken geodesic corresponding to any primitive element by definition passes through the basepoint $O$.
The broken geodesics $\{\mathrm{br}_{\rho}(u; (a,b))\}, u \in \mathcal{P}$ are by definition uniformly quasigeodesic, so each is at uniformly bounded distance to its corresponding axis. Hence all the axes are at uniformly bounded distance to $O$ and so in particular axes corresponding to primitive palindromic elements cut the three corresponding special hyperelliptic axes in bounded intervals.\end{proof}
This result is of course much more interesting once we know that all primitive elements have palindromic representatives. We make a precise statement in Proposition~\ref{uniquepalindromes}.
In Theorem~\ref{direct} we then give a direct proof that $\rho \in \mathcal{B}$ implies that $\rho$ has the bounded intersection property.
\subsection{Generators and palindromicity}\label{genpalin}
Let $\mathbb{E} = \{0/1, 1/0, 1/1\}$ and define a map $\psi \colon \hat{\mathbb{Q}} \to \mathbb{E}$ by $\psi(p/q) = \bar p/\bar q$, where $\bar p, \bar q$ are the mod 2 representatives of $p,q$ in $\{0,1\}$. We refer to $\psi(p/q)$ as the mod 2 equivalence class of $p/q$.
Say $p/q \in \hat{\mathbb{Q}}$ is of type $\eta \in \mathbb{E}$ if $\psi(p/q) = \eta$. Say a generator $u \in F_2$ is of type $\eta$ if $u \in [p/q]$ and $p/q$ is of type $\eta$; likewise a generator pair $(u,v)$ is of type $(\eta, \eta')$ if $u, v$ are of types $\eta, \eta'$ respectively.
As in Section~\ref{sec:BIP}, we fix once and for all a generator pair $(a,b)$ and identify $a$ with $0/1$, $b$ with $1/0$ and $ab$ with $1/1$. The \emph{basic generator pairs} are the three (unordered) generator pairs $(a,b)$, $(a,ab)$ and $(b,ab)$ corresponding to $(0/1,1/0)$, $(0/1,1/1)$ and $(1/0,1/1)$ respectively. (Here the order $ba$ or $ab$ is not important but fixed.)
For $\eta,\eta' \in \mathbb{E}$ we say $u$ is palindromic with respect to $(\eta,\eta'), \eta \neq \eta'$ if it is palindromic when rewritten in terms of the basic pair of generators corresponding to $(\eta,\eta')$; equally we say that a generator pair $(u,v)$ is cyclically shortest (respectively palindromic with respect to the pair $(\eta,\eta')$) if each of $u,v$ has the same property. We refer to a generator pair $(u,v)$ which is palindromic with respect to some pair of generators as a \emph{palindromic pair}. Finally, say a generator pair $(u,v)$ is conjugate to a pair $(u',v')$ if there exists $g \in F_2$ such that
$gug^{-1} = u'$ and $gvg^{-1} = v'$.
\begin{proposition} \label{uniquepalindromes}
If $u \in \mathcal{P}$ is positive and of type $\eta \in \mathbb{E}$, then, for each $\eta' \neq \eta$, there is exactly one conjugate generator $u'$ which is positive and palindromic with respect to $(\eta,\eta')$.
If $(u,v)$ is a positive generator pair of type $(\eta,\eta')$, then there is exactly one conjugate generator pair $(u',v')$ which is positive and palindromic with respect to $(\eta,\eta')$.
\end{proposition}
\begin{proof}
We begin by proving the existence part of the second statement. Observe that the edges of the Farey tree $\mathcal{T}$ may be divided into three classes, depending on the mod two equivalence classes of the generators labelling the neighbouring regions.
In this way we may assign colours $r,g,b$ to the pairs
$(0/1,1/0); (0/1,1/1); (1/0,1/1)$ respectively and extend to a map $col$ from edges to $\{r,g,b\}$, see Figure~\ref{fig:colouredtree}. Note that no two edges of the same colour are adjacent, and that the colours round the boundary of each complementary region alternate.
\begin{figure}[ht]
\centering
\includegraphics[width=7.5cm]{Figs/poster.pdf}
\caption{The coloured Farey tree. The colours round the boundary of each complementary region alternate. The picture is a conjugated version of the one in Figure~\ref{fig:farey}, arranged so as to highlight the three-fold symmetry between $(a,b,ab)$. Image courtesy of Roice Nelson.}\label{fig:colouredtree}
\end{figure}
As usual let $e_0$ be the edge of ${\cal T}$ with adjacent regions labelled by $(\betaf a, \betaf b)$ and let $ q^+(e_0)$ and $ q^-(e_0)$ denote the vertices at the two ends of $e_0$, chosen so that the neighbouring regions are $(\betaf a, \betaf b, \betaf {ab})$ and $(\betaf a,\betaf b, \betaf {ab}^{-1})$ respectively.
Removing either of these two vertices disconnects ${\cal T}$. We deal first with
the subtree ${\cal T}^+$ consisting of the connected component of ${\cal T} \sigmaetminus \{q^-(e_0)\}$ which contains $q^+(e_0)$. Note that the regions adjacent to all edges of ${\cal T}^+$ correspond to non-negative fractions.
Let $e$ be a given edge of ${\cal T}^+$ and let $q^+(e)$ denote the vertex of $e$ furthest from $q^-(e_0)$. Let $\gamma = \gamma(e)$ be the unique shortest edge path joining $q^+(e)$ to $q^-(e_0)$, hence including both $e$ and $e_0$. The \epsilonsilonmph{coloured level} of $e$, denoted $col.lev(e)$, is the number of edges $e'$ including $e$ itself in $\gamma(e)$ with $col(e') =col(e)$.
Note that $\gamma(e)$ necessarily includes $e_0$, and, provided $ e \neq e_0$, one or other of the two edges emanating from $q^+(e_0)$ other than $e_0$. Thus $col.lev(e) = 1$ for all three edges meeting $q^+(e_0)$ while for all other edges of ${\cal T}^+$ we have $col.lev(e) > 1$.
Now suppose that $e$ is the edge of ${\cal T}^+$ whose neighbouring regions are labelled by the given generator pair $(u,v)$.
The proof will be by induction on $col.lev(e)$.
Suppose first $col.lev(e)=1$. If $e = e_0$ the result is clearly true, since the pair $(a,b)$ is palindromic with respect to itself.
The other two edges emanating from $q^+(e_0)$ have neighbouring regions corresponding to the base pairs $(a,ab)$ and $(ab,b)$, each of which pair is palindromic with respect to itself, proving the claim.
Suppose the result is proved for all edges of coloured level $k \geq 1$. Let $e$ be an edge whose adjacent generators are of type
$(\eta, \eta')$. Suppose that $col(e) =c$ and let $e'$ be the next edge of $\gamma$ with $col(e') = c$ along the path $\gamma(e)$ from $q^+(e)$ to $q^-(e_0)$. (Note that such $e'$ always exists since $k+1\geq 2$.)
By the induction hypothesis the standard generator pair $(u,v)$ adjacent to $e'$ is conjugate to a positive pair $(u',v')$ which is palindromic of the same type $(\eta, \eta')$.
Let $q^+(e')$ be the vertex of $e'$ closest to $e$, so that
the subpath $\gamma'$ of $ \gamma$ from $q^+(e')$ to $q^-(e)$ contains no other edges of colour $c$, where $q^-(e)$ is the vertex of $e$ other than $q^+(e)$. Since there cannot be two adjacent edges of the same colour, the edges of $\gamma'$ must alternate between the two other colours. This implies (see Figure~\ref{fig:colouredtree}) that $\gamma'$ forms part of the boundary of a complementary region $R$ of ${\cal T}^+$. Moreover the third edge at each vertex along $\partial R$ (that is, the one which is not contained in $\partial R$), is coloured $c$.
\begin{figure}
\centering
\includegraphics[width=7.5cm]{Figs/regionboundary}
\caption{Labels of regions round $\partial {\mathcal R}$ showing the $W$-arrows. Note that labels are concatenated in anticlockwise order round the boundary circle.}\label{fig:regionboundary}
\end{figure}
Without loss of generality, suppose that $u$ is before $v$ in the anti-clockwise order round $\partial \mathbb D$.
Then the generator associated to $R$ is $uv$.
Since $(u,v)$ is a standard positive pair, moving in anticlockwise order around $\partial {\mathcal R}$ starting from $v$, successive regions have labels
\[ v,u, u^2v, u^2vuv, \ldots, u^2v(uv)^n, \ldots , \]
see Figure~\ref{fig:regionboundary}. Any successive pair, in particular the pair adjacent to $e$, can be simultaneously conjugated to the form $(u v(uv)^ku, uv(uv)^{k+1}u)$ for some $k \geq 0$. Since by hypothesis the generator pair $(u,v)$ is conjugate to a pair $(u',v')$ palindromic with respect to $(\eta, \eta')$, so is $(u v(uv)^ku, uv(uv)^{k+1}u)$.
Similarly, the regions moving clockwise around $\partial R$ starting from $u$ have standard labels $u,v, uv^2, uvuv^2, \ldots, (uv)^nuv^2, \ldots $. Thus any successive pair can be simultaneously conjugated into the form $(v(uv)^kuv , v(uv)^{k+1}uv)$ for some $k \geq 0$ which
is likewise conjugate to a pair palindromic with respect to $(\eta, \eta')$.
By the same argument for the tree $ {\cal T}^-$ consisting of the connected component of ${\cal T} \setminus \{q^+(e_0)\}$ which contains $q^-(e_0)$ we arrive at the statement that the generators associated to each edge of $ {\cal T}^-$ can be written in a form which is palindromic with respect to one of the three generator pairs associated to the edges emanating from $q^-(e_0)$, that is,
$(a, b^{-1})$, $(a, b^{-1}a)$ or $(b^{-1}a, b^{-1})$. The first pair is obviously palindromic with respect to $(a, b^{-1})$.
Noting that $b^{-1}a = (b^{-1}a^{-1})a^2$ which is conjugate to the word $a(b^{-1}a^{-1})a $ palindromic with respect to $(a,ab)$, and that $b^{-1}a = b^{-1} (ab) b^{-1} $ which is palindromic with respect to $(b,ab)$,
the result follows.
Now we prove the existence part of the first claim. Suppose that $u \in {\cal P}$ is of type $\eta \in \mathbb E$ and that $\eta' \neq \eta$. Choose a generator $v$ of type $\eta'$ so that $(u,v)$ is a positive generator pair. By the above there is a conjugate pair $(u',v')$ palindromic with respect to $(\eta,\eta')$ and $u'$ is a generator as required.
To see that $u'$ is unique,
suppose that cyclically shortest positive primitive elements $u$ and $u'$ are in the same conjugacy class and are both palindromic with respect to the same pair of generators, which we may as well take to be $\{0/1,1/0\}$. Notice that $u$ necessarily has odd length, for otherwise the exponents of $a$ and $b$ are both even.
Let $u = e_{r} \ldots e_{1} f e_{1} \ldots e_{r}$ and suppose that $f' = e_{k}$ is the centre point about which $u'$ is palindromic for some $1 \leq k \leq r$. Then
$\ldots uu \ldots $ is periodic with minimal period of length $2r+1$ and contains the subword
\[ e_{r} \ldots e_{1} f e_{1} \ldots e_{{k-1}}f' e_{{k-1}} \ldots e_{1} f e_{1}\ldots e_{r} \]
so after $f e_{1} \ldots e_{{k-1}}f' e_{{k-1}} \ldots e_{1}$ the sequence repeats. Since this subword has length $2k<2r+1$ this contradiction proves the result.
The claimed uniqueness of generator pairs follows immediately.
\end{proof}
\subsection{Direct proof of Theorem~\ref{introthmB}}\label{sec:direct}
It may also be of interest to give a direct proof that $\rho \in {\mathcal B}$ implies that $\rho $ has the bounded intersection property.
Theorem~\ref{direct} below is a simplified version of the proof of this result from~\cite{serPS}. It is based on estimating the distance between pairs of palindromic axes along their common perpendicular. We use the estimate~\eqref{distbound} derived from the invariance of the amplitude (up to sign) under change of generators to improve the corresponding estimate in Proposition 4.6 in~\cite{serPS}.
\begin{theorem}\label{direct}(Direct proof of Theorem~\ref{introthmB}.)
If $\rho \in {\mathcal B}$ then $\rho $ has the bounded intersection property.
\end{theorem}
\begin{proof}
Assume that $\rho \in {\mathcal B}$ and choose $M_0 \geq 2$ and a finite connected non-empty subtree $T_F$ of ${\cal T}$ as in Theorem~\ref{sinktree}. Let $\Omega(T_F) $ be the set of regions $\bu \in \Omega$ such that $\bu$ is adjacent to an edge of $T_F$.
By enlarging $T_F$ if necessary, we can ensure that every region in $\Omega(2)$ is adjacent to some edge of $T_F$.
In addition, since there are only finitely many possible pairs of elements of $\Omega(2)$, we may yet further enlarge $T_F$ so that no edge outside $T_F$ is adjacent to a region in $\Omega(2)$ on both sides.
Suppose the generator $u = u_1$ is palindromic with respect to $\eta$ and that $\eta' \neq \eta$. Without loss of generality, we may take $u$ positive. Let ${\mathcal E} = {\mathcal E}_{\eta,\eta'}$ be the corresponding special hyperelliptic axis.
Let $\Xi$ denote the set of axes corresponding to palindromic representatives of $\bv \in \Omega(T_F)$ which are of types either $\eta$ or $\eta'$. It is sufficient to see that ${ A}x U$ meets ${\mathcal E}$ at a uniformly bounded distance to one of the finitely many axes in $\Xi$.
If $\bu_1 \in \Omega(T_F)$ there is nothing to prove, so suppose that $\bu_1 \notin \Omega(T_F)$. Choose an oriented edge $\vec e_1$ in $\partial \bu_1$. Then there is a strictly descending path $\beta$ of $T$-arrows $\vec e_1, \ldots, \vec e_n$ so that the head of $\vec e_n$ meets an edge in $T_F$, and this is the first edge in $\beta$ with this property.
We claim that there is a sequence of positive generators
$u_1 = u, u_2, \ldots, u_k \in {\cal P}$ such that for $i = 1, \ldots, k-1$:
\begin{enumerate}
\item $(\bu_i, \bu_{i+1})$ are neighbours adjacent to an edge of $\beta$.
\item $u_i \in \bu_i, u_{i+1} \in \bu_{i+1}$ and
$(u_i, u_{i+1})$ is a positive generator pair palindromic with respect to $(\eta, \eta')$.
\item $\bu_k \in \Omega(T_F)$ but $\bu_i \notin \Omega(T_F), 1 \leq i <k$.
\end{enumerate}
Suppose that $u_1 , \ldots, u_i$ have been constructed with properties (1) and (2) with $i \geq 1$ and that $\bu_i \notin \Omega(T_F)$. The path $\beta$ travels round $\partial \bu_i$, eventually leaving it along an arrow $\vec e$ which points out of $\partial \bu_i$. If $\bu_i$ is of type $\eta$ (respectively $\eta'$) then of the two regions adjacent to $\vec e$, one, $\bu'$ say, is of type $\eta'$ (respectively $\eta$). Set $\bu_{i+1}= \bu'$ and choose $u_{i+1} \in \bu_{i+1}$ so that $(u_i, u_{i+1})$ is positive and palindromic with respect to $(\eta, \eta')$. (Notice that we are using the uniqueness of the palindromic form for $u_i$, in other words if $(u_{i-1}, u_{i})$ is the positive palindromic pair associated to the regions
$(\bu_{i-1}, \bu_{i})$ then $(u_{i}, u_{i+1})$ is the positive palindromic pair associated to the regions
$(\bu_{i}, \bu_{i+1})$.) If $\bu_{i+1} \in \Omega(T_F)$ we are done, otherwise continue as before. Since $\beta$ eventually lands on an edge of $T_F$, the process terminates. This proves the claim.
Since $(u_i, u_{i+1})$ are palindromic with respect to $(\eta, \eta')$, the axes ${ A}x U_i, { A}x U_{i+1}$ are orthogonal to the hyperelliptic axis ${\mathcal E}_{\eta,\eta'}$ and hence Equation~\eqref{distbound} gives $d({ A}x U_i, { A}x U_{i+1}) \leq O(e^{-\ell(U_i)}), 1 \leq i <k$.
Now let $\vec e$ be the oriented edge between $\bu_{k-1}, \bu_k$ and let ${\mathcal W}(\vec e)$ be its wake. Then since the edge between $\bu_i, \bu_{i+1}$ is always oriented towards $\vec e$, we see that $\bu_i \in {\mathcal W}(\vec e), 0 \leq i \leq k$.
Let $F_{\vec {e}}$ be the
Fibonacci function on ${\mathcal W}(\vec e)$ defined immediately above Lemma~\ref{fibonacciwake}.
It is not hard to see that for $0 \leq i \leq k$ we have $F_{\vec {e}}(\bu_i) \geq k-i$.
By construction, $\bu_{k-1} \notin \Omega(T_F) $ so that, by our assumption on $T_F$, we have $\bu_{k-1} \notin \Omega(2)$.
Moreover by connectivity of $T_F$, no edge in ${\mathcal W}_{{\mathcal E}}(\vec e)$ is in $T_F$ and hence none of these edges is adjacent on both sides to regions in $\Omega(2)$.
Thus by Lemma~\ref{fibonacciwake}, there exist $c>0, n_0 \in \mathbb N$ depending only on $\rho$ and not on $\vec e$ such that $\log^+{|{\cal T}r U_i|} \geq c (k-i)$ for all but at most $n_0$ of the regions $\bu_i$.
Hence for all except some uniformly bounded number of the regions $\bu_i$, $ \ell(U_i) \geq c(k-i) -\log 2$. Since all axes ${ A}x U_i$ intersect ${\mathcal E}$ orthogonally in points $P_i$ say, it follows that
$d({ A}x U_1, { A}x U_k) $ is bounded above by the sum $\sum_1^{k-1} d({ A}x U_i, { A}x U_{i+1}) $ of the distances between the points $P_i, P_{i+1}$. Since $d({ A}x U_i, { A}x U_{i+1}) \leq O(e^{-\ell(U_i)}), 1 \leq i <k$,
the distance from $ { A}x U_1$ to one of the finitely many axes in $\Xi$ is uniformly bounded above, and we are done.
\end{proof}
\begin{comment}
\betaegin{lemma} {\lambda}abel{plughole} Choose $M_1 \gammaeq M_0$ so that every edge adjacent to a sink is in $T_F(M_1)$. (Since every sink is in $T_F$ there are at most finitely many sinks.) Let $M \gammaeq M_1$ and suppose that $\betau {\bf i}n {\Omega}mega \sigmaetminus {\Omega}mega_{sho}(M)$. Then there is an oriented edge ${\mathcal V}ec e$ pointing out of $\betau$ so that $ {\mathcal V}ec e$ is not contained in $ T_F$.
\epsilonsilonnd{lemma}
\betaegin{proof}
Label the regions adjacent to $\betau$ consecutively round $\deltad \betau$ by $\betay_n, n {\bf i}n {m}athbb Z$ and let $\vec e_n$ denote the edge between $\betay_n, \betau$. By~\gammaammaite{tan_gen} Lemma 3.9, for large enough $|n|$ the $T$-arrows on the edges round $\deltad \betau$ point in the direction of decreasing $|n|$. Thus there is at least one $r{\bf i}n {m}athbb Z$
so that the heads of $\vec e_r$ and $\vec e_{r+1}$ meet at a common vertex $q {\bf i}n \deltad \betau$. The remaining arrow at $q$ must point out of $\betau$ for otherwise $q$ is a sink vertex and hence by the above choice of $T_F$, all the edges meeting at $q$ are in $T_F$, so that $\betau {\bf i}n {\Omega}mega(M_1) \sigmaubset {\Omega}mega(M)$ contrary to assumption.
\epsilonsilonnd{proof}
We call such an edge a \epsilonsilonmph{plughole} of $\betau$.
\betaegin{theorem}{\lambda}abel{direct}(Direct proof of Theorem~sef{introthmB})
If $sho {\bf i}n { \mathcal B}$ then $sho $ has the bounded intersection property.
\epsilonsilonnd{theorem}
\betaegin{proof}
Choose $L> L_0$ as in Lemma~sef{compare2} so that $|{\cal T}r U| > 2 e^{L}$ implies $\epsilonsilonll(U) >L$. With $M_1$ as in Lemma~sef{plughole} choose $M = {m}ax \{M_1, 2e^L\}$. Suppose the generator $u = u_1$ is palindromic with respect $ \eta$ and that $\eta' \neq \eta$. Let ${\mathcal E} = {\mathcal E}_{\eta,\eta'}$ be the corresponding special hyperelliptic axis.
Let ${{m}athcal X}i$ denote the set of axes corresponding to palindromic representatives of $\betav {\bf i}n {\Omega}mega(M)$ which are of types either $\eta$ or $\eta'$. It is sufficient to see that ${ A}x U$ meets ${\mathcal E}$ at a uniformly bounded distance to one of the finitely many axes in ${{m}athcal X}i$.
Suppose that $\betau_1 \notin {\Omega}mega(M)$. Choose an oriented edge $\vec e_1$ in $\deltad \betau_1$. Then there is a strictly descending path $\betaeta$ of $T$-arrows $\vec e_1, {\lambda}dots, \vec e_n$ so that the head of $\vec e_n$ meets an edge in $T_F$.
We claim that there is a sequence of positive generators
$u_1 = u, u_2, {\lambda}dots, u_k {\bf i}n {\cal P}$ such that for $i = 1, {\lambda}dots, k-1$:
\betaegin{enumerate}
{\bf i}tem $(\betau_i, \betau_{i+1})$ are neighbours adjacent to an edge of $\betaeta$.
{\bf i}tem $u_i {\bf i}n \betau_i, u_{i+1} {\bf i}n \betau_{i+1}$ and
$(u_i, u_{i+1})$ is a positive generator pair palindromic with respect to $(\epsilonsilonta, \epsilonsilonta')$.
{\bf i}tem $\betau_k {\bf i}n {\Omega}mega(M)$ but $\betau_i \notin {\Omega}mega(M), 1 {\lambda}eq i <k$.
\epsilonsilonnd{enumerate}
Suppose that $u_1 = u, {\lambda}dots, u_i$ have been constructed with these properties. If $\betau_i {\bf i}n {\Omega}mega(M)$ we are done. Otherwise, since $\betau_i \notin {\Omega}mega(M)$, from Theorem~sef{sinktree} the edge $\vec f $ between the pair $(\betau_i, \betau_{i+1})$ is not in $T_F$. Thus $\vec f = \vec e_{n_i}$ for some $n_i {\lambda}eq n$. Let $\betaeta_i$ be the descending path $\vec e_{n_i}, {\lambda}dots, \vec e_n$.
Starting at $\vec e_{n_i}$, the path $\betaeta_i$ follows the boundary $\deltad \betau_i$ until it reaches a plughole.
Following the arrows, note that the oriented edge ${\mathcal V}ec e$ pointing out of this plughole is an edge of $\betaeta_i$.
If $\betau_i$ is of type $\eta$ (respectively $\eta'$) then of the two regions adjacent to ${\mathcal V}ec e$, one, $\betau'$ say, is of type $\eta'$ (respectively $\eta$). Set $\betau_{i+1}= \betau'$ and choose $u_{i+1}$ so that $(u_i, u_{i+1})$ is positive and palindromic with respect to $(\eta, \eta')$. (Notice that we are using the uniqueness of the palindromic form for $u_i$, in other words if $(u_{i-1}, u_{i})$ is the positive palindromic pair associated to the regions
$(\betau_{i-1}, \betau_{i})$ then $(u_{i}, u_{i+1})$ is the positive palindromic pair associated to the regions
$(\betau_{i}, \betau_{i+1})$.) If $\betau_{i+1} {\bf i}n {\Omega}mega(M)$ we are done, otherwise continue as before. This proves the claim.
Since $(u_i, u_{i+1})$ are palindromic with respect to $(\epsilonsilonta, \epsilonsilonta')$ the axes ${ A}x U_i, { A}x U_{i+1}$ are orthogonal to the hyperelliptic axis ${\mathcal E}_{\eta,\eta'}$ and hence Equation~\epsilonsilonqref{distbound}
gives $d({ A}x U_i, { A}x U_{i+1}) {\lambda}eq O(e^{-\epsilonsilonll(U_i)}), 1 {\lambda}eq i <k$.
Now let ${\mathcal V}ec e$ be the oriented edge between $\betau_{k-1}, \betau_k$ and let ${\mathcal W}({\mathcal V}ec e)$ be its wake. Then since the edge between $\betau_i, \betau_{i+1}$ is always oriented towards ${\mathcal V}ec e$, we see that $\betau_i {\bf i}n {\mathcal W}({\mathcal V}ec e), 0 {\lambda}eq i {\lambda}eq k$.
Let $F_{\vec {e}}$ be the
Fibonacci function on ${\mathcal W}({\mathcal V}ec e)$ defined immediately above Lemma~sef{fibonacciwake}.
It is not hard to see that for $0 {\lambda}eq i {\lambda}eq k$ we have $F_{\vec {e}}(\betau_i) \gammaeq k-i$.
By construction, $\betau_{k-1} \notin {\Omega}mega(M) $ so that $\betau_{k-1} \notin {\Omega}mega(2)$. Thus by Lemma~sef{fibonacciwake}, there exist $c>0, n_0 {\bf i}n {m}athbb N$ depending only on $sho$ and not on ${\mathcal V}ec e$ such that ${\lambda}og^+{|{\cal T}r U_i|} \gammaeq c (k-i)$ for all but at most $n_0$ of the regions $\betau_i$.
Hence for all except some uniformly bounded number of the regions $\betau_i$, $ \epsilonsilonll(U_i) \gammaeq c(k-i) -{\lambda}og 2$. Since all axes ${ A}x U_i$ intersect ${\mathcal E}$ orthogonally in points $P_i$ say, it follows that
$d({ A}x U_1, { A}x U_k) $ is the sum $\sigmaum_1^{k-1} d({ A}x U_i, { A}x U_{i+1}) $ of the distances between the points $P_i, P_{i+1}$. Since $d({ A}x U_i, { A}x U_{i+1}) {\lambda}eq O(e^{-\epsilonsilonll(U_i)}), 1 {\lambda}eq i <k$,
the distance from $ { A}x U_1$ to one of the finitely many axes in ${{m}athcal X}i$ is uniformly bounded above, and we are done.
\epsilonsilonnd{proof}
\end{comment}
\begin{thebibliography}{000}
\bibitem{BSeries}
J. Birman and C. Series.
\newblock Geodesics with multiple self-intersections and
symmetries on Riemann surfaces.
\newblock In {\em Low dimensional topology and Kleinian
groups}, D. Epstein ed., LMS Lecture Notes 112, Cambridge Univ. Press, 3 -- 12, 1986.
\bibitem{bow_mar}
B.~H. Bowditch.
\newblock {M}arkoff triples and quasi-{F}uchsian groups.
\newblock {\em Proc. London Math. Soc. 77}, 697--736, 1998.
\bibitem{CMZ}
M.~Cohen, W.~Metzler and A.~Zimmermann.
\newblock What does a basis of $F(a,b)$ look like?
\newblock {\em Math. Annalen}, 257, 435-- 446, 1981.
\bibitem{Fen}
W.~Fenchel.
\newblock {\em Elementary geometry in hyperbolic space.}
\newblock De Gruyter Studies in Mathematics, Vol. 11, 1989.
\bibitem{gilmankeen1}
J.~Gilman and L.~Keen.
\newblock Enumerating palindromes and primitives in rank two free groups.
\newblock {\em Journal of Algebra}, 332, 1--13, 2011.
\bibitem{gilmankeen2}
J.~Gilman and L.~Keen.
\newblock Discreteness criteria and the hyperbolic geometry of palindromes.
\newblock {\em Conformal geometry and dynamics}, 13, 76 --90, 2009.
\bibitem{goldman}
W. Goldman.
\newblock The modular group action on real $SL(2)$-characters of a one-holed torus.
\newblock {\em Geometry and Topology} 7, 443 -- 486, 2003.
\bibitem{goldman2}
W. Goldman.
\newblock Trace coordinates on Fricke spaces of some
simple hyperbolic surfaces.
\newblock In {\em Handbook of Teichm\"uller theory Vol. II}, IRMA Lect. Math. Theor. Phys., 13, Euro. Math. Soc., Z\"urich, 611-- 684, 2009.
\bibitem{ksriley} L. Keen and C. Series.
\newblock The Riley slice of Schottky space.
\newblock {\em Proc. London Math. Soc.}, 69, 72 -- 90, 1994.
\bibitem{LX} J. Lee and B. Xu.
\newblock Bowditch's Q-conditions and Minsky's primitive stability.
\newblock {\em Trans. AMS}, 373, 1265 -- 1305, 2020.
\bibitem{lupi} D. Lupi.
\newblock Primitive stability and Bowditch conditions for rank 2 free group representations.
\newblock {\em Thesis, University of Warwick}, 2016.
\bibitem{Minsky} Y. Minsky.
\newblock On dynamics of $Out(F_n)$ on $PSL(2,\mathbb C)$ characters.
\newblock {\em Israel Journal of Mathematics}, 193, 47 -- 70, 2013.
\bibitem{serwolp}
C.~Series.
\newblock An extension of Wolpert's derivative formula.
\newblock {\em Pacific J. Math.},
197, 223 -- 239, 2001.
\bibitem{serPS}
C.~Series.
\newblock Primitive stability and Bowditch's BQ-conditions are equivalent.
\newblock {\em arXiv:2530070 [math.GT]}, 2019.
\bibitem{serInt}
C.~Series.
\newblock The Geometry of Markoff Numbers.
\newblock {\em Math. Intelligencer}, 7, 20 -- 29, 1985.
\bibitem{sty}
C.~Series, S.P.~Tan, Y.~Yamashita.
\newblock The diagonal slice of Schottky space.
\newblock {\em Algebraic and Geometric Topology}, 17, 2239 -- 2282, 2017.
\bibitem{tan_gen}
S.P. Tan, Y. L. Wong and Y. Zhang.
\newblock Generalized {M}arkoff maps and {M}c{S}hane's identity.
\newblock {\em Adv. Math. 217}, 761--813, 2008.
\end{thebibliography}
\end{document}
\begin{document}
\title{Brownian Dynamics of Globules}
\centerline{\textbf{Abstract}}
We prove the existence and uniqueness of a strong solution of a stochastic differential equation with normal reflection representing the random motion of finitely many globules. Each globule is a sphere with time-dependent random radius and a center moving according to a diffusion process. The spheres are hard, hence non-intersecting, which induces in the equation a reflection term with a local (collision-)time.
A smooth interaction is considered too and, in the particular case of a gradient system, the reversible measure of the dynamics is given.
In the proofs, we analyze geometrical properties of the boundary of the set in which the process takes its values, in particular the so-called Uniform Exterior Sphere and Uniform Normal Cone properties.
These techniques extend to other hard core models of objects with a time-dependent random characteristic: we present here an application to the random motion of a chain-like molecule.\\
\noindent
{\bf AMS 2000 subject classification:} 60K35, 60J55, 60H10.\\
{\bf Keywords:} Stochastic Differential Equation, hard core interaction, reversible measure, normal reflection, local time, Brownian globule.\\
\section{Introduction}
Since the pioneering work of Skorokhod \cite{Skorokhod1et2}, many authors have investigated the question of the existence and uniqueness of a solution for reflected stochastic differential equations in a domain. It has first been solved for half-spaces, then for convex domains (see \cite{Tanaka}). Lions and Sznitman \cite{LionsSznitman} proved the existence of a solution in so-called \emph{admissible sets} and Saisho \cite{SaishoSolEDS} extended these results to domains satisfying only the \emph{Uniform Exterior Sphere} and the \emph{Uniform Normal Cone} conditions (see definitions in Section \ref{resultSaisho}).
These results were applied to prove the existence and uniqueness of Brownian dynamics for hard spheres in \cite{SaishoTanakaBrownianBalls}, \cite{FR2}, \cite{FR3}, or for systems of mutually reflecting molecules (see \cite{SaishoMolecules2}).
We are interested here in dynamics of finitely many objects having not only a random position but also another random time-dependent geometrical characteristic like the radius for spheres or the length of the bonds in a molecule. We will prove the existence and uniqueness of random dynamics for two elaborated models, using methods which are refinements of Saisho's techniques, analyzing fine geometrical properties of the boundary of the domain on which the motion is reflected.
More precisely~:
We first introduce a globules model representing a finite system of non-intersecting spheres whose centers undergo diffusions and whose radii vary according to other diffusions constrained to stay between a maximum and a minimum value.
The spheres might be cells, or particles, or soap bubbles floating on the surface of a liquid (2-dimensional motion) or in the air (3-dimensional motion).
The behavior of the globules is quite intuitive~:
two globules collide when the distance between their random centers is equal to the sum of their random radii, and the collision has the effect that their centers move away from one another and their sizes decrease. The associated stochastic differential equation $(\E_g)$ (see Section \ref{Sect_globules_model}) includes several reflection terms, each of them corresponding to a constraint in the definition of the set of allowed globules configurations~: constraints of non-intersection between each couple of spheres, constraints on the radii to stay between fixed minimum and maximum values. We prove that this equation has a unique strong solution and give in some special case a time-reversible initial distribution (theorems \ref{thexistglobules} and \ref{threversglobules}).
We also consider a model for linear molecules, such as alkanes (carbon chains) or polymers~:
each atom moves like a diffusion, the lengths of the bonds between neighbour atoms vary between a minimum and maximum value which evolve according to a reflected diffusion.
This corresponds to a SDE $(\E_c)$ reflected on the boundary of the set of all allowed chains.
Here also, we prove the existence and uniqueness of the solution of $(\E_c)$ (see theorem \ref{thexistchenilles}) with similar methods as in the globule case.
The rest of the paper is organized as follows~: In Section \ref{SectGeomCriterium}, we present a new general criterion for a multiple constraint domain to satisfy the Uniform Exterior Sphere and Uniform Normal Cone conditions (see proposition \ref{propcritere}). These geometrical assumptions on the boundary induce existence and uniqueness of the reflected diffusion on this domain.
We also obtain a disintegration of the local time along the different constraint directions.
Section \ref{SectPreuves} is devoted to the proofs of the theorems announced in Section \ref{Sect_two_models}.
For the sake of shortness, we restricted ourselves to these two examples, though dynamics for Brownian systems evolving under multiple constraints may be found in other situations (the results in section \ref{SectGeomCriterium} are given in a general frame for easier adaptation to other examples).
\section{Two hard core models}
\label{Sect_two_models}
\subsection{Globules model}
\label{Sect_globules_model}
We want to construct a model for interacting globules. Each globule is spherical with random radius oscillating between a minimum and a maximum value. Its center is a point in $\R^d$, $d \ge 2$. The number $n$ of globules is fixed. Globules configurations will be denoted by
$$
\bx=(x_1,\nx_1,\ldots,x_n,\nx_n)
\quad\text{ with }\quad x_1,\ldots,x_n\in\R^d \quad\text{ and }\quad \nx_1,\ldots,\nx_n\in \R
$$
where $x_i$ is the center of the $i^\text{th}$ globule and $\nx_i$ is its radius.
An allowed globules configuration is a configuration $\bx$ satisfying
$$
\forall i \quad \rm\le \nx_i \le\rp \quad\text{ and }\quad
\forall i\neq j \quad |x_i-x_j|\ge \nx_i+\nx_j
$$
So, in an allowed configuration, spheres do not intersect and their radii are bounded from below by the minimum value $\rm>0$ and bounded from above by the maximum value $\rp>\rm$.
In this paper, the symbol $|\cdot|$ denotes the Euclidean norm on $\R^d$ or $\R^{(d+1)n}$ (or some other Euclidean space, depending on the context).
Let $\A_g$ be the set of allowed globules configurations~:
$$
\A_g=\left\{ \bx \in \R^{(d+1)n},~\forall i \quad \rm\le \nx_i \le\rp \text{ and } \forall i\neq j \quad |x_i-x_j|\ge \nx_i+\nx_j
\right\}
$$
The random motion of reflecting spheres with fluctuating radii is represented by the following stochastic differential equation~:
$$
(\E_g)
\left\{
\begin{array}{l}
\disp
X_i(t) = X_i(0) + \int_0^t \sigma_i(\bX(s)) dW_i(s) + \int_0^t b_i(\bX(s)) ds
+ \sum_{j=1}^n \int_0^t \frac{X_i(s)-X_j(s)}{\nX_i(s)+\nX_j(s)} dL_{ij}(s) \\
\disp
\nX_i(t) = \nX_i(0) + \int_0^t \nsigma_i(\bX(s)) d\nW_i(s)
+ \int_0^t \nb_i(\bX(s)) ds - \sum_{j=1}^n L_{ij}(t) - L^+_i(t) + L^-_i(t)
\end{array}
\right.
$$
In this equation, $\bX(s)$ is the vector $(X_i(s),\nX_i(s))_{1 \le i\le n}$.
The $W_i$'s are independent $\R^d$-valued Brownian motions and the $\nW_i$'s are independent one-dimensional Brownian motions, also independent from the $W_i$'s.
The diffusion coefficients $\sigma_i$ and $\nsigma_i$, and the drift coefficients $b_i$ and $\nb_i$ are functions defined on $\A_g$, with values in the $d \times d$ matrices for $\sigma_i$, values in $\R^d$ for $b_i$, and values in $\R$ for $\nsigma_i$ and $\nb_i$.
To make things simpler with the summation indices, we let $ L_{ii} \equiv 0 $.
A solution of equation $(\E_g)$ is a continuous $\A_g$-valued process $\{ \bX(t) , t \ge 0 \}$ satisfying equation $(\E_g)$ for some family of local times $L_{ij}$, $L^+_i$, $L^-_i$ such that for each $i,j$~:
$$
(\E'_g)
\left\{
\begin{array}{l}
\disp
L_{ij} \equiv L_{ji}, \quad
L_{ij}(t) = \int_0^t \un_{|X_i(s)-X_j(s)|=\nX_i(s)+\nX_j(s)} ~dL_{ij}(s)~, \\
\disp
L^+_i(t) = \int_0^t \un_{\nX_i(s)=\rp} ~dL^+_i(s) \quad\textrm{ and }\quad L^-_i(t) = \int_0^t \un_{\nX_i(s)=\rm} ~dL^-_i(s)
\end{array}
\right.
$$
\begin{remarque} \label{remtempslocal}
Here, and in the sequel, the expression {\bf local time} stands for~: non-decreasing adapted continuous process, which starts from $0$ and has bounded variations on each finite interval.
\end{remarque}
Equation $(\E_g)$ has an intuitive meaning~:
\begin{itemize}
\item
the positions and radii of the spheres are Brownian;
\item
when a globule becomes too big, it is deflated~: $\nX_i$ decreases by $-dL^+_i$ when $\nX_i=\rp$;
\item
when a globule becomes too small, it is inflated~: $\nX_i$ increases by $+dL^-_i$ when $\nX_i=\rm$;
\item
when two globules bump into each other (i.e. $|X_i-X_j|=\nX_i+\nX_j$), they are deflated and move away from each other~:
$\nX_i$ decreases by $-dL_{ij}$ and $X_i$ is given an impulsion in the direction $\frac{X_i-X_j}{\nX_i+\nX_j}$ with an amplitude $dL_{ij}$.
\end{itemize}
In the case of a hard core interaction between spheres with a \emph{fixed} radius, the existence of solutions for the corresponding SDE has been proved in \cite{SaishoTanakaBrownianBalls}. However, the condition $|x_i-x_j|\ge \nx_i+\nx_j$ is not equivalent to $|(x_i,\nx_i)-(x_j,\nx_j)|\ge c$ for some real number $c$. Hence the above model is not a classical hard sphere model in $\R^d$ or $\R^{d+1}$.
\begin{theoreme}
\label{thexistglobules}
Assume that the diffusion coefficients $\sigma_i$ and $\nsigma_i$ and the drift coefficients $b_i$ and $\nb_i$ are bounded and Lipschitz continuous on $\A_g$ (for $1 \le i \le n$).
Then equation $(\E_g)$ has a unique strong solution.
\end{theoreme}
\begin{remarque}
"Strong uniqueness of the solution" here stands for strong uniqueness (in the sense of \cite{IkedaWatanabe} chap.IV def.1.6) of the process $\bX$, and, as a consequence, of the reflection term.
This does not imply strong uniqueness for the local times $L_{ij}$, $L^+_i$, $L^-_i$ unless, almost surely, several collisions with linearly dependent collision directions do not occur at the same time (see the proof of corollary \ref{corolSaisho} for details).
\end{remarque}
The first part of the next theorem is a corollary of the previous one. The second part describes the equilibrium states of systems of interacting globules. Here, and throughout this paper, $d\bx$ denotes the Lebesgue measure.
\begin{theoreme}
\label{threversglobules}
The diffusion representing the motion of $n$ globules submitted to a smooth interaction $\phi$ exists as soon as the interaction potential $\phi$ is an even $\C^2$ function on $\R^d$ with bounded derivatives. It is the unique strong solution of the equation~:
$$
(\E^{\phi}_g)
\left\{
\begin{array}{l}
\disp
X_i(t) = X_i(0) + W_i(t) -\frac{1}{2} \int_0^t \sum_{j=1}^n \nabla\phi(X_i(s)-X_j(s)) ds
+ \sum_{j=1}^n \int_0^t \frac{X_i(s)-X_j(s)}{\nX_i(s)+\nX_j(s)} dL_{ij}(s) \\
\disp
\nX_i(t) = \nX_i(0) + \nW_i(t) - \sum_{j=1}^n L_{ij}(t) - L^+_i(t) + L^-_i(t) \\
\text{with } \bX~~ \A_g\text{-valued continuous process and } (L_{ij},L^+_i,L^-_i)_{0 \le i,j \le n} \text{ satisfying conditions } (\E'_g)
\end{array}
\right.
$$
Moreover, if $Z=\int_{\A_g} e^{-\sum_{1 \le i<j \le n} \phi(x_i-x_j)} d\bx <+\infty$ then the Probability measure $\mu$ defined by $d\mu(\bx)=\frac{1}{Z} \un_{\A_g}(\bx) e^{-\sum_{1 \le i<j \le n} \phi(\bx_i-\bx_j)} d\bx$ is time-reversible for this diffusion, i.e. $(\bX(T-t))_{t\in[0,T]}$ has the same distribution as $(\bX(t))_{t\in[0,T]}$ for each positive $T$ when $\bX(0) \sim \mu$.
\end{theoreme}
These theorems are proved in section \ref{PreuvesGlobules}. The proofs rely on previous results from Y.Saisho and H. Tanaka (see section \ref{resultSaisho}) and on an inheritance criterion for geometrical properties which is given in section \ref{criteredheredite}.
\subsection{Linear molecule model}
Another example of hard core interaction between particles with another spatial characteristic beside position is the following simple model for a linear molecule. In this model, we study chains of particles having a fixed number of links with variable length. More precisely, a configuration is a vector
\[
\bx=(x_1,\ldots,x_n,\nx_-,\nx_+)
\quad\text{ with }\quad x_1,\ldots,x_n\in\R^d \quad\text{ and }\quad \nx_-,\nx_+\in \R
\]
$x_i$ and $x_{i+1}$ are the ends of the $i^\text{th}$ link and $\nx_->0$ (resp. $\nx_+ > \nx_-$) is the minimum (resp. maximum) allowed length of the links for the chain. The number $n$ of particles in the chain is at least equal to $2$, they are moving in $\R^d$, $d \ge 2$.
So the set of allowed configurations is~:
$$
\A_c=\left\{ \bx \in \R^{dn+2},~ \rm \le \nx_- \le \nx_+ \le \rp \text{ and }
\forall i\in\{1,\ldots,n-1\} \quad \nx_- \le |x_i-x_{i+1}| \le \nx_+ \right\}
$$
We want to construct a model for the random motion of such chains, as the $\A_c$-valued solution of the following stochastic differential equation~:
$$
(\E_c)
\left\{
\begin{array}{l}
\disp
X_i(t) = X_i(0) + \int_0^t \sigma_i(\bX(s)) dW_i(s) + \int_0^t b_i(\bX(s)) ds
+ \int_0^t \frac{X_i-X_{i+1}}{\nX_-}(s) dL^-_i(s) \\
\phantom{X_i(t) =}\disp
+ \int_0^t \frac{X_i-X_{i-1}}{\nX_-}(s) dL^-_{i-1}(s)
- \int_0^t \frac{X_i-X_{i+1}}{\nX_+}(s) dL^+_i(s) - \int_0^t \frac{X_i-X_{i-1}}{\nX_+}(s) dL^+_{i-1}(s) \\
\disp
\nX_-(t) = \nX_-(0) + \int_0^t \sigma_-(\bX(s)) d\nW_-(s) + \int_0^t b_-(\bX(s)) ds
- \sum_{i=1}^{n-1} L^-_i(t) + L_-(t) - L_=(t) \\
\disp
\nX_+(t) = \nX_+(0) + \int_0^t \sigma_+(\bX(s)) d\nW_+(s) + \int_0^t b_+(\bX(s)) ds
+ \sum_{i=1}^{n-1} L^+_{i}(t) - L_+(t) + L_=(t)
\end{array}
\right.
$$
As in the previous model, the $W_i$'s are independent $\R^d$-valued Brownian motions and $\nW_-$ and $\nW_+$ are independent one-dimensional Brownian motions, also independent from the $W_i$'s.
A solution of equation $(\E_c)$ is an $\A_c$-valued continuous process $\{ \bX(t), t \ge 0 \}$ satisfying the equation for some family of
local times $L^-_i$, $L^+_i$, $L_-$, $L_+$, $L_=$ such that for each $t \in \R^+ $ and each $i$~:
$$
\begin{array}{l}
\disp
L^-_i(t) = \int_0^t \un_{|X_i(s)-X_{i+1}(s)|=\nX_-(s)} dL^-_i(s)~,\quad
L^+_i(t) = \int_0^t \un_{|X_i(s)-X_{i+1}(s)|=\nX_+(s)} dL^+_i(s) \\
\disp
L_-(t) = \int_0^t \un_{\nX_-(s)=\rm} dL_-(s) ,\quad L_+(t) = \int_0^t \un_{\nX_+(s)=\rp} dL_+(s) \quad\text{and}\quad
L_=(t) = \int_0^t \un_{\nX_-(s)=\nX_+(s)} dL_=(s)
\end{array}
$$
Equation $(\E_c)$ looks more complicated than $(\E_g)$ because it contains a larger number of local times, but it is very simple on an intuitive level~: both ends of each link are Brownian, links that are too short ($|X_i-X_{i+1}|=\nX_-$) tend to become longer (reflection term in direction $X_i-X_{i+1}$ for $X_i$ and in the opposite direction for $X_{i+1}$) and also tend to diminish the lower bound $\nX_-$ (negative reflection term in the equation of $\nX_-$).
Symmetrically, links that are too large ($|X_i-X_{i+1}|=\nX_+$) are both becoming shorter (reflection term in direction $X_{i+1}-X_i$ for $X_i$) and enlarging the upper bound $\nX_+$ (positive reflection term in its equation).
Moreover, $\nX_-$ increases (by $L_-$) if it reaches its lower limit $\rm$ and $\nX_+$ decreases (by $L_+$) if it reaches its upper limit $\rp$. And the lower bound decreases and the upper bound increases (by $L_=$) when they are equal, so as to fulfill the condition $\nx_- \le \nx_+$.
\begin{theoreme}
\label{thexistchenilles}
If the $\sigma_i$'s, $\sigma_-$ and $\sigma_+$ and the $b_i$'s, $b_-$ and $b_+$ are bounded and Lipschitz continuous on $\A_c$, then equation $(\E_c)$ has a unique strong solution.
Moreover, assume that the $\sigma_i$'s, $\sigma_-$ and $\sigma_+$ are equal to the identity matrix, $b_-$ and $b_+$ vanish, and $b_i(\bx)=-\frac{1}{2}\sum_{j=1}^n \nabla\phi(x_i-x_j)$ for some even $\C^2$ function $\phi$ on $\R^d$ with bounded derivatives, satisfying $Z=\int_{\A_c} e^{-\sum_{1 \le i<j \le n} \phi(x_i-x_j)} d\bx <+\infty$. Then the solution with initial distribution
$d\mu(\bx)=\frac{1}{Z} \un_{\A_c}(\bx) e^{-\sum_{1 \le i<j \le n} \phi(\bx_i-\bx_j)} d\bx$ is time-reversible.
\end{theoreme}
See section \ref{PreuvesChenilles} for the proof of this theorem.
\section{Geometrical criteria for the existence of reflected dynamics} \label{SectGeomCriterium}
\subsection{Uniform Exterior Sphere and Uniform Normal Cone properties} \label{resultSaisho}
In order to solve the previous stochastic differential equations, we will use theorems of Saisho and Tanaka extending some previous results of Lions and Sznitman \cite{LionsSznitman}.
To begin with, we need geometrical conditions on subset boundaries. The subsets we are interested in are sets of allowed configurations. But for the time being, we just consider any subset $\D$ in $\R^m$($m\ge 2$) which is the closure of an open connected set with non-zero (possibly infinite) volume. $d\bx$ is the Lebesgue measure on $\R^m$, $|\cdot|$ denotes the Euclidean norm as before, and we set
$$
\S^m=\{ \bx\in\R^m,~~|\bx|=1 \}
$$
We define the set of all (inward) normal vectors at point $\bx$ on the boundary $\partial\D$ as~:
\[
\Nor^{\D}_\bx=\bigcup_{\alpha>0} \Nor^{\D}_{\bx,\alpha} \quad\text{ where }\quad
\Nor^{\D}_{\bx,\alpha}=\{\bn\in\S^m,~~ \mathring{B}(\bx-\alpha\bn,\alpha) \cap \D = \emptyset \}
\]
Here $\mathring{B}(\bx,r)$ is the open ball with radius $r$ and center $\bx$.
Note that
\[
\mathring{B}(\bx-\alpha\bn,\alpha) \cap \D = \emptyset
\quad\quad\Longleftrightarrow\quad\quad
\forall \by\in\D~~~ (\by-\bx).\bn+\frac{1}{2\alpha}|\by-\bx|^2 \ge 0
\]
\begin{define}
If there exists a constant $\alpha>0$ such that $\Nor^{\D}_\bx=\Nor^{\D}_{\bx,\alpha} \neq \emptyset$ for each $\bx\in\partial\D$, we say that $\D$ has the {\bf Uniform Exterior Sphere} property (with constant $\alpha$) and we write~: $\D\in UES(\alpha)$.
\end{define}
$UES(\alpha)$ means that a sphere of radius $\alpha$ rolling on the outside of $\D$ can touch each point of $\partial\D$.
This property is weaker than the convexity property (which corresponds to $UES(\infty)$) but still ensures the existence of a local projection function similar to the projection on convex sets.
\begin{define}
We say that $\D$ has the {\bf Uniform Normal Cone} property with constants $\beta$, $\delta$, and we write $\D\in UNC(\beta,\delta)$,
if for some $\beta\in [0,1[$ and $\delta>0$, for each $\bx\in\partial\D$, there exists $\bl_\bx \in \S^m$ such that for every $\by\in\partial\D$
\[
|\by-\bx| \le \delta \quad\Longrightarrow\quad \forall \bn\in\Nor^{\D}_\by \quad \bn.\bl_\bx \ge \sqrt{1-\beta^2}
\]
\end{define}
Let us consider the reflected stochastic differential equation~:
\begin{equation}
\bX(t)=\bX(0) + \int_0^t \bsigma(\bX(s)) d\bW(s) + \int_0^t \bb(\bX(s)) ds + \int_0^t \bn(s)d\bL(s) \label{eqSaisho}
\end{equation}
where $(\bW(t))_t$ is a $m$-dimensional Brownian motion.
A solution of (\ref{eqSaisho}) is a triple $(\bX,\bn,\bL)$ with $\bX$ a $\D$-valued adapted continuous process, $\bn(s)\in\Nor^{\D}_{\bX(s)}$ when $\bX(s)\in\partial\D$ and $\bL$ a local time such that
\[
\bL(\cdot)=\int_0^{\cdot} \un_{\bX(s)\in\partial\D} d\bL(s)
\]
The following result is a consequence of \cite{SaishoSolEDS} and \cite{SaishoTanakaSymmetry}~:
\begin{theoreme}\label{thSaisho}
Assume $\D\in UES(\alpha)$ and $\D\in UNC(\beta,\delta)$ for some positive $\alpha$, $\delta$ and some $\beta\in[0,1[$.
If $\sigma:\D\longrightarrow\R^{m^2}$ and $b:\D\longrightarrow\R^m$ are bounded Lipschitz continuous functions, then (\ref{eqSaisho}) has a unique strong solution.
Moreover, if $\bsigma$ is the identity matrix and $\disp \bb=-\frac{1}{2} \nabla\Phi$ with $\Phi$ a $\C^2$ function on $\R^m$ with bounded derivatives satisfying $Z=\int_{\D} e^{-\Phi(\bx)} d\bx <+\infty$, then the solution with initial distribution $d\mu(\bx)=\frac{1}{Z} \un_{\D}(\bx) e^{-\Phi(\bx)} d\bx$ is time-reversible.
\end{theoreme}
\begin{proof2} {\bf of theorem \ref{thSaisho}} \\
The existence of a unique strong solution of (\ref{eqSaisho}) is proved in \cite{SaishoSolEDS} theorem 5.1.
Let us consider the special case of the gradient system~: $\bb=-\frac{1}{2} \nabla\Phi$ and $\bsigma$ identity matrix.
For fixed $T>0$ and $\bx\in\D$, let $P_{\bx}$ be the distribution of the solution $(\bX,\bn,\bL)$ of (\ref{eqSaisho}) starting from $\bx$. The process $(\bW(t))_{t\in[0,T]}$ defined as
$$
\bW(t)=\bX(t)-\bx +\frac{1}{2} \int_0^t \nabla\Phi(\bX(s)) ds - \int_0^t \bn(s)d\bL(s)
$$
is a Brownian motion with respect to the probability measure $P_{\bx}$.
Since $\nabla\Phi$ is bounded, thanks to Girsanov theorem, the process $\disp \tilde{\bW}(t)=\bW(t)-\frac{1}{2} \int_0^t \nabla\Phi(\bX(s)) ds$ is a Brownian motion with respect to the probability measure $\tilde{P}_{\bx}$ defined by $\disp \frac{d\tilde{P}_{\bx}}{dP_{\bx}}=M_T$ where
$$
M_t=\exp\left( \frac{1}{2}\int_0^t \nabla\Phi(\bX(s)).d\bW(s) - \frac{1}{8} \int_0^t |\nabla\Phi(\bX(s))|^2 ds \right)
$$
From theorem 1 of \cite{SaishoTanakaSymmetry}, it is known that Lebesgue measure on $\D$ is time-reversible for the solution
of $\disp \bX(t)=\bX(0) + \tilde{\bW}(t) + \int_0^t \bn(s)d\bL(s)$, that is, the measure $\disp \tilde{P}_{d\bx}=\int_{\D} \tilde{P}_{\bx} d\bx$ is invariant under time reversal on $[0;T]$ for any positive $T$. Using It\^o's formula to compute~:
{\small$$
\int_0^T \nabla\Phi(\bX(s)).d\bW(s)
=\Phi(\bX(T))-\Phi(\bX(0)) +\frac{1}{2}\int_0^T |\nabla\Phi(\bX(s))|^2-\Delta\Phi(\bX(s))~ds -\int_0^T \nabla\Phi(\bX(s)).\bn(s) d\bL(s)
$$}
we notice that the density of the probability measure $\disp P_{\mu}=\int_{\D} P_{\bx} \mu(d\bx)$ with respect to the measure $\tilde{P}_{d\bx}$ is equal to~:
{\small
$$
\frac{e^{-\Phi(\bX(0))}}{Z} \exp\left( \frac{\Phi(\bX(0))-\Phi(\bX(T))}{2}
+\int_0^T \frac{1}{4} \Delta\Phi(\bX(s)) -\frac{1}{8}|\nabla\Phi(\bX(s))|^2 ds
+\frac{1}{2} \int_0^T \nabla\Phi(\bX(s)).\bn(s) d\bL(s) \right)
$$}
This expression does not change under time reversal, i.e. if $(\bX,\bn,\bL)$ is replaced by $(\bX(T-\cdot),\bn(T-\cdot),\bL(T)-\bL(T-\cdot))$. Thus the time reversibility of $\tilde{P}_{d\bx}$ implies the time reversibility of $P_{\mu}$.
\end{proof2}
\subsection{The special case of multiple constraints}\label{criteredheredite}
The sets we are interested in are sets of configurations satisfying multiple constraints. They are intersections of several sets, each of them defined by a single constraint. So we need a sufficient condition on the $\D_i$'s for Uniform Exterior Sphere and Uniform Normal Cone properties to hold on $\D=\cap_{i=1}^p \D_i$.
In \cite{SaishoMolecules2}, Saisho gives a sufficient condition for the intersection $\D=\cap_{i=1}^p \D_i$ to inherit both properties when each set $\D_i$ has these properties. However, in order to prove the existence of a solution in the case of our globules model, we have to consider the intersection of the sets~:
\[
\D_{ij}=\left\{ \bx \in \R^{dn+n},~ |x_i-x_j|\ge \nx_i+\nx_j \right\}
\]
Uniform Exterior Sphere property does not hold for $\D_{ij}$'s, so Saisho's inheritance criterion does not work here. However, $\D_{ij}$ satisfies an Exterior Sphere condition restricted to $\A_g$ in some sense, and we shall check that this is enough. Similarly, the Uniform Normal Cone property does not hold for $\D_{ij}$, but a restricted version holds, and proves sufficient for our needs.
So we present here a UES and UNC criterion with weaker assumptions. We keep Saisho's smoothness assumption, which holds for many interesting models, and restrict the UES and UNC assumptions on the $\D_i$'s to the set $\D$. Instead of the conditions $(B_0)$ and $(C)$ in \cite{SaishoMolecules2}, we introduce the compatibility assumption (iv) which is more convenient because it is easier to check a property at each point $\bx\in\partial\D$ for finitely many vectors normal to the $\partial\D_i$'s, than on a neighborhood of each $\bx$ for the whole (infinite) set of vectors normal to $\partial\D$.
The proof of this criterion is postponed to the end of this section.
\begin{propal} {\bf (Inheritance criterion for UES and UNC conditions)} \\
\label{propcritere}
For $\D\subset\R^m$ equal to the intersection $\disp \D=\bigcap_{i=1}^p \D_i$, assume that~:
\begin{itemize}
\item[(i)]
The sets $\D_i$ are closures of domains with non-zero volumes and boundaries at least $\C^2$ in $\D$~: this implies the existence of a unique unit normal vector $\bn_i(\bx)$ at each point $\bx\in \D\cap\partial\D_i$.
\item[(ii)]
Each set $\D_i$ has the Uniform Exterior Sphere property restricted to $\D$, i.e. \\
$ \disp
\quad \exists \alpha_i>0 , \quad \forall \bx\in \D\cap\partial\D_i \quad \mathring{B}(\bx-\alpha_i\bn_i(\bx),\alpha_i) \cap \D_i = \emptyset
$
\item[(iii)]
Each set $\D_i$ has the Uniform Normal Cone property restricted to $\D$, i.e. \\
for some $\beta_i\in [0,1[$ and $\delta_i>0$ and for each $\bx\in\D\cap\partial\D_i$ there is a unit vector $\bl^i_\bx$ s.t.
$\quad \forall \by\in\D\cap\partial\D_i \cap B(\bx,\delta_i) \quad \bn_i(\by).\bl^i_\bx \ge \sqrt{1-\beta_i^2}$
\item[(iv)] (compatibility assumption) There exists $\beta_0>\sqrt{2\max_{1 \le i \le p} \beta_i}$ satisfying\\
$\disp \forall\bx\in\partial\D,~~~ \exists \bl_\bx^0 \in \S^m,
\quad \forall i \text{ s.t. } \bx\in\partial\D_i \quad \bl_\bx^0.\bn_i(\bx) \ge \beta_0$
\end{itemize}
Under the above assumptions,
$\D\in UES(\alpha)$ and $\D\in UNC(\beta,\delta)$ hold with $\alpha=\beta_0 \min_{1 \le i \le p} \alpha_i$,
$\delta=\min_{1 \le i \le p} \delta_i/2$ and $\beta=\sqrt{1-(\beta_0-2 \max_{1 \le i \le p} \beta_i / \beta_0)^2}$.
Moreover, the vectors normal to the boundary $\partial\D$ are convex combinations
of the vectors normal to the boundaries $\partial\D_i$~:
\[
\forall \bx\in\partial\D \quad
\Nor^\D_\bx=\left\{ \bn\in\S^m,~~~ \bn=\sum_{\partial\D_i\ni\bx} c_i \bn_i(\bx) \text{ with each } c_i \ge 0 \right\}
\]
\end{propal}
\begin{remarque}\label{remsommeci}
Thanks to the compatibility assumption $(iv)$, $\bn=\sum_{\partial\D_i\ni\bx} c_i \bn_i(\bx)$ with non-negative $c_i$'s and $|\bn|=1$ implies that
$$
\sum_{\partial\D_i\ni\bx} c_i \le \sum_{\partial\D_i\ni\bx} c_i \frac{\bn_i(\bx).\bl_\bx^0}{\beta_0} = \frac{\bn.\bl_\bx^0}{\beta_0} \le \frac{1}{\beta_0}
$$
so that the last equality in proposition \ref{propcritere} can be rewritten as
$$
\Nor^\D_\bx=\left\{ \bn\in\S^m,~~~ \bn=\sum_{\partial\D_i\ni\bx} c_i \bn_i(\bx)
\text{ with } \forall i ~~ c_i \ge 0 \text{ and } \sum_{\partial\D_i\ni\bx} c_i \le \frac{1}{\beta_0}\right\}
$$
\end{remarque}
\begin{corolle} {\bf of theorem \ref{thSaisho}} \\
\label{corolSaisho}
If $\disp \D=\bigcap_{i=1}^p \D_i$ satisfies assumptions $(i)\cdots(iv)$ and if $\bsigma$ and $\bb$ are bounded Lipschitz continuous functions, then
\begin{equation}
\label{eqSaishoLi}
\bX(t)=\bX(0) +\int_0^t \bsigma(\bX(s)) d\bW(s) +\int_0^t \bb(\bX(s)) ds + \sum_{i=1}^p \int_0^t \bn_i(\bX(s)) dL_i(s)
\end{equation}
has a unique strong solution with local times $L_i$ satisfying
$\disp
L_i(\cdot) = \int_0^\cdot \un_{\partial\D_i}(\bX(s)) ~dL_i(s) $
\end{corolle}
\begin{proof2} {\bf of corollary \ref{corolSaisho}} \\
Thanks to proposition \ref{propcritere}, $\D$ satisfies the assumptions of theorem \ref{thSaisho}.
Thus equation (\ref{eqSaishoLi}) has a unique strong solution $\bX$ with local time $\bL$ and reflection direction $\bn$. Using proposition \ref{propcritere} again, there are (non-unique) coefficients $c_i(\omega,s)\in [0,\frac{1}{\beta_0}]$ for each normal vector in the reflection term to be written as a convex combination~:
\begin{equation}\label{egaliteci}
\bn(\omega,s)=\sum_{\partial\D_i\ni\bX(\omega,s)} c_i(\omega,s) \bn_i(\bX(\omega,s))
\end{equation}
Let us prove that there exists a measurable choice of the $c_i$'s~:
The map $\bn$ (resp. $\bn_i(\bX)$) is only defined for $(\omega,s)$ such that $\bX(\omega,s)\in\partial\D$ (resp. $\bX(\omega,s)\in\partial\D_i$).
We extend these maps by zero to obtain measurable maps on $\Omega\times[0,T]$ (for an arbitrary positive $T$).
Note that equality (\ref{egaliteci}) holds for the extended maps too.
For each $(\omega,s)$, we define the map $f_{\omega,s}$ on $\R^p$ by $f_{\omega,s}(c)=|\sum_{i=1}^p c_i \bn_i(\bX(\omega,s)) - \bn(\omega,s)|$.
For a positive integer $k$, let $R_k=\{0,\frac{1}{k},\frac{2}{k},\ldots,\frac{1}{k}\lfloor \frac{k}{\beta_0} \rfloor \}^p$ denote the $\frac{1}{k}$-lattice on $[0,\frac{1}{\beta_0}]^p$ endowed with lexicographic order. The smallest point in $R_k$ for which $f_{\omega,s}$ reaches its minimum value is
$$
c^{(k)}(\omega,s)=\sum_{c\in R_k} c \prod_{c'\in R_k, c'\neq c} \left( \un_{f_{\omega,s}(c')>f_{\omega,s}(c)}
+ \un_{f_{\omega,s}(c')=f_{\omega,s}(c)} \un_{c'>c} \right)
$$
$c^{(k)}$ is a measurable map on $\Omega\times[0,T]$ and (\ref{egaliteci}) implies that $|f_{\omega,s}(c^{(k)}(\omega,s))| \le \frac{p}{k}$.
Taking (coordinate after coordinate) the limsup of the sequence of $(c^{(k)})_k$, we obtain a measurable process $c^{(\infty)}$ satisfying (\ref{egaliteci}).
Finally, let $\disp L_i = \int_0^{\cdot} \un_{\partial\D_i}(\bX(s)) c_i(s)~d\bL(s)$.
$L_i$ has bounded variations on each $[0,T]$ since the $c_i$'s are bounded, and it is a local time in the sense of remark \ref{remtempslocal}.
Here, strong uniqueness holds for process $\bX$ and for the reflection term $\disp \sum_{i=1}^p \int_0^t \bn_i(\bX(s)) dL_i(s)$.
The uniqueness of this term does not imply uniqueness of the $L_i$'s, because the $c_i$'s are not unique.
\end{proof2}
\begin{proof2} {\bf of proposition \ref{propcritere}} \\
Let $\bx\in\partial\D$. Since $\D=\bigcap_{i=1}^p \D_i$, the set $\{i \text{ s.t. } \bx\in\partial\D_i \}$ is not empty.
Since $\D_i$ satisfies $UES(\alpha_i)$ restricted to $\D$, for each $\by\in\D_i$~~~$(\by-\bx).\bn_i(\bx)+\frac{1}{2\alpha_i}|\by-\bx|^2 \ge 0$, and consequently~:
\[
\forall i \text{ s.t. } \partial\D_i\ni\bx \quad \forall \by\in\D \quad (\by-\bx).\bn_i(\bx) + \frac{1}{2\min_{\partial\D_j\ni\bx}\alpha_j} |\by-\bx|^2 \ge 0
\]
Summing over $i$ for non-negative $c_i$'s such that $\sum_{\partial\D_i\ni\bx} c_i~\le\frac{1}{\beta_0}$ we obtain~:
\[
\forall \by\in\D \quad
(\sum_{\partial\D_i\ni\bx} c_i \bn_i(\bx)).(\by-\bx)+\frac{1}{2 \beta_0 \min_{\partial\D_j\ni\bx}\alpha_j} |\by-\bx|^2 \ge 0
\]
which implies that $\sum_{\partial\D_i\ni\bx} c_i \bn_i(\bx)$ belongs to the set $\Nor_{\bx,\alpha}$ of normal vectors on the boundary of $\D$, for $\alpha=\beta_0 \min_{\partial\D_j\ni\bx}\alpha_j$. Thanks to remark \ref{remsommeci}, this proves the inclusion
\[
\Nor'_\bx:=\left\{ \bn\in\S^m,~~~ \bn=\sum_{\partial\D_i\ni\bx} c_i \bn_i(\bx) \text{ with } c_i \ge 0 \right\}
~~~\subset~~~ \Nor_{\bx,\alpha}
\]
Let us prove the converse inclusion.
We first remark that the smoothness of the boundary $\partial\D_i$ at point $\bx$ implies that for any $\eps>0$~:
\[
\exists \delta^i_\eps(\bx)>0 \quad\text{ s.t. }\quad
\{ \bx+\bz,~~ |\bz|\le\delta^i_\eps(\bx) \text{ and } \bz.\bn_i(\bx) \ge\eps|\bz| \} \subset \D_i
\]
Consequently, for $\disp N_\eps=\bigcap_{\partial\D_i\ni\bx} \{\bz,~\bn_i(\bx).\bz \ge \eps|\bz| \}$ we have~:
$
\{ \bx+\bz,~~ \bz\in N_\eps \text{ and } |\bz|\le\min_{i}\delta^i_\eps \} \subset \D $
By definition, for each $\bn\in\Nor^\D_\bx$, there exists $\alpha_\bn>0$ such that $\forall \by\in\D~~ (\by-\bx).\bn+\frac{1}{2\alpha_\bn}|\by-\bx|^2 \ge 0$, hence for $\bz\in N_\eps$ and $\lambda>0$ small enough~:
\[
\lambda \bz.\bn+\frac{\lambda^2}{2\alpha_\bn}|\bz|^2 \ge 0
\]
For this to hold even with $\lambda$ going to zero, $\bz.\bn$ has to be non-negative. So we obtain~:
\[
\forall \bn\in \Nor^\D_\bx \quad \forall \eps>0 \quad -\bn \in N_\eps^*
\]
where $N_\eps^*=\{\bv,~~\forall \bz\in N_\eps ~\bv.\bz \le 0 \}$ is the dual cone of the convex cone $N_\eps$.
As proved in Fenchel \cite{Fenchel} (see also \cite{SaishoTanakaBrownianBalls}), the dual of a finite intersection of convex cones is the set of all limits of linear combinations of their dual cones, in particular~:
\[
N_\eps^* =\overline{ \sum_{\partial\D_i\ni\bx} \{\bz,~\bn_i(\bx).\bz \ge \eps|\bz| \}^* }
=\sum_{\partial\D_i\ni\bx} \{ \bv,~ -\bn_i(\bx).\bv \ge \sqrt{1-\eps^2} |\bv| \}
\]
For $k\in \N$ large enough, since $-\bn \in N_{\frac{1}{k}}^*$, there exist unit vectors $\bn_{i,k}$ and non-negative numbers $c_{i,k}$ such that~:
\[
\bn=\sum_{\partial\D_i\ni\bx} c_{i,k} \bn_{i,k} \quad\quad \text{ and } \quad\quad
\text{ for } \partial\D_i\ni\bx \quad \bn_i(\bx).\bn_{i,k} \ge \sqrt{1-\frac{1}{k^2}}
\]
When $k$ tends to infinity, $\bn_{i,k}$ tends to $\bn_i(\bx)$, and for $k$ large enough $\bn_{i,k}.\bl^0_\bx \ge \frac{\beta_0}{2}$ thus~:
\[
1 \ge \bn.\bl^0_\bx \ge \sum_{\partial\D_i\ni\bx} c_{i,k} \bn_{i,k}.\bl^0_\bx \ge \frac{\beta_0}{2} \sum_{\partial\D_i\ni\bx} c_{i,k}
\]
Thus the sequences $(c_{i,k})_k$ are bounded, which implies the existence of convergent subsequences. Their limits $c_{i,\infty} \ge 0$ satisfy~:
\[
\bn=\sum_{\partial\D_i\ni\bx} c_{i,\infty} \bn_i(\bx)
\]
This completes the proof of $\Nor^\D_\bx \subset \Nor'_\bx$.
We already proved that $\Nor'_\bx \subset \Nor^\D_{\bx,\alpha}$ with $\alpha=\beta_0 \min_{\partial\D_j\ni\bx}\alpha_j$, so we obtain
$\Nor^\D_\bx = \Nor'_\bx = \Nor^\D_{\bx,\alpha}$ for each $\bx\in\partial\D$.
As a consequence, $\D\in UES(\beta_0 \min_{1 \le j \le p} \alpha_j)$.
Let us now prove that $\D\in UNC(\beta,\delta)$.
The Uniform Normal Cone property restricted to $\D$ holds for $\D_i$, with constant $\beta_i<\frac{\beta_0^2}{2} \le \frac{1}{2}$.
That is, for $\bx\in\D\cap\partial\D_i$ there exists a unit vector $\bl^i_\bx$ which satisfies $\bl^i_\bx.\bn_i(\by) \ge \sqrt{1-\beta_i^2}$ for each $\by\in\D\cap\partial\D_i$ such that $|\bx-\by|\le\delta_i$. \\
If $\bl^i_\bx=\bn_i(\bx)$, this implies that $|\bn_i(\bx)-\bn_i(\by)|^2 \le 2\beta_i^2$. \\
If $\bl^i_\bx \neq \bn_i(\bx)$, we use the Gram-Schmidt orthogonalization process for a sequence of vectors with $\bl^i_\bx$ and $\bn_i(\bx)$ as first vectors, then compute $\bn_i(\bx).\bn_i(\by)$ in the resulting orthonormal basis $(\bl^i_\bx,{\bf e}_2,{\bf e}_3,\ldots,{\bf e}_m)$~:
\[
\bn_i(\bx).\bn_i(\by)=(\bl^i_\bx.\bn_i(\by))(\bl^i_\bx.\bn_i(\bx))+(\bn_i(\by).{\bf e}_2)\sqrt{1-(\bl^i_\bx.\bn_i(\bx))^2}
\]
Note that $|\bn_i(\by).{\bf e}_2|\le\beta_i$ because $\bl^i_\bx.\bn_i(\by) \ge \sqrt{1-\beta_i^2}$ and $|\bn_i(\by)|=1$, thus~:
\[
\bn_i(\bx).\bn_i(\by) \ge \Big(\sqrt{1-\beta_i^2}\Big)^2-\beta_i^2=1-2\beta_i^2
\]
This implies that $|\bn_i(\bx)-\bn_i(\by)|^2 \le 4\beta_i^2$.\\
So in both cases~: $\disp |\bn_i(\bx)-\bn_i(\by)| \le 2\beta_i$ as soon as $|\bx-\by|\le\delta_i$ for $\bx,\by\in\D\cap\partial\D_i$.
Let us now fix $\bx\in\partial\D$ and $\delta=\min_{1 \le i \le p} \delta_i/2$.
We then choose $\bx'\in \partial\D \cap B(\bx,\delta)$ such that $\{i \text{ s.t. } \bx'\in\partial\D_i \} \supset \{i \text{ s.t. } \by\in\partial\D_i \}$ for each $\by\in \partial\D \cap B(\bx,\delta)$ and we let $\bl=\bl^0_{\bx'}$.
To complete the proof of $\D\in UNC(\beta,\delta)$, we only have to prove that $\bn.\bl$ is uniformly bounded from below for $\bn\in\Nor^\D_\by$ with $\by\in \partial\D \cap B(\bx,\delta)$.
We already know that each $\bn\in\Nor^\D_\by$ is a convex sum of elements of the $\Nor^{\D_i}_\by$~:
$\bn=\sum_{\partial\D_i\ni\by} c_i \bn_i(\by) $.
The coefficients $c_i$ are non-negative, their sum is not smaller than $1$ because $\bn$ is a unit vector, and is not larger than $\frac{1}{\beta_0}$ thanks to $(iv)$.
So the vector $\bn'=\sum_{\partial\D_i\ni\by} c_i \bn_i(\bx')$ satisfies~:
\[
\bn'.\bl=\sum_{\partial\D_i\ni\bx'} c_i \bn_i(\bx').\bl \ge \beta_0
\quad\text{ and }\quad
|\bn'-\bn| \le \sum_{\partial\D_i\ni\by} c_i |\bn_i(\bx')-\bn_i(\by)|
\le 2 \sum_{\partial\D_i\ni\by} c_i \beta_i \le 2 \max_{1 \le i \le p} \frac{\beta_i}{\beta_0}
\]
Consequently~:
$\bn.\bl \ge \bn'.\bl-|\bn'-\bn| \ge \beta_0-2 \max_{1 \le i \le p} \frac{\beta_i}{\beta_0} >0$.
\end{proof2}
\section{Existence of dynamics for globules and linear molecule models } \label{SectPreuves}
\subsection{Globules model} \label{PreuvesGlobules}
Let us prove that the globules model satisfies the assumptions in proposition \ref{propcritere}.
The set of allowed configurations is~:
\[
\begin{array}{c} \disp
\disp \A_g=(\bigcap_{1 \le i<j \le n} \D_{ij}) \cap (\bigcap_{1 \le i \le n} \D_{i+}) \cap (\bigcap_{1 \le i \le n} \D_{i-}) \\~\\
\text{ where } \quad \D_{ij}=\left\{ \bx \in \R^{dn+n},~ |x_i-x_j|\ge \nx_i+\nx_j \right\} \\~\\
\D_{i+}=\left\{ \bx \in \R^{dn+n},~ \nx_i \le \rp \right\} \quad
\D_{i-}=\left\{ \bx \in \R^{dn+n},~ \nx_i \ge \rm \right\}
\end{array}
\]
The $\D_{ij}$ have smooth boundaries on $\A_g$ and the characterization of normal vectors is easy~: at point $\bx$ satisfying $|x_i-x_j|=\nx_i+\nx_j>0$, the unique unit inward normal vector $\bn_{ij}(\bx)=\bn$ is given by~:
\[
n_i=\frac{x_i-x_j}{2(\nx_i+\nx_j)} \quad \nn_i=-\frac{1}{2} \quad n_j=\frac{x_j-x_i}{2(\nx_i+\nx_j)} \quad \nn_j=-\frac{1}{2}
\quad \text{(every other component vanishes)}
\]
On the boundary of the half-space $\D_{i+}$, at point $\bx$ such that $\nx_i=\rp$, the unique unit normal vector $\bn$ has only one non-zero component~: $\nn_i=-1$. Similarly, $\D_{i-}$ is a half-space, $\bx$ belongs to its boundary if $\nx_i=\rm$, and the unique unit normal vector $\bn$ at this point has $\nn_i=1$ as its only non-zero component.
These vectors do not depend on $\bx$ and will be denoted by $\bn_{i+}$, $\bn_{i-}$ instead of $\bn_{i+}(\bx)$, $\bn_{i-}(\bx)$.
\begin{propal}\label{PropGeomBulles}
$\A_g$ satisfies properties $\disp UES\left( \frac{\rm^2}{2\rp n\sqrt{n}} \right)$ and $\disp UNC\left( \sqrt{1-\frac{\rm^2}{2^6\rp^2 n^3}}~,~\frac{\rm^5}{2^{14}\rp^4 n^6} \right)$.\\
Moreover, the vectors normal to the boundary $\partial\A_g$ are convex combinations of the vectors normal to the boundaries $\partial\D_{ij}$, $\partial\D_{i+}$, $\partial\D_{i-}$, that is, for every $\bx$ in $\partial\A_g$~:
\[
\Nor^{\A_g}_\bx=\left\{ \bn\in\S^{dn+n},~~
\bn=\sum_{ \partial\D_{ij}\ni\bx} c_{ij} \bn_{ij}(\bx)
+\sum_{ \partial\D_{i+}\ni\bx } c_{i+} \bn_{i+}
+\sum_{ \partial\D_{i-}\ni\bx } c_{i-} \bn_{i-}
\text{ with } c_{ij},c_{i+},c_{i-} \ge 0 \right\}
\]
\end{propal}
\begin{proof2} {\bf of proposition \ref{PropGeomBulles}}
We have to check that the assumption of proposition \ref{propcritere} are satisfied for the set $\A_g$.
Since the $\D_{i+}$ and $\D_{i-}$ are half-spaces, the Uniform Exterior Sphere property holds for them with any positive constant (formally $\alpha_{i+}=\alpha_{i-}=+\infty$). For the same reason, the Uniform Normal Cone property holds for $\D_{i+}$ with any constants $\beta_{i+}$ and $\delta_{i+}$, and with $\bl^{i+}_\bx$ equal to the normal vector $\bn_{i+}$. This also holds for the sets $\D_{i-}$ with any $\beta_{i-}$ and $\delta_{i-}$, and with $\bl^{i-}_\bx=\bn_{i-}$.
Let us consider $\bx\in\A_g$ such that $\bx\in\partial\D_{ij}$, i.e. $|x_i-x_j|=\nx_i+\nx_j$.
By definition of $\bn_{ij}(\bx)$, for $\by=\bx-(\nx_i+\nx_j)\bn_{ij}(\bx)$, one has~:
\[
\begin{array}{c} \disp
y_i=x_i-(\nx_i+\nx_j)\frac{x_i-x_j}{2(\nx_i+\nx_j)}=\frac{x_i+x_j}{2}=x_j-(\nx_i+\nx_j)\frac{x_j-x_i}{2(\nx_i+\nx_j)}=y_j \\
\ny_i+\ny_j=\nx_i-(\nx_i+\nx_j)(-\frac{1}{2}) +\nx_j-(\nx_i+\nx_j)(-\frac{1}{2})=2(\nx_i+\nx_j)
\end{array}
\]
For $\bz\in\mathring{B}(0,\nx_i+\nx_j)$~:
\[
|(y_i+z_i)-(y_j+z_j)|-(\ny_i+\nz_i+\ny_j+\nz_j) \le |z_i|+|z_j|-2(\nx_i+\nx_j)+|\nz_i|+|\nz_j| \le 2|\bz|-2(\nx_i+\nx_j) <0
\]
thus $\by+\bz \in\D_{ij}^c$. This proves that $\mathring{B}(\by,\nx_i+\nx_j) \subset \D_{ij}^c$, hence $\Nor^{\D_{ij}}_\bx= \Nor^{\D_{ij}}_{\bx,\nx_i+\nx_j}$. The general Uniform Exterior Sphere property does not hold, but the property restricted to $\A_g$ holds for $\D_{ij}$ with constant $\alpha_{ij}=2\rm$ because $\nx_i+\nx_j \ge 2\rm$ for $\bx\in\A_g$.
For $\bx\in\A_g$ such that $\bx\in\partial\D_{ij}$, let us define $\bl^{ij}_\bx=\bn_{ij}(\bx)$.
For another configuration $\by\in\partial\D_{ij}$~:
\[
\bl^{ij}_\bx.\bn_{ij}(\by)
=\frac{x_i-x_j}{2(\nx_i+\nx_j)}.\frac{y_i-y_j}{2(\ny_i+\ny_j)}+\frac{x_j-x_i}{2(\nx_i+\nx_j)}.\frac{y_j-y_i}{2(\ny_i+\ny_j)}
+(-\frac{1}{2})^2+(-\frac{1}{2})^2
=\frac{(x_i-x_j).(y_i-y_j)}{2(\nx_i+\nx_j)(\ny_i+\ny_j)}+\frac{1}{2}
\]
Since $(x_i-x_j).(y_i-y_j) \ge |x_i-x_j|^2-|x_i-x_j||y_i-x_i-y_j+x_j| \ge |x_i-x_j|^2-\sqrt{2}|x_i-x_j||\bx-\by|$
and $\ny_i+\ny_j \le (\nx_i+\nx_j)+\sqrt{2}|\bx-\by|$, this leads to~:
\[
\bl^{ij}_\bx.\bn_{ij}(\by) \ge \frac{|x_i-x_j|-\sqrt{2}|\bx-\by|}{2(\nx_i+\nx_j+\sqrt{2}|\bx-\by|)}+\frac{1}{2}
=\frac{\nx_i+\nx_j}{\nx_i+\nx_j+\sqrt{2}|\bx-\by|}
\]
Consequently $\bl^{ij}_\bx.\bn_{ij}(\by) \ge \sqrt{1-\beta_{ij}^2}$ as soon as
$|\bx-\by| \le \frac{\nx_i+\nx_j}{\sqrt{2}} \left( \frac{1}{\sqrt{1-\beta_{ij}^2}}-1 \right)$.
This proves that $\D_{ij}$ has the Uniform Normal Cone property restricted to $\A_g$ with any constant $\beta_{ij}\in ]0,1[$ and with $\delta_{ij}=\sqrt{2} \rm \left( \frac{1}{\sqrt{1-\beta_{ij}^2}}-1 \right)>0$. In particular, the property holds for any $\beta_{ij}$ with $\delta_{ij}=\rm \beta_{ij}^2/\sqrt{2}$.
For $\bx\in\partial\A_g$, let us construct a unit vector $\bl_\bx^0$ satisfying assumption \emph{(iv)} in proposition \ref{propcritere}.
We first have to define clusters of colliding globules~:
\[
C_\bx(i)=\left\{ j \text{ s.t. }
|x_i-x_{j_1}|=\nx_i+\nx_{j_1},~ |x_{j_1}-x_{j_2}|=\nx_{j_1}+\nx_{j_2},\ldots,
|x_{j_k}-x_j|=\nx_{j_k}+\nx_j \text{ for some } j_1,\ldots,j_k
\right\}
\]
and their centers of gravity $\disp x'_i=\frac{1}{\sharp C_\bx(i)} \sum_{j\in C_\bx(i)} x_j$.
Let $\bl_\bx^0=\frac{\bv}{|\bv|}$ with for each $i$~:
\[
v_i= x_i-x'_i \quad\text{ and }\quad \nv_i=\frac{\rm}{\rp-\rm} (\frac{\rp+\rm}{2}-\nx_i)
\]
We need an upper bound on the norm of $\bv$. Since $\bx\in\A_g$, each $\nx_i$ is larger than $\rm$ and smaller than $\rp$, thus $|\frac{\rp+\rm}{2}-\nx_i| \le \frac{\rp-\rm}{2}$. Moreover, if $j$ belongs to the cluster $C_\bx(i)$ around the $i^\text{th}$ globule, the distance between $x_i$ and $x_j$ is at most $2(n-1)\rp$, thus $|v_i|\le 2(n-1)\rp$~:
\[
|\bv|^2=\sum_{i=1}^n |v_i|^2 + (\frac{\rm}{\rp-\rm})^2 \sum_{i=1}^n (\frac{\rp+\rm}{2}-\nx_i)^2
\le 4n(n-1)^2 \rp^2 + n\frac{\rm^2}{4} < 4 n^3 \rp^2
\]
We also need lower bounds on the scalar products of $\bv$ with normal vectors on the boundaries of the $\D_{i+}$, $\D_{i-}$, $\D_{ij}$. If $\nx_i=\rp$, then $\nv_i=\frac{\rm}{\rp-\rm} \frac{\rm-\rp}{2}$ thus $\bv.\bn_{i+}=\frac{\rm}{2}$. Similarly, if $\nx_i=\rm$, then $\bv.\bn_{i-}=\frac{\rm}{2}$. If $|x_i-x_j|=\nx_i+\nx_j$ then $x'_i=x'_j$ thus~:
\[
\begin{array}{l}
\bv.\bn_{ij}(\bx)=(x_i-x'_i).\frac{x_i-x_j}{2(\nx_i+\nx_j)}+(x_j-x'_j).\frac{x_j-x_i}{2(\nx_i+\nx_j)}
-\frac{1}{2}\frac{\rm}{\rp-\rm} (\frac{\rp+\rm}{2}-\nx_i)
-\frac{1}{2}\frac{\rm}{\rp-\rm} (\frac{\rp+\rm}{2}-\nx_j) \\
\phantom{\bv.\bn_{ij}(\bx)}
=(x_i-x_j).\frac{x_i-x_j}{2(\nx_i+\nx_j)}-\frac{1}{2}\frac{\rm}{\rp-\rm}(\rp+\rm-\nx_i-\nx_j) \\
\phantom{\bv.\bn_{ij}(\bx)}
=\frac{\rp}{2(\rp-\rm)}(\nx_i+\nx_j)-\frac{\rm(\rp+\rm)}{2(\rp-\rm)} \ge \frac{\rm}{2}
\end{array}
\]
because $\nx_i+\nx_j \ge 2\rm$. This proves that assumption \emph{(iv)} in proposition \ref{propcritere} is satisfied with
$\disp \beta_0 = \frac{\rm}{2\sqrt{4 n^3 \rp^2}}=\frac{\rm}{4\rp n\sqrt{n}}$.
The inequality $\beta_0>\sqrt{2\max(\max_i \beta_{i+},\max_i \beta_{i-},\max_{i,j} \beta_{ij})}$ holds as soon as
$\beta_{ij}<\beta_0^2/2$. As seen above, the Uniform Normal Cone property restricted to $\A_g$ holds for $\D_{ij}$ with constants $\beta_{ij}=\beta_0^2/4$ and $\delta_{ij} =\frac{\rm\beta_0^4}{16\sqrt{2}}$.
So, thanks to proposition \ref{propcritere}, we obtain that $\A_g$ has the Uniform Exterior Sphere property with constant
$\alpha_{\A_g}=2\rm\beta_0=\frac{\rm^2}{2\rp n\sqrt{n}}$ and the Uniform Normal Cone property with constants
$\delta_{\A_g}=\frac{\rm\beta_0^4}{32\sqrt{2}}=\frac{\rm^5}{2^{13}\sqrt{2}\rp^4 n^6}$ and
$\beta_{\A_g}=\sqrt{1-(\beta_0/2)^2}=\sqrt{1-\frac{\rm^2}{64\rp^2 n^3}}$
\end{proof2}
\begin{proof2} {\bf of theorems \ref{thexistglobules} and \ref{threversglobules}}
As seen in the proof of proposition \ref{PropGeomBulles}, the set $\A_g$ of allowed globules configurations satisfies the assumptions of corollary \ref{corolSaisho}.
If the diffusion coefficients $\sigma_i$ and $\nsigma_i$ and the drift coefficients $b_i$ and $\nb_i$ are bounded and Lipschitz continuous on $\A_g$ (for $1 \le i \le n$), then the functions
$$
\bsigma(\bx)=\left[\begin{array}{ccccc} \sigma_1(\bx)& 0 & \cdots & \cdots & 0 \\
0 &\nsigma_1(\bx)& \ddots & & \vdots \\
\vdots & \ddots & \ddots & \ddots & \vdots \\
\vdots & & \ddots &\sigma_n(\bx)& 0 \\
0 & \cdots & \cdots & 0 &\nsigma_n(\bx)
\end{array}\right]
\text{ and }
\bb(\bx)=\left[\begin{array}{c} b_1(\bx) \\
\nb_1(\bx) \\
\vdots \\
b_n(\bx) \\
\nb_n(\bx)
\end{array}\right]
$$
are bounded and Lipschitz continuous as well. Thus, for $m=n(d+1)$ and $\D=\A_g$, equation (\ref{eqSaisho}) has a unique strong solution $\bX$, and there exists a decomposition of the reflection term $\int_0^{\cdot} \bn(s)d\bL(s)$ so that it is equal to
$$
\left(
\sum_{j=1}^n \int_0^{\cdot} \frac{X_i-X_j}{2(\nX_i+\nX_j)}(s) dL_{ij}(s)~,~
-\sum_{j=1}^n \int_0^{\cdot} \frac{1}{2} dL_{ij}(s) -\int_0^{\cdot} dL_{i+}(s) +\int_0^{\cdot} dL_{i-}(s)
\right)_{1 \le i \le n}
$$
with $\disp L_{ij} = \int_0^{\cdot} \un_{|X_i(s)-X_j(s)|=\nX_i(s)+\nX_j(s)} c_{ij}(s)~d\bL(s)$,\quad
$\disp L^+_i = \int_0^{\cdot} \un_{\nX_i(s)=\rp} c_{i+}(s)~d\bL(s)$ and
$\disp L^-_i = \int_0^{\cdot} \un_{\nX_i(s)=\rm} c_{i-}(s)~d\bL(s)$
for some measurable choice of the $c_{ij},c_{i+},c_{i-}$.
For convenience, the local time $\frac{1}{2}L_{ij}$ has been used in equation $(\E_g)$, and denoted by $L_{ij}$ again.
For an even $\C^2$ function $\phi$ on $\R^{d+1}$ with bounded derivatives,
the function $\Phi(\bx)=\sum_{1 \le i<j \le n} \phi(\bx_i-\bx_j)$ is $\C^2$ with bounded derivatives
and the drift function in equation $(\E^{\phi}_g)$ is equal to $-\frac{1}{2} \nabla\Phi$.
Thus $(\E^{\phi}_g)$ has a unique strong solution.
Moreover, if $Z=\int_{\A_g} e^{-\Phi(\bx)} d\bx <+\infty$, theorem \ref{thSaisho} implies the time-reversibility of the solution with initial distribution $d\mu(\bx)=\frac{1}{Z} \un_{\A_g}(\bx) e^{-\Phi(\bx)} d\bx$.
\end{proof2}
\subsection{Linear molecule model} \label{PreuvesChenilles}
The set of allowed chains configurations is the intersection of $2n+1$ sets~:
\[
\begin{array}{c}
\disp \A_c= (\bigcap_{i=1}^{n-1} \D_{i-}) \bigcap~ (\bigcap_{i=1}^{n-1} \D_{i+}) \bigcap~ \D_- \bigcap~ \D_+ \bigcap~ \D_= \\~\\
\disp\text{ where } \quad \D_{i-} =\left\{ \bx \in \R^{dn+2},~ |x_i-x_{i+1}|\ge \nx_- \right\} \quad
\D_{i+} =\left\{ \bx \in \R^{dn+2},~ |x_i-x_{i+1}|\le \nx_+ \right\} \\~\\
\disp \D_- =\left\{ \bx \in \R^{dn+2},~ \nx_- \ge \rm \right\} \quad
\D_+ =\left\{ \bx \in \R^{dn+2},~ \nx_+ \le \rp \right\} \quad
\D_= =\left\{ \bx \in \R^{dn+2},~ \nx_- \le \nx_+ \right\}
\end{array}
\]
The boundaries of these sets are smooth, and simple derivation computations give the unique unit vector normal to each boundary at each point of $\partial\A_c$ (we are not interested in other points). Actually, $\D_-$, $\D_+$ and $\D_=$ are half-spaces, which makes the computations and checking of UES and UNC properties very simple.
\begin{itemize}
\item
The vector $\bn_-=(0,\ldots,0,1,0)$ is normal to the boundary $\partial\D_-$ at each point $\bx\in\partial\D_-$
\item
Similarly, $\bn_+=(0,\ldots,0,0,-1)$ is normal to the boundary $\partial\D_+$ at each point $\bx\in\partial\D_+$
\item
The vector normal to the boundary $\partial\D_=$ at each point $\bx\in\partial\D_=$ is
$\bn_= =(0,\ldots,0,\frac{-1}{\sqrt{2}},\frac{1}{\sqrt{2}})$
\item
At every $\bx\in\A_c\cap\partial\D_{i-}$ (for $1 \le i \le n-1$), the vector normal to $\partial\D_{i-}$ is $\bn=\bn_{i-}(\bx)$ defined by~:
\[
n_i=\frac{x_i-x_{i+1}}{\nx_- \sqrt{3}} \quad n_{i+1}=\frac{x_{i+1}-x_i}{\nx_- \sqrt{3}} \quad \nn_-=-\frac{1}{\sqrt{3}}
\quad \text{(other components equal zero)}
\]
\item
At every $\bx\in\A_c\cap\partial\D_{i+}$, the vector normal to $\partial\D_{i+}$ is $\bn=\bn_{i+}(\bx)$ defined by~:
\[
n_i=\frac{x_{i+1}-x_i}{\nx_+ \sqrt{3}} \quad n_{i+1}=\frac{x_i-x_{i+1}}{\nx_+ \sqrt{3}} \quad \nn_+=\frac{1}{\sqrt{3}}
\quad \text{(other components equal zero)}
\]
\end{itemize}
The proof of theorem \ref{thexistchenilles} is similar to the proof of theorems \ref{thexistglobules} and \ref{threversglobules}, and will be omitted. It relies on the following proposition and uses theorem \ref{thSaisho} and corollary \ref{corolSaisho} to obtain the existence, uniqueness, and reversibility of the solution of $(\E_c)$. As in the proof of theorem \ref{thexistglobules}, the local times are multiplied by a suitable constant to provide a more convenient expression.
So, in order to prove theorem \ref{thexistchenilles}, we only have to check that $\A_c$ satisfies the assumptions of proposition \ref{propcritere}, i.e. that the following proposition holds~:
\begin{propal}\label{PropGeomCaterpillar}
$\A_c$ satisfies properties $\disp UES\left(\frac{\rm^2}{2\rp n \sqrt{2n}}\right)$ and
$\disp UNC\left(\sqrt{1-\frac{\rm^2}{24\rp^2 n^3}},\frac{\rm^5 \sqrt{3}}{2^{12} \rp^4 n^6}\right)$. \\
Moreover, for each $\bx$ in $\partial\A_c$, the set $\Nor^{\A_c}_\bx$ of normal vectors is equal to~:
{\small\[
\left\{
\begin{array}{r} \disp
\bn\in\S^{dn+2},
\bn=\sum_{\partial\D_{i-}\ni\bx} c_{i-} \bn_{i-}(\bx) +\sum_{\partial\D_{i+}\ni\bx} c_{i+} \bn_{i+}(\bx)
+ \un_{\partial\D_-}(\bx) c_- \bn_- + \un_{\partial\D_+}(\bx)c_+ \bn_+ + \un_{\partial\D_=}(\bx) c_= \bn_= \\
\text{ with } c_{i-},c_{i+},c_-,c_+,c_= \ge 0
\end{array}
\right\}
\]}
\end{propal}
\begin{proof2} {\bf of proposition \ref{PropGeomCaterpillar}}
Let us check that the set $\A_c$ satisfies the assumptions of proposition \ref{propcritere}.
The Uniform Exterior Sphere property and the Uniform Normal Cone property hold for $\D_-$, $\D_+$ and $\D_=$, with any positive $\alpha$ and $\delta$ and any $\beta$ in $[0,1[$, because these sets are half-spaces.
For $\bx\in\A_c\cap\partial\D_{i-}$, for $\alpha=\frac{\rm\sqrt{3}}{2}$, and for $\by=\bx-\alpha\bn_{i-}(\bx)$~:
\[
y_i=x_i-\alpha\frac{x_i-x_{i+1}}{\nx_- \sqrt{3}} \quad y_{i+1}=x_{i+1}-\alpha\frac{x_{i+1}-x_i}{\nx_- \sqrt{3}} \quad
\ny_-=\nx_- +\frac{\alpha}{\sqrt{3}}
\]
so that each $\bz\in\mathring{B}(0,\alpha)$ satisfies
$|(y_i+z_i)-(y_{i+1}+z_{i+1})|=\left| x_i-x_{i+1}- \rm\frac{x_i-x_{i+1}}{\nx_-} + z_i-z_{i+1} \right|$
and that $|x_i-x_{i+1}|=\nx_-$ implies
\[
|(y_i+z_i)-(y_{i+1}+z_{i+1})|-(\ny_- + \nz_-)
\le \nx_-(1-\frac{\rm}{\nx_-}) +|z_i-z_{i+1}|-\nx_- -\frac{\rm}{2}+ \nz_-
=-\alpha\sqrt{3}+|z_i-z_{i+1}|+ \nz_-
\]
We know that $|z_i-z_{i+1}|+ \nz_- \le \sqrt{3}|\bz| < \sqrt{3}\alpha$, thus the above quantity is negative and $\by+\bz \not\in \D_{i-}$.
Consequently, the Uniform Exterior Sphere property holds for $\D_{i-}$ with constant $\alpha_{i-}=\frac{\rm\sqrt{3}}{2}$.
It also holds for $\D_{i+}$, with any positive constant, because this set is convex (the midpoint of two points in $\D_{i+}$ obviously belongs to $\D_{i+}$).
In order to prove that $\D_{i-}$ has the Uniform Normal Cone property restricted to $\A_c$, we fix two chains $\bx,\by\in\A_c\cap\partial\D_{i-}$ and compute~:
\[
\bn_{i-}(\bx).\bn_{i-}(\by)=\frac{2}{3}\frac{x_i-x_{i+1}}{\nx_-}.\frac{y_i-y_{i+1}}{\ny_-}+\frac{1}{3}
=\frac{2|x_i-x_{i+1}|^2}{3 \nx_-^2}+\frac{1}{3}
+\frac{2}{3}\frac{x_i-x_{i+1}}{\nx_-}.\left( \frac{y_i-y_{i+1}}{\ny_-}-\frac{x_i-x_{i+1}}{\nx_-} \right)
\]
Since $|x_i-x_{i+1}|=\nx_-$, we obtain~:
$\disp \bn_{i-}(\bx).\bn_{i-}(\by) \ge 1-\frac{2}{3}\left| \frac{y_i-y_{i+1}}{\ny_-}-\frac{x_i-x_{i+1}}{\nx_-} \right|$.\\
Then $|\nx_- (y_i-y_{i+1})-\ny_- (x_i-x_{i+1})| \le \nx_-|y_i-y_{i+1}-x_i+x_{i+1}|+|\nx_- - \ny_-||x_i-x_{i+1}|$ leads to~:
\[
\bn_{i-}(\bx).\bn_{i-}(\by) \ge 1-\frac{2}{3 \ny_-}\left( |y_i-x_i|+|y_{i+1}-x_{i+1}|+|\nx_- - \ny_-|\right)
\ge 1-\frac{2|\by-\bx|}{\sqrt{3} \ny_-}
\ge 1-\frac{2|\by-\bx|}{\sqrt{3} \rm}
\]
Thus $\bn_{i-}(\bx).\bn_{i-}(\by) \ge \sqrt{1-\beta_{i-}^2}$ as soon as $|\by-\bx| \le \frac{\rm\sqrt{3}}{2}(1-\sqrt{1-\beta_{i-}^2})$, which is implied by $|\by-\bx| \le \frac{\rm\sqrt{3}}{4}\beta_{i-}^2$.
As a consequence, the set $\D_{i-}$ has the Uniform Normal Cone property restricted to $\A_c$ with any constant $\beta_{i-}\in[0,1[$ and the corresponding constant $\delta_{i-}=\frac{\rm\sqrt{3}}{4}\beta_{i-}^2$.
Note that for $\bx,\by\in\A_c\cap\partial\D_{i+}$~:
$ \disp\quad \bn_{i+}(\bx).\bn_{i+}(\by)=\frac{2}{3}\frac{x_i-x_{i+1}}{\nx_+}.\frac{y_i-y_{i+1}}{\ny_+}+\frac{1}{3}$ \\
so that the same computation gives that $\D_{i+}$ has the Uniform Normal Cone property restricted to $\A_c$ with any constants $\beta_{i+}\in[0,1[$ and $\delta_{i+}=\frac{\rm\sqrt{3}}{4}\beta_{i+}^2$.
In order to check the compatibility assumption, let us fix a point $\bx\in\partial\A_c$ and construct a suitable vector $\bv$ such that $\bl_\bx^0=\frac{\bv}{|\bv|}$.
We first define the ``middle point'' $x'$ of the chain~: $x'=x_{(n+1)/2}$ if the chain contains an odd number of particles, and
$x'=\frac{x_{n/2}+x_{n/2+1}}{2}$ if $n$ is an even number. We also define $\nx'=\frac{\nx_+ + \nx_-}{2}$.
We then construct the $v_i$'s in an incremental way, starting from the middle and going up and down to both ends~:
\begin{itemize}
\item
If $n$ is odd, we choose $v_{(n+1)/2}=0$.
\item
In the case of an even $n$, we choose $(v_{n/2},v_{n/2+1})=(x_{n/2}-x',x_{n/2+1}-x')$ if $|x_{n/2}-x_{n/2+1}|<\nx'$ and
$(v_{n/2},v_{n/2+1})=(x_{n/2+1}-x',x_{n/2}-x')$ if $|x_{n/2}-x_{n/2+1}|>\nx'$. \\
In the critical case $|x_{n/2}-x_{n/2+1}|=\nx'$, our choice depends on the value of $\nx'$~: we let $(v_{n/2},v_{n/2+1})=(x_{n/2}-x',x_{n/2+1}-x')$ if $\nx'<\rp$ and $(v_{n/2},v_{n/2+1})=(x_{n/2+1}-x',x_{n/2}-x')$ if $\nx_+=\nx_-=\rp$.
\item
The other $v_i$'s are chosen incrementally so as to fulfill the same condition~:
\[
v_i-v_{i+1}=\left\{ \begin{array}{l}
x_i-x_{i+1} ~~~\text{ if } |x_i-x_{i+1}|<\nx' \text{,~~ or } |x_i-x_{i+1}|=\nx' \text{ and } \nx'<\rp \\
x_{i+1}-x_i ~~~\text{ if } |x_i-x_{i+1}|>\nx' \text{,~~ or } |x_i-x_{i+1}|=\nx' \text{ and } \nx'=\rp
\end{array} \right.
\]
\end{itemize}
Note that $|v_i|\le \frac{n-1}{2}\rp$ for each $i$, since each $v_i$ is reached from the middle of the chain by at most $\frac{n-1}{2}$ increments of length at most $\rp$. For our choice of $\bv$ to be complete, we also define~:
\begin{itemize}
\item
$\disp \nv_- =\frac{\rm}{2},~~ \nv_+ =\frac{-\rm}{2} $ \quad if $\nx_- < \nx_+$
\item
$\disp \nv_- =\frac{\rm}{2},~~ \nv_+ =\frac{3}{2}\rp $ \quad if $\nx_- = \nx_+ < \rp$
\item
$\disp \nv_- =\frac{-3}{2}\rp,~~ \nv_+ =\frac{-\rp}{2} $ \quad if $\nx_- = \nx_+ = \rp$
\end{itemize}
As in the proof of proposition \ref{PropGeomBulles}, we need an upper bound on the norm of $\bv$~:
\[
|\bv|^2=\sum_{i=1}^n |v_i|^2 + \nv_-^2 + \nv_+^2 \le n(\frac{n-1}{2}\rp)^2 + \frac{\rp^2}{4} + \frac{9\rp^2}{4} \le \frac{n^3 \rp^2}{2}
\]
We also have to prove that the scalar products of $\bv$ with normal vectors on the boundaries are uniformly bounded from below.
Note that $\bv.\bn_- =\nv_-$,\quad $\bv.\bn_+ =-\nv_+$,\quad $\bv.\bn_= = \frac{\nv_+ - \nv_-}{\sqrt{2}}$,\quad
$\disp \bv.\bn_{i-}(\bx)=\frac{x_i-x_{i+1}}{\nx_- \sqrt{3}}.(v_i-v_{i+1}) - \frac{\nv_-}{\sqrt{3}}$\quad and \quad
$\disp \bv.\bn_{i+}(\bx)=\frac{x_i-x_{i+1}}{\nx_+ \sqrt{3}}.(v_{i+1}-v_i) + \frac{\nv_+}{\sqrt{3}}$.
In the case where $\nx_- < \nx_+$, the choices made on $\bv$ lead to~:
\begin{itemize}
\item
if $\nx_- =\rm$,\quad $\bv.\bn_- =\frac{\rm}{2}$
\item
if $\nx_+ =\rp$,\quad $\bv.\bn_+ =\frac{\rm}{2}$
\item
for $i$ s.t. $|x_i-x_{i+1}|=\nx_-$,\quad $v_i-v_{i+1}=x_i-x_{i+1}$
hence $\bv.\bn_{i-}(\bx)=\frac{\nx_- - \nv_-}{\sqrt{3}} \ge \frac{\rm - \frac{\rm}{2}}{\sqrt{3}} = \frac{\rm}{2\sqrt{3}}$
\item
for $i$ s.t. $|x_i-x_{i+1}|=\nx_+$,\quad $v_i-v_{i+1}=x_{i+1}-x_i$
hence $\bv.\bn_{i+}(\bx)=\frac{\nx_+ + \nv_+}{\sqrt{3}} \ge \frac{\rm - \frac{\rm}{2}}{\sqrt{3}} = \frac{\rm}{2\sqrt{3}}$
\end{itemize}
We now proceed with the case $\nx_- = \nx_+ < \rp$. Since $|x_i-x_{i+1}|=\nx_-=\nx_+$ and $v_i-v_{i+1}=x_i-x_{i+1}$ for each $i$, we have~:
\begin{itemize}
\item
if $\nx_- =\rm$,\quad $\bv.\bn_- =\frac{\rm}{2}$
\item
$\disp \bv.\bn_= = \frac{\frac{3}{2}\rp-\frac{\rm}{2}}{\sqrt{2}} \ge \frac{\rp}{\sqrt{2}}$
\item
$\disp \bv.\bn_{i-}(\bx) =\frac{\nx_- - \nv_-}{\sqrt{3}} \ge \frac{\rm - \frac{\rm}{2}}{\sqrt{3}} = \frac{\rm}{2\sqrt{3}}$ and
$\disp \bv.\bn_{i+}(\bx)=\frac{-\nx_- + \nv_+}{\sqrt{3}} \ge \frac{-\rp + \frac{3}{2}\rp}{\sqrt{3}} \ge \frac{\rp}{2\sqrt{3}}$
\end{itemize}
In the last case $\nx_- = \nx_+ = \rp$, one has $|x_i-x_{i+1}|=\rp$ and $v_i-v_{i+1}=x_{i+1}-x_i$ for each $i$~:
\begin{itemize}
\item
$\bv.\bn_+ =\frac{\rp}{2}$
\item
$\bv.\bn_= = \frac{\frac{-\rp}{2} + \frac{3}{2}\rp}{\sqrt{2}} \ge \frac{\rp}{\sqrt{2}}$
\item
$\disp \bv.\bn_{i-}(\bx)=\frac{-\rp + \frac{3}{2}\rp}{\sqrt{3}} \ge \frac{\rp}{2\sqrt{3}}$ and
$\disp \bv.\bn_{i+}(\bx)=\frac{\rp - \frac{\rp}{2}}{\sqrt{3}}=\frac{\rp}{2\sqrt{3}}$
\end{itemize}
So all these scalar products are larger than $\frac{\rm}{2\sqrt{3}}$.
As a consequence, assumption \emph{(iv)} in proposition \ref{propcritere} is satisfied with
$\disp \beta_0 = \frac{\rm}{\rp n \sqrt{6n}}$.
Choosing $\beta_{i-}=\beta_{i+}=\beta_0^2/4$ hence $\delta_{i-}=\delta_{i+}=\frac{\rm\sqrt{3}}{64}\beta_0^4$
we obtain, thanks to proposition \ref{propcritere}, that $\A_c$ has the Uniform Exterior Sphere property with constant
$\alpha_{\A_c}=\frac{\rm^2}{2\rp n \sqrt{2n}}$ and the Uniform Normal Cone property with constants
$\delta_{\A_c}=\frac{\rm^5}{2^9 3 \sqrt{3}\rp^4 n^6}$ and $\beta_{\A_c}=\sqrt{1-\frac{\rm^2}{24\rp^2 n^3}}$
\end{proof2}
{\em Acknowledgments~:} The author thanks Sylvie Roelly for interesting discussions and helpful comments.
\end{document} |
\begin{document}
\title{
Fast Algorithms for the Shortest Unique Palindromic Substring Problem on Run-Length Encoded Strings
}
\begin{abstract}
For a string $S$,
a palindromic substring $S[i..j]$ is said to be a \emph{shortest unique palindromic substring} ($\mathit{SUPS}$)
for an interval $[s, t]$ in $S$,
if $S[i..j]$ occurs exactly once in $S$, the interval $[i, j]$ contains $[s, t]$,
and every palindromic substring containing $[s, t]$ which is shorter than $S[i..j]$
occurs at least twice in $S$.
In this paper, we study the problem of answering $\mathit{SUPS}$ queries on run-length encoded strings.
We show how to preprocess a given run-length encoded string $\rle{S}$ of size $m$
in $O(m)$ space and $O(m \log \sigma_{\rle{S}} + m \sqrt{\log m / \log\log m})$ time
so that all $\mathit{SUPSs}$ for any subsequent query interval can be answered
in $O(\sqrt{\log m / \log\log m} + \alpha)$ time, where $\alpha$ is the number of outputs,
and $\sigma_{\rle{S}}$ is the number of distinct runs of $\rle{S}$.
Additionally, we consider a variant of the SUPS problem where
a query interval is also given in a run-length encoded form.
For this variant of the problem,
we present two alternative algorithms with faster queries.
The first one answers queries in $O(\sqrt{\log\log m /\log\log\log m} + \alpha)$ time
and can be built in $O(m \log \sigma_{\rle{S}} + m \sqrt{\log m / \log\log m})$ time,
and the second one answers queries in $O(\log \log m + \alpha)$ time
and can be built in $O(m \log \sigma_{\rle{S}})$ time.
Both of these data structures require $O(m)$ space.
\end{abstract}
\section{Introduction}
The \emph{shortest unique substring} (\emph{SUS}) problem,
which is formalized below, is a recent trend in the string processing community.
Consider a string $S$ of length $n$.
A substring $X = S[i..j]$ of $S$ is called a SUS for a position $p$~($1 \leq p \leq n$)
iff the interval $[i..j]$ contains $p$, $X$ occurs in $S$ exactly once,
and every substring containing $p$ which is shorter than $S[i..j]$ occurs at least twice in $S$.
The SUS problem is to preprocess a given string $S$ so that
SUSs for query positions $p$ can be answered quickly.
The study on the SUS problem was initiated by Pei et al.,
and is motivated by an application to bioinformatics e.g.,
designing polymerase chain reaction (PCR) primer~\cite{Pei}.
Pei et al.~\cite{Pei} showed a $\Theta(n^2)$-time and space
preprocessing scheme such that all $k$ SUSs for
a query position can be answered in $O(k)$ time.
Later, two independent groups, Tsuruta et al.~\cite{Tsuruta} and Ileri et al.~\cite{ileri14_SUS},
showed algorithms that use $\Theta(n)$ time and space\footnote{Throughout this paper, we measure the space complexity of an algorithm with the number of \emph{words} that the algorithm occupies in the word RAM model, unless otherwise stated.} for preprocessing,
and all SUSs can be answered in $O(k)$ time per query.
To be able to handle huge text data where $n$ can be massively large,
there have been further efforts to reduce the space usage.
Hon et al.~\cite{HonTX15} proposed an ``in-place'' algorithm which works within
space of the input string $S$ and two output arrays $A$ and $B$ of length $n$ each,
namely, in $n \log_2 \sigma$ \emph{bits} plus $2n$ words of space.
After the execution of their algorithm that takes $O(n)$ time,
the beginning and ending positions of a SUS for each text position
$i$~($1 \leq i \leq n$) are consequently stored in $A[i]$ and $B[i]$, respectively,
and $S$ remains unchanged.
Hon et al.'s algorithm can be extended to handle
SUSs with approximate matches, with a penalty of $O(n^2)$ preprocessing time.
For a pre-determined parameter $\tau$,
Ganguly et al.~\cite{GangulyHST17_SUS}
proposed a time-space trade-off algorithm for the SUS problem
that uses $O(n / \tau)$ additional working space (apart from the input string $S$)
and answers each query in $O(n \tau^2 \log \frac{n}{\tau})$ time.
They also proposed a ``succinct'' data structure of
$4n + o(n)$ \emph{bits} of space
that can be built in $O(n \log n)$ time and can answer a SUS for each given query
position in $O(1)$ time.
Another approach to reduce the space requirement for the SUS problem
is to work on a ``compressed'' representation of the string $S$.
Mieno et al.~\cite{MienoIBT16} developed a data structure of $\Theta(m)$ space
(or $\Theta(m \log n)$ \emph{bits} of space)
that answers all $k$ SUSs for a given position in $O(\sqrt{\log m / \log \log m} + k)$ time,
where $m$ is the size of the \emph{run length encoding} (\emph{RLE})
of the input string $S$.
This data structure can be constructed in $O(m \log m)$ time with $O(m)$ words of working space
if the input string $S$ is already compressed by RLE,
or in $O(n + m \log m)$ time with $O(m)$ working space
if the input string $S$ is given without being compressed.
A generalized version of the SUS problem,
called the \emph{interval} SUS problem, is to answer
SUSs that contain a query interval $[s, t]$ with $1 \leq s \leq t \leq n$.
Hu et al.~\cite{HuPT14} proposed an optimal $\Theta(n)$ time and space algorithm
to preprocess a given string $S$
so that all $k$ SUSs for a given query interval are reported in $O(k)$ time.
Mieno et al.'s data structure~\cite{MienoIBT16}
also can answer interval SUS queries
with the same preprocessing time/space and query time as above.
Recently, a new variant of the SUS problem, called the
\emph{shortest unique palindromic substring} (\emph{SUPS}) problem is considered~\cite{SUPS}.
A substring $P = S[i..j]$ is called a SUPS for an interval $[s,t]$
iff $P$ occurs exactly once in $S$, $[s,t] \subseteq [i,j]$,
and every palindromic substring of $S$ which contains interval $[s,t]$ and is shorter
than $P$ occurs at least twice in $S$.
The study on the SUPS problem is motivated by an application in molecular biology.
Inoue et al.~\cite{SUPS} showed how to preprocess a given string $S$ of length $n$
in $\Theta(n)$ time and space so that all $\alpha$ SUPSs (if any)
for a given interval can be answered
in $O(\alpha + 1)$ time\footnote{It is possible that $\alpha = 0$ for some intervals.}.
While this solution is optimal in terms of the length $n$ of the input string,
no space-economical solutions for the SUPS problem were known.
In this paper, we present the \emph{first} space-economical solution
to the SUPS problem based on RLE.
The proposed algorithm computes
a data structure of $\Theta(m)$ space
that answers each SUPS query in $O(\sqrt{\log m / \log \log m}+\alpha)$ time.
The most interesting part of our algorithm is how to preprocess
a given RLE string of length $m$ in $O(m (\log \sigma_{\rle{S}} + \sqrt{\log m / \log \log m}))$ time,
where $\sigma_{\rle{S}}$ is the number of distinct runs in the RLE of $S$.
Note that $\sigma_{\rle{S}} \leq m$ always holds.
For this sake, we propose RLE versions of
Manacher's maximal palindrome algorithm~\cite{Manacher75} and Rubinchik and Shur's eertree data structure~\cite{EERTREE},
which may be of independent interest.
We remark that our preprocessing scheme is quite different from
Mieno et al.'s method~\cite{MienoIBT16} for the SUS problem on RLE strings
and Inoue et al.'s method~\cite{SUPS} for the SUPS problem on plain strings.
Additionally, we consider a variant of the SUPS problem where
a query interval is also given in a run-length encoded form.
For this variant of the problem,
we present two alternative algorithms with faster queries.
The first one answers queries in $O(\sqrt{\log\log m /\log\log\log m} + \alpha)$ time
and can be built in $O(m \log \sigma_{\rle{S}} + m \sqrt{\log m / \log\log m})$ time,
and the second one answers queries in $O(\log \log m + \alpha)$ time
and can be built in $O(m \log \sigma_{\rle{S}})$ time.
Both of these data structures require $O(m)$ space.
A part of the results presented here appeared in a preliminary version of this paper~\cite{WatanabeNIBT19}.
\section{Preliminaries}
\subsection{Strings}
Let $\Sigma$ be an ordered {\em alphabet} of size $\sigma$.
An element of $\Sigma^*$ is called a {\em string}.
The length of a string $S$ is denoted by $|S|$.
The empty string $\varepsilon$ is a string of length 0.
For a string $S = XYZ$, $X$, $Y$ and $Z$ are called
a \emph{prefix}, \emph{substring}, and \emph{suffix} of $S$, respectively.
The $i$-th character of a string $S$ is denoted by $S[i]$, for $1 \leq i \leq |S|$.
Let $S[i..j]$ denote the substring of $S$ that begins at position $i$ and ends at
position $j$, for $1 \leq i \leq j \leq |S|$.
For convenience, let $S[i..j] = \varepsilon$ for $i > j$.
For any string $S$, let $\rev{S} = S[|S|] \cdots S[1]$
denote the reversed string of $S$.
A string $P$ is called a \emph{palindrome} iff $P = \rev{P}$.
A substring $P = S[i..j]$ of a string $S$ is called a \emph{palindromic substring}
iff $P$ is a palindrome.
For a palindromic substring $P = S[i..j]$,
$\frac{i+j}{2}$ is called the \emph{center} of $P$.
A palindromic substring $P = S[i..j]$ is said to be a \emph{maximal palindrome} of $S$,
iff $S[i-1] \neq S[j+1]$, $i = 1$ or $j = |S|$.
A suffix of string $S$ that is a palindrome is called a \emph{suffix palindrome} of $S$.
Clearly any suffix palindrome of $S$ is a maximal palindrome of $S$.
We will use the following lemma in the analysis of our algorithm.
\begin{lemma}[\cite{DroubayJP01}] \label{lem:distinct_pal}
Any string of length $k$ can contain at most $k+1$
distinct palindromic substrings (including the empty string $\varepsilon$).
\end{lemma}
\subsection{MUPSs and SUPSs}
For any strings $X$ and $S$,
let $\mathit{occ}_S(X)$ denote the number of occurrences of $X$ in $S$,
i.e., $\mathit{occ}_S(X) = |\{i \mid S[i..i+|X|-1] = X\}|$.
A string $X$ is called a \emph{unique} substring of a string $S$
iff $\mathit{occ}_S(X) = 1$.
A substring $P = S[i..j]$ of string $S$
is called a \emph{minimal unique palindromic substring} (\emph{MUPS})
of a string $S$ iff
(1) $P$ is a unique palindromic substring of $S$ and
(2) either $|P| \geq 3$ and the palindrome $Q = S[i+1..j-1]$ satisfies
$\mathit{occ}_S(Q) \geq 2$, or $1 \leq |P| \leq 2$.
\begin{lemma}[\cite{SUPS}] \label{lem:MUPS_do_not_nest}
MUPSs do not nest, namely,
for any pair of distinct MUPSs,
one cannot contain the other.
\end{lemma}
Due to Lemma~\ref{lem:MUPS_do_not_nest},
both of the beginning positions and the ending positions of MUPSs
are monotonically increasing.
Let $\mathcal{M}_S$ denote the list of MUPSs in $S$
sorted in increasing order of their beginning positions
(or equivalently the ending positions) in $S$.
Let $[s,t]$ be an integer interval over the positions in a string $S$,
where $1 \leq s \leq t \leq |S|$.
A substring $P = S[i..j]$ of string $S$ is called
a \emph{shortest unique palindromic substring} ($\emph{SUPS}$)
for interval $[s,t]$ of $S$,
iff
(1) $P$ is a unique palindromic substring of $S$,
(2) $[s, t] \subseteq [i, j]$, and
(3) there is no unique palindromic substring $Q = S[i'..j']$
such that $[s, t] \subseteq [i', j']$ and $|Q| < |P|$.
We give an example in Fig.~\ref{fig:sups-mups}.
\begin{figure}
\caption{
This figure shows all $\mathit{MUPS}$s of an example string and the $\mathit{SUPS}$s for a query interval. % NOTE(review): the original caption text appears truncated here (unclosed math mode); restore the full caption from the source.
\label{fig:sups-mups}
\end{figure}
\subsection{Run length encoding (RLE)}
The {\em run-length encoding} $\rle{S}$ of string $S$ is
a compact representation of $S$ such that
each maximal run of the same characters in $S$ is represented
by a pair of the character and the length of the run.
More formally, let $\mathcal{N}$ denote the set of positive integers.
For any non-empty string $S$,
$\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$,
where $a_j \in \Sigma$ and $e_j \in \mathcal{N}$ for any $1 \leq j \leq m$,
and $a_j \neq a_{j+1}$ for any $1 \leq j < m$.
E.g., if $S = \mathtt{aacccccccbbabbbb}$,
then $\rle{S} = (\mathtt{a}, 2), (\mathtt{c}, 7), (\mathtt{b}, 2), (\mathtt{a}, 1), (\mathtt{b}, 4)$.
Each $(a, e)$ in $\rle{S}$ is called a (character) \emph{run},
and $e$ is called the exponent of this run.
We also denote each run by $a^{e}$
when it seems more convenient and intuitive.
For example, we would write as $(a, e)$ when it seems more convenient
to treat it as a kind of character (called an RLE-character),
and would write as $a^e$ when it seems more convenient
to treat it as a string consisting of $e$ $a$'s.
The \emph{size} of $\rle{S}$ is the number $m$ of runs in $\rle{S}$.
Let $\rlebp{j}$ (resp. $\rleep{j}$) denote the beginning (resp. ending) position
of the $j$th run in the string $S$,
i.e., $\rlebp{j} = 1+\sum_{i=0}^{j-1} e_i$ with $e_0 = 0$
and $\rleep{j} = \sum_{i=1}^{j}e_i$.
The \emph{center} of the $j$th run is $\frac{\rlebp{j}+\rleep{j}}{2}$.
For any two ordered pairs $(a, e), (a', e') \in \Sigma \times \mathcal{N}$
of a character and positive integer,
we define the equality such that $(a, e) = (a', e')$
iff $a = a'$ and $e = e'$ both hold.
We also define a total order of these pairs
such that $(a, e) < (a', e')$ iff $a < a'$,
or $a = a'$ and $e < e'$.
An occurrence of a palindromic substring
$P = S[i..i']$ of a string $S$ with $\rle{S}$ of size $m$ is
said to be \emph{RLE-bounded}
if $i = \rlebp{j}$ and $i' = \rleep{j'}$ for some $1 \leq j \leq j' \leq m$,
namely, if both ends of the occurrence
touch the boundaries of runs.
An RLE-bounded occurrence
$P = S[i..i']$ is said to be \emph{RLE-maximal}
if $(a_{j-1}, e_{j-1}) \neq (a_{j'+1}, e_{j'+1})$,
$j = 1$ or $j' = m$.
Note that an RLE-maximal occurrence
of a palindrome may not be maximal in the string $S$.
E.g., consider string $S = \mathtt{caabbcccbbaaaac}$ with $\rle{S} = \mathtt{c^1a^2b^2c^3b^2a^4c^1}$.
\begin{itemize}
\item The occurrence of palindrome $\mathtt{c^3}$ is RLE-bounded
but is neither RLE-maximal nor maximal.
\item The occurrence of palindrome $\mathtt{b^2c^3b^2}$ is RLE-maximal but is not maximal.
\item The occurrence of palindrome $\mathtt{a^2b^2c^3b^2a^2}$ is not RLE-maximal but is maximal.
\item The first (leftmost) occurrence of palindrome $\mathtt{a^2}$ is both RLE-maximal and maximal.
\end{itemize}
\subsection{Problem}
In what follows, we assume that our input strings are given as RLE strings.
In this paper, we tackle the following problem.
\begin{problem}[$\mathit{SUPS}$ problem on run-length encoded strings]
\label{prob:SUPS_RLE}
\leavevmode
\begin{description}
\item[Preprocess:] $\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$ of size $m$ representing a string $S$ of length $n$.
\item[Query:] An integer interval $[s, t]$~$(1 \leq s \leq t \leq n)$.
\item[Return:] All SUPSs for interval $[s, t]$.
\end{description}
\end{problem}
In case the string $S$ is given as a plain string of length $n$,
then the time complexity of our algorithm will be increased by
an additive factor of $n$ that is needed to compute $\rle{S}$,
while the space usage will stay the same since $\rle{S}$ can be computed in
constant space.
\section{Computing MUPSs from RLE strings}
\label{sec:MUPS}
The following known lemma suggests that
it is helpful to compute the set $\mathcal{M}_S$ of MUPSs of $S$
as a preprocessing for the SUPS problem.
\begin{lemma}[\cite{SUPS}] \label{lem:unimups_in_sups}
For any SUPS $S[i..j]$ for some interval,
there exists exactly one MUPS that is contained in the interval $[i,j]$.
Furthermore, the MUPS has the same center as the SUPS $S[i..j]$.
\end{lemma}
\subsection{Properties of MUPSs on RLE strings}
Now we present some useful properties of MUPSs
on the run-length encoded string $\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$.
\begin{lemma} \label{lem:mups_center}
For any MUPS $S[i..j]$ in $S$,
there exists a unique integer $k$~($1 \leq k \leq m$)
such that $\frac{i+j}{2} = \frac{\rlebp{k}+\rleep{k}}{2}$.
\end{lemma}
\begin{proof}
Suppose on the contrary that there is a MUPS $S[i..j]$
such that $\frac{i+j}{2} \neq \frac{\rlebp{k}+\rleep{k}}{2}$ for any $1 \leq k \leq m$.
Let $l$ be the integer that satisfies $\rlebp{l} \leq \frac{i+j}{2} \leq \rleep{l}$.
By the assumption, the longest palindrome
whose center is $\frac{i+j}{2}$ is $a_l^{\min\{i-\rlebp{l}+1,\rleep{l}-j+1\}}$.
However, this palindrome $a_l^{\min\{i-\rlebp{l}+1,\rleep{l}-j+1\}}$ occurs
at least twice in the $l$th run $a_l^{e_l}$.
Hence MUPS $S[i..j]$ is not a unique palindromic substring, a contradiction.
\end{proof}
The following corollary is immediate from Lemma~\ref{lem:mups_center}.
\begin{corollary} \label{coro:num-of-mupss}
For any string $S$, $|\mathcal{M}_S| \leq m$.
\end{corollary}
It is easy to see that the above bound is tight:
for instance, any string where each run has a distinct character
(i.e., $m = \sigma_{\rle{S}}$) contains exactly $m$ MUPSs.
Our preprocessing and query algorithms which will follow
are heavily dependent on this lemma and corollary.
\subsection{RLE version of Manacher's algorithm} \label{subsec:manacher}
Due to Corollary~\ref{coro:num-of-mupss},
we can restrict ourselves to computing palindromic substrings
whose center coincides with the center of each run.
These palindromic substrings are called \emph{run-centered} palindromes.
Run-centered palindromes will be candidates of MUPSs of the string $S$.
To compute run-centered palindromes from $\rle{S}$,
we utilize Manacher's algorithm~\cite{Manacher75}
that computes
all maximal palindromes for a given (plain) string of length $n$
in $O(n)$ time and space.
Manacher's algorithm is based only on character equality comparisons,
and hence it works with general alphabets.
Let us briefly recall how Manacher's algorithm works.
It processes a given string $S$ of length $n$ from left to right.
It computes an array $\mathbf{MaxPal}$ of length $2n-1$ such that
$\mathbf{MaxPal}{}[c]$ stores
the length of the maximal palindrome with center $c$
for $c = 1, 1.5, 2, \ldots, n-1, n-0.5, n$.
Note that Manacher's algorithm processes the string $S$ in an online manner
from left to right.
This algorithm is also able to compute, for each position $i = 1, \ldots, n$,
the longest palindromic suffix of $S[1..i]$ in an online manner.
Now we apply Manacher's algorithm to our run-length encoded input string
$\rle{S} = (a_1, e_1), \ldots,$ $(a_m, e_m)$.
Then, what we obtain after the execution of Manacher's algorithm
over $\rle{S}$ is all \emph{RLE-maximal} palindromes of $S$.
Note that by definition all RLE-maximal palindromes are run-centered.
Since $\rle{S}$ can be regarded as a string of length $m$
over an alphabet $\Sigma \times \mathcal{N}$,
this takes $O(m)$ time and space.
\begin{remark} \label{rmk:maximal_extension}
If wanted, we can compute all \emph{maximal} palindromes
of $S$ in $O(m)$ time after the execution of Manacher's algorithm to $\rle{S}$.
First, we compute every run-centered
maximal palindrome
$P_l$ that has its center in each $l$th run in $\rle{S}$.
For each already computed run-centered RLE-maximal palindrome
$Q_l = S[\rlebp{i}..\rleep{j}]$ with $1 < i \leq j < m$,
it is enough to first check whether $a_{i-1} = a_{j+1}$.
If no, then $P_l = Q_l$,
and if yes then we can further extend both
ends of $Q_l$ with $(a_{i-1}, \min\{e_{i-1}, e_{j+1}\})$
and obtain $P_l$.
As a side remark,
we note that any other maximal palindromes of $S$ are not run-centered,
which means that any of them consists only of the same characters
and lies inside of one character run.
Such maximal palindromes are trivial and need not be explicitly computed.
\end{remark}
\subsection{RLE version of eertree data structure}
The \emph{eertree}~\cite{EERTREE} of a string $S$,
denoted $\mathsf{eertree}(S)$,
is a pair of two rooted trees $\mathsf{T}_{\mathrm{odd}}$ and $\mathsf{T}_\mathrm{even}$
which represent all distinct palindromic substrings of $S$.
The root of $\mathsf{T}_{\mathrm{odd}}$ represents the empty string $\varepsilon$
and each non-root node of $\mathsf{T}_{\mathrm{odd}}$
represents a non-empty palindromic substring of $S$ of odd length.
Similarly, the root of $\mathsf{T}_\mathrm{even}$ represents
the empty string $\varepsilon$ and each non-root node of
$\mathsf{T}_\mathrm{even}$ represents a non-empty palindromic substring of $S$ of even length.
From the root $r$ of $\mathsf{T}_{\mathrm{odd}}$,
there is a labeled directed edge $(r, a, v)$
if $v$ represents a single character $a \in \Sigma$.
For any non-root node $u$ of $\mathsf{T}_{\mathrm{odd}}$ or $\mathsf{T}_\mathrm{even}$,
there is a labeled directed edge $(u, a, v)$ from $u$ to node $v$
with character label $a \in \Sigma$ if $aua = v$.
For any node $u$,
the labels of out-going edges of $u$ must be mutually distinct.
By Lemma~\ref{lem:distinct_pal},
any string $S$ of length $n$ can contain at most $n+1$
distinct palindromic substrings (including the empty string $\varepsilon$).
Thus, the size of $\mathsf{eertree}(S)$ is linear in the string length $n$.
Rubinchik and Shur~\cite{EERTREE} showed how to construct
$\mathsf{eertree}(S)$ in $O(n \log \sigma_S)$ time and $O(n)$ space,
where $\sigma_S$ is the number of distinct characters in $S$.
They also showed how to compute the number of occurrences of
each palindromic substring in $O(n \log \sigma_S)$ time and $O(n)$ space,
using $\mathsf{eertree}(S)$.
Now we introduce a new data structure named \emph{RLE-eertrees} based on eertrees.
Let $\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$,
and let $\Sigma_{\mathit{RLE}}$ be the set of maximal runs of $S$,
namely, $\Sigma_{\mathit{RLE}} = \{(a, e) \mid (a, e) = (a_i, e_i) \mbox{ for some } 1 \leq i \leq m\}$.
Let $\sigma_{\mathit{RLE}} = |\Sigma_{\mathit{RLE}}|$.
Note that $\sigma_{\mathit{RLE}} \leq m$ always holds.
The RLE-eertree of string $S$, denoted by $\mathsf{e^2rtre^2}(S)$,
is a \emph{single} eertree $\mathsf{T}_{\mathrm{odd}}$
\emph{over the RLE alphabet $\Sigma_{\mathit{RLE}} \subset \Sigma \times \mathcal{N}$},
which represents distinct run-centered palindromes of $S$ which have
an RLE-bounded occurrence $[i,i']$ such that $i = \rlebp{j}$ and $i' = \rleep{j'}$
for some $1 \leq j \leq j' \leq m$
(namely, the both ends of the occurrence touch the boundary of runs),
or an occurrence as a maximal palindrome in $S$.
We remark that the number of \emph{runs} in each palindrome in $\mathsf{e^2rtre^2}(S)$ is odd,
but their decompressed string length may be odd or even.
In $\mathsf{e^2rtre^2}(S)$,
there is a directed labeled edge $(u, a^e, v)$ from node $u$ to node $v$
with label $a^e \in \Sigma_{RLE}$
if (1) $a^eua^e = v$,
or (2) $u = \varepsilon$ and $v = a^e \in \Sigma \times \mathcal{N}$.
Note that if the in-coming edge of a node $u$ is labeled with $a^e$,
then any out-going edge of $u$ cannot have a label $a^f$
with the same character $a$.
Since $\mathsf{e^2rtre^2}(S)$ is an eertree over the alphabet $\Sigma_{RLE}$ of size $\sigma_{\mathit{RLE}} \leq m$,
it is clear that the number of out-going edges of each node is bounded by $\sigma_{\mathit{RLE}}$.
We give an example of $\mathsf{e^2rtre^2}(S)$ in Fig.~\ref{fig:rletree}.
\begin{figure}
\caption{
The RLE-eertree $\mathsf{e^2rtre^2}
\label{fig:rletree}
\end{figure}
\begin{lemma}
Let $S$ be any string of which the size of $\rle{S}$ is $m$.
Then, the number of nodes in $\mathsf{e^2rtre^2}(S)$ is at most $2m+1$.
\end{lemma}
\begin{proof}
First, we consider $\rle{S}$ as a string of length $m$ over
the alphabet $\Sigma_{RLE}$.
It now follows from Lemma~\ref{lem:distinct_pal}
that the number of non-empty distinct run-centered palindromic substrings of $S$
that have an RLE-bounded occurrence is at most $m$.
Each of these palindromic substrings is represented by
a node of $\mathsf{e^2rtre^2}(S)$,
and let $\mathsf{e^2rtre^2}(S)'$ denote the tree
consisting only of these nodes
(in the example of Fig.~\ref{fig:rletree},
$\mathsf{e^2rtre^2}(S)'$ is the tree consisting only of the white nodes).
Now we count the number of nodes in $\mathsf{e^2rtre^2}(S)$
that do not belong to $\mathsf{e^2rtre^2}(S)'$
(the gray nodes in the running example of Fig.~\ref{fig:rletree}).
Since each palindrome represented by this type of node
has a run-centered maximal occurrence in $S$,
the number of such palindromes is bounded by the number $m$ of runs in $\rle{S}$.
Hence, including the root that represents the empty string,
there are at most $2m+1$ nodes in $\mathsf{e^2rtre^2}(S)$.
\end{proof}
\begin{lemma} \label{lem:rletree_constrction}
Given $\rle{S}$ of size $m$,
$\mathsf{e^2rtre^2}(S)$ can be built in $O(m \log \sigma_{\mathit{RLE}})$ time and $O(m)$ space,
where the out-going edges of each node are sorted according to
the total order of their labels.
Also, in the resulting $\mathsf{e^2rtre^2}(S)$,
each non-root node $u$ stores the number of
occurrences of $u$ in $S$ which are RLE-bounded or maximal.
\end{lemma}
\begin{proof}
Our construction algorithm comprises three steps.
We firstly construct $\mathsf{e^2rtre^2}(S)'$,
secondly compute an auxiliary array $\mathit{CPal}$ that will be used for the next step,
and thirdly we add some nodes
that represent run-centered maximal palindromes which are not in $\mathsf{e^2rtre^2}(S)'$
so that the resulting tree forms the final structure $\mathsf{e^2rtre^2}(S)$.
Rubinchik and Shur~\cite{EERTREE} proposed an online algorithm
which constructs $\mathsf{eertree}(T)$ of a string of length $k$ in
$O(k \log \sigma_T)$ time with $O(k)$ space,
where $\sigma_T$ denotes the number of distinct characters in $T$.
They also showed how to store, in each node,
the number of occurrences of the corresponding palindromic substring in $T$.
Thus, the Rubinchik and Shur algorithm applied to
$\rle{S}$ computes $\mathsf{e^2rtre^2}(S)'$ in $O(m \log \sigma_{\mathit{RLE}})$ time with $O(m)$ space.
Also, now each node $u$ of $\mathsf{e^2rtre^2}(S)'$ stores the number of
\emph{RLE-bounded} occurrence of $u$ in $S$.
This is the first step.
The second step is as follows:
Let $\mathit{CPal}$ be an array of length $m$ such that,
for each $1 \leq i \leq m$,
$\mathit{CPal}[i]$ stores a pointer to the node in $\mathsf{e^2rtre^2}(S)'$
that represents the RLE-bounded palindrome centered at $i$.
A simple application of the Rubinchik and Shur algorithm to
$\rle{S}$ only gives us the leftmost occurrence of each RLE-bounded palindrome
in $\mathsf{e^2rtre^2}(S)'$.
Hence, we only know the values of $\mathit{CPal}$ in the positions
that are the centers of the leftmost occurrences of RLE-bounded palindromes.
To compute the other values in $\mathit{CPal}$,
we run Manacher's algorithm to $\rle{S}$ as in Section~\ref{subsec:manacher}.
Since Manacher's algorithm processes $\rle{S}$ in an online manner from left to right,
and since we already know the leftmost occurrences of all RLE-bounded palindromes
in $\rle{S}$, we can copy the pointers from previous occurrences.
In case where an RLE-bounded palindrome extends with a newly read RLE-character $(a, e)$
after it is copied from a previous occurrence during the execution of Manacher's algorithm,
then we traverse the edge labeled $(a, e)$ from the current node of $\mathsf{e^2rtre^2}(S)'$.
By repeating this until the mismatch is found
in extension of the current RLE-bounded palindrome,
we can find the corresponding node for this RLE-bounded palindrome.
This way we can compute $\mathit{CPal}[i]$ for all $1 \leq i \leq m$ in $O(m \log \sigma_{\mathit{RLE}})$ total
time with $O(m)$ total space.
In the third step, we add new nodes that represent
run-centered maximal (but not RLE-bounded) palindromic substrings.
For this sake, we again apply Manacher's algorithm to $\rle{S}$,
but in this case it is done as in Remark~\ref{rmk:maximal_extension} of Section~\ref{subsec:manacher}.
With the help of $\mathit{CPal}$ array,
we can associate each run $(a_l, e_l)$ with
the RLE-bounded palindromic substring that has the center in $(a_l, e_l)$.
Let $Q_l = S[\rlebp{i}..\rleep{j}]$ denote this palindromic substring for $(a_l, e_l)$,
where $1 \leq i \leq l \leq j \leq m$,
and $u_l$ the node that represents $Q_l$ in $\mathsf{e^2rtre^2}(S)'$.
We first check whether $a_{i-1} = a_{j+1}$.
If no, then $Q_l$ does not extend from this run $(a_l, e_l)$,
and if yes then we extend both ends of $Q_l$ with $(a_{i-1}, \min\{e_{i-1}, e_{j+1}\})$.
Assume w.l.o.g. that $e_{i-1} = \min\{e_{i-1}, e_{j+1}\}$.
If there is no out-going edge of $u_l$ with label $(a_{i-1}, e_{i-1})$,
then we create a new child of $u_l$ with an edge labeled $(a_{i-1}, e_{i-1})$.
Otherwise, then let $v$ be the existing child of $u_l$
that represents $a_{i-1}^{e_{i-1}} u_l a_{i-1}^{e_{i-1}}$.
We increase the number of occurrences of $v$ by 1.
This way, we can add all new nodes
and we obtain $\mathsf{e^2rtre^2}(S)$.
Note that each node stores the number of RLE-bounded or maximal occurrences
of the corresponding run-centered palindromic substring.
It is easy to see that the second step takes a total of $O(m \log \sigma_{\mathit{RLE}})$
time and $O(m)$ space.
\end{proof}
It is clear that for any character $a \in \Sigma$,
there can be only one MUPS of form $a^e$.
Namely, $a^e$ is a MUPS iff $e$ is the largest exponent
for all runs of $a$'s in $S$ and $\mathit{occ}_S(a^e) = 1$.
Below, we consider other forms of MUPSs.
Let $P$ be a non-empty palindromic substring of string $S$
that has a run-centered RLE-bounded occurrence.
For any character $a \in \Sigma$,
let $\mathsf{emax}$ and $\mathsf{esec}$ denote the largest and second largest
positive integers such that
$a^{\mathsf{emax}}Pa^{\mathsf{emax}}$ and $a^{\mathsf{esec}}Pa^{\mathsf{esec}}$ are
palindromes that have
run-centered RLE-bounded or maximal occurrences in $S$.
If such integers do not exist, then let $\mathsf{emax} = \mathsf{nil}$ and $\mathsf{esec} = \mathsf{nil}$.
\begin{observation} \label{obs:MUPSonRLE}
There is at most one MUPS of form $a^e P a^e$ in $S$. Namely,
\begin{enumerate}
\item[(1)] The palindrome $a^{\mathsf{esec}+1}Pa^{\mathsf{esec}+1}$ is a MUPS of $S$
iff $\mathsf{emax} \neq \mathsf{nil}$, $\mathsf{esec} \neq \mathsf{nil}$, and $\mathit{occ}_S(a^{\mathsf{emax}}Pa^{\mathsf{emax}})$ $= 1$.
\item[(2)] The palindrome $a^{1}Pa^{1}$ is a MUPS of $S$
iff $\mathsf{emax} \neq \mathsf{nil}$, $\mathsf{esec} = \mathsf{nil}$, and $\mathit{occ}_S(a^{\mathsf{emax}}Pa^{\mathsf{emax}}) = 1$.
\item[(3)] There is no MUPS of form $a^e P a^e$ with any $e \geq 1$
iff either $\mathsf{emax} = \mathsf{nil}$, or $\mathsf{emax} \neq \mathsf{nil}$ and $\mathit{occ}_S(a^{\mathsf{emax}}Pa^{\mathsf{emax}}) > 1$.
\end{enumerate}
\end{observation}
\begin{lemma}
$\mathcal{M}_S$ can be computed in $O(m \log \sigma_{\mathit{RLE}})$ time and $O(m)$ space.
\end{lemma}
\begin{proof}
For each node $u$ of $\mathsf{e^2rtre^2}(S)$,
let $\Sigma_u$ be the set of characters $a$
such that there is an out-going edge of $u$ labeled by $(a, e)$
with some positive integer $e$.
Due to Observation~\ref{obs:MUPSonRLE},
for each character in $\Sigma_u$,
it is enough to check the out-going edges
which have the largest and second largest exponents with character $a$.
Since the edges are sorted,
we can find all children of $u$ that represent MUPSs in
time linear in the number of children of $u$.
Hence, given $\mathsf{e^2rtre^2}(S)$,
it takes $O(m)$ total time to compute all MUPSs in $S$.
$\mathsf{e^2rtre^2}(S)$ can be computed in $O(m \log \sigma_{\mathit{RLE}})$ time and $O(m)$ space
by Lemma~\ref{lem:rletree_constrction}.
What remains is how to sort the MUPSs in increasing order of their beginning positions.
We associate each MUPS with the run where its center lies.
Since each MUPS occurs in $S$ exactly once
and MUPSs do not nest (Lemma~\ref{lem:MUPS_do_not_nest}),
each run cannot contain the centers of two or more MUPSs.
We compute an array $A$ of size $m$ such that
$A[j]$ contains the corresponding interval of the MUPS
whose center lies in the $j$th run in $\rle{S}$, if it exists.
After computing $A$,
we scan $A$ from left to right.
Since again MUPSs do not nest,
this gives us the sorted list $\mathcal{M}_S$ of MUPSs.
It is clear that this takes a total of $O(m)$ time and space.
\end{proof}
\section{SUPS queries on RLE strings}
\label{sec:query_algorithm}
In this section, we present our algorithm for SUPS queries.
Our algorithm is based on Inoue et al.'s algorithm~\cite{SUPS}
for SUPS queries on a plain string.
The big difference is that
the space that we are allowed for is limited to $O(m)$.
\subsection{Data structures}\label{data_structure}
As was discussed in Section~\ref{sec:MUPS}, we can compute the list
$\mathcal{M}_S$ of all MUPSs of string $S$ efficiently. We store $\mathcal{M}_S$ using
the three following arrays:
\begin{itemize}
\item $\mathit{MUPS}beg[i]$ : the beginning position of the $i$th MUPS in $\mathcal{M}_S$.
\item $\mathit{MUPS}end[i]$ : the ending position of the $i$th MUPS in $\mathcal{M}_S$.
\item $\mathit{MUPS}len[i]$ : the length of the $i$th MUPS in $\mathcal{M}_S$.
\end{itemize}
Since the number of MUPSs in $\mathcal{M}_S$ is at most $m$
(Corollary~\ref{coro:num-of-mupss}),
the length of each array is at most $m$.
In our algorithm, we use \emph{range minimum queries} and
\emph{predecessor/successor queries} on integer arrays.
Let $A$ be an integer array of length $d$.
A range minimum query $\rmq{A}{i}{j}$ returns one of $\arg \min_{i \leq k \leq j}\{A[k]\}$
for a given interval $[i, j]$ in $A$.
\begin{lemma}[e.g.~\cite{rmqspace}]
We can construct an $O(d)$-space data structure in $O(d)$ time for an integer array $A$ of length $d$
which can answer $\rmq{A}{i}{j}$ in constant time for any query $[i, j]$.
\end{lemma}
Let $B$ be an array of $d$ positive integers in $[1, N]$ in increasing order.
The predecessor and successor queries on $B$ are defined for any $1 \leq k \leq N$ as follows.
\begin{eqnarray*}
\Pred{B}{k} & = &
\begin{cases}
\max\{ i \mid B[i] \leq k\} \quad & \mbox{if it exists,}\\
0 \quad & \mbox{otherwise.}
\end{cases} \\
\Succ{B}{k} & = &
\begin{cases}
\min\{ i \mid B[i] \geq k\} \quad & \mbox{if it exists,}\\
N+1\quad & \mbox{otherwise.}
\end{cases}
\end{eqnarray*}
\begin{lemma}[\cite{Beame200238}]\label{lem:pred_succ_data_structure}
We can construct, in $O(d \sqrt{\log d / \log\log d})$ time,
an $O(d)$-space data structure
for an array $B$ of $d$ positive integers in $[1, N]$ in increasing order
which can answer $\Pred{B}{k}$ and $\Succ{B}{k}$ in $O(\sqrt{\log d / \log\log d})$ time for any query $k \in [1, N]$.
\end{lemma}
\subsection{Query algorithm}\label{query_algorithm}
Our algorithm simulates the query algorithm for a plain string~\cite{SUPS}
with $O(m)$-space data structures.
We summarize our algorithm below.
Let $[s, t]$ be a query interval such that $1 \leq s \leq t \leq n$.
Firstly, we compute the number of MUPSs contained in $[s, t]$.
This operation can be done in $O(\sqrt{\log m / \log\log m})$ time
by using $\Succ{\mathit{MUPS}beg}{s}$ and $\Pred{\mathit{MUPS}end}{t}$.
Let $\mathit{num}$ be the number of MUPSs contained in $[s, t]$.
If $\mathit{num} \geq 2$, then there is no SUPS for this interval (Corollary~{1} of \cite{SUPS}).
Suppose that $\mathit{num} = 1$.
Let $S[i..j]$ be the MUPS contained in $[s, t]$.
If $S[i-z..j+z]$ is a palindromic substring,
then $S[i-z..j+z]$ is the only $\mathit{SUPS}$ for $[s, t]$ where $z = \max \{ i-s, t-j \}$.
Otherwise, there is no $\mathit{SUPS}$ for $[s, t]$ (Lemma~{6} of \cite{SUPS}).
Since this candidate has a run as the center,
we can check whether $S[i-z..j+z]$ is a palindromic substring or not in constant time
after computing all run-centered maximal palindromes.
Suppose that $\mathit{num} = 0$ (this case is based on Lemma~{7} of \cite{SUPS}).
Let $p = \Pred{\mathit{MUPS}end}{t}, q = \Succ{\mathit{MUPS}beg}{s}$.
We can check whether each of $S[\mathit{MUPS}beg[p]-t+\mathit{MUPS}end[p]..t]$
and $S[s..\mathit{MUPS}end[q]+\mathit{MUPS}beg[q]-s]$ is a palindrome or not.
If so, the shorter one is a candidate of $\mathit{SUPS}s$.
Let $\ell$ be the length of the candidates.
Other candidates are the shortest $\mathit{MUPS}s$ which contain the query interval $[s, t]$.
If the length of these candidates is less than or equal to $\ell$,
we need to compute these candidates as $\mathit{SUPS}s$.
We can compute these $\mathit{MUPS}s$ by using range minimum queries on $\mathit{MUPS}len[p+1, q-1]$.
Thus, we can compute all $\mathit{SUPS}s$ in linear time w.r.t. the number of outputs (see \cite{SUPS} for more detail).
We conclude with the main theorem of this paper.
\begin{theorem}
Given $\rle{S}$ of size $m$ for a string $S$,
we can compute a data structure of $O(m)$ space in
$O(m(\log \sigma_{\mathit{RLE}} + \sqrt{\log m / \log\log m}))$ time
so that subsequent SUPS queries can be answered
in $O(\alpha+\sqrt{\log m / \log\log m})$ time,
where $\sigma_{\mathit{RLE}}$ denotes the number of distinct
RLE-characters in $\rle{S}$ and $\alpha$ the number of SUPSs to report.
\end{theorem}
\section{Faster algorithms for a variant of the SUPS problem}
In this section, we consider a variant of Problem~\ref{prob:SUPS_RLE}
where each query interval $[s, t]$ is given as a tuple representing
the left-end run $s_r$ that contains $s$,
the local position $s_p$ in the left-end run $s_r$ corresponding to $s$,
the right-end run $t_r$ that contains $t$,
and the local position $t_p$ in the right-end run $t_r$ corresponding to $t$.
Intuitively, queries are also run-length encoded in this variant of the problem.
Formally, this variant of the problem is defined as follows:
\begin{problem}[$\mathit{SUPS}$ problem on run-length encoded strings with run-length encoded queries]
\label{prob:SUPS_RLE_faster}
\leavevmode
\begin{description}
\item[Preprocess:] $\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$ of size $m$ representing a string $S$ of length $n$.
\item[Query:] Tuple $(s_r, s_p, t_r, t_p)$ representing
query interval $[s, t]$~ $(1 \leq s \leq t \leq n)$,
such that
$s = \rlebp{s_r} + s_p - 1$,
$t = \rlebp{t_r} + t_p - 1$,
$1 \leq s_r \leq t_r \leq m$,
$1 \leq s_p \leq e_{s_r}$, and
$1 \leq t_p \leq e_{t_r}$.
\item[Return:] All SUPSs for interval $[s, t]$.
\end{description}
\end{problem}
In this section, we present two alternative algorithms for Problem~\ref{prob:SUPS_RLE_faster}.
\subsection{Further combinatorial properties on MUPSs}
The key to our algorithms is a combinatorial property of
maximal palindromes in RLE strings.
To show this property, we utilize the following result.
\begin{lemma}[\cite{ApostolicoBG95,MATSUBARA2009900}]
\label{lem:suf_pal_arithmetic_progression}
Let $S$ be any string of length $n$.
The set of suffix palindromes of $S$
can be partitioned into $O(\log n)$ disjoint groups,
such that the suffix palindromes in the same group
have the same shortest period.
Namely, each group can be represented by a single arithmetic progression
$\langle s, d, t \rangle$,
such that $s$ is the length of the shortest suffix palindrome
in the group, $t$ is the number of suffix palindromes in the group,
and $d$ is the common difference (i.e. the shortest period).
\end{lemma}
When there is only one element in a group (i.e. $t = 1$),
then we set $d = 0$.
By applying Lemma~\ref{lem:suf_pal_arithmetic_progression}
to $\rle{S} = (a_1, e_1) \cdots (a_m, e_m)$ where
each RLE factor $(a_i, e_i)$ is regarded as a single character,
we immediately obtain the following corollary:
\begin{corollary} \label{coro:rle_suf_pal_arithmetic_progression}
Let $S$ be any string and let $m$ be the size of $\rle{S}$.
The set of RLE-bounded suffix palindromes of $\rle{S} = (a_1, e_1) \cdots (a_m, e_m)$
can be partitioned into $O(\log m)$ disjoint groups,
such that the RLE-bounded suffix palindromes in the same group
have the same shortest period.
Namely, each group can be represented by a single arithmetic progression
$\langle s', d', t' \rangle$,
such that $s'$ is the number of runs in the shortest RLE-bounded suffix palindrome
in the group, $t'$ is the number of RLE-bounded suffix palindromes in the group,
and $d'$ is the common difference (i.e. the shortest period).
\end{corollary}
In the sequel,
we will store arithmetic progressions representing
the RLE-bounded suffix palindromes of \emph{all}
RLE-bounded prefixes $\rle{S}[1..i] = (a_1, e_1),$ $\ldots, (a_i, e_i)$ of $\rle{S}$.
It may seem that it takes $O(m \log m)$ total space
due to Corollary~\ref{coro:rle_suf_pal_arithmetic_progression}.
However, since each RLE-bounded suffix palindrome of $\rle{S}[1..i]$
is a \emph{maximal} palindrome of string
$\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$ where each run $(a_i, e_i)$ is
regarded as a single character,
and since there are only $2m-1$ such maximal palindromes in
$\rle{S} = (a_1, e_1), \ldots, (a_m, e_m)$,
the total space requirement for storing all arithmetic progressions
is $O(m)$.
The next lemma is a key to our algorithms.
\begin{lemma} \label{lem:number_of_mups_in_a_run}
Let $S$ be any string and let $\rle{S} = a_1^{e_1} \cdots a_m^{e_m}$.
For any $1 \leq i \leq m$,
the number of MUPSs of $S$ that end in the $i$th run $a_i^{e_i}$ of $\rle{S}$,
namely in the position interval $[\rlebp{i}, \rleep{i}]$ in $S$, is $O(\log m)$.
\end{lemma}
\begin{proof}
It is possible that the $i$th run $a_i^{e_i}$ itself is a MUPS.
To consider other MUPSs ending in the $i$th run,
we consider MUPSs of $S$ that begin \emph{before} the $i$th run $a_i^{e_i}$
and end in $a_i^{e_i}$,
namely, those MUPSs that begin in position range $[1..\rleep{i-1}]$
and end in $[\rlebp{i}..\rleep{i}]$.
We observe that
any \emph{palindromic substring} in $S$ that begins before the $i$th run $a_i^{e_i}$
and ends in the $i$th run $a_i^{e_i}$
can be obtained by extending some \emph{suffix palindrome} of
$S[1..\rleep{i-1}]$
to the left and to the right within $S$.
We remark that the extension may not terminate at RLE boundaries
and can terminate within runs.
For any $1 < i \leq m$,
let $\mathbf{RBSP}_{i-1}$ be the set of RLE-bounded suffix palindromes
of $\rle{S}[1..i-1] = (a_1, e_1), \ldots, (a_{i-1}, e_{i-1})$
whose beginning positions coincide with the beginning positions
of some runs in $\rle{S}[1..i-1]$.
Now it follows from Corollary~\ref{coro:rle_suf_pal_arithmetic_progression} that
the lengths of the suffix palindromes in $\mathbf{RBSP}_{i-1}$ can be represented by $O(\log m)$
arithmetic progressions.
Let $\langle s', d', t' \rangle$ be a single arithmetic progression
representing a group of suffix palindromes in $\mathbf{RBSP}_{i-1}$.
In what follows, we show that for each $\langle s', d', t' \rangle$,
the number of MUPSs ending in the $i$th run $a_i^{e_i}$
that can be obtained by extending elements of $\langle s', d', t' \rangle$ is at most two.
The case where $t' \leq 2$ is trivial,
and hence let us consider the case where $t' \geq 3$.
We consider the following sub-cases:
\begin{enumerate}
\item When $a_{i - (s' + (t' - 1)d') - 1} = a_{i}$,
then we can obtain a palindrome that ends in the $i$th run $a_i^{e_i}$
by extending the longest palindrome $P$
belonging to $\langle s', d', t' \rangle$.
This extended palindrome can be a MUPS of $S$ that ends in the $i$th run.
We note that there is a unique positive integer $\ell$
such that $a_i^\ell P a_i^\ell$ is a MUPS in $S$.
\item When $a_{i - s' - 1} = a_{i}$,
then for any $2 \leq j \leq t'$, we consider the palindrome that ends in
the $i$th run $a_i^{e_i}$ by maximally extending the $j$th longest palindrome
$S[\rlebp{i - (s' + (t' - j)d')}..\rleep{i - 1}]$ belonging
to $\langle s', d', t' \rangle$.
The length of this extension is $\min\{e_{i - s' - 1}, e_i\}$
to either side.
Now, for any $3 \leq k\leq t'$, we have that
\begin{align*}
& S[\rlebp{i \! - \! (s' \! + \! (t' \! - \! k)d')} \! - \! \min\{e_{i - s' - 1}, e_i\}
..\rleep{i \! - \! 1} + \min\{e_{i - s' - 1}, e_i\}] = \\
& S[\rlebp{i \! - \! (s' \! + \! (t' \! - \! (k-1))d')} \! - \! \min\{e_{i - s' - 1}, e_i\}
..\rleep{i \! - \! d' \! - \! 1} \! + \! \min\{e_{i - s' - 1}, e_i\}].
\end{align*}
This implies that for any $3 \leq k\leq t'$
any palindrome that is obtained by extending
the $k$th longest palindrome corresponding to $\langle s', d', t' \rangle$
and ending in the $i$th run $a_i^{e_i}$,
occurs at least twice in $S$.
Thus, the elements of $\langle s', d', t' \rangle$ except for
the longest one and the second longest one do not yield MUPSs
ending in the $i$th run $a_i^{e_i}$.
\end{enumerate}
Consequently, for each single group $\langle s', d', t' \rangle$,
there are at most two MUPSs ending in the $i$th run $a_i^{e_i}$
that can be obtained by extending palindromes belonging to $\langle s', d', t' \rangle$.
It follows from Corollary~\ref{coro:rle_suf_pal_arithmetic_progression}
that there are $O(\log i)$ groups for each $i$.
Thus, the number of MUPSs that end in the $i$th run is bounded by $O(\log m)$
for any $1 \leq i \leq m$.
\end{proof}
See Fig.~\ref{fig:lemma11-2}, \ref{fig:lemma11-3}, and \ref{fig:lemma11-4}
for concrete examples for the proof of Lemma~\ref{lem:number_of_mups_in_a_run}.
\begin{figure}
\caption{
An example for the first case ($a_{i - (s' + (t' - 1)d') - 1}
\label{fig:lemma11-2}
\end{figure}
\begin{figure}
\caption{
An example for the second case ($a_{i - s' - 1}
\label{fig:lemma11-3}
\end{figure}
\begin{figure}
\caption{
An example where
both the first case ($a_{i - (s' + (t' - 1)d') - 1}
\label{fig:lemma11-4}
\end{figure}
\subsection{Query algorithm}
Here we present our algorithms for answering queries of Problem~\ref{prob:SUPS_RLE_faster}.
While we inherit the basic concepts of our query algorithm for the original problem (Problem~\ref{prob:SUPS_RLE}),
we here use slightly different data structures.
\subsubsection{Preprocessing}
As was done in Section~\ref{sec:query_algorithm},
we store the set $\mathcal{M}_S$ of MUPSs of $S$ in the three following arrays.
We store $\mathcal{M}_S$ using the three following arrays:
\begin{itemize}
\item $\mathit{MUPS}beg[i]$ : the beginning position of the $i$th MUPS in $\mathcal{M}_S$.
\item $\mathit{MUPS}end[i]$ : the ending position of the $i$th MUPS in $\mathcal{M}_S$.
\item $\mathit{MUPS}len[i]$ : the length of the $i$th MUPS in $\mathcal{M}_S$.
\end{itemize}
Since the number of MUPSs in $\mathcal{M}_S$ is at most $m$ (Corollary~\ref{coro:num-of-mupss}),
the length of each array is at most $m$.
Additionally, we build the two following arrays of size exactly $m$ each,
such that for each $1 \leq i \leq m$:
\begin{itemize}
\item $\mathit{mrb}[i]$ stores
a sorted list of the beginning positions of MUPSs that begin in the $i$th run
(i.e. in the position interval $[\rlebp{i}..\rleep{i}]$),
arranged in increasing order.
\item $\mathit{mre}[i]$ stores
a sorted list of the ending positions of MUPSs that end in the $i$th run
(i.e. in the position interval $[\rlebp{i}..\rleep{i}]$),
arranged in increasing order.
\end{itemize}
We can easily precompute $\mathit{mrb}$ (resp. $\mathit{mre}$) in $O(m)$ time
by a simple scan over $\mathit{MUPS}beg$ (resp. $\mathit{MUPS}end$).
Given a query input $(s_r, s_p, t_r, t_p)$ for Problem~\ref{prob:SUPS_RLE_faster},
we can retrieve the corresponding query interval $[s, t]$ over the string $S$
by
\begin{itemize}
\item $s = \rlebp{s_r} + s_p - 1$,
\item $t = \rlebp{t_r} + t_p - 1$.
\end{itemize}
Thus, provided that $\rlebp{i}$ are already computed for all $1 \leq i \leq m$,
we can retrieve the query interval $[s, t]$ in $O(1)$ time.
We can easily compute $\rlebp{i}$ for all $1 \leq i \leq m$
in $O(m)$ total time by scanning $\rle{S}$.
\subsubsection{SUPSs queries}
Suppose that we have retrieved the query interval $[s, t]$
from the query input $(s_r, s_p, t_r, t_p)$ as above.
The next task is to compute the number of MUPSs contained in $[s, t]$.
As was discussed previously, the number of MUPSs contained in $[s, t]$
can be computed from $\Succ{\mathit{MUPS}beg}{s}$ and $\Pred{\mathit{MUPS}end}{t}$.
In our algorithm for Problem~\ref{prob:SUPS_RLE} (Section~\ref{sec:query_algorithm}),
we computed $\Succ{\mathit{MUPS}beg}{s}$ and $\Pred{\mathit{MUPS}end}{t}$
using the successor/predecessor data structures of Lemma~\ref{lem:pred_succ_data_structure}
built on $\mathit{MUPS}beg$ and $\mathit{MUPS}end$.
Here, we present two alternative approaches.
Our first solution is the following:
\begin{theorem}
\label{theorem:problem2-1}
Given $\rle{S}$ of size $m$ for a string $S$,
we can compute a data structure of $O(m)$ space in
$O(m(\log \sigma_{\mathit{RLE}} + \sqrt{\log m / \log\log m}))$ time
so that subsequent run-length encoded SUPS queries of Problem~\ref{prob:SUPS_RLE_faster}
can be answered
in $O(\sqrt{\log\log m / \log\log\log m} + \alpha)$ time,
where $\sigma_{\mathit{RLE}}$ denotes the number of distinct
RLE-characters in $\rle{S}$ and $\alpha$ the number of SUPSs to report.
\end{theorem}
\begin{proof}
For each $1 \leq i \leq m$,
we build the successor data structure of Lemma~\ref{lem:pred_succ_data_structure}
on the elements stored in $\mathit{mrb}[i]$.
Similarly,
for each $1 \leq i \leq m$,
we build the predecessor data structure of Lemma~\ref{lem:pred_succ_data_structure}
on the elements stored in $\mathit{mre}[i]$.
Then, we compute $\Succ{\mathit{MUPS}beg}{s}$ and $\Pred{\mathit{MUPS}end}{t}$
from the successor/predecessor data structures for
$\Succ{\mathit{mrb}[s_r]}{s}$ and $\Pred{\mathit{mre}[t_r]}{t}$, respectively.
This approach covers the cases where $\Succ{\mathit{MUPS}beg}{s}$ exists in $\mathit{mrb}[s_r]$
and $\Pred{\mathit{MUPS}end}{t}$ exists in $\mathit{mre}[t_r]$.
To deal with the case where $\Succ{\mathit{MUPS}beg}{s}$ does not exist in $\mathit{mrb}[s_r]$,
we precompute $\Succ{\mathit{MUPS}beg}{\rleep{s_r}}$.
We can precompute $\Succ{\mathit{MUPS}beg}{\rleep{i}}$ for all $1 \leq i \leq m$
in $O(m)$ time by a simple scan over $\mathit{MUPS}beg$.
The case where $\Pred{\mathit{MUPS}end}{t}$ does not exist in $\mathit{mre}[t_r]$
can be treated similarly.
For each $1 \leq i \leq m$,
let $c_i$ be the number of MUPSs stored in $\mathit{mrb}[i]$.
The successor data structure of Lemma~\ref{lem:pred_succ_data_structure}
for the list of MUPSs in $\mathit{mrb}[i]$ occupies $O(c_i)$ space,
can be built in $O(c_i \sqrt{\log c_i / \log\log c_i})$ time,
and answers successor queries in $O(\sqrt{\log c_i / \log\log c_i})$ time.
It follows from Lemma~\ref{lem:number_of_mups_in_a_run} and Corollary~\ref{coro:num-of-mupss} that
$c_i = O(\log m)$ for each $1 \leq i \leq m$
and the total number of MUPSs stored in the data structures
for all $1 \leq i \leq m$ is $\sum_{i=1}^{m}c_i = O(m)$.
Therefore,
the successor data structures for $\mathit{mrb}[i]$ for all $1 \leq i \leq m$
can be built in a total of $O(m\sqrt{\log m / \log\log m})$ time (due to Jensen's inequality),
can be stored in $O(m)$ total space,
and answer successor queries in $O(\sqrt{\log\log m /\log\log\log m})$ time.
The same argument holds for the predecessor data structures for $\mathit{mre}[i]$.
Thus, we can count the number of MUPSs contained in the interval $[s,t]$
in $O(\sqrt{\log\log m /\log\log\log m})$ time.
The rest of our query algorithm is the same as in Section~\ref{sec:query_algorithm}.
\end{proof}
Our second solution is simpler and can be built faster than the first solution,
but supports slightly slower queries.
\begin{theorem}
Given $\rle{S}$ of size $m$ for a string $S$,
we can compute a data structure of $O(m)$ space in
$O(m\log \sigma_{\mathit{RLE}})$ time
so that subsequent run-length encoded SUPS queries of Problem~\ref{prob:SUPS_RLE_faster}
can be answered in $O(\log\log m + \alpha)$ time,
where $\sigma_{\mathit{RLE}}$ denotes the number of distinct
RLE-characters in $\rle{S}$ and $\alpha$ the number of SUPSs to report.
\end{theorem}
\begin{proof}
Given $s_r$ and $t_r$,
we binary search $\mathit{mrb}[s_r]$ and $\mathit{mre}[t_r]$ for $\Succ{\mathit{MUPS}beg}{s}$ and $\Pred{\mathit{MUPS}end}{t}$,
respectively.
Since the numbers of elements stored in $\mathit{mrb}[s_r]$ and in $\mathit{mre}[t_r]$ are $O(\log m)$ each
by Lemma~\ref{lem:number_of_mups_in_a_run},
the binary searches terminate in $O(\log \log m)$ time.
The cases where $\Succ{\mathit{MUPS}beg}{s}$ does not exist in $\mathit{mrb}[s_r]$,
and $\Pred{\mathit{MUPS}end}{t}$ does not exist in $\mathit{mre}[t_r]$
can be treated similarly as in Theorem~\ref{theorem:problem2-1}.
The rest of our query algorithm follows our method in Section~\ref{sec:query_algorithm}.
Clearly, this data structure takes $O(m)$ total space.
\end{proof}
\section*{Acknowledgments}
This work was supported by JSPS KAKENHI Grant Numbers JP18K18002 (YN), JP17H01697 (SI), JP16H02783 (HB), JP18H04098 (MT), and by JST PRESTO Grant Number JPMJPR1922 (SI).
\end{document} |
\begin{document}
\title{Congruence subgroups of braid groups}
\author{Charalampos Stylianakis}
\date{ }
\maketitle
\begin{abstract}
In this paper we give a description of the generators of the prime level congruence subgroups of braid groups. Also, we give a new presentation of the symplectic group over a finite field, and we calculate symmetric quotients of the prime level congruence subgroups of braid groups. Finally, we find a finite generating set for the level-3 congruence subgroup of the braid group on 3 strands.
\end{abstract}
\section{Introduction}
Let $B_n$ be the braid group on $n$ strands. By evaluating the (unreduced) Burau representation $B_n \to \mathrm{GL}_{n-1}(\mathbb{Z}[t^{\pm 1}])$ at $t=-1$ we obtain a symplectic representation
\[
\rho : B_{n} \rightarrow
\begin{cases}
\mathrm{Sp}_{n-1}(\mathbb{Z})
& \text{ if $n$ is odd,} \\
(\mathrm{Sp}_{n}(\mathbb{Z}))_{u}
& \text{ if $n$ is even,}
\end{cases}
\]
where $(\mathrm{Sp}_{n}(\mathbb{Z}))_{u}$ is the subgroup of $\mathrm{Sp}_{n}(\mathbb{Z})$ fixing one vector $u \in \mathbb{Z}^{n}$ \cite[Proposition 2.1]{GJ1} (see also \cite{BM} and \cite{C1}).
For a positive integer $m$, the projection $\mathbb{Z} \to \mathbb{Z}/m$ induces a representation as follows:
\[\rho_m : B_{n} \rightarrow
\begin{cases}
\mathrm{Sp}_{n-1}(\mathbb{Z}/m)
& \text{ if $n$ is odd,} \\
(\mathrm{Sp}_{n}(\mathbb{Z}/m))_{u}
& \text{ if $n$ is even.}
\end{cases}
\]
Note that if $m=1$, then $\rho_1=\rho$. For $m>1$ the kernel of $\rho_m$ is denoted by $B_n[m]$ and it is called the \emph{level}-\emph{m congruence subgroup of} $B_n$. The kernel of $\rho$ is called the \emph{braid Torelli} group, and it is denoted by $\mathcal{BI}_n$. The group $\mathcal{BI}_n$ has been extensively studied by Hain \cite{RH}, Brendle-Margalit \cite{BM1,BM2}, and Brendle-Margalit-Putman \cite{BMP}.
For $p$ prime, A'Campo proved that the homomorphism $\rho_p$ is surjective, by explicitly calculating the image of $\rho_p$ \cite[Theorem 1 (1)]{C1}. Wajnryb gave a presentation of $\mathrm{Sp}_{n-1}(\mathbb{Z}/p)$ and $(\mathrm{Sp}_{n}(\mathbb{Z}/p))_{u}$ as quotients of $B_n$ \cite[Theorem 1]{W}. Let $PB_n$ be the \emph{pure braid group}, that is, the kernel of the epimorphism $B_n \to S_n$, where $S_n$ is the symmetric group on $n$ letters. Our first result is an analogue of Wajnryb's theorem.
\paragraph{Theorem A} \textit{For $p$ prime, the groups $\mathrm{Sp}_{n-1}(\mathbb{Z}/p)$ and $(\mathrm{Sp}_{n}(\mathbb{Z}/p))_{u}$ admit a presentation as quotients of the pure braid group $PB_n$.}\\
\begin{flushleft}
This result is given as Theorem \ref{SYPRE} in the paper.
\end{flushleft}
A result of Arnol'd shows that $B_n[2]=PB_n$, where $PB_n$ is the pure braid group \cite{A1}. Therefore, for every $k$ even, we have that $B_n[k] \unlhd PB_n$. Our second result extends A'Campo's theorem.
\paragraph{Theorem B} \textit{For $m=2p_1...p_k$, where $p_i\geq3$ are primes, we have that $PB_n/ B_n[m]$ is isomorphic to $\bigoplus\limits_{i=1}^k \mathrm{Sp}_{n-1}(\mathbb{Z}/p_i)$ if $n$ is odd, and $\bigoplus\limits_{i=1}^k (\mathrm{Sp}_{n}(\mathbb{Z}/p_i))_{u}$ if $n$ is even.}\\
\begin{flushleft}
Theorem B is Theorem \ref{PBQ1} (see also Theorem \ref{PBQ2}) in the paper.
\end{flushleft}
We also characterize quotient groups of congruence subgroups of braid groups. The braid group $B_n$ surjects onto the symmetric group $S_n$. The kernel of this map is well known to be the pure braid group $PB_n$. Also, by a result established by Arnol'd \cite{A1} the group $PB_n$ is isomorphic to $B_n[2]$. See also \cite[Section 2]{BM} for further discussion. Therefore, we have $B_n / B_n[2] \cong S_n$. We generalize this result as stated in the following theorem.
\paragraph{Theorem C.} For a prime number $p$, the group $B_n[p] / B_n[2p]$ is isomorphic to $S_n$.
\begin{flushleft}
Theorem C is Theorem \ref{symquo} in the paper.
\end{flushleft}
\paragraph{Topological description of congruence subgroups.} A key part of the paper is a topological interpretation of $B_n[p]$, for $p\geq3$ prime, given in Section 4. The content of Section 4 was inspired by Powell, who built on Birman's work on the presentation of the symplectic group \cite[Theorem 1]{JB1} to show that the Torelli subgroup of the mapping class group is normally generated by bounding pair maps and Dehn twists about separating simple closed curves \cite[Theorem 2]{JP}.
Theorems A and B are used to find normal generators for $B_n[m]$, where $m=2p_1...p_k$ and $p_i$ is an odd prime. Motivated by Section 4 it would be interesting to find a topological description of the generators of $B_n[m]$ in the future.
\paragraph{Related results.} The mapping class group $\mathrm{Mod}(\Sigma)$ of an orientable surface $\Sigma$ is the group of isotopy classes of homeomorphisms that preserve the orientation of $\Sigma$, fix the boundary pointwise, and preserve the set of marked points setwise. We denote by $T_c$ a Dehn twist about a simple closed curve $c$. Let $\Sigma^b_g$ be a surface of genus $g\geq1$ with $b$ boundary components, where $b\in \{1,2\}$. It is a special case of theorem of Birman-Hilden \cite{BH} that $B_{2g+b}$ embeds into $\mathrm{Mod}(\Sigma^b_g)$ \cite[Section 9.4]{BFM}. We denote the image of this embedding by $\mathrm{SMod}(\Sigma^b_g)$. As mentioned in the previous page, the braid Torelli $\mathcal{BI}_{2g+b}$ is the kernel of the symplectic representation of $B_{2g+b}$. Hain conjectured that $\mathcal{BI}_{2g+b}$ is isomorphic to the group generated by Dehn twists about separating simple closed curves inside $\mathrm{SMod}(\Sigma^b_g)$ \cite{RH}. This conjecture was proved by Brendle-Margalit-Putman \cite[Theorem A]{BMP}, and also studied by Brendle-Margalit \cite{BM1,BM2}. By the definitions given in the beginning of the paper, the group $\mathcal{BI}_{2g+b}$ is a subgroup of $B_{2g+b}[m]$, for any $m \in \mathbb{N}$.
For $m\geq2$, consider $B_{2g+b}[m]$ as a subgroup of $ \mathrm{SMod}(\Sigma^b_g) \cong B_{2g+b}$. A consequence of a work of Arnol'd shows that $B_{2g+b}[2]$ is isomorphic to the pure braid group $PB_{2g+b}$ \cite{A1} (see \cite[Section 2]{BM} for explanation of this isomorphism). Combining the latter result with the work of Humphries \cite[Theorem 1]{SH} we obtain that $B_{2g+b}[2]$ is isomorphic to the normal closure of a square of a Dehn twist about nonseparating simple closed curve in $\mathrm{SMod}(\Sigma^b_g)$. Brendle-Margalit extended the latter result by proving that the normal closure of the $4^{th}$ power of a Dehn twist about a nonseparating simple closed curve in $\mathrm{SMod}(\Sigma^b_g)$ is isomorphic to $B_{2g+b}[4]$ \cite[Main Theorem]{BM}.
Let $\mathcal{T}_{2g+b}(m)$ be the normal closure of the $m^{th}$ power of a Dehn twist in $\mathrm{SMod}(\Sigma^b_g)$, where $g\geq1$ and $b=1,2$. Coxeter proved that $\mathcal{T}_{2g+b}(m)$ is a finite index subgroup of $\mathrm{SMod}(\Sigma^b_g)=B_{2g+b}$ if and only if $(2g+b-2)(m-2)<4$ \cite[Section 10]{C2}. As mentioned above, $\mathcal{T}_{2g+b}(2) = B_{2g+b}[2]$. Furthermore, Humphries gave a complete description of when a group generated by $\{\mathcal{T}_{2g+b}(m_i) \mid m_i \in \mathbb{N}\}$, for finite number of $m_i$, is of finite index in $PB_{2g+b}$ \cite[Theorem 1]{HU}. In addition, Funar-Kohno proved that the intersection of all $\mathcal{T}_{2g+b}(2m)$, where $m\in \mathbb{N}$, is trivial \cite[Theorem 1.1]{FK1}.
Finally, we note a more general definition of congruence subgroups of braid groups. Let $F_n$ be the free group of rank $n$. There is an inclusion $B_n \to \mathrm{Aut}(F_n)$ \cite[Theorem 1.9]{JB}. Consider a characteristic subgroup $H$ of finite index in $F_n$. The kernel of $\mathrm{Aut}(F_n) \to \mathrm{Aut}(F_n / H)$ is called \emph{principal congruence subgroup}, and any finite index subgroup of $\mathrm{Aut}(F_n)$ containing a principal congruence subgroup is called \emph{congruence subgroup}. A group $G$ is said to have the \emph{congruence subgroup propery} if every finite index subgroup of $G$ contains a principal congruence subgroup. Asada proved that $B_n$ satisfies the congruence subgroup property by using the notions of field extensions and profinite groups \cite[Theorem 3A, Theorem 5]{AS}. In contrast with Asada's techniques, Thurston gave a more elementary proof to the congruence subgroup property of $B_n$ \cite{BMC}.
\paragraph{Outline of the paper.} In Section 2 we give basic background on braid groups, hyperelliptic mapping class groups, the symplectic representation of braid groups, and the congruence subgroups of braid groups. In Section 3 we recall some key results about the congruence subgroups of symplectic groups. In Section 4 we give a topological interpretation of the generators of the prime level congruence subgroups of braid groups. In Section 5 we prove Theorems A and B. In Section 6 we prove Theorem C.
\paragraph{Acknowledgments.} I would like to thank my PhD supervisor Tara Brendle for her support during my work on this paper.
\section{Preliminaries}
In this section we recall the definition of braid groups, hyperelliptic mapping class groups, and the symplectic representation of braid groups.
\subsection{Definitions of braid groups}
\begin{figure}
\caption{The action of $\sigma_3$ on a punctured disc.}
\label{braidgroup}
\end{figure}
\paragraph{Braid groups.} For a detailed description of the following definition, see Birman-Brendle's survey \cite{BB}. Let $\Sigma^b_{g,n}$ denote an orientable surface of genus $g$ with $n$ punctures and $b$ boundary components. If $n=0$ we will simply write $\Sigma^b_{g}$. If $g=0$ and $b=1$ then $\Sigma^1_{0,n}$ is homeomorphic to a punctured disc. We enumerate the punctures from left to right. The \emph{braid group} $B_n$ on $n$ strands is defined to be the mapping class group $\mathrm{Mod}(\Sigma^1_{0,n})$ of $\Sigma^1_{0,n}$. For $1\leq i \leq n-1$ we denote by $\sigma_i$ the mapping class that interchanges the punctures $i,i+1$ as depicted in Figure \ref{braidgroup} for $i=3$. The mapping classes $\sigma_i$ are called half-twists. It turns out that the $\sigma_i$ generate the braid group $B_n$. In fact we have the following presentation
\[ \left< \sigma_1,...,\sigma_{n-1} \: | \: \sigma_i \sigma_{i+1} \sigma_i=\sigma_{i+1} \sigma_i \sigma_{i+1}, \sigma_i \sigma_j = \sigma_j \sigma_i \: \mathrm{when} \: |i-j|>1 \right>. \]
Consider the symmetric group $S_n$, and for $1 \leq i \leq n-1$ let $s_i$ denote the generators of $S_n$, that is the transpositions $(i,i+1)$. The map $B_n \to S_n$ defined by $\sigma_i \mapsto s_i$ is a well defined homomorphism with kernel the \emph{pure braid group} $PB_n$. For $1\leq i<j\leq n$, we denote by $a_{i,j}$ the element $\sigma_{j-1}... \sigma^2_i ... \sigma_{j-1}$. For $1\leq i<j\leq n$ the group $PB_n$ admits a presentation with generators $a_{i,j}$ and relations
\begin{enumerate}
\item[P1.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = a_{i,j}, \: 1 \leq r<s<i<j \leq n \: \mathrm{or} \: 1 \leq i<r<s<j \leq n,
\end{aligned}$
\item[P2.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = a_{r,j} a_{i,j} a^{-1}_{r,j} , \: 1 \leq r<s=i<j \leq n,
\end{aligned}$
\item[P3.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = (a_{i,j} a_{s,j})a_{i,j}(a_{i,j} a_{s,j})^{-1} , \: 1 \leq r=i<s<j \leq n,
\end{aligned}$
\item[P4.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = (a_{r,j} a_{s,j} a^{-1}_{r,j} a^{-1}_{s,j})a_{i,j}(a_{r,j} a_{s,j} a^{-1}_{r,j} a^{-1}_{s,j})^{-1} , \: 1 \leq r<i<s<j \leq n.
\end{aligned}$
\end{enumerate}
For more details about definitions and presentations of $B_n$ and $PB_n$ see \cite[Chapter 1]{BB}.
\begin{figure}
\caption{Action of the hyperelliptic involution.}
\label{twofold}
\end{figure}
\paragraph{Hyperelliptic mapping class groups.} Let $c$ be a nonseparating simple closed curve on a surface $\Sigma^b_{g,n}$. We denote by $T_c$ the Dehn twist about the curve $c$. Dehn twists about nonseparating simple closed curves generate $\mathrm{Mod}(\Sigma^b_g)$. Consider a hyperelliptic involution $\iota$ as depicted in Figure \ref{twofold}. For $b=1,2$, $\iota$ acts on $\Sigma^b_g$. Since $\iota$ does not fix the boundary components of $\Sigma^b_g$ pointwise, then $\iota \notin \mathrm{Mod}(\Sigma^b_g)$. We have a two fold branched cover $\Sigma^b_g \rightarrow \Sigma^b_g / \iota.$ Topologically $\Sigma^b_g / \iota$ is homeomorphic to $\Sigma^1_{0,2g+b}$ (see Figure \ref{twofold}). We note that if $q_1,q_2$ denote the boundary components of $\Sigma^2_g$, then $\iota(q_1)=q_2$.
\begin{figure}
\caption{Generators of the hyperelliptic mapping class group.}
\label{hyperm}
\end{figure}
Consider the curves $c_i$ depicted in Figure \ref{hyperm}, and let $\sigma_i$ be the generators of $B_{2g+b}$. We define a map $\xi:B_{2g+b}\rightarrow \mathrm{Mod}(\Sigma^b_g)$ by $\xi(\sigma_i) = T_{c_i}$. Since the braid, and the disjointness relations are satisfied by $\sigma_i$ and $T_{c_i}$, then $\xi$ is a homomorphism. The image of $\xi$ is called \emph{hyperelliptic mapping class group}, and it is denoted by $\mathrm{SMod}(\Sigma^b_g)$. In fact we have $B_{2g+b}\cong \mathrm{SMod}(\Sigma^b_g)$ \cite[Theorem 9.2]{BFM} (see also \cite{PV}).
\subsection{Symplectic representation}
In this section we will construct a representation for the braid group $B_n$. Firstly, we recall the definition of $\mathrm{Sp}_{2n}(\mathbb{Z})$. Let $J$ be the $2n \times 2n$ matrix
\[\left( \begin{array}{ccc}
0 & I_n\\
-I_n & 0\\
\end{array} \right).\]
The symplectic group with integer coefficients is defined to be
\[\mathrm{Sp}_{2n}(\mathbb{Z}) = \{ A \in \mathrm{GL}(2n,\mathbb{Z}) \, \mid \, A^T J A = J \}.\]
We also define the symplectic group with coefficients in $\mathbb{Z}/m$ to be
\[\mathrm{Sp}_{2n}(\mathbb{Z}/m) = \{ A \in \mathrm{GL}(2n,\mathbb{Z}) \, \mid \, A^T J A \equiv J \: \mathrm{mod}(m) \}\]
where $m \in \mathbb{N}$. For a fixed $u \in \mathbb{Z}^{2n}$, we also recall
\[(\mathrm{Sp}_{2n}(\mathbb{Z}))_{u} = \{ t \in \mathrm{Sp}_{2n}(\mathbb{Z}) \mid t(u)=u \}.\]
Consider $g\geq1$ and $b=1,2$. Since $B_{2g+b}\cong \mathrm{SMod}(\Sigma^b_g)$, we will use the action of $\mathrm{SMod}(\Sigma^b_g)$ on the first homology of $\Sigma^b_g$ to construct a representation for $B_{2g+b}$.
\begin{figure}
\caption{Standard generators for $\mathrm{H}_1(\Sigma^1_g;\mathbb{Z})$ (left) and $\mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z})$ (right).}
\label{sympb1}
\end{figure}
\paragraph{Construction of the representation.} We denote by $\iota_a$ the algebraic intersection number between curves of $\Sigma^b_g$ for $g\geq1$ and $b=1,2$. The form $\iota_a$ is alternating, bilinear, and nondegenerate. Every element of the mapping class group preserves $\iota_a$ \cite[Section 6.3]{BFM}. Consider $b=1$; the oriented curves $x_i,y_i$ of $\Sigma^1_g$ of Figure \ref{sympb1} form a symplectic basis for $\mathrm{H}_1(\Sigma^1_g;\mathbb{Z})$. The action of $\mathrm{SMod}(\Sigma^1_g)$ on $\mathrm{H}_1(\Sigma^1_g;\mathbb{Z})$ induces the following representation:
\[\mathrm{SMod}(\Sigma^1_g) \to \mathrm{Sp}_{2g}(\mathbb{Z}).\]
If $b=2$, the module $\mathrm{H}_1(\Sigma^2_g;\mathbb{Z})$ is not symplectic. Thus, we will consider a different module. Fix a point on each of the boundaries of $\Sigma^2_g$, and denote by $Q$ the set that contains those two points. Denote also by $P$ the set that contains the two boundary components. We set $\mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z}) \cong \mathrm{H}_1(\Sigma^2_g,Q;\mathbb{Z})/\langle P \rangle$. The module $\mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z})$ is symplectic \cite[Section 2.1]{BM} (see also \cite{PM1}). The basis of $\mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z})$ is $x_i,y_i$ as indicated on the right hand side of Figure \ref{sympb1}. The action of $\mathrm{SMod}(\Sigma^2_g)$ on $\mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z})$ induces the following representation:
\[\mathrm{SMod}(\Sigma^2_g) \to (\mathrm{Sp}_{2g+2}(\mathbb{Z}))_{y_{g+1}},\]
where $(\mathrm{Sp}_{2g+2}(\mathbb{Z}))_{y_{g+1}}$ stands for the subgroup of $\mathrm{Sp}_{2g+2}(\mathbb{Z})$ that fixes the vector $y_{g+1}$.
Since the map $\xi : B_{2g+b} \to \mathrm{SMod}(\Sigma^b_g)$ is an isomorphism, we have a well defined representation
\[
\rho : B_{2g+b} \rightarrow
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z})
& \text{ if $b=1$} \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}))_{y_{g+1}}
& \text{ if $b=2$.}
\end{cases}
\]
\paragraph{Image of the representation.} We denote also by $[c]$ the homology class of a curve $c$ in $\Sigma^b_g$. For $x,c$ nonseparating simple closed curves in $\Sigma^b_g$, the automorphism $T_{[c]}([x]) = [x]+\iota_a(x,c)[c]$ is called a transvection \cite[Section 6.6.3]{BFM}. We remark that for every integer $m$, we have $T^m_{[c]}([x]) = [x]+m \iota_a(x,c)[c]$.
Let $T_{c_i}$ be a Dehn twist about a curve $c_i$ indicated in Figure \ref{hyperm}. The image of $T_{c_i}$ under the symplectic representation is the transvection $T_{[c_i]}$. Also, since $\xi(\sigma_i)=T_{c_i}$ as explained in the previous section, we have $\rho(\sigma_i)=T_{[c_i]}$. We note also that $\rho(\sigma^m_i)=T^m_{[c_i]}$.
\paragraph{Kernel of the symplectic representation.} Assume that $b=1,2$, $g\geq0$, and recall that $B_{2g+b}=\mathrm{Mod}(D_{2g+b})$. The kernel of the symplectic representation $\rho$ is denoted by $\mathcal{BI}_{2g+b}$, and it is called the \emph{braid Torelli}. It is a result by Brendle-Margalit-Putman that $\mathcal{BI}_{2g+b}$ is generated by Dehn twists about simple closed curves surrounding 3 or 5 number of puncture points \cite[Theorem C]{BMP}.
Consider the isomorphism $\xi : B_{2g+b} \to \mathrm{SMod}(\Sigma^b_g)$. The image of $\mathcal{BI}_{2g+b}$ in $\mathrm{SMod}(\Sigma^b_g)$ under $\xi$ is denoted by $\mathcal{SI}(\Sigma^b_g)$. The latter group is well known as the hyperelliptic Torelli group. Furthermore, $\mathcal{SI}(\Sigma^b_g)$ is generated by Dehn twists about symmetric separating simple closed curves that bound a subsurface of genus 1 or 2 \cite[Theorem A]{BMP}.
\subsection{Congruence subgroups of braid groups}
Let $m$ be a positive integer. The surjective homomorphisms $\mathrm{H}_1(\Sigma^1_g;\mathbb{Z}) \to \mathrm{H}_1(\Sigma^1_g;\mathbb{Z}/m)$ and $\mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z}) \to \mathrm{H}^P_1(\Sigma^2_g;\mathbb{Z}/m)$ induce the following epimorphisms:
\[
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z}) \to & \mathrm{Sp}_{2g}(\mathbb{Z}/m) \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}))_{y_{g+1}} \to & (\mathrm{Sp}_{2g+2}(\mathbb{Z}/m))_{y_{g+1}}.
\end{cases}
\]
Thus we have a family of representations for the braid groups
\[
\rho_m : B_{2g+b} \rightarrow
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z}/m)
& \text{ if $b=1$} \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}/m))_{y_{g+1}}
& \text{ if $b=2$,}
\end{cases}
\]
where $g\geq1$. The kernels of the representations $\rho_m$ are denoted by $B_{2g+b}[m]$ and they are known as \emph{level-m congruence subgroups of braid groups}.
\section{Congruence subgroups of Symplectic groups}
In this section we examine the structure of the congruence subgroups of symplectic groups.
\paragraph{Congruence subgroups and generators.} The projection $\mathbb{Z}\to \mathbb{Z}/m$ induces a surjective homomorphism $\mathrm{Sp}_{2n}(\mathbb{Z}) \rightarrow \mathrm{Sp}_{2n}(\mathbb{Z}/m)$, whose kernel is the \textit{principal level $m$ congruence subgroup} of $\mathrm{Sp}_{2n}(\mathbb{Z})$ denoted by $\mathrm{Sp}_{2n}(\mathbb{Z})[m]$. The group $\mathrm{Sp}_{2n}(\mathbb{Z})[m]$ consists of all matrices of the form $I_{2n} + m A$; where $A \in \mathrm{Sp}_{2n}(\mathbb{Z})$. Furthermore, if $m$ is a multiple of $l$ then $\mathrm{Sp}_{2n}(\mathbb{Z})[m] \triangleleft \mathrm{Sp}_{2n}(\mathbb{Z})[l]$.\\
Next we give generators for $\mathrm{Sp}_{2n}(\mathbb{Z})[p]$ when $p$ is any prime number. Let $r \in \mathbb{Z}$. We define $e_{i,j}(r)$ to be the $n \times n$ matrix with $(i,j)^{th}$ entry equal to $r$ and 0 otherwise. Let $\beta_i(r)$ be the $n \times n$ matrix with $(i,i)^{th}$ and $(i,i+1)^{th}$ entries equal to $r$, $(i+1,i+1)^{th}$ and $(i+1,i)^{th}$ entries equal to $-r$ and 0 otherwise. Define also $s e_{i,j}(r)$ to be the $n \times n$ matrix with $(i,j)^{th}$ and $(j,i)^{th}$ entries equal to $r$ and 0 otherwise. For $1 \leq i \leq j \leq n$ we define:
\[ \mathcal{X}_{i,j}(r) = I_{2n} + \left( \begin{array}{ccc}
0 & 0\\
s e_{i,j}(r) & 0\\
\end{array} \right), \quad \mathcal{Y}_{i,j}(r) = I_{2n} + \left( \begin{array}{ccc}
0 & s e_{i,j}(r)\\
0 & 0\\
\end{array} \right). \]
\begin{flushleft}
For $1 \leq i,j \leq n$ with $i \neq j$ we define:
\end{flushleft}
\[ \mathcal{Z}_{i,j}(r) = I_{2n} + \left( \begin{array}{ccc}
e_{i,j}(r) & 0\\
0 & -e_{i,j}(r)\\
\end{array} \right). \]
\begin{flushleft}
For $1 \leq i < n$
\end{flushleft}
\[ \mathcal{W}_{i}(r) = I_{2n} + \left( \begin{array}{ccc}
\beta_i(r) & 0\\
0 & -\beta_i(r)\\
\end{array} \right). \]
\begin{flushleft}
Finally,
\end{flushleft}
\[ \mathcal{U}_{1}(r) = I_{2n} + \left( \begin{array}{ccc}
e_{1,1}(r) & e_{1,1}(r)\\
-e_{1,1}(r) & -e_{1,1}(r)\\
\end{array} \right). \]
The following theorem gives a nice description of $\mathrm{Sp}_{2n}(\mathbb{Z})[p]$ as a group generated by the matrices above \cite[Lemma 5.4]{CP}.
\begin{theorem}[Church-Putman]
For $n \geq 2$ and for a prime number $p \geq 2$ the congruence subgroup $\mathrm{Sp}_{2n}(\mathbb{Z})[p]$ is generated by the set
\[ \mathcal{S} = \{ \mathcal{X}_{i,j}(p), \mathcal{Y}_{i,j}(p), \mathcal{Z}_{i,j}(p), \mathcal{W}_{i}(p), \mathcal{U}_{1}(p) \} \]
where $i,j$ are indices defined as above.
\label{CP}
\end{theorem}
We use Theorem \ref{CP} to prove the lemma below, since we do not know a concise proof in the literature. In particular, we use the generators of Theorem \ref{CP} to prove that $\mathrm{Sp}_{2n}(\mathbb{Z}/b)$ can be expressed as a quotient of some congruence subgroup of $\mathrm{Sp}_{2n}(\mathbb{Z})$ when $b$ is a prime number.
\begin{lemma}
Let $a$ and $b$ two distinct prime numbers. Then the following sequence is exact.
\[ 1 \rightarrow \mathrm{Sp}_{2n}(\mathbb{Z})[ab] \rightarrow \mathrm{Sp}_{2n}(\mathbb{Z})[a] \rightarrow \mathrm{Sp}_{2n}(\mathbb{Z}/b) \rightarrow 1. \]
\label{Ha}
\end{lemma}
\begin{proof}
The map $\mathrm{Sp}_{2n}(\mathbb{Z})[a] \rightarrow \mathrm{Sp}_{2n}(\mathbb{Z}/b)$ sends every matrix $A \in \mathrm{Sp}_{2n}(\mathbb{Z})[a]$ into its $\mathrm{mod}(b)$ reduction. First, we prove the surjectivity of the latter map. The generators of $\mathrm{Sp}_{2n}(\mathbb{Z}/b)$ are $\mathcal{X}_{i,j}(1) \: \mathrm{mod}(b)$ and $\mathcal{Y}_{i,j}(1) \: \mathrm{mod}(b)$ where $1 \leq i < j \leq n$. Define $k$ to be the solution of the equation $ak \equiv 1 \: \mathrm{mod}(b)$. Then, $\mathcal{X}_{i,j}(a)^k \equiv \mathcal{X}_{i,j}(1) \: \mathrm{mod}(b)$ and $\mathcal{Y}_{i,j}(a)^k \equiv \mathcal{Y}_{i,j}(1) \: \mathrm{mod}(b)$. This proves the surjectivity of the reduction map. The kernel of this reduction map contains matrices which satisfy $I_{2n} + a A \equiv I_{2n} \: \mathrm{mod}(b)$. But since $a$ and $b$ are relatively prime, the latter equivalence holds if and only if $A = bB$, where $B$ is a symplectic matrix.
\end{proof}
The following proposition gives a useful decomposition of $\mathrm{Sp}_{2n}(\mathbb{Z}/m)$ \cite[Theorem 5]{NS}.
\begin{proposition}[Newman-Smart]
\label{newman}
Let $m \in \mathbb{N}$ and write $m = p^{k_1}_{1} p^{k_2}_{2}...p^{k_l}_{l}$, where $p^{k_i}_{i}$ are powers of prime numbers. Then
\[ \mathrm{Sp}_{2n}(\mathbb{Z}/m) = \bigoplus^{l}_{i=1} \mathrm{Sp}_{2n}(\mathbb{Z}/p^{k_i}_{i}). \]
\end{proposition}
Newman-Smart also proved that the abelian group $\mathfrak{sp}_{2n}(\mathbb{Z}/l)$ can be expressed as a quotient of congruence subgroups of $\mathrm{Sp}_{2n}(\mathbb{Z})$, \cite[Theorem 7]{NS}.
\begin{proposition}[Newman-Smart]
Let $l,m \geq 2$ such that $l$ divides $m$. Then we have the following isomorphism.
$$\mathrm{Sp}_{2n}(\mathbb{Z})[m] / \mathrm{Sp}_{2n}(\mathbb{Z})[ml] \cong \mathfrak{sp}_{2n}(\mathbb{Z}/l).$$
\label{comutsympl}
\end{proposition}
Lemma \ref{Ha} and Propositions \ref{newman} and \ref{comutsympl} play a crucial role in Section 5, in which we explore the structure of congruence subgroups of braid groups.
\section{Topological interpretation of prime level congruence subgroups}
The purpose of this section is the characterization of the group $B_{2g+b}[p]$ when $p$ is prime. Since $B_{2g+b}\cong\mathrm{SMod}(\Sigma^b_g)$, it is convenient to study the kernel of the map
\[
\mathrm{SMod}(\Sigma^b_{g}) \rightarrow
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z}/p)
& \text{ if $b=1$}, \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}/p))_{y_{g+1}}
& \text{ if $b=2$}\\
\end{cases}
\]
and we denote the map again by $\rho_p$. Also, we denote the kernel of $\rho_p$ by $B_{2g+b}[p]$.
A'Campo proved that the homomorphism $\rho_p$ is surjective \cite[Theorem 1 (1)]{C1}. Later Assion gave a presentation for $\mathrm{Sp}_{2g}(\mathbb{Z}/3)$ and $(\mathrm{Sp}_{2g+2}(\mathbb{Z}/3))_{y_{g+1}}$ as quotients of braid groups \cite{A3}. Wajnryb improved the result of Assion and generalized it for any prime number greater than 2 \cite[Theorem 1]{W}. We begin with the theorem of Wajnryb.
\begin{theorem}[Wajnryb]
\label{WA}
Consider the curves $c_i$ depicted in Figure \ref{hyperm}. Let $G_{2g+b}$ be a group with generators $T_{c_1},...,T_{c_{2g+b-1}}$ and relations $R1$ to $R6$ as follows.
\begin{enumerate}
\item[R1.] $\begin{aligned}[t]
T_{c_i} T_{c_{i+1}} T_{c_i} = T_{c_{i+1}} T_{c_i} T_{c_{i+1}};
\end{aligned}$
\item[R2.] $\begin{aligned}[t]
[T_{c_i},T_{c_j}]=1, \quad \mathrm{for} \: \vert i - j \vert >1;
\end{aligned}$
\item[R3.] $\begin{aligned}[t]
T^p_{c_1} = 1;
\end{aligned}$
\item[R4.] $\begin{aligned}[t]
(T_{c_1} T_{c_2})^6 = 1, \quad \mathrm{for} \: p > 3;
\end{aligned}$
\item[R5.] $\begin{aligned}[t]
T^{(p-1)/2}_{c_1} T^4_{c_2} T^{-(p-1)/2}_{c_1} = T^{2}_{c_2} T_{c_1} T^{-2}_{c_2}, \quad \mathrm{for} \: p > 3; \: \mathrm{and}
\end{aligned}$
\item[R6.] $\begin{aligned}[t]
(T_{c_1} T_{c_2} T_{c_3})^4 = A T^2_{c_1} A^{-1}, \: \mathrm{for} \: n > 4, \: \mathrm{where} \: A = T_{c_4} T^2_{c_3} T_{c_4} T^{(p-1)/2}_{c_2} T^{-1}_{c_3} T_{c_2}.
\end{aligned}$
\end{enumerate}
Then $G_{2g+1}$ is isomorphic to $\mathrm{Sp}_{2g}(\mathbb{Z}/p)$, and $G_{2g+2}$ is isomorphic to $(\mathrm{Sp}_{2g+2}(\mathbb{Z}/p))_{y_{g+1}}$.
\end{theorem}
As a consequence of Theorem \ref{WA} we obtain elements of $\mathrm{SMod}(\Sigma^b_{g})$ which normally generate $B_{2g+b}[p]$.
In the rest of the section we examine the elements of the relations of Theorem \ref{WA} in order to give a topological description for the generators of $B_n[p]$. We note that relations $R1$ and $R2$ are the defining relations in the presentation of the braid group.
We denote by $[c_i]$ the homology class of $c_i$, and by $T_{[c_i]}$ the transvection associated to the Dehn twist $T_{c_i}$ under the map
\[
\mathrm{SMod}(\Sigma^b_{g}) \rightarrow
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z}/p)
& \text{ if $b=1$}, \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}/p))_{y_{g+1}}
& \text{ if $b=2$}. \\
\end{cases}
\]
By definition, the action of a transvection $T^m_{[c]}$ on an element $u \in \mathrm{H}_1(\Sigma^1_{g},\mathbb{Z})$ (respectively $\mathrm{H}^P_1(\Sigma^2_{g},\mathbb{Z})$) is defined to be $T^m_{[c]}(u) = u + m \hat{i}(u,[c])[c]$, where $\hat{i}$ stands for the algebraic intersection number.
\paragraph{\textbf{R3}: Powers of Dehn twists.} The $p^{th}$ powers of Dehn twists about symmetric nonseparating simple closed curves are easy to check by looking at their image in the symplectic group. The symplectic representation sends $T^p_{c_1}$ into the following matrix:
\[ \left( \begin{array}{ccc}
1 & p\\
0 & 1\\
\end{array} \right) \oplus I, \]
where $I$ stands for the identity matrix of dimension depending on $g$ and $b$ (see Section 7.1.3). The $\mathrm{mod}(p)$ reduction of the matrix above is the identity. Moreover, every Dehn twist about a non-separating curve is conjugate to $T_{c_1}$. As a consequence, every Dehn twist in $\mathrm{SMod}(\Sigma^b_{g})$ raised to the power of $p$ lies in $B_n[p]$.
\paragraph{$\textbf{R4}$: Symmetric separating Dehn twists.} By the chain relation the element $(T_{c_1} T_{c_2})^6$ can be represented by a Dehn twist $T_{\gamma}$, where $\gamma$ is the symmetric separating curve bounding the genus 1 subsurface of $\Sigma^b_{g}$ as indicated in Figure \ref{refive} \cite[Proposition 4.12]{BFM}. We can generalize the relation R4 by considering a symmetric separating curve $\delta$ of a genus $k$ subsurface of $\Sigma^b_{g}$. By the chain relation there is a maximal chain of curves $a_1,...,a_{2k}$ in the subsurface of genus $k$ with boundary $\delta$ such that $(T_{a_1}...T_{a_{2k}})^{4k+2} = T_{\delta}$.
The fact that every symmetric separating simple closed curve $\delta$ is nullhomologous in $H_1(\Sigma^1_g)$ (respectively $H^P_1(\Sigma^2_g)$) implies that $T_{[\delta]}(x)=x + \iota_a(x,[\delta])[\delta]=x+0=x$ for every $x \in H_1(\Sigma^1_g)$ (respectively $H^P_1(\Sigma^2_g)$), where $T_{[\delta]}$ is the corresponding transvection of $T_{\delta}$ as described in Section 2. Hence $T_{\delta} \in B_{2g+b}[p]$ for every symmetric separating curve $\delta$ in $\Sigma^b_{g}$, and therefore $(T_{a_1}...T_{a_{2k}})^{4k+2} \in \mathcal{SI}(\Sigma^b_{g}) \subset B_{2g+b}[p]$.
\begin{figure}
\caption{The curve $\gamma$ that bounds a surface of genus 1.}
\label{refive}
\end{figure}
\paragraph{$\textbf{R5}$: Mod-p involution maps.} We begin by modifying the relation $R5$ of Theorem \ref{WA}.
\begin{lemma}
The relation $R5$ given above is equivalent to:
$$(T^{(p+1)/2}_{c_1} T^{4}_{c_2})^2 = (T_{c_1} T_{c_2})^3$$
in $\mathrm{Sp}_{2g}(\mathbb{Z}/p)$ (respectively $(\mathrm{Sp}_{2g+2}(\mathbb{Z}/p))_{y_{g+1}}$).
\label{insert}
\end{lemma}
\begin{proof}
We have that $(T_{c_1} T_{c_2})^3 = T_{c_1} T^{2}_{c_2} T_{c_1} T^{2}_{c_2}$. Then
\[ T^{(p-1)/2}_{c_1} T^4_{c_2} T^{-(p-1)/2}_{c_1} = T^{-1}_{c_1} (T^{(p+1)/2}_{c_1} T^4_{c_2})^2 T^{-4}_{c_2} = T^{2}_{c_2} T_{c_1} T^{-2}_{c_2}. \]
On the other hand
\[ (T^{(p+1)/2}_{c_1} T^{4}_{c_2})^2 = T_{c_1} T^{(p-1)/2}_{c_1} T^4_{c_2} T^{-(p-1)/2}_{c_1} T^4_{c_2} = T_{c_1} T^2_{c_2} T_{c_1} T^2_{c_2}. \]
\end{proof}
Now we examine the relation of Lemma \ref{insert}.
\paragraph{RHS.} For $i=1,2$, $(T_{c_1} T_{c_2})^3([c_i]) = -[c_i]$, where $[c_i]$ stands for the homology class of $c_i$. Thus, the homeomorphism $(T_{c_1} T_{c_2})^3$ acts as the hyperelliptic involution on the subsurface bounded by the boundary of the chain $ch(c_1,c_2)$ (see Figure \ref{refive}).
\paragraph{LHS.} We have
\begin{align*}
(T^{(p+1)/2}_{c_1} T^{4}_{c_2})^2([c_1]) = -8p[c_2] + (4p^2 +2p -1)[c_1] \equiv -[c_1] \: \bmod(p),\\
(T^{(p+1)/2}_{c_1} T^{4}_{c_2})^2([c_2]) = 2p \frac{p+1}{2} [c_1] - (2p+1)[c_2] \equiv -[c_2] \: \bmod(p)
\end{align*}
Therefore, $(T^{(p+1)/2}_{c_1} T^{4}_{c_2})^2$ acts as the hyperelliptic involution $\bmod(p)$ in the subspace of $\mathrm{H}_1(\Sigma^1_g,\mathbb{Z}/p)$ (resp $\mathrm{H}^P_1(\Sigma^2_g,\mathbb{Z}/p)$) spanned by $[c_1],[c_2]$.\\
We can generalize Relation $R5$ as follows. For $k$ even, consider any chain $ch(a_1,a_2,...,a_k)$ of symmetric simple closed curves such that $T_{a_i} \in \mathrm{SMod}(\Sigma^b_{g})$ for all $i \leq k$. Choose an $f \in \mathrm{SMod}(\Sigma^b_g)$ such that $f([a_i]) = -[a_i]$. Then $(T_{a_1}...T_{a_k})^{k+1} f^{-1} \in B_{2g+b}[p]$. We call this type of element a \emph{mod-p involution map}.
\paragraph{$\textbf{R6}$: Mod-p center maps.} We describe a generalized version of $(T_{c_1} T_{c_2} T_{c_3})^4 (A T^{-2}_{c_1} A^{-1})$. Let $A_1$ be the trivial homeomorphism in $\mathrm{SMod}(\Sigma^b_{g})$. For $k$ odd, and $k\geq3$, define
\[ A_k = T_{c_{k+1}} T^{2}_{c_k} T_{c_{k+1}} T^{(p-1)/2}_{c_{k-1}} T^{-1}_{c_k} T_{c_{k-1}} A_{k-2}. \]
First, we deal with the case $b=1$. (For $b=2$ the process is exactly the same.) Consider the symplectic bases $\{ y_i,x_i \}$ for $\mathrm{H}_1(\Sigma^1_{g},\mathbb{Z})$ depicted on Figure \ref{sympb1}.
\begin{lemma}
For $k$ odd, we have that $A_k T_{[c_1]} A^{-1}_k = T_{[y_{(k+1)/2}]}$ in $\mathrm{Sp}_{2g}(\mathbb{Z}/p)$.
\label{rel6}
\end{lemma}
Note that if $k=3$, then $T_{[y_2]} = T_{[d_3]}$.
\begin{proof}
We need to prove that $A_k([c_1]) \equiv [c_1] + [c_3] + ... + [c_k] \: \bmod(p)$. A direct calculation shows that $A_3 ([c_1]) \equiv [c_1] + [c_3] \: \mathrm{mod}(p)$. Assume that the theorem is true for $k-2$, that is $A_{k-2}([c_1]) = [c_1] + [c_3] + ... + [c_{k-2}]$. Then $T_{c_{k+1}} T^{2}_{c_k} T_{c_{k+1}} T^{(p-1)/2}_{c_{k-1}} T^{-1}_{c_k} T_{c_{k-1}}([c_{k-2}]) \equiv [c_{k-2}] + [c_k] \bmod(p)$. The proof of the lemma follows.
\end{proof}
\begin{figure}
\caption{The chain relation of $R6$.}
\label{skatochain}
\end{figure}
Let $k$ be an odd integer, and consider also the odd chain $ch(c_1,c_2,...,c_k)$. By the chain relation we have that $(T_{c_1}...T_{c_k})^{k+1} = T_{d_k} T_{d'_k}$, where $d_k = y_{(k+1)/2}$, and $[d_k]=[d'_k]=[y_{(k+1)/2}]$ (see, for example, Figure \ref{skatochain}). Thus, $(T_{[c_1]}...T_{[c_k]})^{k+1} = T^2_{[y_{(k+1)/2}]} \in \mathrm{Sp}_{2g}(\mathbb{Z}/p)$. On the other hand, according to Lemma \ref{rel6} we have that $A_k T^2_{[c_1]} A^{-1}_k = T^2_{[y_{(k+1)/2}]} \in \mathrm{Sp}_{2g}(\mathbb{Z}/p)$. Hence, $(T_{c_1}...T_{c_k})^{k+1} A_k T^{-2}_{c_1} A^{-1}_k \in B_{n}[p]$. Note that if $k=3$, the element $(T_{c_1}...T_{c_k})^{k+1} A_k T^{-2}_{c_1} A^{-1}_k$ is the same one as in the relation 6 of Theorem \ref{WA}.
We can describe a generalized version of $(T_{c_1}...T_{c_k})^{k+1} A_k T^{-2}_{c_1} A^{-1}_k$. Consider any odd chain $ch(a_1,a_2,...,a_k)$, such that $T_{a_i} \in \mathrm{SMod}(\Sigma^1_{g})$ for all $i \leq k$. Choose a homeomorphism $h \in \mathrm{SMod}(\Sigma^1_{g})$ such that $h([a_1]) = [a_1]+[a_3]+...+[a_k] \in \mathrm{H}_1(\Sigma^1_{g},\mathbb{Z}/p)$. Then $(T_{a_1}...T_{a_k})^{k+1} h T^{-2}_{a_1} h^{-1}$ lies in $B_{2g+1}[p]$. If we consider $(T_{a_1}...T_{a_k})^{k+1}$ as the center of the subgroup $K$ of $\mathrm{SMod}(\Sigma^b_{g})$ generated by $T_{a_1}...T_{a_k}$, then $h T^{-2}_{a_1} h^{-1}$ is the center $\bmod(p)$ of the same group. Note that the choice of $h$ is not unique. We call this type of element a \emph{mod-p center map}.
\paragraph{Generators for congruence subgroups.} As a corollary of Theorem \ref{WA} we obtain the following theorem.
\begin{theorem}
If $p=3$, then $B_{2g+b}[3]$ is generated by Dehn twists raised to the power of $3$, and for $2g+b>4$ by mod-p center maps. For $p>3$ the subgroup $B_{2g+b}[p]$ of $\mathrm{SMod}(\Sigma^b_{g})$ is generated by Dehn twists raised to the power of $p$, by Dehn twists about symmetric separating curves, by mod-p involution maps, and for $2g+b>4$ by mod-p center maps.
\label{congen}
\end{theorem}
\paragraph{Finite set of generators.} It is well known that every finite index subgroup of a finitely generated group is finitely generated \cite[Corollary 2.7.1]{MKS}. The generating set in Theorem \ref{congen} is infinite. When $p=3$ and $g=1$ we can find a finite set of generators.
\begin{theorem}
The group $B_3[3]$ is generated by four elements.
\label{cong3}
\end{theorem}
\begin{proof}
Set $S = \{ T^3_{c_1},T^3_{c_2}, T_{c_2} T^3_{c_1} T^{-1}_{c_2}, T^2_{c_2} T^3_{c_1} T^{-2}_{c_2} \}.$ We denote by $\Gamma$ the subgroup of $B_3[3]$ generated by $S$. We prove that if we conjugate elements of $S$ by $T_{c_1}$ or $T_{c_2}$, then the resulting elements lie in $\Gamma$. Since $B_3[3]$ is normally generated by $S$ and since $S$ generates a normal subgroup of $B_3$, it follows that $\Gamma = B_3[3]$.\\
In the braid group we have the relation $$T_{c_j} T_{c_{j-1}}...T^3_{c_i}...T^{-1}_{c_{j-1}} T^{-1}_{c_j} = T^{-1}_{c_i} T^{-1}_{c_{i+1}}...T^3_{c_j}...T_{c_{i+1}} T_{c_i} $$
We prove the theorem in three steps.
\paragraph{Step 1:} Conjugates of $T^3_{c_1},T^3_{c_2}$:\\
\begin{align*}
T^{- 1}_{c_2} T^3_{c_1} T_{c_2} = T^{-3}_{c_2} T^{2}_{c_2} T^3_{c_1} T^{-2}_{c_2} T^{3}_{c_2} \in \Gamma\\
T^{-1}_{c_1} T^3_{c_2} T_{c_1} = T_{c_2} T^3_{c_1} T^{-1}_{c_2} \in \Gamma\\
T_{c_1} T^3_{c_2} T^{-1}_{c_1} = T^{- 1}_{c_2} T^3_{c_1} T_{c_2} = T^{-3}_{c_2} T^{2}_{c_2} T^3_{c_1} T^{-2}_{c_2} T^{3}_{c_2} \in \Gamma.
\end{align*}
\paragraph{Step 2:} Conjugates of $T_{c_2} T^3_{c_1} T^{-1}_{c_2}$:
\begin{align*}
T_{c_1} T_{c_2} T^3_{c_1} T^{-1}_{c_2} T^{-1}_{c_1} = T^3_{c_2} \in \Gamma\\
T^{-1}_{c_1} T_{c_2} T^3_{c_1} T^{-1}_{c_2} T_{c_1} = T^{-2}_{c_1} T^{3}_{c_2} T^{2}_{c_1} = T^{-3}_{c_1} (T_{c_1} T^3_{c_2} T^{-1}_{c_1}) T^3_{c_1}.
\end{align*}
The latter is in $\Gamma$ by step 1.
\paragraph{Step 3:} Conjugates of $T^2_{c_2} T^3_{c_1} T^{-2}_{c_2}$:
\begin{align*}
T^{-1}_{c_1} T^2_{c_2} T^3_{c_1} T^{-2}_{c_2} T_{c_1} = T^{-1}_{c_1} T^3_{c_2} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T^{-3}_{c_2} T_{c_1} = \\
(T^{-1}_{c_1} T^3_{c_2} T_{c_1}) (T^{-1}_{c_1} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T_{c_1}) (T^{-1}_{c_1} T^{-3}_{c_2} T_{c_1})
\end{align*}
The elements $(T^{-1}_{c_1} T^3_{c_2} T_{c_1}), (T^{-1}_{c_1} T^{-3}_{c_2} T_{c_1})$ are in $\Gamma$ by step 1.
\begin{align*}
T^{-1}_{c_1} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T_{c_1} = T^3_{c_2}
\end{align*}
Finally, since $T^2_{c_2} T^3_{c_1} T^{-2}_{c_2} = T^3_{c_2} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T^{-3}_{c_2}$, it suffices to check that $T_{c_1} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T^{-1}_{c_1}$ is in $\Gamma$. But we have that
\begin{align*}
T_{c_1} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T^{-1}_{c_1} = T^2_{c_1} T^3_{c_2} T^{-2}_{c_1} = T^3_{c_1} T^{-1}_{c_1} T^3_{c_2} T_{c_1} T^{-3}_{c_1}=T^3_{c_1} T_{c_2} T^3_{c_1} T^{-1}_{c_2} T^{-3}_{c_1} \in \Gamma.
\end{align*}
This proves the theorem.
\end{proof}
Since $T^2_{c_2} T^3_{c_1} T^{-2}_{c_2} = T^3_{c_2} T^{-1}_{c_2} T^3_{c_1} T_{c_2} T^{-3}_{c_2}$ we deduce that $\{ T^3_{c_1},T^3_{c_2}, T_{c_2} T^3_{c_1} T^{-1}_{c_2}, T^{-1}_{c_2} T^3_{c_1} T_{c_2} \}$ is also a generating set for $B_3[3]$.
\section{Symplectic groups and pure braid groups}
For $i \in \mathbb{N}$, let $p_i$ denote a prime number greater than 2. In this section we characterize $B_{2g+b}[m]$, where $m=2p_1 p_2...p_k$ and $m=4p_1 p_2...p_k$. Our strategy is to find a presentation for $PB_{2g+b}/B_{2g+b}[m]$. We recall that $\mathrm{H}_1(PB_{2g+b},\mathbb{Z}/2)$ is $\mathfrak{sp}_{2g}(\mathbb{Z}/2)$, if $b=1$ and $\mathrm{Ann}(y_{g+1})$ if $b=2$, where $\mathrm{Ann}(y_{g+1}) = \{ h \in \mathfrak{sp}_{2g+2}(\mathbb{Z}/2) \mid h(y_{g+1})=0 \}$ \cite{BM}. The generators of $B_{2g+b}$ are denoted by $\sigma_i$ and the generators of $PB_{2g+b}$ are denoted by $a_{i,j}$ as in Section 2.
\begin{theorem}
For $m=2p_1 p_2...p_k$, where $p_i\geq3$ are prime numbers, we have
\[
PB_{2g+b}/B_{2g+b}[m] =
\begin{cases}
\bigoplus^k_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i)
& \text{ if $b=1$}, \\
\bigoplus^k_{i=1} (\mathrm{Sp}_{2g+2}(\mathbb{Z}/p_i))_{y_{g+1}}
& \text{ if $b=2$}.\\
\end{cases}
\]
\label{PBQ1}
\end{theorem}
\begin{proof}
We set $m=2p_1 p_2 ... p_k$. We have the map
\[
\rho_m: B_{2g+b} \rightarrow
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z}) \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/m)
& \text{ if $b=1$}, \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}))_{y_{g+1}} \rightarrow (\mathrm{Sp}_{2g+2}(\mathbb{Z}/m))_{y_{g+1}}
& \text{ if $b=2$}\\
\end{cases}
\]
with kernel $B_{2g+b}[m]$. By Lemma \ref{newman} we know that
\[ \mathrm{Sp}_{2g}(\mathbb{Z}/m) = \mathrm{Sp}_{2g}(\mathbb{Z}/2) \bigoplus^{k}_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i). \]
If we restrict to the pure braid group, then the image of the map $PB_{2g+1} \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z})$ is the group $\mathrm{Sp}_{2g}(\mathbb{Z})[2]$, (see \cite[Theorem 3.3]{BM}). Furthermore, by Lemma \ref{Ha} we have that the map $\mathrm{Sp}_{2g}(\mathbb{Z})[2] \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/p_i)$ is surjective. Thus, the image of the map
$$\mathrm{Sp}_{2g}(\mathbb{Z}) \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/m) = \mathrm{Sp}_{2g}(\mathbb{Z}/2) \bigoplus^{k}_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i),$$
after we restrict to $\mathrm{Sp}_{2g}(\mathbb{Z})[2]$, is the group $\bigoplus^{k}_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i)$. Hence, we have a short exact sequence
\[ 1 \rightarrow B_{2g+1}[m] \rightarrow PB_{2g+1} \rightarrow \bigoplus^{k}_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i) \rightarrow 1.\]
Likewise, since the image of the map $PB_{2g+2} \rightarrow (\mathrm{Sp}_{2g+2}(\mathbb{Z}))_{y_{g+1}}$ is $(\mathrm{Sp}_{2g+2}(\mathbb{Z})[2])_{y_{g+1}}$ (see \cite[Theorem 3.3]{BM}), and since $(\mathrm{Sp}_{2g+2}(\mathbb{Z}/m))_{y_{g+1}} <\mathrm{Sp}_{2g+2}(\mathbb{Z}/m)$, we can apply Lemma \ref{newman} and end up with the following exact sequence.
\[ 1 \rightarrow B_{2g+2}[m] \rightarrow PB_{2g+2} \rightarrow \bigoplus^k_{i=1} (\mathrm{Sp}_{2g+2}(\mathbb{Z}/p_i))_{y_{g+1}} \rightarrow 1.\]\\
This completes the proof.
\end{proof}
In the following statement we slightly generalize Theorem \ref{PBQ1}. The symplectic Lie algebra $\mathfrak{sp}_{2n}(\mathbb{Z})$ consists of those elements $A \in \mathfrak{gl}_{2n}(\mathbb{Z})$ which satisfy the relation $A^T J + J A = 0$. We define also
\[ \mathrm{Ann}(u) = \{ m \in \mathfrak{sp}_{2n}(\mathbb{Z})\mid m(u)=0 \}, \]
where $\mathrm{Ann}(u)$ stands for the annihilator of the vector $u$. We have the following theorem.
\begin{theorem}
For $m=4p_1 p_2...p_k$, where $p_i\geq3$ are prime numbers, we have
\[
PB_{2g+b}/B_{2g+b}[m] =
\begin{cases}
\mathfrak{sp}_{2g}(\mathbb{Z}/2) \bigoplus^k_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i)
& \text{ if $b=1$}, \\
\mathrm{Ann}(y_{g+1}) \bigoplus^k_{i=1} (\mathrm{Sp}_{2g+2}(\mathbb{Z}/p_i))_{y_{g+1}}
& \text{ if $b=2$}.\\
\end{cases}
\]
\label{PBQ2}
\end{theorem}
\begin{proof}
We set $m=4p_1 p_2 ... p_k$. By Lemma \ref{newman} we have that
\[ \mathrm{Sp}_{2g}(\mathbb{Z}/m) = \mathrm{Sp}_{2g}(\mathbb{Z}/4) \bigoplus^{k}_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i). \]
We want to characterize the image of the map
\[
B_{2g+b} \rightarrow
\begin{cases}
\mathrm{Sp}_{2g}(\mathbb{Z}/4) \bigoplus^k_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i)
& \text{ if $b=1$}, \\
(\mathrm{Sp}_{2g+2}(\mathbb{Z}/4))_{y_{g+1}} \bigoplus^k_{i=1} (\mathrm{Sp}_{2g+2}(\mathbb{Z}/p_i))_{y_{g+1}}
& \text{ if $b=2$}.\\
\end{cases}
\]
For $b=1$ we only need to characterize the image of the restriction of the map above to $PB_{2g+b}$. In particular, we want to compute the image of the map $PB_{2g+1} \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/4)$. We know that the image of the map $PB_{2g+1} \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z})$ is $\mathrm{Sp}_{2g}(\mathbb{Z})[2]$. Consider the inclusion
\[ \mathrm{Sp}_{2g}(\mathbb{Z})[2] \hookrightarrow \mathrm{Sp}_{2g}(\mathbb{Z}). \]
We quotient the above inclusion by $\mathrm{Sp}_{2g}(\mathbb{Z})[4]$, and we get the following inclusion:
\[ \mathfrak{sp}_{2g}(\mathbb{Z}/2) \hookrightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/4). \]
We finally have
\[ PB_{2g+1} \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z})[2] \rightarrow \mathfrak{sp}_{2g}(\mathbb{Z}/2) < \mathrm{Sp}_{2g}(\mathbb{Z}/4). \]
Hence, the image of the map $PB_{2g+1} \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/4)$ is the abelian group $\mathfrak{sp}_{2g}(\mathbb{Z}/2)$. Thus, we have
\[ PB_{2g+b}/B_{2g+b}[m] \cong \mathfrak{sp}_{2g}(\mathbb{Z}/2) \bigoplus^k_{i=1} \mathrm{Sp}_{2g}(\mathbb{Z}/p_i). \]
For $b=2$, the maps
\[ PB_{2g+2} \rightarrow (\mathrm{Sp}_{2g+2}(\mathbb{Z})[2])_{y_{g+1}} \rightarrow \mathrm{Ann}(y_{g+1}) \]
are both surjective, \cite[Lemma 3.5]{BM}. But $\mathrm{Ann}(y_{g+1}) < (\mathrm{Sp}_{2g+2}(\mathbb{Z}/4))_{y_{g+1}}$, and thus, the image of the map
\[ PB_{2g+2} \rightarrow (\mathrm{Sp}_{2g+2}(\mathbb{Z}/4))_{y_{g+1}} \]
is the group $\mathrm{Ann}(y_{g+1})$. Thus, we get
\[ PB_{2g+2} / B_{2g+2}[m] \cong \mathrm{Ann}(y_{g+1}) \bigoplus^k_{i=1} (\mathrm{Sp}_{2g+2}(\mathbb{Z}/p_i))_{y_{g+1}}. \]
This completes the proof.
\end{proof}
In order to find generators for $B_{2g+1}[m]$, it suffices to find a presentation for $\mathrm{Sp}_{2g}(\mathbb{Z}/p)$ in terms of pure braids. In the next proposition we prove that $\mathrm{Sp}_{2g}(\mathbb{Z}/p)$ admits a presentation as a quotient of the pure braid group over some relations. These new relations are the generators for $B_{2g+1}[2p]$. Recall that the generators of $PB_n$ are defined to be $a_{i,j} = \sigma_{j-1}...\sigma_{i+1} \sigma^2_i \sigma^{-1}_{i+1}...\sigma^{-1}_{j-1}$, where $1 \leq i < j \leq n$.
\begin{proposition}
Fix a prime number $p$, and put $p=2k+1$. Let $H_n$ be the group with generators $ \{ a_{i,j} \}$ with defining relations as follows:
\label{SYPRE}
\begin{enumerate}
\item[PR1.] $\begin{aligned}[t]
a^{k}_{i,i+1} a^{k}_{i+1,i+2} a^{k}_{i,i+1}= a^{k}_{i+1,i+2} a^{k}_{i,i+1} a^{k}_{i+1,i+2},
\end{aligned}$
\item[PR2.] $\begin{aligned}[t]
a^p_{i,j} = 1,
\end{aligned}$
\item[PR3.] $\begin{aligned}[t]
(a_{1,2} a_{1,3} a_{2,3})^2=1 \: \mathrm{for} \, p>3,
\end{aligned}$
\item[PR4.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = a_{i,j}, \: 1 \leq r<s<i<j \leq n \: \mathrm{or} \: 1 \leq i<r<s<j \leq n,
\end{aligned}$
\item[PR5.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = a_{r,j} a_{i,j} a^{-1}_{r,j} , \: 1 \leq r<s=i<j \leq n,
\end{aligned}$
\item[PR6.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = (a_{i,j} a_{s,j})a_{i,j}(a_{i,j} a_{s,j})^{-1} , \: 1 \leq r=i<s<j \leq n,
\end{aligned}$
\item[PR7.] $\begin{aligned}[t]
a^{-1}_{r,s} a_{i,j} a_{r,s} = (a_{r,j} a_{s,j} a^{-1}_{r,j} a^{-1}_{s,j})a_{i,j}(a_{r,j} a_{s,j} a^{-1}_{r,j} a^{-1}_{s,j})^{-1} , \: 1 \leq r<i<s<j \leq n,
\end{aligned}$
\item[PR8.] $\begin{aligned}[t]
a_{i,j} = a^{k+1}_{j-1,j} a^{k+1}_{j-2,j-1}...a_{i,i+1} a^k_{i+1,i+2}...a^k_{j-1,j}, \: 1 < |i-j| \leq n,
\end{aligned}$
\item[PR9.] $\begin{aligned}[t]
a_{1,2} a_{1,3} a_{2,3} = C, \mathrm{where}
\end{aligned}$
\subitem $C = (a^{(p+1)/4}_{1,2} a^2_{2,3})^2$, if $(p+1)/2$ is even,
\subitem $C = a^{(p+3)/4}_{1,2} a^2_{1,3} a^{(p-1)/4}_{1,2} a^2_{2,3}$, if $(p+1)/2$ is odd.
\item[PR10.] $\begin{aligned}[t]
a_{1,2} a_{1,3} a_{1,4} a_{2,3} a_{2,4} a_{3,4} = B a_{1,4} B^{-1}, \mathrm{where}
\end{aligned}$
\subitem $B = a_{3,5} a_{4,5} a^{k/2}_{2,3} a^{-1}_{3,4}$, if $k$ is even,
\subitem $B = a_{3,5} a_{4,5} a^{k+1}_{2,3} a_{3,4}$, if $k$ is odd.
\end{enumerate}
\begin{flushleft}
If $n = 2g+1$ then $H_{n}$ is isomorphic to $\mathrm{Sp}_{2g}(\mathbb{Z}/p)$. On the other hand if $n= 2g+2$, then $H_{n}$ is isomorphic to $\mathrm{Sp}_{2g+2}(\mathbb{Z}/p)_{y_{g+1}}$.
\end{flushleft}
\end{proposition}
Note that relations $PR4$, $PR5$, $PR6$, $PR7$ are relations in the presentation of the pure braid group given in Chapter 4. We begin with the group $G_n$ defined in Theorem \ref{WA}, and using Tietze transformations, we obtain the presentation of $H_n$.\\
\begin{proof}
By Theorem \ref{WA} the group $G_n$ has the following presentation:
\[ G_n = \langle \sigma_i \vert \: R1,R2,R3,R4,R5,R6 \rangle, \]
where $1 \leq i < 2g+b$. Let $a_{i,j} = \sigma_{j-1}...\sigma_{i+1} \sigma^2_i \sigma^{-1}_{i+1}...\sigma^{-1}_{j-1}$ and denote this relation by $PR11$. Then include $PR11$ into the presentation of $G_n$ and add the generator $a_{i,j}$ to obtain
\[ \langle \sigma_i, a_{i,j} \vert \: R1,R2,R3,R4,R5,R6, PR11 \rangle.\]
Since $PB_n$ is a subgroup of $B_n$, this means that $R1$ and $R2$ can be used to deduce the relations $PR4$, $PR5$, $PR6$, $PR7$.
\[ \langle \sigma_i, a_{i,j} \vert \: R1,R2,R3,R4,R5,R6,PR4,PR5,PR6,PR7, PR11 \rangle.\]
The relation $R2$ can be deduced by $PR11$ and $R3$ and $PR4$:
\[ \langle \sigma_i, a_{i,j} \vert \: R1,R3,R4,R5,R6,PR2,PR4,PR5,PR6,PR7, PR11 \rangle.\]
We derive two more relations from $PR11$ and $R3$.
\[ \sigma_i = a^{k+1}_{i,i+1}, \quad \sigma^{-1}_{i} = a^k_{i,i+1}. \]
Then $PR1$ is equivalent to $R1$, $PR2$ is equivalent to $R3$, $PR3$ is equivalent to $R4$, $PR9$ is equivalent to $R5$, $PR10$ is equivalent to $R6$, and $PR11$ is equivalent to $PR8$. In other words,
\[ \langle \sigma_i, a_{i,j} \vert \: PR1,PR2,PR4,PR5,PR6,PR7,PR8,PR9,PR10,\sigma_i = a^{k+1}_{i,i+1}, \sigma^{-1}_{i} = a^k_{i,i+1} \rangle \]
Finally, for $1 \leq i < j \leq 2g+b$ we have that
\[ \langle a_{i,j} \vert \: PR1,PR2,PR4,PR5,PR6,PR7,PR8,PR9,PR10 \rangle, \]
which is the presentation of $H_n$.
\end{proof}
As an application of Proposition \ref{SYPRE}, we can obtain generators for $B_{2g+b}[2p]$.
\begin{corollary}
For $k = (p-1)/2$, the group $B_{2g+b}[2p]$ is normally generated by six types of elements:
\begin{align*}
a^p_{i,j},\\
(a_{1,2} a_{1,3} a_{2,3})^2,\\
a_{1,2} a_{1,3} a_{2,3} C^{-1},\\
a_{1,2} a_{1,3} a_{1,4} a_{2,3} a_{2,4} a_{3,4} B a^{-1}_{1,4} B^{-1},\\
a^{k}_{i,i+1} a^{k}_{i+1,i+2} a^{k}_{i,i+1} a^{-k}_{i+1,i+2} a^{-k}_{i,i+1} a^{-k}_{i+1,i+2},\\
a^{k+1}_{j-1,j} a^{k+1}_{j-2,j-1}...a_{i,i+1} a^k_{i+1,i+2}...a^k_{j-1,j} a^{-1}_{i,j}.\\
\end{align*}
\end{corollary}
Actually we can use Proposition \ref{SYPRE} to find normal generators for any $B_n[m]$, where $m$ is either $2 p_1 ... p_k$ or $4 p_1 ... p_k$ and $p_i\geq3$ are prime numbers.
\section{Symmetric quotients of congruence subgroups}
In this section we explore factor groups of congruence subgroups of braid groups. From Section 3 we know that $B_n[2] \cong PB_n$ and $B_n / B_n[2] \cong S_n$. In the next theorem we generalize the latter isomorphism.
\begin{theorem}
The quotient $B_n[p] / B_n[2p]$ is isomorphic to $S_n$.
\label{symquo}
\end{theorem}
Before we proceed to the proof of Theorem \ref{symquo}, we will prove the following lemma.
\begin{lemma}
The groups $B_n[2p]$ and $B_n[2] \cap B_n[p]$ are isomorphic.
\label{evenint}
\end{lemma}
\begin{proof}
It is obvious that $B_n[2p] < B_n[2] \cap B_n[p]$. By Lemma \ref{newman} we have the decomposition $\mathrm{Sp}_{2g}(\mathbb{Z}/2p) = \mathrm{Sp}_{2g}(\mathbb{Z}/2) \oplus \mathrm{Sp}_{2g}(\mathbb{Z}/p)$. By the homomorphism $\rho: B_n \rightarrow \mathrm{Sp}_{2g}(\mathbb{Z}/2p)$ we deduce that $\rho(B_n[2] \cap B_n[p])$ is trivial. Hence $B_n[2] \cap B_n[p] < B_n[2p]$.
\end{proof}
Now we can prove the main theorem of the section.
\begin{proof}[Proof of Theorem \ref{symquo}]
Denote by $s_i$ the transposition $i,i+1$, that is, the generators of $S_n$. We have the following presentation.
\[ S_n = \left< s_1,...,s_{n-1} \: | \: s^2_i=1, s_i s_{i+1} s_i=s_{i+1}s_is_{i+1}, s_i s_j = s_j s_i \: \mathrm{when} \: |i-j|>1 \right>. \]
Consider the natural epimorphism $\tau:B_n \rightarrow S_n$ defined by $\tau(\sigma_i)=s_i$. Fix a prime number $p>2$; then the restriction $\tau:B_n[p] \rightarrow S_n$ is a surjective homomorphism as well. Indeed, we have that $\tau(\sigma^p_i)=s^p_i = s_i$, and for any other generator $g \in B_n[p]$ we have $\tau(g)=1$. Finally, $\mathrm{ker}(\tau) = B_n[2] \cap B_n[p] = B_n[2p]$ by Lemma \ref{evenint}.
\end{proof}
Charalampos Stylianakis, department of Mathematics \& Statistics, University of Glasgow, Glasgow, G12 8QW, UK.\\
\textit{E-mail address:} \texttt{[email protected]}
\end{document} |
\begin{document}
\begin{center}
{\sf ~\\[14pt]
{\Large {\bf Boundary Element Procedure for 3D Electromagnetic Transmission Problems with Large Conductivity}}}
\end{center}
\footnotesize{
\begin{center}
M. Maischak$^1$, Z. Nezhi$^2$, J. E. Ospino$^3$, E.~P.~Stephan$^2$ \\[14pt]
$^1$Department of Mathematics Sciences, Brunel University, U.K. \\[3mm]
$^2$Institute for Applied Mathematics, Leibniz University of Hannover, Hannover, Germany \\[3mm]
$^3$Departamento de Matem\'{a}ticas y Estad\'{\i}stica, Fundaci\'{o}n Universidad del Norte, Barranquilla, Colombia. \\[3mm]
e-mail: [email protected]
\end{center}
}
\normalsize
\noindent
\begin{center}
{\Large{\bf Abstract:}}\\
\end{center}
We consider the scattering of time periodic electro-magnetic fields by metallic obstacles,
the eddy current problem. In this interface problem different sets of Maxwell equations
must be solved in the obstacle and outside, while the tangential components of both
electric and magnetic fields are continuous across the interface. We
describe an asymptotic procedure, which applies for large conductivity and
reflects the skin effect in metals. The key to our method is to introduce a special
integral equation procedure for the exterior boundary value problem
corresponding to perfect conductors. The asymptotic procedure
leads to a great reduction in complexity for the numerical solution since it involves solving
only the exterior boundary value problem. Furthermore we introduce a new fem/bem coupling procedure for the transmission problem and consider the implementation of the Galerkin elements for the perfect
conductor problem and present numerical experiments.\\
{\bf Key words}: Boundary element; asymptotic expansion; skin effect.\\
\section{Introduction}
\label{sec:s0}
We present asymptotic expansions with respect to inverse powers of conductivity
for the electrical and magnetical fields and report the algorithm of MacCamy
and Stephan \cite{MacCamyS} which allows to compute the expansion terms of the electrical field
in the exterior domain by solving successively only exterior problems (so-called perfect
conductor problems) with different data on the interface between conductor (metal) and
isolator (air). We solve these exterior problems numerically by applying the Galerkin
boundary element method to first kind boundary integral equations which were originally
introduced by MacCamy and Stephan in \cite{MacCamyP}. This system of integral equations
on the interface $\Sigma$ results from a single layer potential ansatz for the electrical field
and has unknown densities namely a vector field and a scalar function on $\Sigma$ which we
approximate with lower order Raviart Thomas elements and continuous piecewise linear
functions on a regular, triangular mesh on $\Sigma$. As in the two dimensional case, investigated
by Hariharan \cite{Hariharan,Hariharan1} and MacCamy and Stephan \cite{MacCamyE}, the asymptotic procedure
gives for the computation of the solution of the transmission problem a great reduction
in complexity since it involves solving only the exterior problem and furthermore only
a few expansion terms must be computed. We describe in detail how to implement the boundary element method
for the perfect conductor problem. As an alternative to the asymptotic expansions for
the solution of the transmission problem we introduce a new finite element/boundary element
Galerkin coupling procedure which converges quasi-optimally in the energy norm.
\section{Asymptotic expansion for large conductivity and skin effect}
\label{sec:s1}
Let $\Omega_{-}$ be a bounded region in $\mathbb{R}^{3}$ representing a metallic conductor and
$\Omega_{+}:=\mathbb{R}^{3}\backslash \overline{\Omega_{-}}$.
$\Omega_{+}$ representing air. The parameters $\varepsilon_{0}$,
$\mu_{0}$, $\sigma_{0}$ denote permittivity, permeability and conductivity in $\Omega_{+}$, which is assumed to
have zero conductivity, while $\varepsilon$, $\mu$, $\sigma$ denote the corresponding parameters in $\Omega_{-}$. Let the incident
electric and magnetic fields, $\textbf{E}^{0}$ and $\textbf{H}^{0}$,
satisfy Maxwell's equations in air. The total fields $\textbf{E}$
and $\textbf{H}$ satisfy the same Maxwell's equations as
$\textbf{E}^{0}$ and $\textbf{H}^{0}$ in $\Omega_{+}$ but a
different set of equations in $\Omega_{-}$. Across the interface $\Sigma:=\partial
\Omega_{-}=\partial \Omega_{+}$, which is assumed to be a regular analytic surface, the tangential components of both
$\textbf{E}$ and $\textbf{H}$ are continuous. $\textbf{E}-\textbf{E}^{0}$ and $\textbf{H}-\textbf{H}^{0}$ represent the scattered fields. All fields are time-harmonic
with frequency $\omega$. As in \cite{MacCamyS} we neglect conduction (displacement) currents in air (metal). Then, with appropriate scaling, the eddy current problem is (see \cite{Stratton,Weggler}).\\
\textit{Problem $(\textbf{P}_{\alpha \beta})$}: Given $\alpha>0$ and
$\beta>0$, find $\textbf{E}$ and $\textbf{H}$ such that:
\begin{equation}\label{s1}
\begin{array}{lll}
\mbox{curl}\hspace{0.1cm}\textbf{E}=\textbf{H}, & \mbox{curl}\hspace{0.1cm}\textbf{H}=\alpha^{2}\textbf{E} & \mbox{in}\hspace{0.3cm} \Omega_{+}\hspace{0.3cm}\mbox{(air)}\\\\
\mbox{curl}\hspace{0.1cm}\textbf{E}=\textbf{H}, & \mbox{curl}\hspace{0.1cm}\textbf{H}=i\beta^{2}\textbf{E} & \mbox{in}\hspace{0.3cm} \Omega_{-}\hspace{0.3cm}\mbox{(metal)}\\\\
\textbf{E}_{T}^{+}=\textbf{E}_{T}^{-}, & \textbf{H}_{T}^{+}=\textbf{H}_{T}^{-}, &
\mbox{on}\hspace{0.3cm} \Sigma.
\end{array}
\end{equation}
$$\dfrac{\partial}{\partial r}\textbf{E}(\textbf{x})-i\alpha\textbf{E}(\textbf{x})=O\left( \dfrac{1}{r^{2}}\right)\hspace{0.3cm}\mbox{with}\hspace{0.3cm}r=|\textbf{x}|,\hspace{0.3cm}\mbox{as}\hspace{0.3cm}|\textbf{x}|\rightarrow\infty. $$
Here $\alpha^{2}=\omega^{2}\mu_{0}\varepsilon_{0}$ and
$\beta^{2}=\omega\mu \sigma-i\omega^{2}\mu \varepsilon$ are
dimensionless parameters, and $\beta^{2}=\omega\mu \sigma>0$ if displacement currents are neglected in metal $(\varepsilon=0)$. The
subscript $T$ denotes tangential component and the superscripts plus
and minus denote limits from $\Omega_{+}$ and $\Omega_{-}$.\\
At higher frequencies the constant $\beta$ is usually large leading to the \textit{perfect conductor approximation}. Formally this
means solving only the $\Omega_{+}$ equation and requiring that
$\textbf{E}_{T}=0$ on $\Sigma$. If we let $\textbf{E}$ and
$\textbf{H}$ denote the scattered
fields, we obtain\\
\textit{Problem $(\textbf{P}_{\alpha \infty})$}: Given $\alpha>0$,
find $\textbf{E}$ and $\textbf{H}$ such that:
\begin{equation}\label{s2}
\begin{array}{rrr}
\mbox{curl}\hspace{0.1cm}\textbf{E}=\textbf{H}, & \mbox{curl}\hspace{0.1cm}\textbf{H}=\alpha^{2}\textbf{E} & \mbox{in}\hspace{0.3cm} \Omega_{+}\\\\
& \textbf{E}_{T}=-\textbf{E}_{T}^{0}, & \mbox{on}\hspace{0.3cm} \Sigma.
\end{array}
\end{equation}
\begin{remark}\label{rem1}
There exists at most one solution of problem $(\textbf{P}_{\alpha
\beta})$ for any $\alpha>0$ and $0<\beta\leq \infty$ (see \cite{Muller}).
\end{remark}
\begin{remark}
There exists a sequence $\{\alpha_{k}\}_{k=1}^{\infty}$, such that if $\alpha\neq\alpha_{k}$ then $\mbox{curl}\hspace{0.1cm}\textbf{E}=\textbf{H}$, $\mbox{curl}\hspace{0.1cm}\textbf{H}=\alpha^{2}\textbf{E}$ in $\Omega_{+}$, $\textbf{E}_{T}\equiv 0$ on $\Sigma$ implies $\textbf{E}\equiv\textbf{H}\equiv 0$ in $\Omega_{+}$.
\end{remark}
We are interested in an asymptotic expansion of the solution of problem $(\textbf{P}_{\alpha \beta})$ with respect to inverse powers of conductivity. With $\tau$ denoting the
distance from $\Sigma$ measured into $\Omega_{-}$ along the normal
to $\Sigma$ the expansion reads:
\begin{equation}\label{s3}
\textbf{E}\sim
\textbf{E}^{0}+\sum_{n=0}^{\infty}\textbf{E}_{n}\beta^{-n}
\hspace{0.3cm} \mbox{in}\hspace{0.3cm} \Omega_{+}
\end{equation}
\begin{equation}\label{s4}
\textbf{H}\sim
\textbf{H}^{0}+\sum_{n=0}^{\infty}\textbf{H}_{n}\beta^{-n}
\hspace{0.3cm} \mbox{in}\hspace{0.3cm} \Omega_{+}
\end{equation}
\begin{equation}\label{s5}
\textbf{E}\sim e^{-\sqrt{-i}\beta
\tau}\sum_{n=0}^{\infty}\textbf{E}_{n}\beta^{-n} \hspace{0.3cm}
\mbox{in}\hspace{0.3cm} \Omega_{-}
\end{equation}
\begin{equation}\label{s6}
\textbf{H}\sim e^{-\sqrt{-i}\beta
\tau}\sum_{n=0}^{\infty}\textbf{H}_{n}\beta^{-n} \hspace{0.3cm}
\mbox{in}\hspace{0.3cm} \Omega_{-}
\end{equation}
Here $\textbf{E}_{n}$ and $\textbf{H}_{n}$ are
independent of $\beta$ which is proportional to $\sqrt{\sigma}$. The exponential in (\ref{s5}) and (\ref{s6})
represents \textit{the skin effect}. Next we present from \cite{MacCamyS} these expansions for the half-space case where the various coefficients can be computed recursively. Note
$\textbf{E}_{0}$ and $\textbf{H}_{0}$ in (\ref{s3}) and (\ref{s4})
are simply the perfect conductor approximation, that is, the solution
of $(\textbf{P}_{\alpha \infty})$. $\textbf{E}_{n}$ and $\textbf{H}_{n}$ in (\ref{s3}) and (\ref{s4}) can be calculated successively by solving a sequence of problems of the same form as
$(\textbf{P}_{\alpha \infty})$ but with boundary values determined
from earlier coefficients. The $\textbf{E}_{n}$ and $\textbf{H}_{n}$
in (\ref{s5}) and (\ref{s6}) are obtained by solving ordinary
differential equations in the variable $x_{3}$.\\
For the convenience of the reader we present here for the half-space case $\Omega_{+}=\mathbb{R}^{3}_{+}$, i.e. $x_{3}>0$, and $\Omega_{-}=\mathbb{R}^{3}_{-}$, i.e. $x_{3}<0$, a formal procedure to compute $\textbf{E}_{n}$, $\textbf{H}_{n}$ which was given by MacCamy and Stephan \cite{MacCamyS}. They substitute (\ref{s3})-(\ref{s6}) into $(\textbf{P}_{\alpha \beta})$ for $\Sigma=\mathbb{R}^{2}$ and equate coefficients of $\beta^{-n}$. Here we give a short description of their approach.\\
Let $\chi=e^{\sqrt{-i}\beta x_{3}}$ and decompose fields $\textbf{F}$ into tangential and normal components
\begin{equation}\label{s13}
\textbf{F}=\mathfrak{F}+f\textbf{e}_{3},\hspace{0.3cm}
\mathfrak{F}=\mathcal{F}^{1}\textbf{e}_{1}+\mathcal{F}^{2}\textbf{e}_{2},
\end{equation}
with orthogonal component $\mathfrak{F}^{\bot}=\textbf{e}_{3}\times \mathfrak{F}$, and unit vectors $\textbf{e}_{i}$ ($i=1,2,3$).\\
Then one computes with the surface gradient $grad_{T}$ for the rotation
\begin{equation}\label{s17}
\mbox{curl}\hspace{0.1cm}\textbf{F}=\mathfrak{F}^{\bot}_{x_{3}}-(\mbox{grad}_{T}\hspace{0.1cm}f)^{\bot}-(\mbox{div}\hspace{0.1cm}\mathfrak{F}^{\bot})\textbf{e}_{3}
\end{equation}
and
\begin{equation}\label{s18}
\mbox{curl}(\chi \textbf{F})=\chi[\sqrt{-i}\beta
\mathfrak{F}^{\bot}+\mathfrak{F}^{\bot}_{x_{3}}-(\mbox{grad}_{T}\hspace{0.1cm}f)^{\bot}-(\mbox{div}\hspace{0.1cm}\mathfrak{F}^{\bot})\textbf{e}_{3}].
\end{equation}
Now setting $\textbf{E}_{n}=\mathcal{E}_{n}+\ell_{n}\textbf{e}_{3}$ one obtains for $x_{3}<0$
\begin{equation}\label{s20}
\mbox{curl}\hspace{0.1cm}\textbf{E}\sim\chi\{\sqrt{-i}\beta
\mathcal{E}^{\bot}_{0}+\sum_{n=0}^{\infty}[\sqrt{-i}\mathcal{E}^{\bot}_{n+1}+\mathcal{E}^{\bot}_{n,x_{3}}-(\mbox{grad}_{T}\hspace{0.1cm}\ell_{n})^{\bot}-(\mbox{div}\hspace{0.1cm}\mathcal{E}^{\bot}_{n})\textbf{e}_{3}]\beta^{-n}\},
\end{equation}
and
\begin{equation}\label{s21}
\begin{aligned}
\mbox{curl}\hspace{0.1cm}\mbox{curl}\hspace{0.1cm}\textbf{E}&\sim\chi\left\lbrace i\beta^{2}\mathcal{E}_{0}-\sqrt{-i}\beta
\mathcal{E}_{0,x_{3}}+\sqrt{-i}\beta\mbox{div}\hspace{0.1cm}\mathcal{E}_{0}\textbf{e}_{3}+\sum_{n=0}^{\infty}\left[ i\beta\mathcal{E}_{n+1}-\sqrt{-i}\mathcal{E}_{n+1,x_{3}}\right. \right. \\\\
&-\sqrt{-i}\mbox{div}\hspace{0.1cm}\mathcal{E}_{n+1}\textbf{e}_{3}-\sqrt{-i}\beta\mathcal{E}_{n,x_{3}} -\mathcal{E}_{n,x_{3},x_{3}}+\mbox{div}\hspace{0.1cm}\mathcal{E}_{n,x_{3}}\textbf{e}_{3}+\sqrt{-i}\beta\mbox{grad}\hspace{0.1cm}\ell_{n}\\\\
&\left.\left. +(\mbox{grad}_{T}\hspace{0.1cm}\ell_{n})_{x_{3}}+\mbox{div}\hspace{0.1cm}\mbox{grad}\hspace{0.1cm}\ell_{n}\textbf{e}_{3}\right] \beta^{-n}+\mbox{grad}\hspace{0.1cm}\mbox{div}\hspace{0.1cm}\beta^{-n}\textbf{e}_{3}\right\rbrace \\\\
&=\chi[i\beta^{2}\mathcal{E}_{0}+i\beta^{2}\ell_{0}\textbf{e}_{3}+i\beta\mathcal{E}_{1}+i\beta
\ell_{1}\textbf{e}_{3}+\sum_{n=0}^{\infty}(i\mathcal{E}_{n+2}+i\ell_{n+2}\textbf{e}_{3})\beta^{-n}]\sim i\beta^{2}\textbf{E}.
\end{aligned}
\end{equation}
Hence, equating coefficients of $\beta^{2}$ and $\beta$, respectively yields $\ell_{0}\equiv 0$, $i\ell_{1}=\sqrt{-i}\mbox{div}\hspace{0.1cm}\mathcal{E}_{0}$ and $\mathcal{E}_{0,x_{3}}=0$ implying $\mathcal{E}_{0}(x_{1},x_{2},x_{3})=\mathcal{E}_{0}(x_{1},x_{2},0)$.\\
As coefficients of $\beta^{0}$ one obtains
$$-\sqrt{-i}\mathcal{E}_{1,x_{3}}+\sqrt{-i}\mbox{grad}\hspace{0.1cm}\ell_{1}=0,$$
$$\sqrt{-i}\mbox{div}\hspace{0.1cm}\mathcal{E}_{1}+\mbox{div}\hspace{0.1cm}\mathcal{E}_{0,x_{3}}-\mbox{grad}\hspace{0.1cm}\mbox{div}\hspace{0.1cm}\mathcal{E}_{0}=i\ell_{2}.$$
Now the gauge condition $\mbox{div}\hspace{0.1cm}\mathcal{E}_{0}=0$ implies $\ell_{1}\equiv 0$ and $\mbox{div}\hspace{0.1cm}\mathcal{E}_{0,x_{3}}=0$, hence $\mathcal{E}_{1,x_{3}}=0$ and $\sqrt{-i}\mbox{div}\hspace{0.1cm}\mathcal{E}_{1}=i\ell_{2}.$\\
Thus $\mathcal{E}_{1}(x_{1},x_{2},x_{3})=\mathcal{E}_{1}(x_{1},x_{2},0)$.\\
Equating coefficients of $\beta^{-1}$ in (\ref{s21}) gives
$$-\sqrt{-i}\mathcal{E}_{2,x_{3}}-\sqrt{-i}\mathcal{E}_{2,x_{3}}+\sqrt{-i}\mbox{grad}\hspace{0.1cm}\ell_{2}=0,$$
$$\sqrt{-i}\mbox{div}\hspace{0.1cm}\mathcal{E}_{2}-\mbox{grad}\hspace{0.1cm}\mbox{div}\hspace{0.1cm}\mathcal{E}_{1}=i\ell_{3}.$$
Setting
\begin{equation}\label{s22}
\textbf{H}=\chi\sum_{n=0}^{\infty}(\mathcal{H}_{n}+h_{n}\textbf{e}_{3})\beta^{-n}
\end{equation}
MacCamy and Stephan obtain in \cite{MacCamyS} with $\ell_{1}=0$, $h_{0}=0$,
$\mathcal{E}_{0}=0$:\\
\begin{equation}
\sqrt{-i}\mathcal{E}_{1}^{\bot}+\mathcal{E}_{0,x_{3}}^{\bot}=\mathcal{H}_{0},\hspace{0.2cm}\sqrt{-i}\mathcal{H}_{0}^{\bot}=i\mathcal{E}_{1},\hspace{0.2cm}h_{0}=\mbox{div}\hspace{0.1cm}\mathcal{E}_{0}^{\bot}=0.
\end{equation}
and
\begin{equation}\label{s27}
\sqrt{-i}\mathcal{E}_{2}^{\bot}+\mathcal{E}_{1,x_{3}}^{\bot}=\mathcal{H}_{1},\hspace{0.3cm}
\sqrt{-i}\mathcal{H}_{1}^{\bot}+\mathcal{H}_{0,x_{3}}^{\bot}=i\mathcal{E}_{2}
\end{equation}
\begin{equation}\label{s28}
h_{1}=-\mbox{div}\hspace{0.1cm}\mathcal{E}_{1}^{\bot},\hspace{0.3cm}
-\mbox{div}\hspace{0.1cm}\mathcal{H}_{0}^{\bot}=i\ell_{2}.
\end{equation}
and
\begin{equation}\label{s29}
\begin{array}{lcl}
\mathcal{H}_{0,x_{3}}\equiv \mathcal{E}_{1,x_{3}}\equiv 0 \\\\
\mathcal{H}_{0}\equiv \sqrt{-i}\mathcal{E}_{1}^{\bot} & \mbox{in}
& x_{3}<0
\end{array}
\end{equation}
For $x_{3}>0$, the relation $\mbox{curl}\hspace{0.1cm}\textbf{E}=\textbf{H}$ yields
$$\mbox{curl}\hspace{0.1cm}\textbf{E}^{0}+\sum_{n=0}^{\infty}\mbox{curl}\hspace{0.1cm}\textbf{E}_{n}\beta^{-n}=\textbf{H}^{0}+\sum_{n=0}^{\infty}\textbf{H}_{n}\beta^{-n}$$
Equating coefficients of $\beta^{-n}$ one finds in $x_{3}>0$
$$\mbox{curl}\hspace{0.1cm}\textbf{E}^{0}=\textbf{H}^{0},\hspace{0.3cm}
\mbox{curl}\hspace{0.1cm}\textbf{E}_{n}=\textbf{H}_{n},\hspace{0.2cm}n\geq
0,$$ (and correspondingly, due to $\mbox{curl}\hspace{0.1cm}\textbf{H}=\alpha^{2}\textbf{E}$)
$$\mbox{curl}\hspace{0.1cm}\textbf{H}^{0}=\alpha^{2}\textbf{E}^{0},\hspace{0.3cm}
\mbox{curl}\hspace{0.1cm}\textbf{H}_{n}=\alpha^{2}\textbf{E}_{n},\hspace{0.2cm}n\geq
0.$$
With the above relations the recursion process goes as follows. First one uses (6.10) for
$n=0$ and (6.13) in \cite{MacCamyS} to conclude that
$$
\begin{array}{cccc}
\mbox{curl}\hspace{0.1cm}\textbf{E}_{0}=\textbf{H}_{0}, & \mbox{curl}\hspace{0.1cm}\textbf{H}_{0}=\alpha^{2}\textbf{E}_{0} & \mbox{in} & x_{3}>0 \\\\
\textbf{E}_{0}^{+}=-(\textbf{E}^{0}_{T})^{-}, & \mbox{on} & x_{3}=0. &
\end{array}
$$
Now $(\textbf{E}_{0},\textbf{H}_{0})$ is just the solution of
$(\textbf{P}_{\alpha\infty})$ which we can solve by the boundary integral equation procedure introduced by MacCamy and Stephan and revisited below. But from
$(\ref{s1})_{3}$ we obtain
\begin{equation}\label{s30}
\mathcal{H}_{0}^{-}=\mathcal{H}_{0}^{+}=(\textbf{H}_{0})_{T}^{+}\hspace{0.3cm}\mbox{on}\hspace{0.3cm}x_{3}=0.
\end{equation}
Now the right side of (\ref{s30}) is known and easily computed. Then $(\ref{s1})_{3}$ and (\ref{s30}) yield
\begin{equation}\label{s31}
(\textbf{E}_{1})_{T}^{+}=(\textbf{E}_{1})_{T}^{-}=\mathcal{E}_{1}^{-}=-\sqrt{i}(\mathcal{H}_{0}^{\bot})^{-}=-\sqrt{i}((\textbf{H}_{0})_{T}^{+})^{\bot}.
\end{equation}
Therefore, by (6.10) in \cite{MacCamyS}, we again have a solvable problem for
$(\textbf{E}_{1},\textbf{H}_{1})$ which is just like
$(\textbf{P}_{\alpha\infty})$, that is
$$\mbox{curl}\hspace{0.1cm}\textbf{E}_{1}=\textbf{H}_{1},\hspace{0.3cm}\mbox{curl}\hspace{0.1cm}\textbf{H}_{1}=\alpha^{2}\textbf{E}_{1}\hspace{0.3cm}\mbox{in}\hspace{0.3cm}x_{3}>0,$$
but with new boundary values for $\textbf{E}_{T}$ as given by
(\ref{s31}).\\
For the complete algorithm see \cite{MacCamyS}. Note, with $\lambda=\sqrt{-i}$ we have $\mathcal{E}_{1}^{-}(x_{1},x_{2},0)=-\dfrac{1}{\lambda}(\textbf{n}\times\mbox{curl}\hspace{0.1cm}\textbf{E}_{0})$ yielding in $x_{3}<0$
$$\textbf{E}_{1}(x_{1},x_{2},x_{3})=\int_{0}^{-\tau}e^{\lambda\beta\widetilde{x}_{3}}\mathcal{E}_{1}^{-}(x_{1},x_{2},0)d\widetilde{x}_{3}=-\dfrac{1}{\lambda^{2}\beta}(\textbf{n}\times\mbox{curl}\hspace{0.1cm}\textbf{E}_{0})[e^{-\lambda\beta\tau}-1]$$
A comparison with Peron's results (see Chapter 5 in \cite{Peron}) shows that $\textbf{W}_{j}^{cd}(y_{\alpha},h_{\rho})=e^{-\sqrt{-i}\beta\tau}\textbf{E}_{j}$, $j\geq 0$, in $\Omega^{cd}$, $\lambda Y_{3}=\sqrt{-i}\beta\tau$ and $w_{j}=\ell_{j}$. Furthermore we see that the first terms in the asymptotic expansion of the electrical field for a smooth surface $\Sigma$ derived by Peron coincide with those for the half-space $x_{3}=0$ investigated by MacCamy and Stephan, namely $\ell_{0}=w_{0}=0$, $\ell_{1}=w_{1}=0$, $\mathcal{E}_{0}=\textbf{W}_{0}^{cd}=0$.
\begin{remark}
Since, due to Theorem 5 in Chapter 3 of \cite{Ospino}, there exists only one solution of the electromagnetic transmission problem for a smooth interface, this solution can be computed by the boundary integral equation procedure below, when we assume that (\ref{s47}) holds. Then for the electrical field $\textbf{E}$ obtained via the boundary integral equation system we have that in the tubular region $\Omega_{\pm}(\delta)=\left\lbrace x\in\Omega_{\pm},\mbox{dist}(x,\Sigma)<\delta\right\rbrace $ there holds for the remainders $\textbf{E}_{m}^{is(cd)}$ obtained by truncating (\ref{s3}) and (\ref{s5}) at $n=m$
$$\lVert\textbf{E}_{m,\rho}^{is}\rVert_{\textbf{W}(\mbox{curl},\Omega^{is})}\leq C_{1}\rho^{-m-1}\hspace{0.2cm}\mbox{and}\hspace{0.2cm}\lVert\textbf{E}_{m,\rho}^{cd}\rVert\leq C_{2}e^{C_{3}\tau}$$
for constants $C_{1},C_{2},C_{3}>0$, independent of $\rho$.
\end{remark}
\section{A boundary integral equation method of the first kind}
\label{sec:s2}
Next we describe the integral equation procedure for
$(\textbf{P}_{\alpha \beta})$ and $(\textbf{P}_{\alpha \infty})$ from \cite{MacCamyS,Weggler}.\\
Throughout the section we require that
\begin{equation}\label{s47}
\alpha\neq\alpha_{k},\hspace{0.3cm}k=1,2,\ldots
\end{equation}
These methods, like others, are based on the Stratton-Chu formulas from \cite{Stratton}. To describe these we need some notation. We will let $\textbf{n}$ denote the exterior normal to $\Sigma$. Given any vector field $\textbf{v}$ defined on $\Sigma$ we have
\begin{equation}\label{s48}
\textbf{v}=\textbf{v}_{T}+v_{N}\textbf{n},\hspace{0.3cm}\textbf{v}_{T}=\textbf{n}\times(\textbf{v}\times\textbf{n})
\end{equation}
where $\textbf{v}_{T}$, which lies in the tangent plane, is the tangential component of $\textbf{v}$.\\
We define the simple layer potential $\mathcal{V}_{\kappa}$ for density $\psi$ (correspondingly for a vector field) for the surface $\Sigma$ by
\begin{equation}\label{s50}
\mathcal{V}_{\kappa}(\psi)=\int_{\Sigma}\psi(\textbf{y})G_{\kappa}(|\textbf{x}-\textbf{y}|)ds_{y},G_{\kappa}(r)=\dfrac{e^{i\kappa r}}{4\pi r}.
\end{equation}
For a vector field $\textbf{v}$ on $\Sigma$ we define $\mathcal{V}_{\kappa}(\textbf{v})$ by (\ref{s50}) with $\textbf{v}$ replacing $\psi$.\\
We collect in the following lemma some of the well-known results about the simple layer potential $\mathcal{V}_{\kappa}$.
\begin{remark}\label{slem1}
\cite[Lemma 2.1]{MacCamyS} For any complex $\kappa$, $0\leq\mbox{arg}\kappa\leq\dfrac{\pi}{2}$ and any continuous $\psi$ on $\Sigma$; there holds:
\begin{itemize}
\item[(i)] $\mathcal{V}_{\kappa}(\psi)$ is continuous in $\mathbb{R}^{3}$,
\item[(ii)] $\Delta\mathcal{V}_{\kappa}(\psi)=-\kappa^{2}\mathcal{V}_{\kappa}(\psi)$ in $\Omega_{-}\cup\Omega_{+}$,
\item[(iii)] $\mathcal{V}_{\kappa}(\psi)(\textbf{x})=O\left(\dfrac{e^{i\kappa |\textbf{x}|}}{|\textbf{x}|} \right) $ as $|\textbf{x}|\rightarrow\infty$,
\item[(iv)]
$$\left( \dfrac{\partial\mathcal{V}_{\kappa}(\psi)}{\partial\textbf{n}}(\textbf{x})\right)^{\pm}=\mp\dfrac{1}{2}\psi(\textbf{x})+\int_{\Sigma}K_{\kappa}(\textbf{x},\textbf{y})\psi(\textbf{y})ds_{y},\hspace{0.3cm}\mbox{on}\hspace{0.3cm}\Sigma, $$
where $K_{\kappa}(\textbf{x},\textbf{y})=O(|\textbf{x}-\textbf{y}|^{-1})$ as $\textbf{y}\rightarrow\textbf{x}$.
\item[(v)] $$(\textbf{n}\times\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\kappa}(\textbf{v})(\textbf{x}))^{\pm}=\pm\dfrac{1}{2}\textbf{v}(\textbf{x})+\dfrac{1}{2}\int_{\Sigma}\textbf{K}_{\kappa}(\textbf{x},\textbf{y})\textbf{v}(\textbf{y})ds_{y},$$
where the matrix function $\textbf{K}_{\kappa}$ satisfies $\textbf{K}_{\kappa}(\textbf{x},\textbf{y})=O(|\textbf{x}-\textbf{y}|^{-1})$ as $\textbf{y}\rightarrow\textbf{x}$.
\end{itemize}
\end{remark}
For problem $(\ref{s1})_{2}$, in $\Omega_{-}$ the Stratton-Chu formula gives
\begin{equation}\label{s51}
\begin{array}{l}
\textbf{E}=\mathcal{V}_{\sqrt{i}\beta}(\textbf{n}\times\textbf{H})-\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\sqrt{i}\beta}(\textbf{n}\times\textbf{E})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\sqrt{i}\beta}(\textbf{n}\cdot\textbf{E}),\\\\
\textbf{H}=\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\sqrt{i}\beta}(\textbf{n}\times\textbf{H})-\mbox{curl}\hspace{0.1cm}\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\sqrt{i}\beta}(\textbf{n}\times\textbf{E}).
\end{array}
\end{equation}
Similarly, for problem $(\ref{s1})_{1}$, in $\Omega_{+}$
\begin{equation}\label{s52}
\begin{array}{l}
\textbf{E}=\mathcal{V}_{\alpha}(\textbf{n}\times\textbf{H})-\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{n}\times\textbf{E})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{n}\cdot\textbf{E}),\\\\
\textbf{H}=\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{n}\times\textbf{H})-\mbox{curl}\hspace{0.1cm}\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{n}\times\textbf{E}).
\end{array}
\end{equation}
For given $\textbf{n}\times\textbf{H}$, $\textbf{n}\times\textbf{E}$ and $\textbf{n}\cdot\textbf{E}$, (\ref{s52}) yields a solution of $(\textbf{P}_{\alpha \infty})$. But we know only $\textbf{n}\times\textbf{E}$. The standard treatment of $(\textbf{P}_{\alpha \infty})$ starts from (\ref{s52}), sets $\textbf{n}\times\textbf{H}=0$ and $\textbf{n}\cdot\textbf{E}=0$ and replaces $-\textbf{n}\times\textbf{E}$ by an unknown tangential field $\textbf{L}$ yielding
\begin{equation}\label{s53}
\textbf{E}=\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{L}),\hspace{0.3cm}\textbf{H}=\mbox{curl}\hspace{0.1cm}\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{L}).
\end{equation}
Then the boundary condition yields an integral equation of the second kind for $\textbf{L}$ in the tangent space to $\Sigma$.\\
The method (\ref{s53}) is analogous to solving the Dirichlet problem for the scalar Helmholtz equation with a double layer potential. But having found $\textbf{L}$ it is hard to determine $\textbf{H}_{T}$, or equivalently $\textbf{n}\times\textbf{H}$, on $\Sigma$. Note calculating $\textbf{n}\times\textbf{H}$ on $\Sigma$ involves finding a second normal derivative of $\mathcal{V}_{\alpha}(\textbf{L})$.\\
The method in \cite{MacCamyS} for $(\textbf{P}_{\alpha \infty})$ is analogous to solving the scalar problems with a simple layer potential (see \cite{Hsiao}). MacCamy and Stephan use (\ref{s52}) but this time they set $\textbf{n}\times\textbf{E}=0$ and replace $\textbf{n}\times\textbf{H}$ and $\textbf{n}\cdot\textbf{E}$ by unknowns $\textbf{J}$ and $M$. Thus they take
\begin{equation}\label{s54}
\textbf{E}=\mathcal{V}_{\alpha}(\textbf{J})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M),\hspace{0.3cm}\textbf{H}=\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{J}).
\end{equation}
If they can determine $\textbf{J}$ then in this case they can use Remark \ref{slem1} to determine $\textbf{n}\times\textbf{H}$, hence $\textbf{H}_{T}$ on $\Sigma$.\\
With the surface gradient $\mbox{grad}_{T}\psi=(\mbox{grad}\hspace{0.1cm}\psi)_{T}$ on $\Sigma$, the boundary condition in (\ref{s1}) and (\ref{s54}) imply, by continuity of $\mathcal{V}_{\alpha}$,
$$\textbf{n}\times\textbf{E}=\textbf{n}\times\mathcal{V}_{\alpha}(\textbf{J})+\textbf{n}\times\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M)=-\textbf{n}\times\textbf{E}^{0}$$
or equivalently
\begin{equation}\label{s55}
\mathcal{V}_{\alpha}(\textbf{J})_{T}+\mbox{grad}_{T}\hspace{0.1cm}\mathcal{V}_{\alpha}(M)=-\textbf{E}^{0}_{T}.
\end{equation}
We note that for any field $\textbf{v}$ defined in a neighbourhood of $\Sigma$ one can define the surface divergence $\mbox{div}_{T}$ by
$$\mbox{div}\hspace{0.1cm}\textbf{v}=\mbox{div}_{T}\hspace{0.1cm}\textbf{v}+\dfrac{\partial v}{\partial\textbf{n}}\textbf{n}.$$
As shown in \cite[Lemma 2.3]{MacCamyS},
there holds for any differentiable tangential field $\textbf{v}$,
$\mbox{div}\hspace{0.1cm}\mathcal{V}_{\kappa}(\textbf{v})=\mathcal{V}_{\kappa}(\mbox{div}_{T}\hspace{0.1cm}\textbf{v})\hspace{0.3cm}\mbox{on}\hspace{0.3cm}\Sigma.$\\
Setting $\mbox{div}\textbf{E}=0$ on $\Sigma$ yields therefore with (\ref{s54})
$$0=\mbox{div}\hspace{0.1cm}\textbf{E}=\mbox{div}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{J})+\mbox{div}\hspace{0.1cm}\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M)$$
and $\mbox{div}\hspace{0.1cm}\mbox{grad}\mathcal{V}_{\alpha}(M)=-\alpha^{2}\mathcal{V}_{\alpha}(M)$
gives immediately
\begin{equation}\label{s56}
\mathcal{V}_{\alpha}(\mbox{div}_{T}\hspace{0.1cm}\textbf{J})-\alpha^{2}\mathcal{V}_{\alpha}(M)=0.
\end{equation}
\section{FE/BE coupling}
\label{sec:s3}
Next we present a coupling method for the interface problem $(P_{\alpha\beta})$ (see \cite{Ammari,Ammari1,Hitmair, Hitmair1,Ospino}). Integration by parts gives in $\Omega_{-}$ for the first equation in $(P_{\alpha\beta})$ with $\gamma_{N}\textbf{E}=(\mbox{curl}\hspace{0.1cm}\textbf{E})\times\textbf{n}$, $\gamma_{D}\textbf{E}=\textbf{n}\times(\textbf{E}\times\textbf{n})$
\begin{equation}
\int_{\Omega_{-}}\mbox{curl}\hspace{0.1cm}\textbf{E}\cdot\mbox{curl}\hspace{0.1cm}\overline{\textbf{v}}d\textbf{x}-\int_{\Omega_{-}}i\beta^{2}\textbf{E}\cdot\overline{\textbf{v}}d\textbf{x}-\int_{\Sigma}\gamma_{N}^{-}\textbf{E}\cdot\gamma_{D}^{-}\overline{\textbf{v}}ds=0.
\end{equation}
Therefore with $\gamma_{N}^{-}\textbf{E}=\gamma_{N}^{+}\textbf{E}+\gamma_{N}\textbf{E}^{0}$ and setting $\textbf{E}=\mathcal{V}_{\alpha}(\textbf{J})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M)$ in $\Omega_{+}$ we obtain
\small{$$\int_{\Omega_{-}}\mbox{curl}\hspace{0.1cm}\textbf{E}\cdot\mbox{curl}\hspace{0.1cm}\overline{\textbf{v}}d\textbf{x}-\int_{\Omega_{-}}i\beta^{2}\textbf{E}\cdot\overline{\textbf{v}}d\textbf{x}-\int_{\Sigma}\gamma_{N}^{+}(\mathcal{V}_{\alpha}(\textbf{J})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M))\cdot\gamma_{D}^{+}\overline{\textbf{v}}ds=\int_{\Sigma}\gamma_{N}\textbf{E}^{0}\cdot\gamma_{D}^{+}\overline{\textbf{v}}ds.$$}
Note that $\gamma_{N}^{+}(\mathcal{V}_{\alpha}(\textbf{J})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M))=\dfrac{1}{2}\textbf{J}+\dfrac{1}{2}\textbf{K}_{\alpha}(\textbf{J})$ where $\textbf{K}_{\alpha}$ is a smoothing operator.\\
As shown in \cite[Lemma 4.5]{MacCamyS} there exists a continuous map $J_{\alpha}(\textbf{J})_{T}$ from $\textbf{H}^{r}(\Sigma)$ into $H^{r+1}(\Sigma)$, for any real number $r$ with
\begin{equation}\label{b4}
\mbox{div}_{T}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{J})_{T}=\mathcal{V}_{\alpha}(\mbox{div}_{T}\hspace{0.1cm}\textbf{J})+J_{\alpha}(\textbf{J})_{T}.
\end{equation}
As shown in \cite{MacCamyP} the system of boundary operators on $\Sigma$ (which is equivalent to (\ref{s55}) and (\ref{s56}))
\begin{equation}\label{b5}
\begin{array}{ll}
\mathcal{V}_{\alpha}(\textbf{J})_{T}+\mbox{grad}_{T}\hspace{0.1cm}\mathcal{V}_{\alpha}(M)&=-\textbf{E}^{0}_{T}\\\\
-J_{\alpha}(\textbf{J})_{T}-(\Delta_{T}+\alpha^{2})\mathcal{V}_{\alpha}(M)&=\mbox{div}_{T}\hspace{0.1cm}\textbf{E}^{0}_{T}.
\end{array}
\end{equation}
is strongly elliptic as a mapping from $\textbf{H}^{-\frac{1}{2}}(\Sigma)\times H^{\frac{1}{2}}(\Sigma)$ into $\textbf{H}^{\frac{1}{2}}(\Sigma)\times H^{-\frac{1}{2}}(\Sigma)$, where $\mbox{grad}_{T}(\mbox{div}_{T})$ denote the surface gradient (surface divergence) and $\Delta_{T}$ the Laplace-Beltrami operator on $\Sigma$.\\
Now, our fem/bem coupling method is based on the variational formulation: For given incident field $\textbf{E}^{0}$ on $\Sigma$ find $\textbf{E}\in\textbf{H}(\mbox{curl},\Omega_{-})$, $\textbf{J}\in\textbf{H}^{-\frac{1}{2}}(\Sigma)$ and $M\in H^{\frac{1}{2}}(\Sigma)$ with
\small{
\begin{equation}\label{b6}
\begin{aligned}
\int_{\Omega_{-}}\mbox{curl}\hspace{0.1cm}\textbf{E}\cdot\mbox{curl}\hspace{0.1cm}\overline{\textbf{v}}d\textbf{x}-\int_{\Omega_{-}}i\beta^{2}\textbf{E}\cdot\overline{\textbf{v}}d\textbf{x}-\dfrac{1}{2}\int_{\Sigma}(\textbf{J}+\textbf{K}_{\alpha}(\textbf{J}))\cdot\gamma_{D}^{+}\overline{\textbf{v}}ds=\int_{\Sigma}\gamma_{N}\textbf{E}^{0}\cdot\gamma_{D}^{+}\overline{\textbf{v}}ds\\\\
\int_{\Sigma}\mathcal{V}_{\alpha}(\textbf{J})_{T}\cdot\overline{\textbf{j}}\hspace{0.1cm}dS+\int_{\Sigma}\mbox{grad}_{T}\mathcal{V}_{\alpha}(M)\cdot\overline{\textbf{j}}\hspace{0.1cm}dS=-\int_{\Sigma}\textbf{E}_{T}^{0}\cdot\overline{\textbf{j}}\hspace{0.1cm}dS,\\\\
-\int_{\Sigma}J_{\alpha}(\textbf{J})_{T}\overline{m}\hspace{0.1cm}dS-\int_{\Sigma}(\Delta_{T}+\alpha^{2})\mathcal{V}_{\alpha}(M)\overline{m}\hspace{0.1cm}dS=\int_{\Sigma}\mbox{div}_{T}\hspace{0.1cm}\textbf{E}^{0}_{T}\overline{m}dS,
\end{aligned}
\end{equation}
}
$\forall\textbf{v}\in\textbf{H}(\mbox{curl},\Omega_{-})$, $\textbf{j}\in\textbf{H}^{-\frac{1}{2}}(\Sigma)$, $m\in H^{\frac{1}{2}}(\Sigma)$.\\
In order to formulate a conforming Galerkin scheme for (\ref{b6}) we take subspaces $\textbf{H}^{1}_{h}\subset\textbf{H}(\mbox{curl},\Omega_{-})$, $\textbf{H}^{-\frac{1}{2}}_{h}\subset\textbf{H}^{-\frac{1}{2}}(\Sigma)$, $H^{\frac{1}{2}}_{h}\subset H^{\frac{1}{2}}(\Sigma)$ with mesh parameter $h$ and look for $\textbf{E}_{h}\in\textbf{H}^{1}_{h}$, $\textbf{J}_{h}\in\textbf{H}^{-\frac{1}{2}}_{h}$, $M_{h}\in H^{\frac{1}{2}}_{h}$ such that
\begin{equation}\label{b7}
\langle\mathcal{A}(\textbf{E}_{h},\textbf{J}_{h},M_{h}),(\textbf{v}_{h},\textbf{j}_{h},m_{h})\rangle=\langle\mathcal{F},(\textbf{v}_{h},\textbf{j}_{h},m_{h})\rangle
\end{equation}
where $\mathcal{A}$ is the operator given by the left hand side in (\ref{b6}), $\mathcal{F}=(\gamma_{N}\textbf{E}^{0},-\textbf{E}^{0}_{T},\mbox{div}_{T}\hspace{0.1cm}\textbf{E}^{0}_{T})$.
\begin{theorem}\label{ts1}
\begin{enumerate}
\item System (\ref{b6}) has a unique solution $(\textbf{E},\textbf{J},M)$ in $\textbf{X}=\textbf{H}(\mbox{curl},\Omega_{-})\times\textbf{H}^{-\frac{1}{2}}(\Sigma)\times H^{\frac{1}{2}}(\Sigma)$.
\item The Galerkin system (\ref{b7}) is uniquely solvable in $\textbf{X}_{h}=\textbf{H}^{1}_{h}\times\textbf{H}^{-\frac{1}{2}}_{h}\times H^{\frac{1}{2}}_{h}$ and there exists $C>0$, independent of $h$,
\begin{equation}\label{b8}
\begin{aligned}
\|\textbf{E}-\textbf{E}_{h}\|_{\textbf{H}(\mbox{curl},\Omega_{-})}+\|\textbf{J}-\textbf{J}_{h}\|_{\textbf{H}^{-\frac{1}{2}}(\Sigma)}+\|M-M_{h}\|_{H^{\frac{1}{2}}(\Sigma)}\\\\
\leq C\inf_{(\textbf{v},\textbf{j},m)\in\textbf{X}_{h}}\left\lbrace \|\textbf{E}-\textbf{v}\|_{\textbf{H}(\mbox{curl},\Omega_{-})}+\|\textbf{J}-\textbf{j}\|_{\textbf{H}^{-\frac{1}{2}}(\Sigma)}+\|M-m\|_{H^{\frac{1}{2}}(\Sigma)}\right\rbrace
\end{aligned}
\end{equation}
where $(\textbf{E},\textbf{J},M)$ and $(\textbf{E}_{h},\textbf{J}_{h},M_{h})$ solve (\ref{b6})-(\ref{b7}) respectively.
\end{enumerate}
\end{theorem}
\begin{proof}
First we note that system (\ref{b6}) is strongly elliptic in $\textbf{X}$ which follows by considering $\mathcal{A}$ as a system of pseudodifferential operators (cf. \cite{MacCamyP}). The only difference to \cite{MacCamyP} is that here we have additionally the first equation in (\ref{b6}). If we note $\Delta\textbf{E}=\mbox{curl}\mbox{curl}\textbf{E}-\mbox{grad}\mbox{div}\textbf{E}$ and take $\mbox{div}\textbf{E}=0$ we have that the principal symbol of $\mathcal{A}$ has the form (with $|\xi|^{2}=\xi_{1}^{2}+\xi_{2}^{2}$)
\begin{equation}\label{b9}
\sigma(\mathcal{A})(\xi)(\textbf{E},\textbf{J},M)^{t}=\left(
\begin{array}{cccccc}
|\xi|^{2}+\xi^{2}_{3} & 0 & 0 & 1 & 0 & 0\\\\
0 & |\xi|^{2}+\xi^{2}_{3} & 0 & 0 & 1 & 0\\\\
0 & 0 & |\xi|^{2}+\xi^{2}_{3} & 0 & 0 & 0\\\\
0 & 0 & 0 & \dfrac{1}{|\xi|} & 0 & i\xi_{1}\dfrac{1}{|\xi|}\\\\
0 & 0 & 0 & 0 & \dfrac{1}{|\xi|} & i\xi_{2}\dfrac{1}{|\xi|}\\\\
0 & 0 & 0 & 0 & 0 & |\xi|
\end{array}\right)\left( \begin{array}{c}
\\
E_{1}\\
\\
E_{2}\\
\\
E_{3}\\
\\
J^{1}\\
\\
J^{2}\\
\\
M\\
\\
\end{array}
\right)
\end{equation}
where $(E_{1},E_{2})=\textbf{E}_{T}$ and $E_{3}$ is perpendicular to $x_{3}=0$.\\
Obviously the two subblocks are strongly elliptic (see \cite{MacCamyP} for the lower subblock). Assuming that $(\alpha,\sqrt{i}\beta)$ is not an eigenvalue of $P_{\alpha\beta}$ we have existence and uniqueness of the exact solution. Due to the strong ellipticity of $\mathcal{A}$ there exists a unique Galerkin solution and the a priori error estimate holds due to the abstract results by Stephan and Wendland \cite{StephanW}.
\end{proof}
\section{Galerkin procedure for the perfect conductor problem ($P_{\alpha\infty}$)}
\label{sec:s4}
Next we consider the implementation of the Galerkin methods (see \cite{Christiansen,Ospino,Taskinen,Weggler}) and present corresponding numerical experiments for the integral equations (\ref{s55}) and (\ref{s56}). These experiments are performed with the program package \textit{Maiprogs}, cf. Maischak \cite{Mattias2,Mattias3}, which is a Fortran-based program package used for finite element and boundary element simulations \cite{Mattias4}. Initially developed by M. Maischak, \textit{Maiprogs} has been extended to electromagnetic problems by Teltscher \cite{Teltscher} and Leydecker \cite{Leydecker}.\\
We will investigate the exterior problem $(P_{\alpha\infty})$ by performing the integral equations procedure (\ref{s55}) and (\ref{s56}):\\
Testing against arbitrary functions $\textbf{j}\in
\textbf{H}^{-\frac{1}{2}}(\Sigma)$ and $m\in H^{\frac{1}{2}}(\Sigma)$ in
(\ref{s55}) and (\ref{s56}), we get
\begin{equation}\label{eg2}
\begin{aligned}
\int_{\Sigma}\mathcal{V}_{\alpha}(\textbf{J})_{T}\cdot\overline{\textbf{j}}\hspace{0.1cm}dS+\int_{\Sigma}\mbox{grad}_{T}\mathcal{V}_{\alpha}(M)\cdot\overline{\textbf{j}}\hspace{0.1cm}dS&=-\int_{\Sigma}\textbf{E}_{T}^{0}\cdot\overline{\textbf{j}}\hspace{0.1cm}dS,\\\\
-\int_{\Sigma}\mathcal{V}_{\alpha}(\mbox{div}_{T}\textbf{J})\cdot
\overline{m}\hspace{0.1cm}dS+\alpha^{2}\int_{\Sigma}\mathcal{V}_{\alpha}(M)\cdot
\overline{m}\hspace{0.1cm}dS&=0.
\end{aligned}
\end{equation}
Partial integration in the second term of $(\ref{eg2})_{1}$
$$\int_{\Sigma}\mbox{grad}_{T}\mathcal{V}_{\alpha}(M)\cdot\overline{\textbf{j}}\hspace{0.1cm}dS=-\int_{\Sigma}\mathcal{V}_{\alpha}(M)\cdot\mbox{div}_{T}\overline{\textbf{j}}\hspace{0.1cm}dS$$
shows that the formulation (\ref{eg2}) is symmetric: By definition
of symmetric bilinear forms $a$, $c$, of the bilinear form $b$ and
linear form $\ell$ through
\begin{equation*}
\begin{aligned}
a(\textbf{J},\textbf{j}):&=\int_{\Sigma}\mathcal{V}_{\alpha}(\textbf{J})_{T}\cdot\overline{\textbf{j}}\hspace{0.1cm}dS,\\\\
b(\textbf{J},m):&=-\int_{\Sigma}\mathcal{V}_{\alpha}(\mbox{div}_{T}\textbf{J})\cdot \overline{m}\hspace{0.1cm}dS\\\\
&=-\int_{\Sigma}\mathcal{V}_{\alpha}(m)\cdot\mbox{div}_{T}\overline{\textbf{J}}\hspace{0.1cm}dS,\\\\
c(M,m):&=\alpha^{2}\int_{\Sigma}\mathcal{V}_{\alpha}(M)\cdot \overline{m}\hspace{0.1cm}dS,\\\\
\ell(\textbf{j}):&=-\int_{\Sigma}\textbf{E}_{T}^{0}\cdot\overline{\textbf{j}}\hspace{0.1cm}dS
\end{aligned}
\end{equation*}
the variational formulation has the form: Find $(\textbf{J},M)\in
\textbf{H}^{-\frac{1}{2}}(\Sigma)\times H^{\frac{1}{2}}(\Sigma)$ such that
\begin{equation}\label{eg3}
\begin{array}{l}
a(\textbf{J},\textbf{j})+b(\textbf{j},M)=\ell(\textbf{j})\\\\
b(\textbf{J},m)+c(M,m)=0
\end{array}
\end{equation}
for all $(\textbf{j},m)\in \textbf{H}^{-\frac{1}{2}}(\Sigma)\times
H^{\frac{1}{2}}(\Sigma)$.\\
We now proceed to finite dimensional subspaces
$\mathcal{R}_{h}\subset \textbf{H}^{-\frac{1}{2}}(\Sigma)$ of dimension $n$
and $\mathcal{M}_{h}\subset H^{\frac{1}{2}}(\Sigma)$ of dimension
$m$, and seek approximations $\textbf{J}_{h}\in\mathcal{R}_{h}$ and
$M_{h}\in\mathcal{M}_{h}$ for $\textbf{J}$ and $M$, such that
\begin{equation}\label{eg4}
\begin{array}{l}
a(\textbf{J}_{h},\textbf{j})+b(\textbf{j},M_{h})=\ell(\textbf{j}),\\\\
b(\textbf{J}_{h},m)+c(M_{h},m)=0
\end{array}
\end{equation}
for all $\textbf{j}\in\mathcal{R}_{h}$ and $m\in\mathcal{M}_{h}$.\\
Let $\{\mbox{\boldmath$\psi$\unboldmath}_{i}\}_{i=1}^{n}$ be a basis of $\mathcal{R}_{h}$ and
$\{\varphi_{j}\}_{j=1}^{m}$ be a basis of $\mathcal{M}_{h}$.
$\textbf{J}_{h}$ and $M_{h}$ are of the forms
\begin{equation}\label{eg5}
\textbf{J}_{h}:=\sum_{i=1}^{n}\lambda_{i}\mbox{\boldmath$\psi$\unboldmath}_{i}\hspace{0.2cm}\mbox{and}\hspace{0.2cm}M_{h}:=\sum_{j=1}^{m}\mu_{j}\varphi_{j}.
\end{equation}
Inserting (\ref{eg5}) in (\ref{eg4}) provides
\begin{equation}\label{eg6}
\begin{aligned}
\sum_{i=1}^{n}\lambda_{i}a(\mbox{\boldmath$\psi$\unboldmath}_{i},\mbox{\boldmath$\psi$\unboldmath}_{k})+\sum_{j=1}^{m}\mu_{j}b(\mbox{\boldmath$\psi$\unboldmath}_{k},\varphi_{j})&=\ell(\mbox{\boldmath$\psi$\unboldmath}_{k})\\\\
\sum_{i=1}^{n}\lambda_{i}b(\mbox{\boldmath$\psi$\unboldmath}_{i},\varphi_{l})+\sum_{j=1}^{m}\mu_{j}c(\varphi_{j},\varphi_{l})&=0
\end{aligned}
\end{equation}
for all $\mbox{\boldmath$\psi$\unboldmath}_{k}$ and $\varphi_{l}$, $1\leq k\leq n$, $1\leq l\leq
m$.\\
With matrices and vectors
\begin{equation}\label{eg7}
\begin{array}{l}
A:=(a(\mbox{\boldmath$\psi$\unboldmath}_{i},\mbox{\boldmath$\psi$\unboldmath}_{k}))_{i,k}\in\mathbb{C}^{n\times n},\\\\
B:=(b(\mbox{\boldmath$\psi$\unboldmath}_{i},\varphi_{l}))_{i,l}\in\mathbb{C}^{n\times m},\\\\
C:=(c(\varphi_{j},\varphi_{l}))_{j,l}\in\mathbb{C}^{m\times m},\\\\
\mbox{\boldmath$\lambda$\unboldmath}:=(\lambda_{i})_{i}\in\mathbb{C}^{n},\\\\
\mbox{\boldmath$\mu$\unboldmath}:=(\mu_{j})_{j}\in\mathbb{C}^{m},\\\\
\mbox{\boldmath$\ell$\unboldmath}:=(\ell(\mbox{\boldmath$\psi$\unboldmath}_{k}))_{k}\in\mathbb{C}^{n}.
\end{array}
\end{equation}
(\ref{eg6}) has also the form
\begin{equation}\label{eg8}
\left(\begin{array}{cc}
A&B^{t}\\
B&C
\end{array}\right)\left(\begin{array}{c}
\mbox{\boldmath$\lambda$\unboldmath}\\
\mbox{\boldmath$\mu$\unboldmath}
\end{array}\right)=\left(\begin{array}{c}
\mbox{\boldmath$\ell$\unboldmath}\\
0
\end{array}\right).
\end{equation}
We have considered with $\{\mbox{\boldmath$\psi$\unboldmath}_{i}\}_{i=1}^{n}$ a basis of $\mathcal{R}_{h}$ and $\{\varphi_{j}\}_{j=1}^{m}$ a basis of $\mathcal{M}_{h}$. These functions are chosen as piecewise polynomials. To obtain these bases, we consider suitable basis functions locally on the elements of a grid, i.e. on each component grid.\\
We start from a grid
$$\{\Sigma_{k}\}_{k=1}^{N}\hspace{0.3cm}\mbox{with}\hspace{0.3cm}\bigcup_{1\leq k\leq N}\Sigma_{k}=\Sigma$$
with $N$ elements, and let $\{\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{i}\}_{i=1}^{\widehat{n}}$ and $\{\widehat{\varphi}_{j}\}_{j=1}^{\widehat{m}}$ denote bases on a square reference element $\widehat{\Sigma}$. The local basis functions on an element $\Sigma_{k}$ are denoted by $\{\mbox{\boldmath$\psi$\unboldmath}_{i}\}_{i=1}^{n_{k}}$ and $\{\varphi_{j}\}_{j=1}^{m_{k}}$, respectively.\\
Therefore, one first has to calculate
$$A:=(a(\mbox{\boldmath$\psi$\unboldmath}_{j_{s}},\mbox{\boldmath$\psi$\unboldmath}_{i_{z}}))_{i_{z},j_{s}}\in \mathbb{C}^{n\times n},$$
where $\mbox{\boldmath$\psi$\unboldmath}_{j_{s}}$ or $\mbox{\boldmath$\psi$\unboldmath}_{i_{z}}$ are the basis functions of
$\mathcal{R}_{h}$ and
$$a(\mbox{\boldmath$\psi$\unboldmath}_{j_{s}},\mbox{\boldmath$\psi$\unboldmath}_{i_{z}})=\int_{\Sigma}\mathcal{V}_{\alpha}(\mbox{\boldmath$\psi$\unboldmath}_{j_{s}})_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{i_{z}}\hspace{0.1cm}dS=\sum_{k=1}^{N}\int_{\Sigma_{k}}\mathcal{V}_{\alpha}(\mbox{\boldmath$\psi$\unboldmath}_{j_{s}})_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{i_{z}}\hspace{0.1cm}dS,$$
We test each local basis function against any other local basis function and sum the result to the test value of the global basis functions, which include these local basis functions.\\
Let $I_{N}=\{1,\ldots,N\}$ denote the index set for the grid elements, $I_{\widehat{n}}=\{1,\ldots,\widehat{n}\}$ the index set for the basis functions on the reference element and $I_{n}=\{1,\ldots,n\}$ the index set for the global basis functions.\\
Let $\mbox{\boldmath$\zeta$\unboldmath}:I_{N}\times I_{\widehat{n}}\rightarrow I_{n}$ be the mapping from local to global basis functions such that $\mbox{\boldmath$\zeta$\unboldmath}(k,i)=j$ if the local basis function $\mbox{\boldmath$\psi$\unboldmath}_{k,i}$ is a component of the global basis function $\mbox{\boldmath$\psi$\unboldmath}_{j}$.\\
Let $\mbox{\boldmath$\zeta$\unboldmath}^{-1}(i)$ denote the set of all pairs $(k,j)$ with $\mbox{\boldmath$\zeta$\unboldmath}(k,j)=i$; then
$$\int_{\Sigma}\mathcal{V}_{\alpha}(\mbox{\boldmath$\psi$\unboldmath}_{j_{s}})_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{i_{z}}\hspace{0.1cm}dS=\sum_{\substack{(k,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(i_{z})}}\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(j_{s})}}\int_{\Sigma_{k}}\mathcal{V}_{\alpha}(\mbox{\boldmath$\psi$\unboldmath}_{l,j})_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{k,i}\hspace{0.1cm}dS$$
$$=\sum_{\substack{(k,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(i_{z})}}\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(j_{s})}}\int_{\Sigma_{k}}\int_{\Sigma_{l}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)(\mbox{\boldmath$\psi$\unboldmath}_{l,j}(\textbf{y}))^{t}\cdot\mbox{\boldmath$\psi$\unboldmath}_{k,i}(\textbf{x})\hspace{0.1cm}dS_{\textbf{y}}\hspace{0.1cm}dS_{\textbf{x}}.$$
We are dealing in this implementation with Raviart-Thomas basis
functions. The transformation of these functions requires a
Peano transformation
$\mbox{\boldmath$\psi$\unboldmath}_{k,i}=\dfrac{1}{\lvert\mbox{det}\hspace{0.1cm}A_{k}\rvert}A_{k}\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{i}$. Thus, if $A_{k}=(\textbf{a}_{1},\textbf{a}_{2})$, $\mbox{det}A_{k}$ is calculated by $\mbox{det}A_{k}=(\textbf{a}_{1}\times\textbf{a}_{2})\cdot\dfrac{\textbf{a}_{1}\times\textbf{a}_{2}}{\lVert\textbf{a}_{1}\times\textbf{a}_{2}\rVert}$.
The Peano-transformation of the local basis functions to the basic
functions on the reference element then gives
\begin{equation}\label{eg9}
\begin{aligned}
I&=\sum_{\substack{(k,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(i_{z})}}\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(j_{s})}}\int_{\Sigma_{k}}\int_{\Sigma_{l}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)(\mbox{\boldmath$\psi$\unboldmath}_{l,j}(\textbf{y}))^{t}\cdot\mbox{\boldmath$\psi$\unboldmath}_{k,i}(\textbf{x})\hspace{0.1cm}dS_{\textbf{y}}\hspace{0.1cm}dS_{\textbf{x}}\\
&=\sum_{\substack{(k,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(i_{z})}}\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(j_{s})}}\int_{\widehat{\Sigma}}\int_{\widehat{\Sigma}}\dfrac{G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)}{\lvert\mbox{det}\hspace{0.1cm}A_{k}\cdot\mbox{det}\hspace{0.1cm}A_{l}\rvert}(\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{i}(\widehat{\textbf{x}}))^{t}(A_{k})^{t}\cdot
A_{l}\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{j}(\widehat{\textbf{y}})\hspace{0.1cm}dS_{\widehat{\textbf{y}}}\hspace{0.1cm}dS_{\widehat{\textbf{x}}}
\end{aligned}
\end{equation}
with $\textbf{x}=\textbf{a}_{k}+A_{k}\widehat{\textbf{x}}$ and $\textbf{y}=\textbf{a}_{l}+A_{l}\widehat{\textbf{y}}$, where $\widehat{\Sigma}$ denotes the reference element.\\
The calculation of the integrals with Helmholtz kernel $G_{\alpha}$
is not exact. We consider the expansion of the Helmholtz kernel in a Taylor
series. There holds
\begin{equation*}
\begin{aligned}
G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert) &=\dfrac{1}{4\pi}
\frac{e^{\alpha
i\lvert\textbf{x}-\textbf{y}\rvert}}{\lvert\textbf{x}-\textbf{y}\rvert}\\
&=\dfrac{1}{4\pi}\left[ \frac{1}{\lvert\textbf{x}-\textbf{y}\rvert}+\alpha i+\frac{(\alpha
i)^2}{2}\lvert\textbf{x}-\textbf{y}\rvert+\dots\right].
\end{aligned}
\end{equation*}
The first term is singular for $\textbf{x}=\textbf{y}$ and the corresponding integral is treated by analytic evaluation in \textit{Maiprogs}, cf. Maischak \cite{Mattias1,Mattias2,Mattias3}, but the integrals of all other summands can be calculated sufficiently well by Gaussian quadrature.\\
We compute
\begin{equation}\label{eq0}
\begin{aligned}
b(\mbox{\boldmath$\psi$\unboldmath}_{i_{z}},\varphi_{j_{s}})&= -\int_\Sigma
\mathcal{V}_{\alpha}(\nabla_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{i_{z}})\cdot\varphi_{j_{s}}
\hspace{0.1cm}dS\\
&=-\sum_{\substack{(k,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}_{\psi}(i_{z})}}\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}_{\varphi}(j_{s})}}
\int_{\Sigma_l}\int_{\Sigma_k}
G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)\nabla_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{k,i}(\textbf{y})
\cdot\varphi_{l,j}(\textbf{x})
\hspace{0.1cm}dS_{\textbf{y}}\,\text{d}S_{\textbf{x}}.
\end{aligned}
\end{equation}
with $\mbox{\boldmath$\zeta$\unboldmath}_{\psi}=\mbox{\boldmath$\zeta$\unboldmath}$ as described above, and $\mbox{\boldmath$\zeta$\unboldmath}_{\varphi}$ the analogously defined map for the basis functions of $\mathcal{M}_{h}$.\\
While a transformation of the scalar basis functions is not required, the transformation of the surface divergence of Raviart-Thomas elements is carried out by $\nabla_{T}\cdot\mbox{\boldmath$\psi$\unboldmath}_{k,i}=\frac{1}{\lvert\mbox{det}A_{k}\rvert}\widehat{\nabla}\cdot\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{i}$ and we have
\begin{equation}\label{eg18a}
\begin{aligned}
b(\mbox{\boldmath$\psi$\unboldmath}_{i_{z}},\varphi_{j_{s}})=-\sum_{\substack{(k,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}_{\psi}(i_{z})}}\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}_{\varphi}(j_{s})}}
\int_{\widehat{\Sigma}}\int_{\widehat{\Sigma}}\dfrac{G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)}{\lvert\mbox{det}A_{k}\rvert}\widehat{\nabla}\cdot\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{k,i}(\widehat{\textbf{y}})
\cdot\widehat{\varphi}_{l,j}(\widehat{\textbf{x}})
\hspace{0.1cm}dS_{\widehat{\textbf{y}}}\,\text{d}S_{\widehat{\textbf{x}}}
\end{aligned}
\end{equation}
with $\textbf{y}=\textbf{a}_{k}+A_{k}\widehat{\textbf{y}}$ and $\textbf{x}=\textbf{a}_{l}+A_{l}\widehat{\textbf{x}}$.\\
The calculation of $c(\varphi_{i},\varphi_{j})$ is similar to the above-mentioned case.\\
The calculation of the right hand side appears simple at first glance,
since there are no single layer potential terms. However, we must compute the right hand side with quadrature.\\
The quadrature of an integral over $\textbf{f}$ on the reference
element is determined by the quadrature points
$\widehat{\textbf{x}}_{x,y}$, and the associated weights
$w_{x,y}=w_{x}\cdot w_{y}$, which are processed in $x$ and $y$
direction. We perform the two-dimensional quadrature as a combination of one-dimensional
quadratures in each $x$ and $y$ direction, and we use here the
weights from the already implemented one-dimensional quadrature
formula. With $\widetilde{n}_{x}$ quadrature points in
$x$-direction, and $\widetilde{n}_{y}$ quadrature points in
$y$-direction, then the quadrature formula reads:
\begin{equation}\label{eg24}
\mathcal{Q}_{\widehat{\Sigma}}(\textbf{f})=\sum_{i=1}^{\widetilde{n}_{x}}\sum_{j=1}^{\widetilde{n}_{y}}\textbf{f}(\widehat{\textbf{x}}_{i,j})\cdot
w_{i}w_{j}.
\end{equation}
The quadrature points on the square reference element and the corresponding
weights for Gaussian quadrature are implemented in \textit{Maiprogs} already. For triangular elements,
we use the Duffy transformation.\\
We will now calculate the right hand side in the Galerkin formulation, i.e.
the linear form $\ell$, applied to the basis functions $\mbox{\boldmath$\psi$\unboldmath}_{i}$,
$i=1,\ldots,n$. The quadrature takes place on the reference
element. We decompose the global basis functions into local basis functions
and then use the Peano-transformation for the Raviart-Thomas functions. We therefore obtain
\begin{equation*}
\begin{aligned}
\ell(\mbox{\boldmath$\psi$\unboldmath}_{i_{r}})&=-\int_\Sigma (\textbf{E}^0_{T}(\textbf{x}))^{t}
\cdot\mbox{\boldmath$\psi$\unboldmath}_{i_{r}}(\textbf{x})\hspace{0.1cm}dS_{\textbf{x}}\\
&=-\sum_{\substack{(k,i)\in\\\zeta^{-1}(i_{r})}}
\int_{\widehat{\Sigma}} (\textbf{E}^0_{T}(\textbf{x}))^{t} \cdot
A_{k}\cdot\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{k,i}(\widehat{\textbf{x}})\hspace{0.1cm}dS_{\widehat{\textbf{x}}}
\end{aligned}
\end{equation*}
with $\textbf{x}=\textbf{a}_k+A_k\widehat{\textbf{x}}$. Applying
(\ref{eg24}) leads with
$\widetilde{n}_{x}=\widetilde{n}_{y}:=\widetilde{n}$ to
\begin{equation}\label{eg26}
\mathcal{Q}(\ell(\mbox{\boldmath$\psi$\unboldmath}_{i_{r}}))=-\sum_{\substack{(k,i)\in\\\zeta^{-1}(i_{r})}}\sum_{i_{1}=1}^{\widetilde{n}}\sum_{i_{2}=1}^{\widetilde{n}}(\textbf{E}^0_{T}(\textbf{x}_{i_{1},i_{2}}))^{t}
\cdot
A_{k}\cdot\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{k,i}(\widehat{\textbf{x}}_{i_{1},i_{2}})\cdot
w_{i_{1}}w_{i_{2}}
\end{equation}
with $\textbf{x}_{i,j}=\textbf{a}_k+A_k\widehat{\textbf{x}}_{i,j}$.
As before, the task is carried out by looping through all grid
components, and the values are added to the entries for each of its
base function.\\
The electrical field can be calculated by
\begin{equation}\label{eg34}
\textbf{E}_{h}=\mathcal{V}_{\alpha}(\textbf{J}_{h})+\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(M_{h}).
\end{equation}
We have for the first term in (\ref{eg34}) with $(\ref{eg5})_{1}$
\begin{equation}\label{eg36}
\mathcal{V}_{\alpha}(\textbf{J}_{h})(\textbf{x})=\sum_{i=1}^{n}\lambda_{i}\int_{\Sigma}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)\mbox{\boldmath$\psi$\unboldmath}_{i}(\textbf{y})dS_{\textbf{y}}.
\end{equation}
Then using Peano-transformation we have
\begin{equation}\label{eg37}
\begin{aligned}
\mathcal{V}_{\alpha}(\mbox{\boldmath$\psi$\unboldmath}_{i_{s}})(\textbf{x})&=\int_{\Sigma}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)\mbox{\boldmath$\psi$\unboldmath}_{i_{s}}(\textbf{y})dS_{\textbf{y}}\\
&=\sum_{\substack{(l,i)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(i_{s})}}\int_{\widehat{\Sigma}}\dfrac{G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)}{\lvert\mbox{det}\hspace{0.1cm}A_{l}\rvert}
A_{l}\widehat{\mbox{\boldmath$\psi$\unboldmath}}_{i}(\widehat{\textbf{y}})\hspace{0.1cm}dS_{\widehat{\textbf{y}}}.
\end{aligned}
\end{equation}
For the second term in (\ref{eg34}) we have
\begin{equation}\label{eg42}
\begin{aligned}
\mbox{grad}\hspace{0.1cm}\mathcal{V}_{\alpha}(\varphi_{j_{z}})(\textbf{x})&=\sum_{\substack{(l,j)\in\\\mbox{\boldmath$\zeta$\unboldmath}^{-1}(j_{z})}}\int_{\widehat{\Sigma}}\mbox{grad}_{\textbf{x}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)\widehat{\varphi}_{j}(\widehat{\textbf{y}})\hspace{0.1cm}dS_{\widehat{\textbf{y}}}
\end{aligned}
\end{equation}
The calculation of $\textbf{H}_{T}^{\pm}$ is done as follows (compare Remark \ref{slem1} $(v)$)
\begin{equation}\label{eg43}
\textbf{H}_{T}^{\pm}=\left[ \textbf{n}\times\mbox{curl}\hspace{0.1cm}\mathcal{V}_{\alpha}(\textbf{J})\right]^{\pm}=\pm\dfrac{1}{2}\textbf{J}(\textbf{x})+\dfrac{1}{2}\textbf{n}(\textbf{x})\times\int_{\Sigma}\mbox{grad}_{\textbf{x}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)\times\textbf{J}(\textbf{y}) dS_{\textbf{y}}.
\end{equation}
\section{Numerical experiments}
\label{sec:s5}
\begin{example}
Here, we consider one example to test the implementation. As domain we take the cube $\Omega_{-}=[-2,2]^{3}$, and we now want to test the Galerkin method in (\ref{eg4}). We choose the wave number $\alpha=0.1$ (or $\alpha=0.5,1.5$), and the exact solution
\begin{equation}\label{eg29}
\textbf{J}=\dfrac{1}{8}\left( \begin{array}{c}
0\\
(1-x_{1})(1-x_{2})\cdot n_{3}\\
-(1-x_{1})(1-x_{2})\cdot n_{2}
\end{array}
\right)
\end{equation}
and
\begin{equation}\label{eg30}
M=\dfrac{1}{8\alpha^{2}}(x_{1}-1)\cdot n_{3}
\end{equation}
where $\textbf{n}=(n_{1},n_{2},n_{3})$ denotes the outer normal vector at a point on the surface $\Sigma=\cup_{k=1}^{6}\Sigma_{k}$. We can write each term of equation (\ref{s55}) as:
\begin{equation}\label{eg31}
\begin{aligned}
\mathcal{V}_{\alpha}(\textbf{J})_{T}(\textbf{x})&=\sum_{k=1}^{6}\int_{\Sigma_{k}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)(\textbf{J}_{k}(\textbf{y}))^{t}\hspace{0.1cm}dS_{\textbf{y}},
\end{aligned}
\end{equation}
and
\begin{equation}\label{eg32}
\begin{aligned}
\mbox{grad}_{T}\mathcal{V}_{\alpha}(M)_{T}(\textbf{x})&=\sum_{k=1}^{6}\mbox{grad}_{T}\int_{\Sigma_{k}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)M_{k}(\textbf{y})\hspace{0.1cm}dS_{\textbf{y}}.
\end{aligned}
\end{equation}
Then, from (\ref{s55}), (\ref{eg31}) and (\ref{eg32}) holds
\begin{equation}\label{eg33}
\textbf{E}_{T}=\sum_{k=1}^{6}\left( \int_{\Sigma_{k}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)(\textbf{J}_{k}(\textbf{y}))^{t}\hspace{0.1cm}dS_{\textbf{y}}+\mbox{grad}_{T}\int_{\Sigma_{k}}G_{\alpha}(\lvert\textbf{x}-\textbf{y}\rvert)M_{k}(\textbf{y})\hspace{0.1cm}dS_{\textbf{y}}\right) .
\end{equation}
We use different values of $\alpha$ for our investigation. In Table \ref{table1} we present the results of the errors in energy norm and $L2$-norm for $\alpha=0.1,0.5,1.5$ for the uniform $h$-version with polynomial degree $p=1$. In Figures \ref{figure1} and \ref{figure2} we compare the $h$-version with different $\alpha$. The exact norm is known by extrapolation for $\alpha=0.1$ is $\lvert C\rvert=8.580798$, for $\alpha=0.5$ is $\lvert C\rvert=1.6171534$, and for $\alpha=1.5$ is $\lvert C\rvert=1.8042380$. Here $C=Re\langle\textbf{E}_{T}^{0},\textbf{J}\rangle$ and $C_{h}=Re\langle\textbf{E}_{T}^{0},\textbf{J}_{h}\rangle$ (see \cite{Holm}). The exact $L2$-norm is known by extrapolation for $\alpha=0.1$ are $\lVert \textbf{J}\rVert_{L^2}=2.1066356$ and $\lVert M\rVert_{L^2}=81.9249906$, for $\alpha=0.5$ are $\lVert \textbf{J}\rVert_{L^2}=2.1977966$ and $\lVert M\rVert_{L^2}=3.9588037$ and for $\alpha=1.5$ are $\lVert \textbf{J}\rVert_{L^2}=2.3826646$ and $\lVert M\rVert_{L^2}=0.7763804$.\\
The convergence rate $\eta$ for $\alpha=0.1$ are for the energy norm $\eta_{C}=1.325363$, for $L^{2}$-norm $\eta_{\textbf{J}}=1.617988$ and $\eta_{M}=1.184964$. For $\alpha=0.5$ are for the energy norm $\eta_{C}=1.165255$, for $L^{2}$-norm $\eta_{\textbf{J}}=0.976440$ and $\eta_{M}=1.211619$ and for $\alpha=1.5$ are for the energy norm $\eta_{C}=1.552163$, for $L^{2}$-norm $\eta_{\textbf{J}}=0.174124$ and $\eta_{M}=0.295586$.\\
Let us compare the numerical convergence rates obtained in the above example for the boundary element method with the theoretical convergence rates predicted by Theorem~\ref{ts1}. Note that we have implemented the boundary integral equation system (\ref{s55}), (\ref{s56}) and not the strongly elliptic system (\ref{b5}), for which convergence is guaranteed due to Theorem \ref{ts1}. Nevertheless our experiments show convergence for the boundary element solution, but with suboptimal convergence rates. Theorem \ref{ts1} predicts (when Raviart-Thomas elements are used to approximate $\textbf{J}$ and piecewise linear elements to approximate $M$) a convergence rate of order $\eta=\frac{3}{2}$ in the energy norm for smooth solutions $\textbf{J}$ and $M$. Our computations depend on the parameter $\alpha$, which is a well-known effect with boundary integral equations, where spurious eigenvalues may diminish the orders of the Galerkin approximations. Due to the cube $\Omega_{-}=[-2,2]^{3}$ the numerical solution might become singular near the edges and corners of $\Omega_{-}$; hence the Galerkin scheme converges suboptimally.
\end{example}
\begin{table}[bht]
\begin{center}
\begin{tabular}[h]{|c||l|c|c|c|c|c|c|}\hline
N & DOF & $\lvert C\rvert$ & $\lvert C-C_{h}\rvert$ & $\lVert\textbf{J}\rVert_{\textbf{L}^{2}}$ & $\lVert M\rVert_{\textbf{L}^{2}}$ & $\lVert\textbf{J}-\textbf{J}_{h}\rVert_{\textbf{L}^{2}}$ & $\lVert M-M_{h}\rVert_{\textbf{L}^{2}}$ \\ \hline \hline
& & & & $\alpha=0.1$ & & & \\ \hline
1 & 144 & 8.502965 & 1.153119 & 2.085189 & 80.704374 & 0.299829 & 14.08929 \\ \hline
2 & 576 & 8.568451 & 0.460150 & 2.104369 & 81.690279 & 0.097681 & 6.196968 \\ \hline
3 & 2304 & 8.578833 & 0.033717 & 2.106395 & 81.879637 & 0.031823 & 2.725645 \\ \hline
4 & 9216 & 8.654072 & 0.073274 & 2.117002 & 83.123825 & 0.010367 & 1.198835 \\ \hline
& & & & $\alpha=0.5 $ & & & \\ \hline
1 & 144 & 1.603519 & 0.209552 & 2.149511 & 3.8937090 & 0.458159 & 0.714952 \\ \hline
2 & 576 & 1.614451 & 0.093436 & 2.185426 & 3.9467491 & 0.232851 & 0.308704 \\ \hline
3 & 2304 & 1.616616 & 0.041661 & 2.194608 & 3.9565591 & 0.118342 & 0.133293 \\ \hline
4 & 9216 & 1.617260 & 0.018576 & 2.198619 & 3.9592220 & 0.060145 & 0.057554 \\ \hline
& & & & $\alpha=1.5$ & & & \\ \hline
1 & 144 & 1.774450 & 0.326497 & 2.350909 & 0.7243729 & 0.387707 & 0.279375 \\ \hline
2 & 576 & 1.800799 & 0.111334 & 2.365011 & 0.7422644 & 0.343627 & 0.227618 \\ \hline
3 & 2304 & 1.803838 & 0.037965 & 2.382843 & 0.7539064 & 0.304558 & 0.185450 \\ \hline
4 & 9216 & 1.804284 & 0.012946 & 2.397906 & 0.7909461 & 0.269932 & 0.151093 \\ \hline
\end{tabular}
\end{center}
\caption{\label{table1}Errors in $L^{2}$-norm and energy norm with respect to the degrees of freedom for $\alpha=0.1,0.5,1.5$.}
\end{table}
\begin{figure}
\caption{$h$-version with $p=1$: errors in the energy norm for $\alpha=0.1,0.5,1.5$.}
\label{figure1}
\end{figure}
\begin{figure}
\caption{$h$-version with $p=1$: errors in the $L^2$-norm for $\alpha=0.1,0.5,1.5$.}
\label{figure2}
\end{figure}
Next, we apply the boundary element method above to compute the first terms in the asymptotic expansion of the electrical field considered in subsection \ref{sec:s1} (Remark \ref{rem1}). In this way we obtain good results for the electrical field at some point away from the transmission surface $\Sigma$ by only computing a few terms in the expansion.\\
Algorithm for the asymptotics of the eddy current problem:
\begin{enumerate}
\item First solve the exterior Problem $(\textbf{P}_{\alpha \infty})$ by integral equations (\ref{s55}) and (\ref{s56}) i.e. (\ref{eg2}) with given incident field $-\textbf{E}_{T}^{0}$.
\item Compute $\textbf{H}_{T}^{+}$ from (\ref{eg43}).
\item Go back to 1: Solve the exterior problem $(\textbf{P}_{\alpha \infty})$ with new right hand side from (\ref{s31}).
\item Go back to 2.
\item $\textbf{E}=\textbf{E}_{0}+\beta^{-1}\textbf{E}_{1}+\beta^{-2}\textbf{E}_{2}+\textbf{R}_{m}$, where $\textbf{E}_{0}$ is the solution of the step 1 and $\textbf{E}_{1}$ and $\textbf{E}_{2}$ are solutions of step 3.
\end{enumerate}
We have $\widetilde{\textbf{E}}=\textbf{E}_{0}+\beta^{-1}\textbf{E}_{1}+\beta^{-2}\textbf{E}_{2}$ and calculate the error $\lvert\widetilde{\textbf{E}}-\textbf{E}_{\mbox{exact}}(\textbf{x}_{i})\rvert$, $i=1,2,3$, where $\textbf{x}_{1}=(3,0,0)$, $\textbf{x}_{2}=(6,0,0)$ and $\textbf{x}_{3}=(9,0,0)$. We present the results in Table \ref{table2} and in Figure \ref{figure3}.
\begin{table}[bht]
\begin{center}
\begin{tabular}[h]{|c||c||c||c|}\hline
DOF & $\lvert\widetilde{\textbf{E}}-\textbf{E}_{\mbox{exact}}(\textbf{x}_{1})\rvert$ & $\lvert\widetilde{\textbf{E}}-\textbf{E}_{\mbox{exact}}(\textbf{x}_{2})\rvert$ & $\lvert\widetilde{\textbf{E}}-\textbf{E}_{\mbox{exact}}(\textbf{x}_{3})\rvert$ \\ \hline \hline
144 & 0.4959 & 0.6499 & 0.8049 \\ \hline
576 & 0.1043 & 0.0910 & 0.0347 \\ \hline
2304 & 0.0998 & 0.0067 & 0.0378 \\ \hline
\end{tabular}
\end{center}
\caption{\label{table2}Errors for electrical field in $\textbf{x}_{1}$, $\textbf{x}_{2}$, and $\textbf{x}_{3}$.}
\end{table}
\begin{figure}
\caption{Errors for the electrical field in $\textbf{x}_{1}$, $\textbf{x}_{2}$, and $\textbf{x}_{3}$.}
\label{figure3}
\end{figure}
{\bf Acknowledgements:} This research was supported in part by the Programa ALECOL-DAAD, the Institute for Applied Mathematics, Leibniz University of Hannover, Hannover, Germany, the Department of Mathematical Sciences, Brunel University, U.K., and Universidad del Norte, Barranquilla, Colombia. We also thank the anonymous referees for their suggestions.\\
\end{document} |
\begin{document}
\title{Unsupervised Bump Hunting Using Principal Components}
\date{\today}
\author{Daniel A D\'{\i}az-Pach\'on\thanks{[email protected]} \and Jean-Eudes Dazard\thanks{[email protected]} \and J. Sunil Rao\thanks{[email protected]}}
\maketitle
\begin{abstract}
Principal Components Analysis is a widely used technique for dimension reduction and characterization of variability in multivariate populations. Our interest lies in studying when and why the rotation to principal components can be used effectively within a response-predictor set relationship in the context of mode hunting. Specifically focusing on the Patient Rule Induction Method (PRIM), we first develop a fast version of this algorithm (fastPRIM) under normality which facilitates the theoretical studies to follow. Using basic geometrical arguments, we then demonstrate how the PC rotation of the predictor space alone can in fact generate improved mode estimators. Simulation results are used to illustrate our findings.\\
\textbf {Key words:} Algorithms, Bump hunting, Computationally intensive methods, Mode hunting, Principal components.
\end{abstract}
\section{Introduction}\label{Intro}
The PRIM algorithm for bump hunting was first developed by \citet{FriedmanFisher1999}. It is an intuitively useful computational algorithm for the detection of local maxima (or minima) of target functions. Roughly speaking, PRIM {\it peels} the (conditional) distribution of a response from the outside in, leaving at the end rectangular boxes which are supposed to contain a bump (see the formal description in Algorithm \ref{AlgoPRIM} on page \pageref{AlgoPRIM}). However, some shortcomings of this procedure have also appeared in the literature when several dimensions are under consideration. For instance, as \citet{PolonikWang2010} explained, the method could fail when there are two or more modes in high-dimensional settings.
Almost at the same time, \citet{DazardRao2010} proposed a supervised bump hunting strategy, given that the use of PRIM is still ``challenged in the context of high-dimensional data''. The strategy, called Local Sparse Bump Hunting (LSBH), is outlined in Algorithm \ref{AlgoLSBH} on page \pageref{NotationConcepts}. Summarizing the algorithm, it uses a recursive partitioning algorithm (CART) to identify subregions of the whole space where at most one mode is estimated to be present; then a Sparse Principal Component Analysis (SPCA) is performed separately on each local partition; and finally, the location of the bump is determined via PRIM in the local, rotated and projected subspace induced by the sparse principal components.
As an example, we show in Figure \ref{figure01} simulation results representing a multivariate bimodal situation in the presence of noise, similar to the simulation design used by \citet{DazardRao2010}. We simulated in a three-dimensional input space ($p = 3$) for visualization purposes. The data consists of a mixture of two trivariate normal distributions, taking on discrete binary response values ($Z \in \{1,2\}$), noised by a trivariate uniform distribution with a null response ($Z = 0$), so that the data can be written as $X \sim w \cdot N_p(0,\Sigma) + (1 - w) \cdot B_p$, where $B_p \sim U_p[a,b]$, $w \in [0,1]$ is the mixing weight, and $(a,b) \in \mathbb R^2$.
\begin{figure}
\caption{Illustration of the efficiency of the encapsulation process by LSBH of two target normal distributions (red and green dots), in the presence of 10\% ($w = 0.9$) noise distribution (black dots) in a three-dimensional input space ($p = 3$). We let the total sample size be $n = 10^3$. Top row: each plot represents a projected view of the data in the input subspace.}
\label{figure01}
\end{figure}
Notice how the data in the PC spaces determined by Partition \#1 and \#2 do align with the PC coordinate axes $Y_{11}$ and $Y_{21}$, respectively (Figure \ref{figure01}).
Our goal in this paper is to provide some theoretical basis for the use of PCs in mode hunting using PRIM and a modified version of this algorithm that we call ``fastPRIM''. Although the original LSBH algorithm accepts more than one mode per partition, we will restrict ourselves to the case in which there is at most one mode in each partition, in order to obtain more workable developments and more understandable results in this work.
In Section \ref{NotationConcepts} we define the algorithms we are working with and set some useful notation. Section \ref{SecfastPRIM} proposes a modification of PRIM (called fastPRIM) for the particular case in which the bumps are modes in a setting of normal variables, which allows us to compare the boxes in the original space and in the rotation induced by principal components. The approach goes beyond normality and can be shown to hold for all symmetric distributions with finite second moment; it also provides an important reduction in computational complexity, since it is also useful for samples when $n\gg0$, via the central limit theorem (Subsection \ref{fastPRIMdata}). In this section we also present simulations which display the differences between considering the original space or the PC rotation for PRIM and fastPRIM. Section \ref{SpacesCompared} proves Theorem \ref{main}, a result explaining why the (volume-standardized) output box mode is higher in the PC rotation than in the original input space, a situation observed computationally by \citet{DazardRao2010} for which we give here a formal explanation. Theorem \ref{ComparingPRIMAndFastPRIM} shows that in terms of bias and variance, fastPRIM does better than PRIM. Finally, in Section \ref{Simulations} we show additional simulations relevant to the results found in Section \ref{SpacesCompared}.
\section{Notation and basic concepts}\label{NotationConcepts}
We set here the concepts that will be useful throughout the paper to define the algorithms and their modifications. Our notation on PRIM follows as a guideline the one used by \citet{PolonikWang2010}.
Let $X$ be a $p$-dimensional real-valued random vector with distribution $F$. Let $Z$ be an integrable random variable. Let $m(x):=\textbf E[Z|X=x]$, $x\in\mathbb R^p$. Assume without loss of generality that $m(x)\geq0$.
Define $I(A):=\int_Am(x)dF(x)$, for $A\subset\mathbb R^p$. So when $A=\mathbb R^p$, then $I(A)=\textbf EZ$. We are interested in a region $C$ such that
\begin{align}\label{Goal}
ave(C):=\frac{I(C)}{F(C)} > \rho,
\end{align}
where $\rho=ave(\mathbb R^p)$. Note then that $ave(C)$ is just a notational convenience for the average of $Z$ given $X\in C$.
Given a box $B$ whose sides are parallel to the coordinate axes of $\mathbb R^p$, we peel small pieces of $B$ parallel to its sides and we stop peeling when what remains of the box $B$ becomes too small. Let the class of all these boxes be denoted by $\mathcal B$. Given a subset $S(X) = S \subseteq \mathbb R^p$ and a parameter $\beta\in(0,1)$, we define
\begin{align}\label{OptBox}
B^*_\beta=\arg\max_{B\in\mathcal B}\{ave(B|S):F(B|S)=\beta\},
\end{align}
where $ave(B|S)=I(B|S)/F(B|S)$. In words, $B^*_\beta$ is the box with maximum average of $Z$ among all the boxes whose $F$-measure, conditioned to the points in the box $S$, is $\beta$. The former definitions set the stage to define Algorithm \ref{AlgoPRIM} at page \pageref{AlgoPRIM} below.
Some remarks are in order given Algorithm \ref{AlgoPRIM}:
\begin{algorithm}[!ht]
\caption{Patient Rule Induction Method}
\label{AlgoPRIM}
\begin{itemize}
\item(Peeling) Begin with $B_1=S$. For $l=1,\ldots,L-1$, where $(1-\alpha)^L=\beta$, and $\alpha\in(0,1)$, remove a subbox contained in $B_l$, chosen among $2p$ candidates given by:
\begin{align}\label{bjk}
b_{j1}:=\{x\in B:x_j<x_{j(\alpha)}\},\nonumber\\
b_{j2}:=\{x\in B:x_j>x_{j(1-\alpha)}\},
\end{align}
where $j=1,\ldots,p$. The subbox $b^*_l$ chosen for removal gives the largest expected value of $Z$ conditional on $B_l\setminus b^*_l(X)$. That is,
\begin{align}\label{bmin}
b^*_l = \arg\min\left\{I\left(b_{jv}|B_l\right):j=1,\ldots,p \text{ and } v=1,2\right\}.
\end{align}
Then $B_l$ is replaced by $B_{l+1}=B_l \setminus b^*_l$ and the process is iterated as long as the current box $B_l$ is such that $F(B_l|S)\geq\beta+\alpha$.
\item (Pasting) Alongside the $2p$ boundaries of the resulting box $B$ on the peeling part of the algorithm we look for a box $b^+\subset S\setminus B$ such that $F(b^+|S)=\alpha F(B|S)$ and $ave((B\cup b^+)\cap S) > ave(B\cap S)$. If there exists such a box $b^+$, we replace $B$ by $(B\cup b^+)$. If there exists more than one box satisfying that condition, we replace $B$ by the one that maximizes the average $ave((B\cup b^+)\cap S)$. In words, pasting is an enlargement on the Lebesgue measure of the box which is also an enlargement on the average $ave((B\cup b^+) \cap S)$.
\item (Covering) After the first application of the peeling-pasting process, we update $S$ by $S \setminus B_1$, where $B_1$ is the box found after pasting, and iterate the peeling-pasting process replacing $S = S^{(1)}$ by $S^{(2)} = S^{(1)} \setminus B_1$, and so on, removing at each step $k=1,\ldots,t$ the optimal box of the previous step: $S^{(k)} = S^{(k-1)} \setminus B_{k-1}$, so that $S^{(k)} = S^{(1)} \setminus \cup_{1 \leq b \leq k-1}B_b$. At the end of the PRIM algorithm we are left with a region, shaped as a rectangular box:
\begin{align}\label{FinalRegion}
R_\rho(p,k)=\bigcup_{ave\left(B_k | S^{(k)}\right)\geq\rho}\left\{B_k | S^{(k)}\right\}.
\end{align}
\end{itemize}
\end{algorithm}
\begin{remark}
The value $\alpha$ is the second tuning parameter and $x_{j(\alpha)}$ is the $\alpha$-quantile of $F_j(\cdot|B_l)$, the marginal conditional distribution function of $X_j$ given the occurrence of $B_l$. Thus, by construction,
\begin{align}\label{marginal}
\alpha=F_j\left(b_{jv}|B_l\right)=F\left(b_{jv}|B_l\right).
\end{align}
\end{remark}
\begin{remark}
Conditioning on an event, say $\tilde A$, is equivalent to conditioning on the random variable $\textbf 1\{x\in\tilde A\}$; i.e., when this occurs, as in (\ref{OptBox}), we are conditioning on a Bernoulli random variable.
\end{remark}
\begin{remark}
When dealing with a sample, we define analogs of the terms used previously and replace those terms in Algorithm \ref{AlgoPRIM} with:
\begin{align*}
& I_n(C)=\frac{1}{n}\sum_{i=1}^nZ_i\textbf 1\{X_i\in C\},\\
& F_n(C)=\frac{1}{n}\sum_{i=1}^n\textbf 1\{X_i\in C\},\\
& ave_n(C)=\frac{I_n(C)}{F_n(C)},
\end{align*}
where $F_n$ is the empirical cumulative distribution of $X_1,\ldots,X_n$.
\end{remark}
\begin{remark}\label{FinalBoxProbability}
Ignore the pasting stage, considering only peeling and covering. Let us call $\beta_T$ the probability of the final region. Then
\begin{align*}
\beta_T= \textbf P[x\in R_\rho(p)]&=\sum_{k=1}^t\beta(1-\beta)^{k-1}\\
&=1-(1-\beta)^t.
\end{align*}
\end{remark}
\begin{algorithm}[!ht]
\caption{Local Sparse Bump Hunting}
\label{AlgoLSBH}
\begin{itemize}
\item Partition the input space into $R$ partitions $P_1,\ldots, P_R$, using a tree-based algorithm like CART, in such a way that there is at most one mode in each of the partitions.
\item For $r$ from 1 to $\tilde r$
\begin{itemize}
\item If $P_r$ is elected for bump hunting (i.e.; if $G_r$, the number of class labels in $P_r$, is greater than 1)
\begin{itemize}
\item Run a local SPCA in the partition $P_r$, rotating and reducing the space to $p'\ (\leq p$) dimensions, and if possible, decorrelating the sparse principal components (SPC). Call this resulting space $\mathcal T(P_r)$.
\item Estimate PRIM meta-parameters $\alpha$ and $\beta$ in $\mathcal T(P_r)$.
\item Run a local and tuned PRIM-based bump hunting within $\mathcal T(P_r)$ to get descriptive rules of the bumps in the SPC space of the form $R_\rho^{(r)}(p')$, as in (\ref{FinalRegion}), where $r$ indicates the partition being considered.
\item Rotate the local rules $R^{(r)}$ back into the input space to get rules in terms of the sparse linear combinations.
\end{itemize}
\item Actualize $r$ to $r+1$.
\end{itemize}
\item Collect the rules from all partitions to get a global rule $\mathcal R=\bigcup_{r=1}^RR_\rho^{(r)}$ giving a full description of the estimated bumps in the entire input space.
\end{itemize}
\end{algorithm}
\subsection{Principal Components}\label{SecPRIMPC}
The theory of PCA is widely known; however, we will outline it here for the sake of completeness and to define notation. Among others, \citet{Mardia1976} presents a thorough analysis.
If $\textbf x$ is a random centered vector with covariance matrix $\Sigma$, we can define a linear transformation $\textbf T$ such that
\begin{align}\label{PopPCA}
\textbf T\textbf x = \textbf y = \Gamma'\textbf x,
\end{align}
where $\Gamma$ is a matrix such that its columns are the standardized eigenvectors of $\Sigma:=\Gamma\Lambda\Gamma'$; $\Lambda$ is a diagonal matrix with $\lambda_1\geq\cdots\geq\lambda_p\geq0$; and $\lambda_j$, $j=1,\ldots,p$, are the eigenvalues of $\Sigma$. Then $\textbf T$ is called the principal components transformation.
Let $p'\leq p$. We call $\mathfrak X(p)$ the original $p$-dimensional space where $\textbf x$ lives, $\mathfrak X'(p)$ the rotated $p$-dimensional space where $\textbf y$ lives, and $\mathfrak X'(p')$ the rotated and projected space on the $p'$ first PC's.
As we will explain later, we are not advising on the reduction of dimensionality in the context of regression or other learning settings. However, since it is relevant to some features of our simulations, we consider the case $\mathfrak X'(p')$ with $p'\leq p$.
\section{fastPRIM: a More Efficient Approach to mode hunting}\label{SecfastPRIM}
Despite successful applications in many fields, PRIM presents some shortcomings. For instance, \citet{FriedmanFisher1999}, the proponents of the algorithm, show that in the presence of high collinearity or high correlation PRIM is likely to behave poorly. This is also true when there is significant background noise. Further, PRIM becomes computationally expensive in simulations and real data sets in large dimensions. In this section we propose a modified version of PRIM, called ``fastPRIM'', aimed to solve these two problems when we are hunting the mode. The high collinearity problem can be solved via principal components. The computational problems can be solved via the CLT and the geometric properties of the normal distribution, if we can warrant $n\gg0$.
The following situations are variations from simple to complex of the input $X$ and the response $Z$ being normally distributed $N(\textbf 0,\Sigma)$ and $N(0,\sigma)$, respectively. We are interested in maximizing the density of $Z$ given $X$. But there are several ways to define the mode of a continuous distribution. So for simplicity, let us define the mode of $Z$ as the region $C\subset\mathbb R^p$ with $P_X[x\in C]=\beta$ that maximizes
\begin{equation}\label{Mode}
M(C):=\int_Cf_Z(x)dF(x)
\end{equation}
(note the similarity of $M(C)$ with $I(C)$ in Equation (\ref{Goal})). In terms of PRIM, we are interested in the box $B^*_\beta$ defined on Equation (\ref{OptBox}). That is, $B^*_\beta$ is a box such that $\textbf P_X[x\in B^*_\beta]=\beta$, and inside it the mean density of the response $Z$ is maximized. Then, since the mean and the mode of the normal distribution coincide, finding a box of size $\beta$ centered around the mean of $X$ is equivalent to finding a box that maximizes the mode of $Z$ (since $X$ and $Z$ are both centered around the origin).
Although it is good to have explicit knowledge of our final region of interest, on what follows most of the results ---with the exception of Theorem \ref{main} below--- can be stated without direct reference to the mode of $Z$, taking into account that the mode of $Z$ is centered around the mean of $X$.
\subsection{fastPRIM for Standard Normality}\label{SecPRIMStandard}
Let $X\sim N(\textbf 0,\textbf I)$ with $X$ living in the space $S(X)$. Let $Z\sim N(0,1)$. Since the whole input space is defined by symmetric uncorrelated variables, PRIM can be modified in a very efficient way. (See below Algorithm \ref{AlgofastPRIM}.)
\begin{algorithm}[!ht]
\caption{fastPRIM with Standard Normal Predictors}
\label{AlgofastPRIM}
\begin{itemize}
\item (Peeling) Instead of peeling just one side of probability $\alpha$, make $2p$ peels corresponding to each side of the box, giving to each one a probability $\alpha(2p)^{-1}$. Then, after $L$ steps, the remaining box has the same $\beta$ measure, it is still centered at the origin and its marginals will have probability measure $\beta^{1/p}$.
\item (Covering) Call $B_M(k)$ the box found after the $k$-th step, $k=1,\ldots,t$ of this modified peeling stage. Setting $S(X)=S^{(1)}(X)$, take the space $S^{(k)}(X) := S^{(1)}(X) \setminus \bigcup_{1 \leq b \leq k-1} B_M(b)$ and repeat on it the peeling stage.
\end{itemize}
\end{algorithm}
Several comments are worth mentioning regarding this modification.
\begin{enumerate}
\item Given that the standard normal is spherical, the final box at the end of the peeling algorithm is centered. It is also squared in that all its marginals have the same Lebesgue measure and the same probability measure $\beta^{1/p}$. Then, instead of doing the whole peeling stage, we can reduce it to select the central box whose vertices are located at the coordinates corresponding to the quantiles $\frac{1}{2}\beta^{1/p}$ and $1-\frac{1}{2}\beta^{1/p}$ of each marginal.
\item Say we want to apply $t$ steps of covering. Since the boxes chosen are centered at the end of the $t$-th covering step, the final box will have probability measure $\beta_T:=1-(1-\beta)^t$ (which, by Remark \ref{FinalBoxProbability}, produces the same probability as PRIM), each marginal has measure ($\beta_T)^{1/p}$, and the vertices of each marginal are located at the coordinates corresponding to the quantiles $\frac{1}{2}(\beta_T)^{1/p}$ and $1-\frac{1}{2}(\beta_T)^{1/p}$. It means that the whole fastPRIM is reduced to calculating this central box of probability measure $t\beta$.
\item The only non-zero values outside the diagonal in the covariance matrix of $(Z\; X)^T$ of size $(p+1)\times(p+1)$ are possibly the non-diagonal terms in the first row and the first column. Let us call them $\sigma_{ZX_1},\ldots,\sigma_{ZX_p}$. From this we get that $\textbf E[Z|X]=\sum_{j=1}^p\sigma_{ZX_j}X_j$ and $\textbf V[Z|X]=1-\sum_{j=1}^p\sigma_{ZX_j}^2$.
\item It does not make too much sense to have a pasting stage, since we will be adding the same $\alpha$ we just peeled in portions of $\alpha/(2p)$ at each side. However, a possible way to add this whole stage is to look for the dimension that maximizes the conditional mean, once a portion of probability $\alpha/2$ has been added to each side of the selected dimension. All this, of course, provided that this maximal conditional mean be higher than the one already found during the peeling stage. If this stage is applied as described, the final region will be a rectangular centered box.
\end{enumerate}
Points 1, 2 and 3 can be stated as follows:
\begin{lem}\label{PRIMStandardLemma}
Assume $Z \sim N(0,1)$ and $X \sim N(\textbf 0, \textbf I)$. Let us iterate $t$ times Algorithm \ref{AlgofastPRIM}. Then the whole algorithm can be reduced to a single stage of finding a centralized box with vertices located at the coordinates corresponding to the quantiles $\frac{1}{2}(\beta_T)^{1/p}$ and $1-\frac{1}{2}(\beta_T)^{1/p}$ of each of the $p$ variables.
\end{lem}
\subsection{fastPRIM and Principal Components}\label{fastPRIMPC}
Note that if $Z \sim N(\mu,\sigma^2)$ and $X \sim N(\textbf 0, \Sigma)$, the same algorithm as in Section \ref{SecfastPRIM} can be used. The only difference is that the final box will be a rectangular Lebesgue set, not necessarily a square as before (although it continues being a square in probability). Some comments are in order.
First, with each of the variables having possible different variances, we are also peeling the random variables with lower variance. That is, we are peeling precisely the variables that we do not want to touch. The whole idea behind PRIM, however, is to peel from the variables with high variance, leaving the ones with lower variance as untouched as possible. The obvious solution is to use a PCA to project on the variables with higher variance, peel on those variables, and after the box is obtained to add the whole set of variables we chose not to touch. Adding to the notation developed in Section \ref{SecPRIMPC} for PCA, call $Y'$ the projection of $Y$ to its firsts $p'$ principal components, where $0 < p'\leq p$. Algorithm \ref{AlgofastPRIMPCA} below makes this explicit.
\begin{algorithm}[!ht]
\caption{fastPRIM with Principal Components}
\label{AlgofastPRIMPCA}
\begin{itemize}
\item (PCA) Apply PCA to $X$ to obtain the space $\mathfrak X'(p')$.
\item (Peeling) Make $2p'$ peels corresponding to each side of the box, each one with probability $\alpha(2p')^{-1}$. After $L$ steps, the centered box has $\beta$ measure, and its marginals will have probability $\beta^{1/p'}$ each.
\item (Covering) Call $B_M(k)$ the box found after the $k$-th step, $k=1,\ldots,t$, of this modified peeling stage. Setting $S(Y')=S^{(1)}(Y')$, take the space $S^{(k)}(Y') := S^{(1)}(Y') \setminus \bigcup_{1 \leq b \leq k-1} B_M(b)$ and repeat on it the peeling stage.
\item (Completing) The final box will be given by $\left[\mathfrak X'(p)\setminus\mathfrak X'(p')\right]\cup S^{(t)}(Y')$. That is, to the final box we are adding the whole subspace which we chose not to peel.
\end{itemize}
\end{algorithm}
In this way, we avoid selecting for peeling the variables with lower variance. Concededly, we are still peeling the same amount (we are getting squares, not rectangles, in probability), but we are also getting an important simplification in algorithmic complexity cost. Besides this fact, most of the comments in Section \ref{SecPRIMStandard} are still valid but one clarification has to be made: The covariance matrix of $(Z\; Y')$ has size $(p'+1)\times(p'+1)$; as before, all the non-diagonal elements are zero, except possibly the ones in the first row and the first column. Call them $\sigma_{ZY_1'}, \ldots,\sigma_{ZY_{p'}'}$. Then $\textbf E[Z|Y']=\sum_{j=1}^{p'}\sigma_{ZY_j'}\lambda_j^{-1}Y_j'$ and $\textbf {Var}[Z|Y']=\sigma_Z^2-\sum_{j=1}^{p'}\lambda_j^{-1}\sigma_{ZY_j'}^2$, where $Y_j'$ is the $j$-th component of the random vector $Y'$.
As before, we can state the following lemma:
\begin{lem}
Assume $Z \sim N(\mu,\sigma^2)$ and $X \sim N(\textbf 0,\Sigma)$. Iterate $t$ times the covering stage of Algorithm \ref{AlgofastPRIMPCA}. Then the whole algorithm can be reduced to a two-stage setting: First, to find a centralized box with vertices located at the coordinates corresponding to the quantiles $\frac{1}{2}(\beta_T)^{1/p'}$ and $1-\frac{1}{2}(\beta_T)^{1/p'}$ of each of the $p'$ variables. Second, add the $p- p'$ dimensions left untouched to the final box.
\end{lem}
\begin{remark}
Even though we have developed the algorithm with $p'\leq p$, it is not wise to try to reduce the dimensions of the input. To be sure, the rotation of the input in the direction of the principal components is a useful thing to do in learning settings, as \citet{DiazRaoDazard2014} have shown. However, \citet{Cox1968}, \citet{HadiLing1998}, and \citet{Joliffe1982}, have warned against the reduction of dimensionality.
\end{remark}
\subsection{fastPRIM and Data}\label{fastPRIMdata}
The usefulness of the previous result can be more easily seen when, for relatively large $n$, we consider the iid vectors $X_1,\ldots,X_n$ with finite second moment, since in this way we can approximate to a normal distribution by the Multivariate Central Limit Theorem:
Call $X=[X_1\cdots X_n]$ and let us assume that $n\gg 0$. By the multivariate central limit theorem, if the vectors of observations are iid, such that their distribution has mean $\mu_X$ and variance $\Sigma_X$, we can approximate $ X^* := n^{1/2}\left(\overline X-\mu_X\right)$ to a $p$-variate normal distribution with parameters $\textbf 0$ and $\Sigma_X$. That is, $\overline X$ can be approximated to a distribution $N(\mu_X,(1/n)\Sigma_X)$. Now, $Y^*=X^*G$ is the PC transformation of $X^*$, where $G$ is the matrix of eigenvectors of $S$, the sample covariance matrix of $X^*$; i.e., $S = GLG^T$, and $L$ is the diagonal matrix of eigenvalues of $S$, with $l_{j^{'}}\geq l_j$ for all $j^{'}<j$.
As before, call $Y'$ the projection of $Y$ to its first $p'$ principal components. Apply Algorithm \ref{AlgofastPRIMPCA}.
Note that the use of the CLT is indeed well justified: since the asymptotic mean of $X^*$ is $\textbf 0$, its asymptotic mode is also at $\textbf 0$ (or around $\textbf 0$).
\subsection{Graphical Illustrations}\label{Graphical}
In the following simulations, we first test PRIM and fastPRIM and illustrate graphically how fastPRIM compares to PRIM either in the input space $\mathfrak X(p)$ or in the PC space $\mathfrak X'(p)$. We generated a synthetic dataset derived from a simulation setup similar to the one used in Section \ref{Intro}, although with a single target distribution and a continuous normal response, without noise. Thus, the data $X$ was simulated as $X \sim N_p(0,\Sigma)$ with response $Z \sim N(\mu,\sigma^2)$. To control the amount of variance for each input variable and their correlations, the sample covariance matrix $\Sigma$ was constructed from a specified sample correlation matrix $R$ and sample variance matrix $V$ such that $\Sigma := V^{\sfrac{1}{2}} R V^{\sfrac{1}{2}}$, after ensuring that the resulting matrix $\Sigma$ is symmetric positive definite.
Simulations were carried out with a continuous normal response with parameters $\mu = 1$ and $\sigma = 0.2$, a fixed sample size $n = 10^3$, and no added noise (i.e. mixing weight $w = 1$). Here, we limited ourselves to a low dimensional space ($p = p' = 2$) for graphical visualization purposes. Simulations were for a fixed peeling quantile $\alpha$, a fixed minimal box support $\beta$, a fixed maximal coverage parameter $t$, and no pasting for PRIM. Empirical results presented in Figure \ref{figure02} show the marked computational efficiency of fastPRIM compared to PRIM. CPU times are plotted against PRIM and fastPRIM coverage parameters $k \in \{1, \ldots, t\}$ and $t \in \{1, \ldots, 20\}$, respectively, in the original input space $\mathfrak X(2)$ and PC space $\mathfrak X'(2)$.
\begin{figure}
\caption{Total CPU time as a function of coverage. For all plots, comparisons of speed metrics are reported against coverage parameter $k \in \{1, \ldots, t\}$.}
\label{figure02}
\end{figure}
Further, empirical results presented in Figure \ref{figure03} show PRIM and fastPRIM box coverage sequences as a function of PRIM and fastPRIM coverage parameters $k \in \{1, \ldots, t\}$ and $t \in \{1, \ldots, 20\}$, respectively. Notice the centering and nesting of the series of fastPRIM boxes in contrast to the sequence of boxes induced by PRIM (Figure \ref{figure03}).
\begin{figure}
\caption{PRIM and fastPRIM box coverage sequences. Top row: PRIM complete sequence of coverage boxes, each corresponding to a coverage step $k \in \{1, \ldots, t\}$.}
\label{figure03}
\end{figure}
\vskip 0pt
\section{Comparison of the Algorithms in the Input and PC Spaces}\label{SpacesCompared}
The greatest theoretical advantage of fastPRIM is that, because of the centrality of the boxes, it gives us a framework to compare the output mean in the original input space and in the PC space, something that cannot be attained with the original PRIM algorithm in which the behaviour of the final region is unknown (see Figure \ref{figure02}). \citet{PolonikWang2010} explain how PRIM tries to approximate regression level curves, an objective that the algorithm does not accomplish in general. With the idea of level curves in mind, it is clear that the bump of a multivariate normal distribution can be seen as the data inside the ellipsoids of concentration. This concept is the key to prove the optimality of the box found on the PC space. By optimality here we mean the box with minimal Lebesgue measure among all possible central boxes found by fastPRIM with probability measure $\beta$.
\begin{lem}\label{Circum}
Let $E$ be a $p$-dimensional ellipsoid. The rectangular box that is circumscribing $E$ (i.e. centered at the center of $E$, with sides parallel to the axes of $E$, such that each of its edges is of length equal to the axis length of $E$ in the corresponding dimension), is the box with the minimal volume of all the rectangular boxes containing $E$.
\end{lem}
The proof of Lemma \ref{Circum} is well-known and is omitted here.
\begin{prop}\label{minBox}
Let $X \sim N(\textbf 0,\Sigma)$. Assume that the true bump $E$ of $X$ has probability measure $\beta' > 0$. Then, it is possible to find a rectangular box $R$ by fastPRIM that circumscribes $E$ under the PC rotation with minimal Lebesgue measure over all rectangular boxes containing $E$ and the set of all possible rotations.
\end{prop}
\begin{proof}
The true bump satisfies that $\textbf P[x \in E]=\beta'$. This bump, by definition of normality, lives inside an ellipsoid of concentration $E$, of volume $\text{Vol}(E)=\pi_p\prod_{1\leq j\leq p}r_j$, where $r_j$ is the length of the semi axis of the dimension $j$ and $\pi_p$ is a constant that only depends on the dimension $p$. By Lemma \ref{Circum} above, the box $R$ with sides parallel to the axes of $E$, and circumscribing $E$, has minimal volume over all the boxes containing $E$ and its volume is $2^p\prod_{1\leq j\leq p}r_j$, and $2^p>\pi_p$. Let us assume that $\textbf P[x \in R] = \beta$ (thus $\beta' < \beta$).
Note now that $R$ is parallel to the axes in the space of principal components $\mathfrak X'(p)$ and it is centered at its origin. Therefore, provided an appropriate small $\alpha$ (it is possible that we need to adjust proportionally $\alpha$ on each direction of the principal components to obtain the box that circumscribes $E$), the minimal rectangular box $R$ containing the bump $E$ can be approximated through fastPRIM and is in the direction of the principal components. As such, then the box $R$ has smaller Lebesgue measure than any other approximation in every other rotation.
\end{proof}
\begin{remark}
The box of size $\beta$ circumscribing the ellipsoid of concentration $E$ is identical to $B^*_\beta$ in equation (\ref{OptBox}).
\end{remark}
Proposition \ref{minBox} allows us to compare box estimates in the PC space of PRIM (Figure \ref{figure02}, top-right) versus fastPRIM (Figure \ref{figure02}, down-right). Remember from Equation (\ref{FinalRegion}) that $R_\rho(p,1)$ is the box obtained with PRIM after a single stage of coverage. We now restrict ourselves to the case of $R_\rho(p,1)$ in the direction of the principal components (i.e., its sides are parallel to the axes of $\mathfrak X'(p)$). We establish the following result:
\begin{teo}\label{main}
Assume $X \sim N(0,\Sigma)$ and $Z \sim N(0,\sigma^2)$. Call $R$ the final fastPRIM box resulting from Algorithm \ref{AlgofastPRIMPCA} and assume $p'=p$. As in (\ref{FinalRegion}), call also $R_\rho(p,1)$ the final box from Algorithm \ref{AlgoPRIM} after one stage of coverage. Assume that $R$ and $R_\rho(p,1)$ contain the true bump. Then
\begin{align}
\frac{M(R)}{Vol(R)}>\frac{M(R_\rho(p,1))}{Vol(R_\rho(p,1))},
\end{align}
that is, the volume-adjusted box output mean of the mode of $Z$ given $R$ is bigger than the volume-adjusted box output mean of the mode of $Z$ given $R_\rho(p,1)$.
\end{teo}
\begin{proof}
Note that by definition, the two boxes have sides parallel to the axes of $\mathfrak X'(p)$. The proof is direct because of the assumptions. By Proposition \ref{minBox}, $R$ is the minimal box of measure $\beta$ that contains the true bump. Therefore, any other box $R'$ with parallel sides to $R$ that contains the bump also contains $R$. Since $R$ is centered around the mean of $Z$, every point $z$ in the support of $Z$ such that $z\in R'\setminus R$ has lower density than $\min_{z\in R}f_Z(z)$. Therefore $M(R)>M(R')$. From Proposition \ref{minBox} we also get that $Vol(R)<Vol(R')$.
Since $R_\rho(p,1)$ is but a particular case of a box $R'$, the result follows.
\end{proof}
Not only $R$ has better volume-adjusted output mean than $R_\rho(p,1)$. We conclude showing the optimality of the latter over the former in terms of bias and variance.
\begin{teo}\label{ComparingPRIMAndFastPRIM}
Assume $Z \sim N(\mu,\sigma^2)$ and $X \sim N(\textbf 0,\Sigma)$. Define $E$ as the true bump, and let us assume that both $R$ and $R_\rho(p)$ cover $E$. Then $\textbf {Var}(Z|Y \in R) < \textbf {Var}(Z|Y \in R_\rho(p))$, and $R$ is unbiased while $R_\rho(p)$ is not.
\end{teo}
\begin{proof}
Note that $R$ and $R_\rho(p,1)$ are estimators of $B_\beta^*$, as defined in Equation (\ref{OptBox}). Algorithm \ref{AlgofastPRIMPCA} is producing unbiased boxes since by construction it is centered around the mean. In fact, $R$ would be unbiased even if not taken in the direction of the PC. On the other hand, $R_\rho(p)$ is almost surely biased, even in the direction of the principal components, since it is producing boxes that are not centered around the mean.
Now, the inequality $\textbf {Var}(Z|Y \in R) < \textbf {Var}(Z|Y \in R_\rho(p))$ stems from the fact that $R$ is the box with minimal volume containing $E$. Since $R$ is in the direction of the principal components, every other box that contains $E$ in the same direction also contains $R $, in particular $R \subseteq R_\rho(p)$.
\end{proof}
\section{Simulations}\label{Simulations}
Next, we illustrate how the optimality of the box encapsulating the true bump is improved in the PC space $\mathfrak X'(p)$ as compared to the input space $\mathfrak X(p)$. Empirical results presented in Figure \ref{figure04} are for the same simulation design and the same fastPRIM and PRIM parameters as described in Subsection \ref{Graphical}, except that we now allow for higher dimensionality since no graphical visualization is desired here ($p =100$).
\begin{figure}
\caption{Box statistics and performance metrics as a function of coverage. For all plots, results are plotted against PRIM coverage parameter $k \in \{1, \ldots, t\}$.}
\label{figure04}
\end{figure}
Some of the theoretical results between the original input space and the PC space are borne out based on the empirical conclusions plotted in Figure \ref{figure04}. In sum, for situations with no added noise, one observes for both algorithms that: i) the effect of PCA rotation dramatically decreases the box geometric volume; ii) the box output (response) means are almost identical in the PC space and in the original input space; and iii) the volume-adjusted box output (response) means are markedly larger in the PC space than in the original input space - indicating a much more concentrated determination of the true bump structure (Figure \ref{figure04}).
Some additional comments:
\begin{enumerate}
\item As each algorithm covers the space (up to step $k = t$), the box support and the box geometric volume are expected to increase monotonically (up to sampling variability) for both algorithms.
\item The boxes are equivalent for the mean of $Z$ and the mode of $Z$ because $Z$ is normal; we expect the fastPRIM box to be centered around the mean, and therefore the conditional mean of $Z$ should be 1 (because in this simulation the mean of $Z$ is 1), while the box for $Z$ given PRIM must have a different conditional expectation. This justifies looking at the mean of $Z$ inside the boxes, and not directly at the mode of $Z$.
\item Since the box output (response) mean is almost perfectly constant at 1 for fastPRIM and close to 1 for PRIM, it is expected that the box volume-adjusted output mean decreases monotonically at the rate of the box geometric volume for both algorithms.
\item Also, as coverage $k,t$ increases, the two boxes $R$ and $R_\rho(p)$ of each algorithm converge to each other (covering most of the space), so it is expected that the output (response) means inside the final boxes converge to each other as well (i.e. towards the whole space mean response 1).
\end{enumerate}
To illustrate the effect of increasing dimensionality, we plot in Figure \ref{figure05} the profiles of gains in volume-adjusted box output (response) mean as a function of increasing dimensionality $p \in \{2,3,\ldots,8,9,10,20,30,\ldots,180,190,200\}$. Here, the gain is measured in terms of a ratio of the quantity of interest in the PC space $\mathfrak X'(p')$ over that in the original input space $\mathfrak X(p)$. Empirical results presented are for the same simulation design and the same fastPRIM and PRIM parameters as described in subsection \ref{Graphical}. Notice the extremely fast increase in volume-adjusted box output (response) mean ratio as a function of dimensionality $p$, that is, the marked larger value of volume-adjusted box output (response) mean in the PC space as compared to the one in the input space for both algorithms. Notice also the weak dependency with respect to the coverage parameters ($k,t$).\\
\begin{figure}
\caption{Gains profiles in volume-adjusted box output (response) mean as a function of dimensionality $p$. For all plots, comparison of box statistics and performance metrics profiles are reported as a ratio of the values obtained in the PC space $\mathfrak X'(p')$ (denoted Y) over the original input space $\mathfrak X(p)$ (denoted X). We show empirical results for varying dimensionality $p \in \{2,3,\ldots,8,9,10,20,30,\ldots,180,190,200\}$.}
\label{figure05}
\end{figure}
Further, using the same simulation design and the same fastPRIM and PRIM parameters as described in subsection \ref{Graphical}, we compared the efficiency of box estimates generated by both algorithms in the PC space $\mathfrak X'(p')$ as a function of dimension $p'$ and coverage parameters $k,t$ for PRIM or fastPRIM, respectively. Notice, the reduced box geometric volume (Figure \ref{figure06}) and increased box volume-adjusted output (response) mean (Figure \ref{figure07}) of fastPRIM as compared to PRIM.
\begin{figure}
\caption{Comparative profiles of box geometric volumes in the PC space $\mathfrak X'(p')$ as a function of dimension $p'$ and coverage parameters $k \in \{1, \ldots, t\}$.}
\label{figure06}
\end{figure}
\begin{figure}
\caption{Comparative profiles of box volume-adjusted output (response) means in the PC space $\mathfrak X'(p')$ as a function of dimension $p'$ and coverage parameters $k \in \{1, \ldots, t\}$.}
\label{figure07}
\end{figure}
Finally, in Figures \ref{figure08} and \ref{figure09} below we compare variances of fastPRIM and PRIM volume-adjusted box output (response) means in the PC space $\mathfrak X'(p')$ as a function of dimension $p'$ and coverage parameters $k,t$ for PRIM or fastPRIM, respectively. Empirical results are presented for the same simulation design and the same fastPRIM and PRIM parameters as described in subsection \ref{Graphical}. Results show that the variance of fastPRIM box geometric volume (Figure \ref{figure08}) is smaller than that of its PRIM counterparts for coverage $t$ not too large ($\leq 10$--$15$), which is matched by a reduced variance of fastPRIM \emph{volume-adjusted} box output (response) mean for coverage $t$ not too small ($\leq 10$--$15$).
\begin{figure}
\caption{Comparative profiles of variances of box geometric volumes in the PC space $\mathfrak X'(p')$ as a function of dimensionality $p'$ and coverage parameters $k \in \{1, \ldots, t\}$.}
\label{figure08}
\end{figure}
\begin{figure}
\caption{Comparative profiles of variances of box volume-adjusted output (response) means in the PC space $\mathfrak X'(p')$ as a function of dimensionality $p'$ and coverage parameters $k \in \{1, \ldots, t\}$.}
\label{figure09}
\end{figure}
Of note, the results in Figures \ref{figure06} and \ref{figure07} above, and similarly in \ref{figure08} and \ref{figure09}, are for the sample size $n = 1000$ of this simulation design. In particular, efficiency results of fastPRIM versus PRIM box estimates show some dependency with respect to coverage parameters $k,t$ for large coverages and increasing dimensionality. As discussed above, this reflects a finite-sample effect favoring PRIM box estimates in these coverages and dimensionality.
Notice finally in Figures \ref{figure06} and \ref{figure07} how the curves approach each other for the largest coverage step $k = t = 20$, and similarly in \ref{figure08} and \ref{figure09} how the curves approach the identity line. This is in line with the aforementioned convergence point of the two boxes $R$ and $R_\rho(p)$ as coverage increases.
\
\
\
\section{Discussion}
Our analysis here corroborates what \citet{DiazRaoDazard2014} have shown on how the rotation of the input space to the one of principal components is a reasonable thing to do when modeling a response-predictor relationship. In fact, \citet{DazardRao2010} use a \emph{sparse} PC rotation for improving bump hunting in the context of high dimensional genomic predictors. And \citet{DazardRaoMarkowitz2012} also show how this technique can be applied to find additional heterogeneity in terms of survival outcomes for colon cancer patients. The geometrical analysis we present here shows that as long as the principal components are not being selected prior to modeling the response, then these improved variables can produce more accurate mode characterizations. In order to elucidate this effect, we introduced the fastPRIM algorithm, starting with a supervised learner and ending up with an unsupervised one. This analysis opens the question on whether it is possible to go from supervised to unsupervised settings in more general bump hunting situations, not only modes; and more generally, whether it is possible to go from unsupervised to supervised in other learning contexts beyond bump hunting.
\vskip0.2in
\noindent
{\bf Acknowledgements}: All authors supported in part by NIH grant NCI R01-CA160593A1. We would like to thank Rob Tibshirani, Steve Marron and Hemant Ishwaran for helpful discussions of the work. This work made use of the High Performance Computing Resource in the Core Facility for Advanced Research Computing at Case Western Reserve University.
\bibpunct[,]{(}{)}{;}{a}{}{,}
{
\end{document} |
\begin{document}
\title{A Converse to a Theorem on Normal Forms of Volume Forms with Respect to a Hypersurface}
\author{Konstantinos Kourliouros}
\address{Imperial College London, Department of Mathematics, Huxley Building 180 Queen's Gate,\\
South Kensington Campus, London SW7, United Kingdom}
\ead{[email protected]}
\begin{abstract}
In this note we give a positive answer to a question asked by Y. Colin de Verdi\`ere concerning the converse of the following theorem, due to A. N. Varchenko: two germs of volume forms are equivalent with respect to diffeomorphisms preserving a germ of an isolated hypersurface singularity, if their difference is the differential of a form whose restriction on the smooth part of the hypersurface is exact.
\end{abstract}
\begin{keyword}
Isolated Singularities \sep De Rham Cohomology \sep Volume Forms \sep Normal Forms
\end{keyword}
\maketitle
\section{Introduction-Main Results}
In this paper we will give a positive answer to a question asked by Y. Colin de Verdi\`ere in \cite{C1} which was formulated as follows: suppose that two germs of symplectic forms at the origin of the plane are equivalent with respect to a diffeomorphism preserving a plane curve germ with an isolated singularity at the origin. Is it true that their difference is the differential of a 1-form whose restriction on the smooth part of the curve is exact? This question asks for the validity of the converse to a general normal form theorem in Lagrangian singularity theory according to which: two germs of symplectic structures are equivalent with respect to diffeomorphisms preserving a Lagrangian variety if their difference is the differential of a 1-form whose restriction on the smooth part of the variety is exact. The proof of this theorem can be easily deduced from the reasoning in A. B. Givental's paper \cite{Gi} using Moser's homotopy method. It holds in any dimension and for arbitrary Lagrangian singularities. Its converse, though, is not so easy to deduce; as it turns out, the main difficulty comes from the fact that the singularities of Lagrangian varieties in dimension higher than two are non-isolated (cf. \cite{Gi}, \cite{Sev}) and their cohomology can be rather complicated. On the other hand, for the 2-dimensional case (where the Lagrangian singularities are indeed isolated) the normal form theorem stated above can be viewed as a special case of a general theorem obtained by A. N. Varchenko in \cite{Var} concerning the normal forms of germs of (powers of) volume forms with respect to an isolated hypersurface singularity. Here we will prove a converse to Varchenko's normal form theorem, which trivially answers Verdi\`ere's question, and it can be formulated as follows:
\begin{thm}
\label{t}
Suppose that two germs of volume forms are equivalent with respect to a diffeomorphism preserving a germ of an isolated hypersurface singularity. Then their difference is the differential of a form whose restriction on the smooth part of the hypersurface is exact.
\end{thm}
The method of proof is as follows: we first prove the theorem in the formal category. For this we use a formal interpolation lemma for the elements of the isotropy group of an isolated hypersurface singularity (Lemma \ref{l1}) which is a variant of the one presented by J.-P. Fran\c{c}oise in \cite{F} and relies on a general interpolation method obtained by S. Sternberg \cite{Ste}. Then we pass to the analytic category using a comparison theorem between the corresponding de Rham cohomologies in the formal and analytic categories (Lemma \ref{l2}). This is analogous to the well known Bloom-Brieskorn theorem \cite{B} for the de Rham cohomology of an analytic space with isolated singularities. But in contrast to the ordinary Bloom-Brieskorn theorem where the cohomology of the complex of K\"ahler differentials is considered, we need to consider instead the cohomology of the so called Givental complex, i.e. the complex of germs of holomorphic forms modulo those that vanish on the smooth part of the hypersurface (which naturally appears in the statements of the theorems above).
\section{De Rham Cohomology of an Isolated Hypersurface Singularity and an Analog of the Bloom-Brieskorn Theorem}
Let $f:(\mathbb{C}^{n+1},0)\rightarrow (\mathbb{C},0)$ be a germ of a holomorphic function with an isolated singularity at the origin and let $(X,0)=\{f=0\}$ be the corresponding hypersurface germ, zero level set of $f$ (we will suppose throughout that the germ $(X,0)$ is reduced). To the germ $(X,0)$ we may associate several complexes of holomorphic forms, quotients of the complex $\Omega^{\bullet}$ of germs of holomorphic forms at the origin of $\mathbb{C}^{n+1}$, the ``largest'' one being the so called complex of K\"ahler differentials:
\[\Omega^{\bullet}_{X,0}=\frac{\Omega^{\bullet}}{df\wedge \Omega^{\bullet-1}+f\Omega^{\bullet}},\]
where the differential is induced by the differential in $\Omega^{\bullet}$ after passing to quotients. The cohomologies of this complex are finite dimensional vector spaces and they have been computed by E. Brieskorn in \cite{B}. In particular, along with the results of M. Sebastiani \cite{S} it follows that:
\begin{equation}
\label{ck}
H^p(\Omega^{\bullet}_{X,0})=\left\{\begin{array}{cl}
\mathbb{C}, & p=0,\\
0, & 0<p<n, p>n \\
\mathbb{C}^{d}, & p=n,\\
\end{array} \right..
\end{equation}
The number $d$ can be interpreted as the degree of non-quasihomogeneity of the germ $f$, i.e.
\[d=\mu-\tau,\]
where $\mu$ is the Milnor number and $\tau$ is the Tjurina number of the singularity $f$:
\[\mu=\dim_{\mathbb{C}}\frac{\Omega^{n+1}}{df\wedge \Omega^n}, \hspace{0.3cm} \tau=\dim_{\mathbb{C}}\frac{\Omega^{n+1}}{df\wedge \Omega^n+f\Omega^{n+1}},\]
\[d=\dim_{\mathbb{C}}\frac{df\wedge \Omega^n+f\Omega^{n+1}}{df\wedge \Omega^n}.\]
Indeed, it is a result of K. Saito \cite{Sa} according to which $f$ is equivalent to a quasihomogeneous germ if and only if it belongs to its gradient ideal, i.e. $f\Omega^{n+1}\subset df\wedge \Omega^n$.
Denote now by $X^*=X\setminus 0$ the smooth part of the hypersurface $X$. In \cite{Fe}, A. Ferrari introduced another important complex associated to $X$ which is the quotient complex of $\Omega^{\bullet}$ modulo the subcomplex $\Omega^{\bullet}(X^*)$ which consists of forms whose restriction on the smooth part $X^*$ of $X$ is identically zero:
\[\tilde{\Omega}^{\bullet}_{X,0}=\frac{\Omega^{\bullet}}{\Omega^{\bullet}(X^*)}.\]
This complex was also used extensively by A. B. Givental in \cite{Gi} and is called the Givental complex in \cite{He}. We adopt the same notation here as well. As it is easy to see there is an identification of the complex of K\"ahler differentials with the Givental complex on the smooth part $X^*$ and thus there is a short exact sequence of complexes:
\begin{equation}
\label{ses0}
0\rightarrow T^{\bullet}_{X,0}\rightarrow \Omega^{\bullet}_{X,0}\rightarrow \tilde{\Omega}^{\bullet}_{X,0}\rightarrow 0,
\end{equation}
where $T^{\bullet}_{X,0}$ is the torsion subcomplex of $\Omega^{\bullet}_{X,0}$ (here is where we need $(X,0)$ to be reduced). Indeed any torsion element vanishes on the smooth part $X^*$ and thus the complex $T^{\bullet}_{X,0}$ is contained in the kernel of the natural projection $\Omega^{\bullet}_{X,0}\rightarrow \tilde{\Omega}^{\bullet}_{X,0}$.
In \cite{G}, G. M. Greuel studied the relationship of the Givental and K\"ahler complexes in the general case where $(X,0)$ defines an $n$-dimensional isolated complete intersection singularity (embedded in some $\mathbb{C}^{m}$). He proves that:
\[T^{p}_{X,0}=0, \hspace{0.3cm} p< n, \]
\[T^p_{X,0}=\Omega^{p}_{X,0}, \hspace{0.3cm} p>n,\]
and also:
\[H^p(\Omega^{\bullet}_{X,0})=0,\hspace{0.3cm} 0<p<n,\]
\[H^p(\tilde{\Omega}^{\bullet}_{X,0})=0, \hspace{0.3cm} p\neq 0,n.\]
Thus, in the particular case where $(X,0)$ is an isolated hypersurface singularity we obtain the following analog of the Brieskorn-Sebastiani result (\ref{ck}) for the cohomology of the Givental complex:
\begin{prop}
\label{p1}
\[H^p(\tilde{\Omega}^{\bullet}_{X,0})=\left\{\begin{array}{cl}
\mathbb{C}, & p=0,\\
0, & 0<p<n, p>n \\
\mathbb{C}^{d}, & p=n,\\
\end{array} \right.,\]
where $d=\mu-\tau$ is the degree of non-quasihomogeneity of the germ $f$.
\end{prop}
\begin{proof}
It suffices only to show the following equality (the zero cohomology is trivial):
\[H^n(\tilde{\Omega}^{\bullet}_{X,0})=\mathbb{C}^d.\]
This in turn has been proved by A. N. Varchenko in \cite{Var}. Here we will give an alternative, simple proof, which is distilled from \cite{C1}. To the germ $f$ we associate the Brieskorn module as in \cite{B}:
\[H_f''=\frac{\Omega^{n+1}}{df\wedge d\Omega^{n-1}}.\]
According to the Sebastiani theorem \cite{S} this is a free module of rank $\mu$ over $\mathbb{C}\{f\}$ and thus the quotient
\[\frac{H''_f}{fH''_f}=\frac{\Omega^{n+1}}{df\wedge d\Omega^{n-1}+f\Omega^{n+1}}\]
is a $\mu$-dimensional $\mathbb{C}$-vector space. Denote now by
\[\mathcal{Q}_{X,0}=\frac{\Omega^{n+1}}{df\wedge \Omega^n+f\Omega^{n+1}}\]
the space of deformations of the germ $(X,0)$. By the fact that $df\wedge d\Omega^{n-1}+f\Omega^{n+1}\subseteq df\wedge \Omega^n+f\Omega^{n+1}$ there is a natural projection:
\[\frac{H''_f}{fH''_f}\stackrel{\pi}{\rightarrow}\mathcal{Q}_{X,0},\]
whose kernel:
\[\ker{\pi}=\frac{df\wedge \Omega^n}{df\wedge d\Omega^{n-1}+f\Omega^{n+1}}\]
is a priori a $d=\mu-\tau$-dimensional vector space. Now, the $n$-th cohomology of the Givental complex is:
\[H^n(\tilde{\Omega}^{\bullet}_{X,0})=\frac{\tilde{\Omega}^n_{X,0}}{d\tilde{\Omega}^{n-1}_{X,0}}=\frac{\Omega^n}{\Omega^n(X^*)+d\Omega^{n-1}},\]
where:
\[\Omega^n(X^*)=\{\alpha \in \Omega^n/df\wedge \alpha \in f\Omega^{n+1}\}.\]
It follows from this that
\[\ker{\pi}=df\wedge H^n(\tilde{\Omega}^{\bullet}_{X,0})\]
and thus there is a short exact sequence:
\begin{equation}
\label{ses1}
0\rightarrow H^n(\tilde{\Omega}^{\bullet}_{X,0})\stackrel{df\wedge}{\rightarrow}\frac{H''_f}{fH''_f}\stackrel{\pi}{\rightarrow}\mathcal{Q}_{X,0}\rightarrow 0.
\end{equation}
This proves that indeed $H^n(\tilde{\Omega}^{\bullet}_{X,0})=\mathbb{C}^d$ as was asserted.
\end{proof}
It follows from the proposition above along with (\ref{ck}) that there is an isomorphism of vector spaces:
\[H^{\bullet}(\Omega^{\bullet}_{X,0})\cong H^{\bullet}(\tilde{\Omega}^{\bullet}_{X,0}).\]
Thus we may formulate the following version of the Poincar\'e lemma for the germ $(X,0)$:
\begin{cor}[cf. \cite{Gi} for $n=1$]
\label{c1}
The germ $(X,0)$ is quasihomogeneous if and only if its Givental (or K\"ahler) complex is acyclic (except in zero degree).
\end{cor}
Finally, we will need the following analog of the Bloom-Brieskorn theorem \cite{B}, which is a comparison of the cohomologies of the analytic and formal Givental complexes. The proof we will give below is in fact a simple variant of the one presented in \cite{B}. Moreover, the fact that $(X,0)$ is an isolated hypersurface singularity plays no significant role; the same proof holds for any analytic space, as long as its singularities are isolated.
\begin{lem}
\label{l2}
Let $\hat{\tilde{\Omega}}^{\bullet}_{X,0}$ be the formal completion of the Givental complex. Then the natural inclusion $\tilde{\Omega}^{\bullet}_{X,0}\hookrightarrow \hat{\tilde{\Omega}}^{\bullet}_{X,0}$ induces an isomorphism of finite dimensional vector spaces:
\[H^{\bullet}(\tilde{\Omega}^{\bullet}_{X,0})\cong H^{\bullet}(\hat{\tilde{\Omega}}^{\bullet}_{X,0}).\]
\end{lem}
\begin{proof}
Following \cite{B} let $\pi:Y\rightarrow X$ be a resolution of singularities in the sense of Hironaka and denote by $A=\pi^{-1}(0)$ the exceptional set, which we may suppose it is given by some equations $y_1\cdots y_r=0$. Let $\Omega^{\bullet}_{Y}$ be the complex of holomorphic forms on $Y$ and let $\Omega^{\bullet}_Y|_{A}$ be its restriction on $A$. Let also
\[\hat{\Omega}^{\bullet}_{Y}=\lim_{\underset{k}{\leftarrow}} \frac{\Omega^{\bullet}_Y}{\frak{m}^k\Omega^{\bullet}_Y}.\]
Consider now the direct image sheaf $R^0\pi_*\Omega^{\bullet}_{Y}$ (this is also called the Noether complex). Since the map $\pi$ is proper this is a coherent sheaf (by Grauert's coherence theorem), which, away from the singular point $0$, can be identified with the Givental complex: $R^0\pi_*\Omega^{\bullet}_Y|_{X^*}\cong \tilde{\Omega}^{\bullet}_{X^*}$. In particular there is an inclusion $j:\tilde{\Omega}^{\bullet}_{X}\rightarrow R^0\pi_*\Omega^{\bullet}_Y$ whose cokernel is concentrated at the singular point $0$ and is thus finite dimensional. Consider now the formal completion of the above complexes. It gives a commutative diagram:
\begin{equation}
\begin{CD}
\tilde{\Omega}^{\bullet}_{X,0} @>j>>H^0(A, \Omega^{\bullet}_Y|_{A}) \\
@VVV @VVV \\
\hat{\tilde{\Omega}}^{\bullet}_{X,0} @>\hat{j}>>H^0(A, \hat{\Omega}^{\bullet}_Y) \\
\end{CD}
\end{equation}
where of course $H^0(A, \Omega^{\bullet}_Y|_{A}) \cong (R^0\pi_*\Omega^{\bullet}_Y)|_0$ and $\hat{j}$ is the formal completion of the inclusion $j$. Indeed, this follows from the fact (cf. \cite{B} and the corresponding references therein):
\[H^0(A,\hat{\Omega}^{\bullet}_Y)\cong \lim_{\underset{k}{\leftarrow}}H^0(A, \frac{\Omega^{\bullet}_Y}{\frak{m}^k\Omega^{\bullet}_Y})\cong \lim_{\underset{k}{\leftarrow}}\frac{H^0(A, \Omega^{\bullet}_Y|_{A})}{\frak{m}^kH^0(A, \Omega^{\bullet}_Y|_{A})}.\]
Now, since the completion functor is exact and by the fact that the cokernel of $j$ is already complete (by finite dimensionality), it follows that
\[\text{Coker}j\cong \text{Coker}\hat{j}.\]
Thus, in order to show the theorem starting from the commutative diagram above, it suffices to show the isomorphism:
\[H^{\bullet}(H^0(A,\Omega^{\bullet}_{Y}|_{A}))\cong H^{\bullet}(H^0(A,\hat{\Omega}^{\bullet}_Y)).\]
This is proved in turn in \cite{B} (points (b)-(d), pp. 140-142).
\end{proof}
\begin{rem}
For the hypersurface case, there is a simple alternative proof of the above lemma, only for the $n$th-cohomology of the Givental complex, without using resolution of singularities: let $\hat{H}''_f$ be the formal completion of the Brieskorn module with respect to the $\frak{m}$-adic topology. Then, by the regularity of the Gauss-Manin connection and the properties of its analytical index \cite{Mal}, there is an isomorphism of $\mathbb{C}[[f]]$-modules\footnote{or equivalently by the Bloom-Brieskorn theorem \cite{B}, but this uses again resolution of singularities.}:
\[\hat{H}''_f\cong H''_f\otimes_{\mathbb{C}\{f\}} \mathbb{C}[[f]]\]
and thus the quotient
\[\frac{\hat{H}''_f}{f\hat{H}''_f}=\frac{\hat{\Omega}^{n+1}}{df\wedge d\hat{\Omega}^{n-1}+f\hat{\Omega}^{n+1}}\]
is again a $\mu$-dimensional vector space. The space of deformations $\mathcal{Q}_{X,0}$ of the germ $(X,0)$ is finite dimensional and thus it is already complete:
\[\mathcal{Q}_{X,0}\cong \hat{\mathcal{Q}}_{X,0}.\]
Following the construction presented in the proof of Proposition \ref{p1} for the cohomology $H^n(\tilde{\Omega}^{\bullet}_{X,0})$ we obtain again a short exact sequence:
\[0\rightarrow H^n(\hat{\tilde{\Omega}}^{\bullet}_{X,0})\stackrel{df\wedge}{\rightarrow}\frac{\hat{H}''_f}{f\hat{H}''_f}\stackrel{\pi}{\rightarrow}\hat{\mathcal{Q}}_{X,0}\rightarrow 0.\]
The proof of the isomorphism
\begin{equation}
\label{iso}
H^n(\tilde{\Omega}^{\bullet}_{X,0})\cong H^n(\hat{\tilde{\Omega}}^{\bullet}_{X,0})
\end{equation}
follows then immediately by comparing the short exact sequence above with the analytic one (\ref{ses1}).
\end{rem}
\section{An Interpolation Lemma for the Isotropy Group of a Hypersurface Singularity}
Let $\mathcal{R}_{X,0}$ be the isotropy group of the germ $(X,0)$, i.e. the group of germs of diffeomorphisms at the origin tangent to the identity and preserving the hypersurface $X=\{f=0\}$. It means that for every $\Phi \in \mathcal{R}_{X,0}$ there exists an invertible function germ $g \in \mathcal{O}$ such that the following hold:
\[\Phi(x)=x \hspace{0.15cm} \text{mod} \hspace{0.15cm} \frak{m}^2, \hspace{0.3cm} g(x)=1\hspace{0.15cm} \text{mod}\hspace{0.15cm} \frak{m},\]
\[\Phi^*f=gf.\]
We will need the following interpolation lemma for the group $\mathcal{R}_{X,0}$ which is a simple variant of the one presented by J.-P. Fran\c{c}oise in \cite{F} and relies on a general method obtained by S. Sternberg in \cite{Ste}. It can also be generalised without difficulty to any germ of an analytic subset $(X,0)$ (whose singularities can be arbitrary).
\begin{lem}
\label{l1}
Any diffeomorphism $\Phi \in \mathcal{R}_{X,0}$ can be interpolated by a 1-parameter family of formal diffeomorphisms $\Phi_t \in \hat{\mathcal{R}}_{X,0}$, i.e. there exists a family of formal function germs $g_t \in \hat{\Omega}^0$ such that:
\[\Phi_0=Id, \hspace{0.3cm} \Phi_1=\Phi,\]
\[g_0=1, \hspace{0.3cm} g_1=g,\]
\[\Phi_t^*f=g_tf.\]
\end{lem}
\begin{proof}
Denote by $(x_1,...,x_{n+1})$ the coordinates at the origin and let $x^{\beta}=x_1^{\beta_1}...x_{n+1}^{\beta_{n+1}}$, $\beta=(\beta_1,...,\beta_{n+1})\in \mathbb{N}^{n+1}$, $|\beta|=\sum_{i=1}^{n+1}\beta_i$. Let
\[\Phi_i(x)=x_i+\sum_{j}\sum_{|\beta|=j}\phi_{i,\beta}x^{\beta}, \hspace{0.3cm} i=1,...,{n+1}\]
be the components of $\Phi$. We will find the interpolation $\Phi_t$ with components in the form:
\[\Phi_{t,i}(x)=x_i+\sum_j\sum_{|\beta|=j}\phi_{i,\beta}(t)x^{\beta}, \hspace{0.3cm} i=1,...,{n+1}\]
as solution of the differential equation:
\begin{equation}
\label{fe}
\Phi'_t=\Phi'_0\circ \Phi_t,
\end{equation}
with boundary conditions $\Phi_0=Id$, $\Phi_1=\Phi$ (cf. \cite{Ste}). We can do this by induction on $j$ and we may assume that the $\phi_{i,\beta}$ are already known for $j\leq k-1$. Then, for $j=k$, equation (\ref{fe}) implies:
\[\phi'_{i,\beta}(t)=\phi'_{i,\beta}(0)+\psi_{i,\beta}(t),\]
where the functions $\psi_{i,\beta}(t)$ are known by induction and they vanish at zero. Integration then gives:
\[\phi_{i,\beta}(t)=\phi'_{i,\beta}(0)t+\int_0^t\psi_{i,\beta}(\tau)d\tau.\]
Obviously the initial condition $\phi_{i,\beta}(0)=0$ is satisfied, and it suffices to choose the $\phi'_{i,\beta}(0)$ such that the boundary condition $\phi_{i,\beta}(1)=\phi_{i,\beta}$ is satisfied as well. Now, by the fact that the family $\Phi_t$ is an interpolation of $\Phi$, we may choose an interpolation $g_t$ of $g$:
\[g_t(x)=g(0)+\sum_{|\beta|\geq 1}g_{\beta}(t)x^{\beta},\]
satisfying the required assumptions (recall that $g(0)=1$) and such that $\Phi_t^*f=g_tf$ for all integer values of $t$. In fact, the coefficients of $\Phi_t$ are polynomials in $t$, and choosing the interpolation $g_t$ with polynomial coefficients in $t$ as well (linear in $t$ for example), it follows that for any $k$ fixed, the homogeneous part in the Taylor expansion of $\Phi_t^*f-g_tf$ is a polynomial in $t$ which vanishes for all integer values of $t$. Thus, it vanishes for all real $t$ as well and this finishes the proof of the lemma.
\end{proof}
\section{Proof of the Theorem}
We will prove here Theorem \ref{t} which can now be restated in the following form:
\begin{thm}
\label{t2}
Let $\omega$ and $\omega'$ be two germs of volume forms which are $\mathcal{R}_{X,0}$-equivalent. Then there exists an $n$-form $\alpha$ such that $\omega-\omega'=d\alpha$ and $[\alpha]=0$ in $H^n(\tilde{\Omega}^{\bullet}_{X,0})$.
\end{thm}
\begin{proof}
Consider first the $n$-form $\alpha$ defined by $\omega-\omega'=d\alpha$ (Poincar\'e lemma) and let $\Phi \in \mathcal{R}_{X,0}$ be the diffeomorphism providing the equivalence: $\Phi^*\omega'=\omega$.
It follows that
\begin{equation}
\label{eq:c1}
\omega-\Phi^*\omega=d\alpha
\end{equation}
holds in $\Omega^{n+1}$. Interpolate now $\Phi$ by the 1-parameter family of formal diffeomorphisms $\Phi_t\in \hat{\mathcal{R}}_{X,0}$ as in Lemma \ref{l1} above. We have that:
\[\omega-\Phi^*\omega=\int_0^1\frac{d}{dt}\Phi_t^*\omega dt=\int_0^1\Phi_t^*(L_{\hat{v}}\omega) dt=\]
\[=\int_0^1\Phi_t^*d(\hat{v}\lrcorner \omega)dt=d\int_0^1\Phi_t^*(\hat{v}\lrcorner \omega)dt,\]
holds in $\hat{\Omega}^{n+1}$, where $\hat{v}$ is the 1-parameter family of formal vector fields generating $\Phi_t$: $\exp{t\hat{v}}=\Phi_t$. Thus, in $\hat{\Omega}^{n+1}$ we may write:
\begin{equation}
\label{eq:c2}
\omega-\Phi^*\omega=d\hat{\alpha},
\end{equation}
where the formal $n$-form $\hat{\alpha}$ is defined by:
\[\hat{\alpha}=\int_0^1\Phi_t^*(\hat{v}\lrcorner \omega)dt+d\hat{h},\]
for some formal $(n-1)$-form $\hat{h}$. Now, since $\Phi_t$ preserves the germ $(X,0)$ for all $t$ and $\hat{v}$ is tangent to its smooth part, it follows that $\hat{\alpha}|_{X^*}=d\hat{h}|_{X^*}$, i.e. that $[\hat{\alpha}]=0$ in $H^n(\hat{\tilde{\Omega}}^{\bullet}_{X,0})$. View now the relation (\ref{eq:c1}) as a relation in $\hat{\Omega}^{n+1}$. By comparing it with the relation (\ref{eq:c2}) we obtain $\alpha=\hat{\alpha}+d\hat{g}$ for some formal $(n-1)$-form $\hat{g}$ and thus $[\alpha]=[\hat{\alpha}]=0$ in $H^n(\hat{\tilde{\Omega}}^{\bullet}_{X,0})$ as well. By the Bloom-Brieskorn Lemma \ref{l2} and in particular by the isomorphism (\ref{iso}) we finally obtain that $[\alpha]=0$ in $H^n(\tilde{\Omega}^{\bullet}_{X,0})$ and this finishes the proof of the theorem.
\end{proof}
\section*{Bibliography}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Symbolic lumping of some\\ catenary, mamillary and circular\\
compartmental systems}
\author[egri]{Edith Egri\thanksref{ize}}
\thanks[ize]{The research upon which the present paper is based has been started during a visit of EE
partially supported by the National Scientific Foundation, Hungary,
under No. T 047132.} \ead{[email protected]}
\address[egri]{Babe\c{s}--Bolyai University, Department of Differential Equations,
Cluj Napoca, Str. M. Kog\u{a}lniceanu, nr.1, 3400, ROMANIA}
\author[toth]{J\'anos T\'oth\thanksref{ize}}
\ead{[email protected]}
\address[toth]{Department of Mathematical Analysis, Budapest University of Technology and Economics,
Budapest, H-1111 Egry J. u. 1., HUNGARY}
\author[brochot]{C\'eline Brochot and Frederic Yves Bois}
\ead{[email protected]} \ead{[email protected]}
\address[brochot]{INERIS,
Institut National de l'Environnement Industriel et des Risques,
Unit\'e de Toxicologie Exp\'erimentale, Parc Alata BP2, 60550
Verneuil En Halatte, FRANCE}
\begin{abstract}
Some of the most important compartmental systems,
such as irreversible catenary, mamillary and circular systems
are symbolically simplified by the method of exact linear lumping.
A few symbolically unmanageable systems are numerically lumped.
Transformation of the qualitative properties under lumping are also traced.
\end{abstract}
\begin{keyword}
lumping\sep reduction of the number of variables\sep circular
system\sep catenary system\sep mamillary system \MSC 80A30\sep
15A09\sep 15A18\sep 34A30 \sep 34C14
\end{keyword}
\end{frontmatter}
\tableofcontents
\section{Introduction}
Compartmental systems are mathematical systems that are frequently
used in biology and mathematics. Also a subclass of the class of
chemical processes can be modeled as compartmental systems. A
compartmental system consists of several compartments with more or
less homogeneous amounts of material. The compartments interact by
processes of transport and diffusion. The dynamics of a
compartmental system is derived from mass balance considerations.
The mathematical theory of compartmental systems is of major
importance: it is the bread-and-butter of analysis for medical
researchers, pharmacokineticists, physiologists, ecologists,
economists as well as other researchers \cite{cob76},
\cite{dist87}, \cite{cob87}, \cite{jack99}, \cite{nestorov98}.
Sometimes it is useful to reduce a model to get a new one with a
lower dimension. The technique's name is lumping, i.e. reduction
of the number of variables by grouping them via a linear or
nonlinear function.
The objective of model reduction methods is to obtain a model that
can describe the response of the original model accurately and
efficiently (cf. \cite{wilkinson08}).
Our aim here is to give explicitly possible lumped compartmental
systems in a few important classes, mainly of symmetric structure such as: mamillary models,
catenary models and circular models. Some classes can be treated in
full generality, some only under restrictions on the parameters.
We also show how to lump systems which are only numerically
lumpable.
The structure of our paper is as follows. In Section 2 the formal
definitions of reactions, compartmental systems, induced kinetic
differential equations and that of exact linear lumping are given.
Next, our symbolic results are presented. Section 4 shows a few
examples which had to be treated numerically. Finally, the results
are discussed and further goals are set. We mention that the present
work is a continuation of a few simple statements in \cite{btb} on
the symbolic lumping of a general two compartment model.
\section{Fundamental definitions}
\subsection{Reaction mechanism, compartmental system}
A \textit{chemical reaction mechanism} is a set of elementary
reactions. Formally, it is a system
$<\mathcal{M,R},\alpha,\beta>$, where
\begin{enumerate}
\item $\mathcal{M}$ and $\mathcal{R}$ are sets with $M$ and $R$
elements ($M,R\in \mathbb{N}$), $\mathcal{R}=\{1,2,\ldots,R\}$ and
$\mathcal{M}=\{\mathcal{X}_1,\mathcal{X}_2,\ldots,\mathcal{X}_M\},$
\item $\alpha$ and $\beta$ are matrices with non-negative
integers, whose names are \textit{stoichiometric
coefficients}\index{stoichiometry coefficients}, and for which
\begin{enumerate}
\item for all $r\in \mathcal{R}$, $\alpha(.,r)\neq\beta(.,r),$
\item if $\alpha(.,r)=\alpha(.,r^{'})$ and
$\beta(.,r)=\beta(.,r^{'}),$ then $r=r^{'},$ \item for all $m\in
\mathcal{M}$ there exists $r\in \mathcal{R}$ such that either
$\alpha(m,r)\neq 0$ or $\beta(m,r)\neq 0$ holds.
\end{enumerate}
\end{enumerate}
This mechanism can be represented in the form
\begin{equation} \label{formal_mechanism}
\sum_{m=1}^{M}\alpha(m,r)\mathcal{X}_{m}\longrightarrow
\sum_{m=1}^{M}\beta(m,r)\mathcal{X}_{m} \qquad (r\in \mathcal{R}).
\end{equation}
The entities on the two sides of the arrow are the \textit{reactant}
and \textit{product complexes}, respectively.
The number $\max\{\sum_{m=1}^{M}\alpha(m,r),r\in\mathcal{R}\}$ is
said to be the \textit{order} of the reaction; thus, \textit{first
order reaction}s are obtained if \(\forall r\in\mathcal{R}\)
\(\sum_{m=1}^{M}\alpha(m,r)\le 1.\) If in a first order reaction it
is also true that the length \(\sum_{m=1}^{M}\beta(m,r)\) of the
product complexes is also less than or equal to 1, then one has a
\textit{compartmental system}. These formal mechanisms are of great
practical importance, and are applied in many areas as mentioned in
the introduction.
Thus, a compartmental system is a reaction mechanism in which the
length of all the complexes is not more than one. In this case we
only have reaction steps of the type
$\mathcal{X}_m\rightarrow\mathcal{X}_p,\,\,
\mathcal{X}_m\rightarrow\mathcal{O},\,\,
\mathcal{O}\rightarrow\mathcal{X}_m \,\,(m,p\in\mathcal{M}),$
where $\mathcal{O}$ is the empty complex.
A \textit{generalized compartmental system} is a reaction in which
all the complexes contain a single species, and all the species
are contained in a single complex, i.e. it is a reaction
consisting of elementary reactions of three types
\begin{equation}\label{gencomp}
y_m\mathcal{X}_m\rightarrow y_p\mathcal{X}_p,\quad
y_m\mathcal{X}_m\rightarrow\mathcal{O},\quad \mathcal{O}\rightarrow
y_m\mathcal{X}_m, \quad (m,p\in\mathcal{M}),
\end{equation}
and \(\mathcal{X}_m\) is the constituent of a single complex only.
A generalized compartmental system with no inflow and with some
outflow is \textit{strictly half-open}, while it is
\textit{strictly open} if it contains inflows and possibly
outflows.
Reaction \eqref{formal_mechanism} is said to be
\textit{mass-conserving} if there exist positive numbers $\rho(1),
\rho(2), \ldots, \rho(M)$ such that for all elementary reactions
\begin{equation}
\sum_{m=1}^{M}\alpha(m,r)\rho(m)= \sum_{m=1}^{M}\beta(m,r)\rho(m)
\end{equation}
holds. If the atomic structure of the species are not known, it is
not trivial to decide whether a reaction is mass-conserving or not
\cite{deak}, \cite{schuster}.
A generalized compartmental system is mass-conserving if and only if
it is closed: the empty complex is not present.
\subsection{Induced kinetic differential equations}
The usual continuous time, continuous state deterministic model
(or, induced kinetic differential equation) of reaction
\eqref{formal_mechanism} describing the time evolution of the
concentrations $c_m$ is the polynomial differential equation
\begin{equation}
\dot{c}_m=\sum_{r=1}^{R}(\beta(m,r)-\alpha(m,r))k_r\prod_{p=1}^M
c_p^{\alpha(p,r)},
\end{equation}
where $k_r$ denotes the rate coefficient, for all $r\in\mathcal{R}.$
The induced kinetic differential equation of a first order reaction
is of the form
\begin{equation}\label{linear}
\dot{c}=Ac+b
\end{equation}
with
\begin{equation}\label{req1}
a_{mp}\geq 0 \quad (m\neq p) \quad \mbox{and} \quad b_m\geq 0
\quad (m,p\in\mathcal{M}).
\end{equation}
The induced kinetic differential equation of a compartmental system
has an additional property
\begin{equation}\label{req2}
-a_{mm}\geq \sum_{\substack{p=1\\p\neq m}}^Ma_{pm}\qquad (m\in
\mathcal{M}).
\end{equation}
Thus, e.g. there is no compartmental system with the induced kinetic
differential equation $\dot{x}=x$ or with $\dot{x}=-0.5x+y, \quad
\dot{y}=-y+x.$
An easy construction proves that the converse of the above
statement is also true: a linear differential equation
\eqref{linear} fulfilling the requirements \eqref{req1} and
\eqref{req2} can be considered as the induced kinetic differential
equation of a compartmental system.
This statement can be generalized to get our next theorem showing
that if the right hand side of a kinetic differential equation is
the sum of univariate monomials and if all the variables have the
same exponent in all the rows, then -- if an additional condition
is also met and only then -- there exists an inducing generalized
compartmental system to the system of differential equations.
\begin{theo}
There exists an inducing generalized compartmental system of $M$
compartments to the system of differential equations
\begin{equation}\label{comp}
\dot{c}_m=\sum_{p=1}^{M}a_{mp}(c_p)^{y^p}+b_m
\end{equation}
(where for all $m,p\in\mathcal{M}$, $y^m, y^p\in\mathbb{N}$, $y^m\neq
y^p$ if $m\neq p$, and $a_{mp},b_m\in\mathbb{R}$) which is
\begin{enumerate}
\item closed, if and only if $b_m=0, -a_{mm},a_{mp},d_m\in
\mathbb{R}_0^+; a_{mm}=d_my^m,$ \item strictly half-open, if and
only if $b_m=0, -a_{mm},a_{mp},d_m\in \mathbb{R}_0^+; a_{mm}\leq
d_my^m, \exists m, a_{mm}<d_my^m,$ \item strictly open, if and
only if $b_m, -a_{mm},a_{mp},d_m\in \mathbb{R}_0^+; a_{mm}\leq
d_my^m, \exists m \, b_m\in \mathbb{R}^+,$
\end{enumerate}
where throughout $$m,p\in\mathcal{M}, m\neq p, d_m:=
-\sum_{p=1}^M a_{pm}/y^m.$$
\end{theo}
\textbf{Proof.}
\begin{enumerate}
\item [\textbf{A)}]The induced kinetic differential equation of
\eqref{gencomp} is
\begin{equation*}
\begin{split}
\dot{c}_m=&-y^m(c_m)^{y^m}\sum_p k_{pm}+y^m\sum_p k_{mp}(c_p)^{y^p},\\
\dot{c}_m=&-y^m(c_m)^{y^m}\sum_p k_{pm}+y^m\sum_p k_{mp}(c_p)^{y^p},\quad (\exists\, k_{0m}\in\mathbb{R}^+)\\
\dot{c}_m=&-y^m(c_m)^{y^m}\sum_p k_{pm}+y^m\sum_p k_{mp}(c_p)^{y^p}+k_{m0},\quad (\exists\, k_{m0}\in\mathbb{R}^+)\\
& m\in\{1,2,\ldots,M\};\, k_{mp}\in\mathbb{R}_0^+;\,y^p\in\mathbb{N};\, p\in\{0,1,\ldots,M\}
\end{split}
\end{equation*}
Comparing the coefficients we get the only if part of the Theorem.
\item[\textbf{B)}] Given \eqref{comp} we construct a generalized
compartmental system \eqref{comp} as its induced kinetic
differential equation:
\begin{equation}\label{const}
y^p\mathcal{X}_p\stackrel{a_{mp}/y^m}{\longrightarrow} y^m\mathcal{X}_m,\quad
y^p\mathcal{X}_p\stackrel{d_p}{\longrightarrow}\mathcal{O},\quad
\mathcal{O}\stackrel{b_m/y^m}{\longrightarrow} y^m\mathcal{X}_m,
\end{equation}
$(m,p\in\{1,2,\ldots,M\},\ m\neq p).$
\end{enumerate}
Reaction \eqref{const} induces closed, strictly half-open or
strictly open reactions, respectively.
\subsection{Exact linear lumping}
A special class of lumping is exact linear lumping.
A system $\dot{c}=f\circ c,$ with $f,c$ $n$-vectors can be
\textit{exactly lumped} by an $\hat{n}\times n$ real constant
matrix $Q$ ($\hat{n}<n$), called \textit{lumping matrix}, if for
$\hat{c}=Qc$ we can find an $\hat{n}$-function vector $\hat{f}$
such that $ \dot{\hat{c}}=\hat{f}\circ \hat{c}.$
Not every system is exactly lumpable. A sufficient and necessary
condition for the existence of exact lumping is $Qf(c)=Qf
(\overline{Q}Qc)$, where $\overline{Q}$ denotes any of the
generalized inverses of $Q$, i.e. $Q\overline{Q}=I_{\hat{n}},$ and
$I_{\hat{n}}$ is the $\hat{n}\times \hat{n}$ identity matrix
\cite{lirab89}.
This condition is equivalent to the requirement that the rows of
matrix $Q$ span an invariant subspace of ${f'}^{\top}(c)$ for all
$c$, where ${f'}^{\top}(c)$ denotes the transpose of the Jacobian of
$f$ at $c$. Therefore, in order to determine lumping matrices $Q$ we
need to determine the fixed ${f'}^{\top}(c)$-invariant subspaces
\cite{gohberg}.
In the case of linear differential equation \eqref{linear}, the
Jacobian matrix is just $A$, and then ${f'}^{\top}(c)=A^{\top}.$ In
this situation, fixed invariant subspaces exist, they are spanned by
eigenvectors, and they correspond to (constant) eigenvalues. So, a
linear system is always exactly lumpable and any
${f'}^{\top}(c)$-invariant subspaces will give a lumping matrix. In
this case therefore, we have to calculate the eigenvectors of
$A^{\top}.$
We mention here that, if $Q$ is an $\hat{n}\times n$ lumping matrix
and $P$ a nonsingular matrix of dimension $\hat{n},$ then $PQ$ is
also a lumping matrix.
It is not true that a given system can be lumped arbitrarily. For
example
\begin{equation}\label{i1i2}
\begin{split}
&S\stackrel{k_1}{\rightarrow} I_1\stackrel{k_2}{\rightarrow} P,\\
&S\stackrel{k_3}{\rightarrow} I_2\stackrel{k_4}{\rightarrow} P,\\
\end{split}
\end{equation}
cannot lead to a lumped system of the type
$S\stackrel{K_1}{\rightarrow} I\stackrel{K_2}{\rightarrow}P,$ for
$I:=I_1+I_2,$ except in the very special case $k_2=k_4$, contrary to
\cite{Conzelmann}.
The next question is whether the lumped system can have an
interpretation in terms of reactions (or more specially, in terms of
compartments), i.e. is the lumped system kinetic? To formulate this
criterion we use the notion of the generalized inverse matrix
\cite{rao73}.
Farkas \cite{fgy99} gave a sufficient and necessary condition
under which certain lumping schemes preserve the kinetic structure
of the original system: \textit{A nonnegative lumping matrix leads
to a kinetic differential equation if and only if it has a
nonnegative generalized inverse.}
For the absence of a nonnegative generalized inverse he proved
the following result: \textit{A nonnegative matrix has no
nonnegative generalized inverse if and only if it has a row such
that in the column of each positive entry there exists another
positive entry.}
\section{Symbolic results}
\subsection{Chains}
In a chain or catenary system the $M$ compartments are arranged in a
linear array such that every compartment exchanges material only
with its immediate neighbors and the possible steps are indicated by
nonnegative reaction rates. The coefficient matrix for a catenary
system has nonzero entries only in the main diagonal and the first
sub-diagonal and in the first super-diagonal; the super-diagonal
entries are nonzero only if the chain is reversible (bidirectional).
\subsubsection{Irreversible chains}
Let us consider a compartmental system, such as the one in
Fig.~\ref{IrrCat}, i.e. a chain with unidirectional steps.
\begin{figure}
\caption{Irreversible catenary system}
\label{IrrCat}
\end{figure}
In this case the coefficient matrix $A$ on the right hand side of
\eqref{linear} takes the form
\[
\left[
\begin{array}{ccccccc}
-k_1 & 0 & 0 & \ldots & 0 & 0 & 0\\
k_1 & -k_2 & 0 & \ldots & 0 & 0 & 0\\
0 & k_2 & -k_3 & \ldots & 0 & 0 & 0\\
\vdots & \vdots & \vdots & & \vdots & \vdots & \vdots\\
0 & 0 & 0 & \ldots & k_{M-2} & -k_{M-1} & 0\\
0 & 0 & 0 & \ldots & 0 & k_{M-1} & 0
\end{array}
\right].
\]
The eigenvalues of the transpose of this triangular matrix are
obviously the elements on the diagonal: $-k_1, -k_2, -k_3, \ldots,
-k_{M-1}$ and $0$ (and are the same as the eigenvalues of the original matrix). So, the corresponding eigenvectors can be found
easily, and they take the form:
\[
\begin{array}{cccccc}
\left[ 1 \right.& 0 & 0 & \ldots & 0 &\left. 0\right] \\
\left[ 1 \right.& \frac{k_1-k_2}{k_1} & 0 & \ldots & 0 &\left. 0\right] \\
\left[ 1 \right.& \frac{k_1-k_3}{k_1} & \frac{(k_1-k_3)(k_2-k_3)}{k_1k_2} & \ldots & 0 &\left. 0\right] \\
\vdots & \vdots & \vdots & \vdots & \vdots & \vdots \\
\left[1 \right.& \frac{k_1-k_{M-1}}{k_1}&\frac{(k_1-k_{M-1})(k_2-k_{M-1})}{k_1k_2}&\ldots &\frac{(k_1-k_{M-1})(k_2-k_{M-1})\cdots(k_{M-2}-k_{M-1})}{k_1k_2\ldots k_{M-1}} &\left. 0\right] \\
\left[1 \right.& 1 & 1 & \ldots & 1 &\left. 1\right] \\
\end{array}
\]
\hide{
\begin{displaymath}
[1,0,0,\ldots, 0],[1, -\frac{k_2-k_1}{k_1}, 0, \ldots, 0], [1,
-\frac{k_3-k_1}{k_1}, \frac{(k_3-k_2)(k_3-k_1)}{k_1k_2}, \ldots,
0],\ldots
\end{displaymath}
\begin{displaymath}
[1,-\frac{k_M-k_1}{k_1},\frac{(k_M-k_2)(k_M-k_1)}{k_1k_2},
-\frac{(k_M-k_3)(k_M-k_2)(k_M-k_1)}{k_1k_2k_3}, \ldots, 0],
[1,1,1,\ldots,1].
\end{displaymath}
}
(Here we only consider the robust case when all the reaction rate
coefficients are different. Then, the above eigenvectors are
independent.)
If we do not neglect inflows and outflows in a catenary system,
the principal diagonal of matrix $A$ will change, i.e. instead of
$-k_i$ we will have $-k_i-\mu_i$ in the first $M-1$ places (where
$\mu_i$ denotes the outflow coefficient for the species $X_i$),
and $-\mu_{M}$ in the last one, instead of $0$. The transpose of
the modified matrix has the following eigenvectors:
\[
\begin{array}{lcccc}
\left[ 1 \right.& 0 & 0 & \ldots &\left. 0\right] \\
\left[ \frac{k_1}{k_1-k_2+\mu_1-\mu_2} \right.& 1 & 0 & \ldots &\left. 0\right] \\
\left[ \frac{k_1k_2}{(k_1-k_3+\mu_1-\mu_3)(k_2-k_3+\mu_2-\mu_3)} \right.& \frac{k_2}{k_2-k_3+\mu_2-\mu_3} & 1 & \ldots &\left. 0\right] \\
\vdots & \vdots & \vdots & \vdots & \vdots \\
\left[1 \right.& 1 & 1 & \ldots &\left. 1\right], \\
\end{array}
\] corresponding to the eigenvalues $-k_1-\mu_1, -k_2-\mu_2, \ldots, -k_{M-1}-\mu_{M-1}, -\mu_{M}.$
The graphical representation in this case, when outflows and inflows are incorporated into an irreversible chain, is:
\begin{figure}
\caption{Irreversible catenary system with inflows and outflows}
\label{IrrCatInOut}
\end{figure}
To get a lumped system for this model, we can take some of the
eigenvectors above to generate several lumping matrices.
For example, let us consider an irreversible chain with five
compartments. Then the induced kinetic differential equation has the
following coefficient matrix:
\[
\left[
\begin{array}{ccccc}
-k_1-\mu_1 & 0 & 0 &0 &0\\
k_1 & -k_2-\mu_2 & 0 &0 & 0\\
0 & k_2 & -k_3-\mu_3 &0 & 0\\
0 & 0 & k_3 &-k_4-\mu_4 & 0\\
0 & 0 & 0 &k_4 & -\mu_5
\end{array}
\right].
\]
Let us compose $Q$ e.g. putting the eigenvectors
$\left[\frac{k_1}{k_1-k_2+\mu_1-\mu_2}\quad 1\quad 0\quad 0\quad
0\right]$ and
$\left[\frac{k_1k_2k_3}{(k_1-k_4+\mu_1-\mu_4)(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)}\quad
\frac{k_2k_3}{(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)}\quad
\frac{k_3}{k_3-k_4+\mu_3-\mu_4} \quad 1\quad
0\right]$
into it as rows. Then,
\[Q^T=
\left[
\begin{array}{cc}
\frac{k_1}{k_1-k_2+\mu_1-\mu_2} & \frac{k_1k_2k_3}{(k_1-k_4+\mu_1-\mu_4)(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)}\\
1 & \frac{k_2k_3}{(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)}\\
0 & \frac{k_3}{k_3-k_4+\mu_3-\mu_4}\\
0 & 1\\
0 & 0\\
\end{array}
\right].
\]
\hide{
\[Q=
\left[
\begin{array}{ccccc}
\frac{k_1}{k_1-k_2+\mu_1-\mu_2} & 1 & 0 & 0 & 0 \\
\frac{k_1k_2k_3}{(k_1-k_4+\mu_1-\mu_4)(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)} & \frac{k_1k_2k_3}{(k_1-k_4+\mu_1-\mu_4)(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)} & \frac{k_3}{k_3-k_4+\mu_3-\mu_4} & 1 & 0 \\
\end{array}
\right].
\]
}
After some calculations, we get the lumped system
$\hat{A}=QA\overline{Q}$, which induces the differential equation
below:
\[\left[
\begin{array}{c}
\dot{\hat{x}}_1\\
\dot{\hat{x}}_2\\
\end{array}
\right]=\left[
\begin{array}{cc}
-k_2-\mu_2 & 0 \\
0 & -k_4-\mu_4\\
\end{array}
\right]\left[
\begin{array}{c}
\hat{x}_1\\
\hat{x}_2
\end{array}
\right],
\] so we obtain a new compartmental system with two
compartments, where
\begin{equation*}
\begin{split}
\hat{x}_1&=\frac{k_1}{k_1-k_2+\mu_1-\mu_2}x_1+x_2\\
\hat{x}_2&=\frac{k_1k_2k_3}{(k_1-k_4+\mu_1-\mu_4)(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)}x_1+\\
&+\frac{k_2k_3}{(k_2-k_4+\mu_2-\mu_4)(k_3-k_4+\mu_3-\mu_4)}x_2+\frac{k_3}{k_3-k_4+\mu_3-\mu_4}x_3+x_4.
\end{split}
\end{equation*}
The corresponding reaction (actually, a chain with no interaction between the compartments) can be illustrated as follows:
\begin{eqnarray*} \label{example1}
\hat{\mathcal{X}}_1\stackrel{k_2+\mu_2}{\rightarrow}\mathcal{O}
\stackrel{k_4+\mu_4}{\leftarrow}\hat{\mathcal{X}}_2,
\end{eqnarray*}
or it can be the mamillary system
$\hat{\mathcal{X}}_1\rightarrow\hat{\mathcal{X}}_3
\leftarrow\hat{\mathcal{X}}_2,$ with $\hat{\mathcal{X}}_3$ neglected in the induced kinetic differential equation.
\subsubsection{Irreversible chains with nonuniform directions}\label{nonunich}
Let us mention here that the irreversible case with nonuniform
direction of the arrows is simpler than the case of a reversible
chain.
As an example, consider the compartmental system with the following diagram:
\begin{figure}
\caption{An irreversible chain with nonuniform directions}
\label{IrrChNonuni}
\end{figure}
We can associate to it the kinetic differential equation $\dot{x}=Ax,$ where
\[A=
\left[
\begin{array}{ccccc}
-k_1 & 0 & 0 &0 & 0\\
k_1 & 0 & k_2 &0 & 0\\
0 & 0 & -k_2-k_3 &0 & 0\\
0 & 0 & k_3 &0 & k_4\\
0 & 0 & 0 &0 & -k_4
\end{array}
\right].
\]
The eigenvalues are: $-k_1, -k_2-k_3, -k_4,$ and $0,$ with
multiplicity 2. From the corresponding eigenvectors,
$[1,0,0,0,0],[0,0,1,0,0],[0,0,0,0,1],
\left[\cfrac{k_2+k_3}{k_2},\cfrac{k_2+k_3}{k_2},1,0,0\right]$ and
$\left[-\cfrac{k_3}{k_2},-\cfrac{k_3}{k_2},0,1,1\right]$ we can
determine a lot of lumping matrices. Depending on our choice, the
lumped system can be kinetic or not.
For example, if we take
\[Q=
\left[
\begin{array}{ccccc}
0 & 0 & 0 &0 & 1\\
0 & 0 & 1 &0 & 0\\
1 & 0 & 0 &0 & 0
\end{array}
\right],
\] then for the lumped system we get the kinetic differential equation system
\[\left[
\begin{array}{c}
\dot{\hat{x}}_1\\
\dot{\hat{x}}_2\\
\dot{\hat{x}}_3
\end{array}
\right]=\left[
\begin{array}{ccc}
-k_4 & 0 & 0\\
0 & -k_2-k_3 & 0\\
0 & 0 & -k_1
\end{array}
\right]\left[
\begin{array}{c}
\hat{x}_1\\
\hat{x}_2\\
\hat{x}_3
\end{array}
\right],
\] which can be illustrated via the diagram:
\begin{eqnarray*}
\mathcal{\hat{X}}_1\stackrel{k_4}{\longrightarrow}&&
\mathcal{O}\stackrel{k_1}{\longleftarrow}\mathcal{\hat{X}}_3\\
&&\uparrow \hbox{\scriptsize{$k_2+k_3$}}\\
&&\mathcal{\hat{X}}_2
\end{eqnarray*}
On the other hand, if we take
\[Q=
\left[
\begin{array}{ccccc}
1 & 0 & 0 &0 & 0\\
0 & 0 & 1 &0 & 0\\
\cfrac{k_2+k_3}{k_2} & \cfrac{k_2+k_3}{k_2} & 1 &0 & 0
\end{array}
\right],
\] this leads
to the matrix
\[\hat{A}=QA\overline{Q}=
\left[
\begin{array}{ccc}
-2k_1 &-\cfrac{k_1k_2}{k_2+k_3} & \cfrac{k_1k_2}{k_2+k_3}\\
-k_2 &-\cfrac{k_2^2}{k_2+k_3}-k_2-k_3 & \cfrac{k_2^2}{k_2+k_3}\\
-\cfrac{2k_1(k_2+k_3)}{k_2}-k_2 & -\cfrac{k_1(k_2+k_3)+k_2^2}{k_2+k_3}-k_2-k_3 & \cfrac{k_1(k_2+k_3)+k_2^2}{k_2+k_3}
\end{array}
\right].
\]
It can be seen that in this case the positivity conditions relative
to the convenient elements of the matrix are not fulfilled, as
expected in accordance with Lemma 1 in \cite{fgy99}. Consequently,
$\hat{A}$ does not result in a lumped system which has a kinetic
differential equation. The new variables are:
\begin{equation*}
\begin{split}
\hat{x}_1&=x_1\\
\hat{x}_2&=x_3\\
\hat{x}_3&=\frac{k_2+k_3}{k_2}x_1+\frac{k_2+k_3}{k_2}x_2+x_3.
\end{split}
\end{equation*}
\subsubsection{Reversible chains}
\begin{figure}
\caption{Reversible Chain}
\label{RevCh}
\end{figure}
Computing the eigenvectors symbolically is intractable even for a
reversible chain consisting of only five compartments. We shall give
a numerical example in section \ref{numrevCh} below.
\subsection{Mamillary systems}
In these systems all the compartments communicate only with a
central compartment, $X_{M+1}$, and there is no direct communication between the other
compartments. The possible steps are indicated by nonnegative
reaction rates. We shall call $X_{M+1}$ the \textit{mother
compartment} and all the other compartments will be called
\textit{daughter} or \textit{peripheral} compartments.
\begin{figure}
\caption{Mamillary system}
\label{Mam}
\end{figure}
Only the irreversible case can be treated symbolically; a reversible
example will be treated numerically in section \ref{numrevMam}. A
class of reversible mamillary systems with a special structure can
still be treated symbolically, this will be shown in subsection
\ref{classmam}.
\subsubsection{Irreversible mamillary systems}
\textbf{Inward flows}
Let us consider an irreversible mamillary system with inward flows
such as the one in Fig.~\ref{InFlows}.
\begin{figure}
\caption{A mamillary system with inward flows only}
\label{InFlows}
\end{figure}
The coefficient matrix of the reaction rate constants is
\[
\left[
\begin{array}{cccccc}
-k_1 & 0 & \ldots & 0 &0 &0\\
0 & -k_2 & \ldots & 0 &0 & 0\\
\vdots & \vdots & \vdots & \vdots &\vdots &\vdots\\
0 & 0 & \ldots & 0 &-k_M & 0\\
k_1 & k_2 & \ldots & k_{M-1}& k_M & 0
\end{array}
\right].
\]
The eigenvalues of the transpose of this lower triangular matrix are
obviously the elements on the diagonal: $-k_1, -k_2, -k_3, \ldots,
-k_M$ and $0,$ with the corresponding eigenvectors:
\begin{displaymath}
[1,0,\ldots,0,0],[0,1, \ldots,0,0],\ldots, [0,0, \ldots,1,0], [1,1,\ldots,1,1].
\end{displaymath}
Notice that if we denote by $e_i$ the $i$-th element of the standard
basis for $\mathbb{R}^{M+1}$, $i\in\{1,2,\ldots, M+1\}$, then $e_1,
e_2,\ldots, e_M$ create the first $M$ eigenvectors of such a
compartmental system.
To lump the system of differential equations induced by this model,
we can choose some of these eigenvectors to generate several lumping
matrices.
In the first case, if we do not use the vector $[1,1,\ldots,1,1]$ to
generate $Q$, only $\hat{M}$ of the first $M$ elements of the
standard basis for $\mathbb{R}^{M+1}$, that appear above, we will
receive a new compartmental system, with $\hat{M}$ compartments,
where the new species are taken from the old external ones only. In
this case lumping actually discards some peripheral compartments and
permutes the remaining ones.
If we take an $\hat{M}\times\hat{M}$ nonsingular matrix, $P,$ i.e. a
basis transformation matrix, then $PQ$ will be another lumping
matrix. It will consist of some of $P$'s columns, and values being 0
elsewhere. Accordingly the new compartments will be the linear
combinations of certain old peripheral compartments. An obvious
interpretation is that they are measured together.
Assume e.g. we have chosen $Q$ in the following way: it consists
of $e_i$, $e_j$ and $e_k$ of the natural basis
$\mathbb{R}^{M+1}$, $i,j,k\in\{1,2,\ldots, M\}$. Let $P\in \mathbb{R}^{3\times 3}$ be an invertible matrix.
Then
$$\hat{x}=PQx=x_ip_{.1}+x_jp_{.2}+x_kp_{.3},$$ where
$p_{.1},p_{.2},p_{.3}$ are the linearly independent columns of $P$, and the coordinates of the new composition vector
$\hat{x}$ are linear combinations of the external species $x_i, x_j$
and $x_k$ with (in general) different coefficients.
Now, suppose, the eigenvector $[1,1,\ldots,1]$ is contained in the
rows of matrix $Q.$ In this case the system of
equation $\hat{x}_i=\sum_{j=1}^{M+1}q_{ij}x_j$ defines new
compartments, composed by some of the existing peripheral ones, plus
the sum of the original one.
As an example let us consider the following irreversible mamillary
system with inward flows:
\begin{eqnarray*}
&&\mathcal{X}_3\\
&&\downarrow \hbox{\scriptsize{$k_3$}}\\
\mathcal{X}_1\stackrel{k_1}{\rightarrow}&&\mathcal{X}_4\stackrel{k_2}{\leftarrow}\mathcal{X}_2
\end{eqnarray*}
The induced kinetic differential equation is $\dot{x}=Ax$, where
\[A=
\left[
\begin{array}{cccc}
-k_1 & 0 & 0 & 0\\
0 & -k_2 & 0 & 0\\
0 & 0 & -k_3 & 0\\
k_1 & k_2 & k_3 & 0
\end{array}
\right].
\]
Using the fact that the eigenvectors of $A^T$ are $[1,0,0,0],
[0,1,0,0], [0,0,1,0],$ and $[1,1,1,1],$ we can set e.g.
\[Q=
\left[
\begin{array}{cccc}
0 & 0 & 1 & 0\\
1 & 0 & 0 & 0\\
1 & 1 & 1 & 1
\end{array}
\right].
\]
The new variables become
\[\left[
\begin{array}{c}
\hat{x}_1\\
\hat{x}_2\\
\hat{x}_3\\
\end{array}
\right]=\left[
\begin{array}{cccc}
0 & 0 & 1 & 0\\
1 & 0 & 0 & 0\\
1 & 1 & 1 & 1
\end{array}
\right]\left[
\begin{array}{c}
x_1\\
x_2\\
x_3\\
x_4
\end{array}
\right].
\] and the lumped system has the variables
\begin{equation}
\begin{split}
\hat{x}_1&=x_3\\
\hat{x}_2&=x_1\\
\hat{x}_3&=x_1+x_2+x_3+x_4.
\end{split}
\end{equation}
The resultant process obeys a differential equation
\[\left[
\begin{array}{c}
\dot{\hat{x}}_1\\
\dot{\hat{x}}_2\\
\dot{\hat{x}}_3\\
\end{array}
\right]=\left[
\begin{array}{ccc}
-k_3 & 0 & 0\\
0 & -k_1 & 0\\
0 & 0 & 0
\end{array}
\right]\left[
\begin{array}{c}
\hat{x}_1\\
\hat{x}_2\\
\hat{x}_3\\
\end{array}
\right]
\]
which is the induced kinetic differential equation e.g. of the reaction
\begin{eqnarray*} \label{example1}
\hat{\mathcal{X}}_1\stackrel{k_3}{\rightarrow}\mathcal{O}
\stackrel{k_1}{\leftarrow}\hat{\mathcal{X}}_2,
\end{eqnarray*} that is no more a mamillary system. Or, again, we can take
$\hat{\mathcal{X}}_1\rightarrow\hat{\mathcal{X}}_3
\leftarrow\hat{\mathcal{X}}_2,$ and say we are not interested in
the change of concentration of $\hat{\mathcal{X}}_3;$ we consider it as an external species.
\textbf{Outward flows}
\begin{figure}
\caption{Outward flows}
\label{OutFlows}
\end{figure}
The induced kinetic differential equation is $\dot{x}=Ax,$ where
\[ A=
\left[
\begin{array}{ccccc}
0 & 0 & \ldots & 0 & k_1\\
0 & 0 & \ldots & 0 & k_2\\
\vdots & \vdots & \vdots & \vdots &\vdots\\
0 & 0 & \ldots & 0 & k_M\\
0 & 0 & \ldots & 0 & -K
\end{array}
\right],
\] with $K=(k_1+k_2+\cdots+k_M).$
The transpose of it, $A^T$, has a single eigenvalue
$-K$ with the eigenvector
$[0,0,\ldots,0,1]$, and an eigenvalue 0 with multiplicity $M$,
with the corresponding independent eigenvectors
\[
\begin{array}{ccccc}
\left[ 1 \right.& 0 & \ldots & -\frac{k_1}{k_M} &\left. 0\right] \\
\left[ 0 \right.& 1 & \ldots & -\frac{k_2}{k_M} &\left. 0\right] \\
\vdots & \vdots & \vdots & \vdots & \vdots \\
\left[ 0 \right.& 0 & \ldots & -\frac{k_{M-1}}{k_M} &\left. 0\right] \\
\left[ 0 \right.& 0 & \ldots & \frac{k_1+\cdots+k_M}{k_M} &\left. 1\right]
\end{array}
\]
\hide{
\begin{displaymath}
[1,0,\ldots,-\frac{k_1}{k_M},0],[0,1,
\ldots,-\frac{k_2}{k_M},0],\ldots, [0,0,
\ldots,-\frac{k_{M-1}}{k_M},0],
[0,0,\ldots,\frac{k_1+\cdots+k_M}{k_M},1].
\end{displaymath}
}
If we build up a lumping matrix, $Q$, we get a reasonable result only
with eigenvectors belonging to the multiple eigenvalue $0$, since we
get $\hat{A}=0$ in all other cases, and it is not worth taking such
a $Q$.
If the eigenvector $[0,0,\ldots,0,1]$ appears in the lumping matrix,
we obtain a lumped system, whose coefficient matrix consists of the
$0$ elements, except a single element on the principal diagonal,
which has the value $-K.$ This can be represented by the extremely
simple reaction
$\hat{\mathcal{X}}\stackrel{K}{\rightarrow}\mathcal{O}.$
\hide{ As an example consider the following lumping matrix
\[
\left[
\begin{array}{ccccc}
0 & 0 & 1 & -\frac{k_3}{k_4} & 0\\
0 & 0 & 0 & 0 & 1\\
0 & 0 & 0 & \frac{k_1+k_2+k_3+k_4}{k_4} & 1
\end{array}
\right].
\]
}
\textbf{Irreversible mamillary systems with inward and outward flows}
Instead of giving a general treatment we shall take an example again, as in subsection \ref{nonunich}. Let us consider the mamillary system below.
\begin{figure}
\caption{An irreversible mamillary system with inward and outward flows}
\label{InOutFlows}
\end{figure}
The coefficient matrix of the induced kinetic differential equation is
\[
\left[
\begin{array}{cccccc}
-k_1 & 0 & 0 & 0 & 0 & 0 \\
0 & -k_2 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & k_3\\
0 & 0 & 0 & 0 & 0 & k_4\\
0 & 0 & 0 & 0 & 0 & k_5\\
0 & 0 & 0 & 0 & 0 & -(k_3+k_4+k_5)\\
\end{array}
\right],
\] with the simple eigenvalues $-k_1,-k_2, -K:=-(k_3+k_4+k_5),$
and with the triple eigenvalue 0. The corresponding eigenvectors of
its transpose are $[1,0,0,0,0,0],$ $[0,1,0,0,0,0],$ $[0,0,0,0,0,1],$
$\left[0,0,1,0,0,\cfrac{k_3}{K}\right],$
$\left[0,0,1,0,-\cfrac{k_3}{k_5},0\right],$
$\left[0,0,1,-\cfrac{k_3}{k_4},0,0\right].$
Taking the lumping matrix $$Q=\left[ \begin {array}{cccccc}
0&0&1&0&0&{\cfrac {k_{{3}}}{K}}\\\noalign{
}0&0&1&-{\cfrac
{k_{{3}}}{k_{{4}}}}&0&0
\\\noalign{
}1&0&0&0&0&0\end {array} \right],$$ we get the
lumped system
\begin{equation}
\begin{split}
\hat{x}_1&=x_3+\cfrac{k_3}{K}x_6\\
\hat{x}_2&=x_3-\cfrac{k_3}{k_4}x_4\\
\hat{x}_3&=x_1.
\end{split}
\end{equation}
In this case the lumped system's differential equation will be very
simple:
\[\left[
\begin{array}{c}
\dot{\hat{x}}_1\\
\dot{\hat{x}}_2\\
\dot{\hat{x}}_3\\
\end{array}
\right]=\left[
\begin{array}{ccc}
0 & 0 & 0\\
0 & 0 & 0\\
0 & 0 & -k_1
\end{array}
\right]\left[
\begin{array}{c}
\hat{x}_1\\
\hat{x}_2\\
\hat{x}_3\\
\end{array}
\right].
\]
We can associate it to the reaction
$\hat{\mathcal{X}}_3\stackrel{k_1}{\rightarrow}\mathcal{O}.$
\subsubsection{Simplicial compartmental systems}\label{classmam}
Suppose we have the following formal reaction steps as follows
\begin{eqnarray*} \label{example1}
\mathcal{O}\stackrel{d}{\leftarrow}\mathcal{X}_i
\begin{array}{c}
\hbox{\scriptsize{$c_{j-i}$}}\\[-0.9em]
\rightleftharpoons\\[-1.4em]
\hbox{\scriptsize{$c_{M-j+i}$}}\end{array}
\mathcal{X}_j\stackrel{d}{\rightarrow}\mathcal{O},\quad \mbox{ for }
i<j;\,i,j\in\{1,2,\ldots,M\}.
\end{eqnarray*}
The fact that the reaction rate coefficients are the same for many
reaction-antireaction pairs may come from the application when the
compartments are physically separated (by a membrane e.g.) parts of
the space. In general, such kinds of assumption are made in cases
when diffusion is modeled by mass transport between homogeneous
boxes; such models often arise \cite{sh}, \cite{sherr}.
The transpose of the coefficient matrix of the induced kinetic differential equation is
\[ A^{\top}=
\left[
\begin{array}{ccccc}
c_0 & c_1 & c_2 & \ldots & c_{M-1}\\
c_{M-1} & c_0 & c_1 & \ldots & c_{M-2}\\
c_{M-2} & c_{M-1} & c_0 & \ldots & c_{M-3}\\
\vdots & & & & \vdots \\
c_1 & c_2 & c_3 & \ldots & c_0
\end{array}
\right],
\] with $c_0:=-(c_1+c_2+\cdots +c_{M-1}+d).$ Such a matrix $A^{\top}$
(for which every row is a cyclic permutation of the top row) is
called a cyclic, or circulant matrix. Its eigenvalues can be
calculated easily \cite{gray06}. (Certainly, $A$ is a cyclic matrix,
as well.)
\begin{eqnarray*}
\begin{split}
\lambda_1&=\sum_{m=0}^{M-1}c_m\\
\lambda_2&=\sum_{m=0}^{M-1}c_m\varepsilon_1^m\\
\vdots\\
\lambda_M&=\sum_{m=0}^{M-1}c_m\varepsilon_{M-1}^m\\
\end{split}
\end{eqnarray*} where $\varepsilon_k:=e^{\frac{2k\pi i}{M}},\,(k=0,1,\ldots,M-1)$
are the roots of unity. \\The corresponding eigenvectors are
\[
\begin{array}{ccccc}
\left[ 1 \right.& 1 & 1 & \ldots &\left. 1\right] \\
\left[ 1 \right.& \varepsilon_1 & \varepsilon_1^2 & \ldots &\left. \varepsilon_1^{M-1}\right]\\
\vdots & & & & \vdots \\
\left[ 1 \right.& \varepsilon_{M-1} & \varepsilon_{M-1}^2 & \ldots &\left. \varepsilon_{M-1}^{M-1}\right]
\end{array}
\]
Here we meet a new problem which we will not discuss here further:
obviously, in the applications one needs real lumped systems.
To be more concrete, let us consider the special case (studying the
problem of complex numbers in the special case) of
Fig.~\ref{ciklikusSik}.
\begin{figure}
\caption{A simplicial compartmental system}
\label{ciklikusSik}
\end{figure}
(This system is a special reversible circular system with outflow.)
Now
$\varepsilon_0=1,\varepsilon_1=\cfrac{-1+i\sqrt{3}}{2},\varepsilon_2=\cfrac{-1-i\sqrt{3}}{2},$
thus the eigenvalues of
\[
\left[
\begin{array}{ccc}
c_0 & c_1 & c_2 \\
c_2 & c_0 & c_1 \\
c_1 & c_2 & c_0
\end{array}
\right]
\] are $\lambda_1=c_0+c_1+c_2,$ $\lambda_2=c_0+c_1\varepsilon_1+c_2
\varepsilon_1^2=c_0+c_1\cfrac{-1+i\sqrt{3}}{2}+c_2\cfrac{-1-i\sqrt{3}}{2},$
and $\lambda_3=c_0+c_1\varepsilon_2+c_2\varepsilon_2^2=c_0+c_1
\cfrac{-1-i\sqrt{3}}{2}+c_2\cfrac{-1+i\sqrt{3}}{2}.$
For $c_1=c_2$ the corresponding eigenvectors are $[1,1,1], [-1,0,1], [-1,1,0].$
In this case we can construct a few lumping matrices which lead to a new, simpler
system.
Furthermore, if $c_1\neq c_2$ we obtain the following eigenvectors
(writing $b:=c_1$ and $c:=c_2$): $[1,1,1],$\\
$\left[-\cfrac{|b-c|-i\sqrt{3}(b+c)}{(2b+c)\textup{sign}(b-c)-i\sqrt{3}c},
-\cfrac{(b+2c)\textup{sign}(b-c)+i\sqrt{3}b}{(2b+c)\textup{sign}(b-c)-i\sqrt{3}c},1\right],$\\
$\left[-\cfrac{|b-c|+i\sqrt{3}(b+c)}{(2b+c)\textup{sign}(b-c)+i\sqrt{3}c},
-\cfrac{(b+2c)\textup{sign}(b-c)-i\sqrt{3}b}{(2b+c)\textup{sign}(b-c)+i\sqrt{3}c},1\right].$
\textbf{The effect of lumping on qualitative properties}
One of the major questions connected with lumping is: how are the
qualitative properties of the lumped and of the original system
connected? We investigated this problem in a more general setting in
\cite{tlrt}; here we add a new statement: suppose we lump a system
of $M$ compartments with a coefficient matrix having real
eigenvalues into a compartmental system of $\hat{M}$ compartments.
Then, none of the concentration versus time curves can have more
than $\hat{M}-2$ local extrema \cite{Pota}.
\section{A few numerical examples}\label{numer}
\subsection{A reversible chain}\label{numrevCh}
Consider a reversible chain formed by five chemical species, let
them be $\mathcal{X}_1, \mathcal{X}_2, \mathcal{X}_3, \mathcal{X}_4$
and $\mathcal{X}_5.$ Let the forward and reverse reaction rates be
$$k_1=1, k_2=4, k_3=2, k_4=1 \mbox{ and } k_{-1}=1, k_{-2}=4, k_{-3}=5,
k_{-4}=2,$$ respectively, as it can be seen in the following
chemical mechanism:
\begin{eqnarray*}
\mathcal{X}_1
\begin{array}{c}
\hbox{\scriptsize{1}}\\[-1.1em]
\rightleftharpoons\\[-1.1em]
\hbox{\scriptsize{1}}\end{array}\mathcal{X}_2
\begin{array}{c}
\hbox{\scriptsize{4}}\\[-1.1em]
\rightleftharpoons\\[-1.1em]
\hbox{\scriptsize{4}}\end{array}\mathcal{X}_3
\begin{array}{c}
\hbox{\scriptsize{2}}\\[-1.1em]
\rightleftharpoons\\[-1.1em]
\hbox{\scriptsize{5}}\end{array}\mathcal{X}_4
\begin{array}{c}
\hbox{\scriptsize{1}}\\[-1.1em]
\rightleftharpoons\\[-1.1em]
\hbox{\scriptsize{2}}\end{array}\mathcal{X}_5
\end{eqnarray*}
We can associate it with the following induced kinetic differential
equation system:{\small
$$\left\{
\begin{array}{ll}
\dot{x}_1=-x_1+x_2,\\
\dot{x}_2=x_1-5x_2+4x_3,\\
\dot{x}_3=4x_2-6x_3+5x_4,\\
\dot{x}_4=2x_3-6x_4+2x_5,\\
\dot{x}_5=x_4-2x_5.\\
\end{array}
\right.$$}
The eigenvectors of the transpose of its coefficient matrix are
collected in the rows of the matrix below: {\small
$$\left[ \begin {array}{ccccc} 0.2&- 0.2&-
0.2& 0& 1\\\noalign{
}- 0.689897&
0.069693& 0.240408& 0.449489& 1\\\noalign{
}
0.289897&- 2.869693& 4.159591&- 4.449489& 1
\\\noalign{
}- 0.2& 1&- 0.2&- 2& 1
\\\noalign{
} 1& 1& 1& 1& 1\end {array} \right].
$$}
If we take as a lumping matrix
$$Q=\left[ \begin {array}{ccccc} 0.289897&- 2.869693& 4.159591
&- 4.449489& 1\\\noalign{
} 0.2&- 0.2&-
0.2& 0& 1\end {array} \right],$$ after some calculations we receive
$$\hat{A}=\left[ \begin {array}{cc} - 10.898979&
0\\\noalign{
}{ 0 }&- 2\end {array} \right]$$
and we obtain the lumped model
\begin{eqnarray*}
\hat{\mathcal{X}}_1 \stackrel{10.89}{\longrightarrow}
\mathcal{O} \stackrel{2}{\longleftarrow} \hat{\mathcal{X}}_2.
\end{eqnarray*}
\subsection{A reversible mamillary system}\label{numrevMam}
In the following, consider a reversible compartmental system with
five compartments. Let $\mathcal{X}_5$ be the mother compartment,
and $\mathcal{X}_1, \mathcal{X}_2, \mathcal{X}_3, \mathcal{X}_4,$
the peripheral ones. Suppose that all of the reaction rates
corresponding to the reactions from the mother compartment to the
peripheral ones have the same value, $K.$ Whereas, the reverse
reactions also have identical reaction rates, $k.$
To this chemical mechanism we can set up the system $$\left\{
\begin{array}{ll}
\dot{x}_1= -kx_1+Kx_5\\
\dot{x}_2= -kx_2+Kx_5\\
\dot{x}_3= -kx_3+Kx_5\\
\dot{x}_4= -kx_4+Kx_5\\
\dot{x}_5= k(x_1+x_2+x_3+x_4)-4Kx_5,\\
\end{array}
\right.$$ which describes the time evolution of the concentrations
of the species taking part in the reaction. Consequently, the
coefficient matrix will be
$$\left[ \begin {array}{ccccc} -k&0&0&0&K\\\noalign{
}0&-k&0&0&K
\\\noalign{
}0&0&-k&0&K\\\noalign{
}0&0&0&-k&K
\\\noalign{
}k&k&k&k&-4\,K\end {array} \right].
$$
Its transpose has a triple eigenvalue, $-k$, and two single
eigenvalues, 0 and $-k-4K.$ The corresponding eigenvectors are
as follows: $[-1,0,0,1,0],$ $[-1,0,1,0,0],$ $[-1,1,0,0,0],$
$[1,1,1,1,1]$ and
$\left[-\cfrac{k}{4K},-\cfrac{k}{4K},-\cfrac{k}{4K},-\cfrac{k}{4K},1\right].$
Now, we can take several lumping matrices. For example if
$$Q=\left[ \begin {array}{ccccc} -1&0&0&1&0\\\noalign{
}-
\cfrac{k}{4K}&-\cfrac {k}{4K}&-\cfrac {k}{4K}&-\cfrac
{k}{4K}&1\\\noalign{
}-1&1&0&0&0\end {array} \right],
$$ we obtain the lumped system
$$\left\{
\begin{array}{ll}
\dot{\hat{x}}_1=-k\hat{x}_1 \\
\dot{\hat{x}}_2=-(4K+k)\hat{x}_2\\
\dot{\hat{x}}_3=-k\hat{x}_3,
\end{array}
\right.$$ and the corresponding model is
\begin{eqnarray*}
\hat{\mathcal{X}}_1\stackrel{k}{\longrightarrow}&&\mathcal{O}\stackrel{k}{\longleftarrow}\hat{\mathcal{X}}_3\\
&&\uparrow \hbox{\scriptsize{$4K+k$}}\\
&&\hat{\mathcal{X}}_2
\end{eqnarray*}
\subsection{Cycles}
\subsubsection{Irreversible cycles}
\begin{figure}
\caption{Irreversible circular system}
\label{IrrCirc}
\end{figure}
Consider an irreversible circular system with three compartments,
$\mathcal{X}_1,\mathcal{X}_2,\mathcal{X}_3$ and the corresponding
reaction rates $k_1,k_2,k_3.$ Then the coefficient matrix of the
induced kinetic differential equation is
$$A=\left[ \begin {array}{ccc} -k_1&0&k_3\\
\noalign{
}k_1&-k_2&0\\
\noalign{
}0&k_2&-k_3
\end {array} \right].$$
Since the eigenvectors of $A^{\top}$ are $[1,1,1],$\\
$\left[-\cfrac{k_1+k_2-k_3+\sqrt{k_1^2+(k_2-k_3)^2-2k_1(k_2+k_3)}}{2k_3},\right.$\\
$\left.\cfrac{k_2\big(-k_1+k_2-k_3+\sqrt{k_1^2+(k_2-k_3)^2-2k_1(k_2+k_3)}\big)}{2k_1k_3},1\right]$
and\\
$\left[\cfrac{-k_1-k_2+k_3+\sqrt{k_1^2+(k_2-k_3)^2-2k_1(k_2+k_3)}}{2k_3},\right.$\\
$\left.-\cfrac{k_2\big(k_1-k_2+k_3+\sqrt{k_1^2+(k_2-k_3)^2-2k_1(k_2+k_3)}\big)}{2k_1k_3},1\right]$
, respectively, building up $Q$ from the first two eigenvectors, in
the special case $k_1=1, k_2=2$ and $k_3=3$ we obtain the lumping
matrix
$Q=\left[ \begin {array}{ccc} 1&1&1\\
-\cfrac{i\sqrt{2}}{3}&\cfrac{-2+2i\sqrt{2}}{3}&1
\end {array} \right].$
After some calculations we get
$\hat{A}=\left[ \begin {array}{cc} 0&0\\
0&-3-\sqrt{2}i
\end {array} \right].$
To receive a real valued matrix $\hat{A},$ we should take, for
example $k_1=1, k_2=1/2$ and $k_3=5/128.$
We can also illustrate the region of those values $k_2\in[0,20]$ and
$k_3\in[0,20],$ for which $k_1=1$ results in a lumped system with
kinetic structure, that is, a real valued matrix, $\hat{A}$ (see
fig. \ref{RegionPlot}).
\begin{figure}
\caption{The preferred values of $k_2$ and $k_3$ are those
\textbf{outside} the region shown}
\label{RegionPlot}
\end{figure}
\subsubsection{Reversible cycles}
Consider the reversible cycle with five compartments $\mathcal{X}_1,
\mathcal{X}_2, \mathcal{X}_3, \mathcal{X}_4, $ and $\mathcal{X}_5.$
Suppose the reaction rates are all equal to a positive real number
$k.$
Then, we can assign to this mechanism a linear differential equation
to describe the time evolution of the species's concentrations, with
coefficient matrix
$$A=\left[ \begin {array}{ccccc} -2\,k&k&0&0&k\\\noalign{
}k&-2\,k
&k&0&0\\\noalign{
}0&k&-2\,k&k&0\\\noalign{
}0&0&k&-2\,k
&k\\\noalign{
}k&0&0&k&-2\,k\end {array} \right].$$
This is a special circulant matrix. $A^{\top}=A$ has two double
eigenvalues, $\cfrac{-5+\sqrt{5}}{2}k$ and $\cfrac{-5-\sqrt{5}}{2}k,$
and a single one, 0. With the corresponding eigenvectors,
$\left[\cfrac{\sqrt{5}-1}{2},\cfrac{-\sqrt{5}+1}{2},-1,0,1\right],$
$\left[-1,\cfrac{-\sqrt{5}+1}{2},\cfrac{\sqrt{5}-1}{2},1,0\right],$\\
$\left[\cfrac{-\sqrt{5}-1}{2},\cfrac{\sqrt{5}+1}{2},-1,0,1\right],$
$\left[1,\cfrac{\sqrt{5}+1}{2},\cfrac{-\sqrt{5}-1}{2},1,0\right],$
and $[1,1,1,1,1]$ we can determine several invariant subspaces in
order to find lumping matrices. Choose, for example,
$$Q=\left[ \begin {array}{ccccc} 1&1&1&1&1\\\noalign{
}-1&
\frac{\sqrt{5}+1}{2}&\frac{-\sqrt{5}-1}{2}&1&0\\\noalign{
}
\frac{-\sqrt{5}-1}{2}&\frac{\sqrt{5}+1}{2}&-1&0&1\end {array}
\right].$$ In this case the lumped system will be
\[\left[
\begin{array}{c}
\dot{\hat{x}}_1\\
\dot{\hat{x}}_2\\
\dot{\hat{x}}_3\\
\end{array}
\right]=\left[
\begin{array}{ccc}
0 & 0 & 0\\
0 & -\frac{5+\sqrt{5}}{2}k & 0\\
0 & 0 & -\frac{5+\sqrt{5}}{2}k
\end{array}
\right]\left[
\begin{array}{c}
\hat{x}_1\\
\hat{x}_2\\
\hat{x}_3\\
\end{array}
\right].
\]
We can associate it to the model
$\hat{\mathcal{X}}_2\stackrel{\frac{5+\sqrt{5}}{2}k}
{\longrightarrow}\mathcal{O}\stackrel{\frac{5+\sqrt{5}}{2}k}
{\longleftarrow}\hat{\mathcal{X}}_3.$
\section{Discussion, plans}
The most important classes of compartmental systems have been
reviewed from the point of view of symbolic lumpability. Practically
interesting lumped systems mainly arise from numerical calculations,
which can be carried out in all cases without difficulties. We used
the sentence ``which is the induced kinetic differential equation of
the reaction'' recurrently. However, given a kinetic differential
equation the inducing reaction is by far not unique~\cite[pages
67--69]{ET}.
\section{Appendix}
\textit{Suppose we are given two natural numbers, $n$ and $\hat{n}$,
$\hat{n}\leq n$, and an $\hat{n}\times n$ matrix $Q$ of full rank
with real elements. The question arises: what are the necessary and
sufficient conditions for the existence of a nonsingular
$\hat{n}\times\hat{n}$ matrix $P$ such that all elements of $PQ$ are
nonnegative?}
This question is hard enough to answer in a general case. Here is a
result when $\hat{n}\leq2.$ One can see, if $\hat{n}=1$, the
elements of $Q$ must have identical sign, for the existence of such
$P.$
Now, assume $\hat{n}=2,$ and take
\begin{equation}\label{Q}
Q=
\left[
\begin{array}{cccc}
a_{11} & a_{12} & \ldots & a_{1n}\\
a_{21} & a_{22} & \ldots & a_{2n}
\end{array}
\right].
\end{equation}
Furthermore, take
\begin{equation}\label{P}
P=
\left[
\begin{array}{cc}
p_{11} & p_{12}\\
p_{21} & p_{22}
\end{array}
\right].
\end{equation}
Then we obtain
\[PQ=
\left[
\begin{array}{cccc}
a_{11}p_{11}+a_{21}p_{12} & a_{12}p_{11}+a_{22}p_{12} & \ldots & a_{1n}p_{11}+a_{2n}p_{12}\\
a_{11}p_{21}+a_{21}p_{22} & a_{12}p_{21}+a_{22}p_{22} & \ldots & a_{1n}p_{21}+a_{2n}p_{22}
\end{array}
\right].
\]
The requirement is that all the elements of the matrix above should
be nonnegative real numbers. This assumption, i.e. the inequalities
$a_{1j}p_{i1}+a_{2j}p_{i2}\geq0,\,\,j=1,\ldots,n$ determine
half-planes in the plane $(p_{i1},p_{i2}),$ passing through the
origin, $i=1,2.$ Thus, the problem is to find the cases, when the
intersections of the corresponding half-planes (which are in accordance
with the first, resp.\ the second row in $PQ$) are not empty.
Examine the columns of the matrix $Q.$ We distinguish 9 cases. In
what follows, the symbols $+$, and $-$ indicate the presence of a
positive or a negative number in the matrix $Q$.
\begin{tabular}{ccccccccc}
1.&2.&3.&4.&5.&6.&7.&8.&9.\\
\(\left[\begin{array}{c}0\\0\end{array}\right]\)&
\(\left[\begin{array}{c}0\\-\end{array}\right]\)&
\(\left[\begin{array}{c}+\\-\end{array}\right]\)&
\(\left[\begin{array}{c}+\\0\end{array}\right]\)&
\(\left[\begin{array}{c}+\\+\end{array}\right]\)&
\(\left[\begin{array}{c}0\\+\end{array}\right]\)&
\(\left[\begin{array}{c}-\\+\end{array}\right]\)&
\(\left[\begin{array}{c}-\\0\end{array}\right]\)&
\(\left[\begin{array}{c}-\\-\end{array}\right]\)
\end{tabular}
For example, suppose the $i$th column of $Q$ is of type 3, i.e.
$a_{1i}$ is positive and $a_{2i}$ is negative. Then the inequality
$a_{1i}p_{11}+a_{2i}p_{12}\geq0$ corresponds to the case in the
fig.~\ref{felsik}.
\begin{figure}
\caption{A graphical representation for the case 3.}
\label{felsik}
\end{figure}
The slope of the line with equation $a_{1i}p_{11}+a_{2i}p_{12}=0$
depends on the number $-a_{1i}/a_{2i}.$ Here this fraction is a
positive number. The shaded region in the figure represents the
region that is excluded from the solution.
Thus, after the geometrical consideration, we can conclude: if the
matrix $Q$ contains columns of form $\left[\begin{smallmatrix}
a \\
b
\end{smallmatrix}
\right]$ and
$\left[\begin{smallmatrix}
-a \\
-b
\end{smallmatrix}
\right]$ simultaneously, $\forall \,\, a,b\in\mathbb{R},$ with $a$ and $b$
different from 0 at the same time, there does not exist nonsingular,
$2\times2$ matrix $P,$ which satisfies the requirement $PQ\geq0.$
This is the case when matrix $Q$ has a pair of columns of type 2 and 6, or 4 and 8, or 3 and 7, or 5 and 9. In the last two cases the elements could only differ in sign. Henceforth, for a shortest notation we will use 26,48,37,59 to point to pair of cases when the matrix $P$ does not exist.
Finding all the cases when the matrix $Q$ contains three columns
which precisely exclude together the existence of $P,$ we lean on
the geometrical representation again. Assume $Q$ does not contain a
pair of columns fitting the case described above. If we check the
three half-plane cases, we get the following result: 247, 257, 258,
358, 368, 369, 469, 479 and 569 are the cases that exclude each
other, i.e. the existence of $P,$ by all means. Furthermore, there
are other instances for the nonexistence of such a $P$, 259, 347,
357, 359, 367, 378, 379 and 459, but in these cases we still have to
verify another condition regarding the slopes. This fact will be
illustrated later in an example.
In the case of the intersection of four half-planes, assuming that we
did not find in $Q$ columns corresponding to either of the cases given
earlier, we get only one case for empty intersection, specifically
for 2358, i.e. for a matrix that contains the columns
$\left[\begin{smallmatrix} 0 \\ - \end{smallmatrix} \right],$
$\left[\begin{smallmatrix} + \\ - \end{smallmatrix} \right],$
$\left[\begin{smallmatrix} + \\ + \end{smallmatrix} \right],$
$\left[\begin{smallmatrix} - \\ 0 \end{smallmatrix} \right] $
together, disregarding the order.
The cases presented previously exhaust all the cases, when to a
$2\times n$ matrix given in \eqref{Q} we cannot find a $2\times 2$
nonsingular matrix, $P,$ so that all the elements of $PQ$ are
nonnegative.
Let us take an example to illustrate the problem. Consider the matrix
$$Q=\left[
\begin{array}{rrrr}
5 & 2 & 2 & -3\\
-2 & 0 & 1 & -1
\end{array}
\right],$$ and $P$ as in \eqref{P}. Then
$$PQ=\left[
\begin{array}{rrrr}
5p_{11}-2p_{12} & 2p_{11} & 2p_{11}+p_{12} & -3p_{11}-p_{12}\\
5p_{21}-2p_{22} & 2p_{21} & 2p_{21}+p_{22} & -3p_{21}-p_{22}
\end{array}
\right],$$ whose elements must satisfy the system of inequalities:
\begin{equation}\label{ineq}
\begin{cases}
5p_{11}-2p_{12}& \geq 0, \\
2p_{11}& \geq 0, \\
2p_{11}+p_{12}& \geq 0, \\
-3p_{11}-p_{12}& \geq 0, \\
5p_{21}-2p_{22}& \geq 0, \\
2p_{21}& \geq 0, \\
2p_{21}+p_{22}& \geq 0, \\
-3p_{21}-p_{22}& \geq 0.
\end{cases}
\end{equation}
Consider the first four of them, and give a geometrical representation as in figure~\ref{rossz}.
\hide{
\begin{figure}
\caption{Example for nonexistence of $P$}
\label{rossz}
\end{figure}
}
\begin{figure}
\caption{Example for nonexistence of $P$}
\label{rossz}
\caption{Example for existence of $P$}
\label{jo}
\end{figure}
The picture was created with Maple program, and one can see, that in
this case the inequality system \eqref{ineq} does not have any
solution. This was only to be expected, because the matrix contains
columns of type 3, 4, 5 and 9, and 359 is a critical case, since the
slope of the line corresponding to case 9 is smaller than the one
corresponding to 5, i.e. $-(-3)/(-1)<-2/1.$
Notice that if we take $a_{13}=18$ in $Q,$ the situation will
change, i.e. the existence of $P$ will be ensured, because in this
case the direction of the inequality regarding the slopes will
change, as it can be seen in Fig.~\ref{jo}.
\hide{
\begin{figure}
\caption{Example for existence of $P$}
\label{jo}
\end{figure}
}
Choose a point from the region that indicates the solution (the dark
region in Fig. \ref{jo}), such as $(p_{11},p_{12})=(1,-5).$ To
determine $p_{21}$ and $p_{22}$ one must choose their values from
the same region, except the case when the point $(p_{21},p_{22})$
can be found on the line defined by the origin and the point
$(p_{11},p_{12})=(1,-5).$ The justification of this statement is as
follows. The equation of the line that passes through the points
$(0,0)$ and $(p_{11},p_{12})$ is $y=(p_{12}/p_{11})x.$ If
$(p_{21},p_{22})$ is a point on this line, then it must satisfy the
relation $p_{11}p_{22}-p_{12}p_{21}=0,$ which is equivalent to the
condition $\det{P}=0,$ but $P$ cannot be singular. For example we
can choose $(p_{21},p_{22})=(1/2,-4).$ In this case $\det P=-3/2.$
Now, check the nonnegativity of the elements of the matrix $PQ$:
$$PQ=
\left[
\begin{array}{cc}
1 & -5 \\
\frac{1}{2} & -4
\end{array}
\right]
\left[
\begin{array}{rrrr}
5 & 2 & 18 & -3\\
-2 & 0 & 1 & -1
\end{array}
\right]=
\left[
\begin{array}{rrrr}
15 & 2 & 13 & 2\\
\frac{21}{2} & 1 & 5 & \frac{5}{2}
\end{array}
\right].
$$
\textit{Suppose we are given two natural numbers, $n$ and $\hat{n}$, $\hat{n}\leq n$, and an
$\hat{n}\times n$ matrix $Q$ of full rank with complex elements. The question arises, what are the
necessary and sufficient conditions for the
existence of a nonsingular $\hat{n}\times\hat{n}$ matrix $P$ such that all elements
of $PQ$ are real?}
Observe that for $\hat{n}=1,$ $Q$ must have a special form to find a suitable $P$ to it, i.e. we must have either
$Q=\left[im_1\,\, im_2\, \ldots\, im_n\right],$ with $m_j\in \mathbb{R},\, j=1,\ldots,n,$ or all the elements of $Q$ have to be real, but this is a trivial case.
Consider $\hat{n}>1$ and $Q$ an $n\times\hat{n}$ matrix, with $Q=Q_1+iQ_2$ where $Q_1,Q_2\in \mathcal{M}_{\hat{n}n}(\mathbb{R}).$ A sufficient condition for the existence of a $\hat{n}\times\hat{n}$ matrix $P$ such that all elements of $PQ$ are real is:
\begin{enumerate}
\item $Q_1$ and $Q_2$ are nonsingular;
\item $Q_1^TQ_2=Q_2^TQ_1.$
\end{enumerate}
In this case we can choose $P=Q_1^T-iQ_2^T.$ Realize that if the
conditions above are satisfied, then {\setlength\arraycolsep{2pt}
\begin{eqnarray*}
PQ & = &(Q_1^T-iQ_2^T)(Q_1+iQ_2)= Q_1^TQ_1+iQ_1^TQ_2-iQ_2^TQ_1+Q_2^TQ_2\\
& = & Q_1^TQ_1+Q_2^TQ_2+i(Q_1^TQ_2-Q_2^TQ_1)\in\mathcal{M}_{\hat{n}n}(\mathbb{R}).
\end{eqnarray*}
}
In what follows suppose that the conditions above are not satisfied
and take
$$Q=
\left[
\begin{array}{cccc}
a_{11}+ib_{11} & a_{12}+ib_{12} & \ldots & a_{1n}+ib_{1n}\\
a_{21}+ib_{21} & a_{22}+ib_{22} & \ldots & a_{2n}+ib_{2n}\\
\vdots & \vdots & \vdots & \vdots \\
a_{\hat{n}1}+ib_{\hat{n}1} & a_{\hat{n}2}+ib_{\hat{n}2} & \ldots & a_{\hat{n}n}+ib_{\hat{n}n}
\end{array}
\right],
$$
$$
P=
\left[
\begin{array}{cccc}
p_{11}+iq_{11} & p_{12}+iq_{12} & \ldots & p_{1\hat{n}}+iq_{1\hat{n}}\\
p_{21}+iq_{21} & p_{22}+iq_{22} & \ldots & p_{2\hat{n}}+iq_{2\hat{n}}\\
\vdots & \vdots & \vdots & \vdots \\
p_{\hat{n}1}+iq_{\hat{n}1} & p_{\hat{n}2}+iq_{\hat{n}2} & \ldots & p_{\hat{n}\hat{n}}+iq_{\hat{n}\hat{n}}
\end{array}
\right].
$$
The requirement that all the elements of $PQ$ are real is equivalent
to a linear, homogeneous system
\begin{equation}\label{nagyrendsz}
\sum_{j=1}^{\hat{n}}(p_{lj}b_{jk}+q_{lj}a_{jk})=0,\quad k=\overline{1,n},\,\, l=\overline{1,\hat{n}},
\end{equation}
where the number of unknowns is $2\hat{n}^2$ and the number of
equations is $\hat{n}n.$ Notice that this system has an interesting
property: it can be divided into independent subsystems regarding
the unknowns. In this case each of these subsystems can be solved
separately. Moreover, all of the results can be written in identical
form, since they have the same coefficient-matrix. Therefore, taking
$l=1$, consider and treat only the subsystem with $n$ equations
\begin{equation}\label{rendsz}
\sum_{j=1}^{\hat{n}}(p_{1j}b_{jk}+q_{1j}a_{jk})=0,\quad k=\overline{1,n},
\end{equation} with unknowns $p_{11}, p_{12},\ldots, p_{1,\hat{n}},$ and $q_{11}, q_{12},\ldots, q_{1,\hat{n}}.$
Depending on $n$ and $\hat{n}$ the number of unknowns can be smaller or bigger than the number of equations.
The coefficient-matrix is
\begin{equation}
\left[
\begin{array}{cccccccc}\label{matr}
b_{11} & b_{21} & \ldots & b_{\hat{n}1} & a_{11} & a_{21} & \ldots & a_{\hat{n}1}\\
b_{12} & b_{22} & \ldots & b_{\hat{n}2} & a_{12} & a_{22} & \ldots & a_{\hat{n}2}\\
\vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots\\
b_{1n} & b_{2n} & \ldots & b_{\hat{n}n} & a_{1n} & a_{2n} & \ldots & a_{\hat{n}n}\\
\end{array}
\right].
\end{equation}
Denote by $d$ the main determinant for \eqref{matr}. Then, if $d\neq0,$ system \eqref{rendsz} has only the trivial zero solution: $\underbrace{(0,\ldots, 0)}_{2\hat{n}}.$ In this case $\det(P)=0,$ so $P$ does not satisfy the nonsingularity requirement.
For an adequate $P$ we must have $d=0.$ If it holds, we must specify
the rank of \eqref{matr}. Let us denote it by $r.$ If $r=2\hat{n},$
then the system \eqref{rendsz} is determined and has a unique
solution. But in this case all the other subsystems mentioned above
for \eqref{nagyrendsz} have the same (constant) solution. Thus, the
matrix $P$ will consist of identical rows, so we have $\det(P)=0$
again.
For $r=2\hat{n}-1$ the system is underdetermined, and the solution has
the form
$$(\alpha c_{1}, \alpha c_{2},\ldots, \alpha c_{2\hat{n}}),$$ where $\alpha$ is a parameter and the $c_i$ represent constants, $i=1,\ldots,2\hat{n}.$
Consequently, any two rows of $P$ are necessarily scalar multiples of each other. Thus, we obtain a singular $P$ once more.
Finally, if $r<2\hat{n}-1,$ then there exists an adequate matrix
$P,$ because in this case the solution of \eqref{rendsz} can be
expressed with at least two parameters, and this gives possibility
for choosing linearly independent lines to $P.$
Now, look at an example. Consider
$$Q=\left[
\begin{array}{cccc}
1+i & 2+i & 4+2i & 2+2i\\
-1 & 2i & 4i & -2
\end{array}
\right], \quad \mbox{and take} \quad
P=\left[
\begin{array}{cc}
p_{11}+iq_{11} & p_{12}+iq_{12}\\
p_{21}+iq_{21} & p_{22}+iq_{22}
\end{array}
\right].
$$
In this case the coefficient-matrix \eqref{matr} is
$$\left[
\begin{array}{cccc}
1 & 0 & 1 & -1\\
1 & 2 & 2 & 0\\
2 & 4 & 4 & 0\\
2 & 0 & 2 & -2
\end{array}
\right],
$$ and for it $r=2<2\hat{n}-1.$ Thus, we can compute a nonsingular
$P\in\mathcal{M}_{22}(\mathbb{C}).$ Since the homogeneous, linear
system with coefficient-matrix above has solution $(-\alpha+\beta,
-\frac{1}{2}(\alpha+\beta),\alpha, \beta),$ we can take $p_{11}=0,
p_{12}=1, q_{11}=-1, q_{12}=-1$ and similarly $p_{21}=2, p_{22}=2,
q_{21}=-3, q_{22}=-1.$ After some calculations we obtain
$$PQ=\left[
\begin{array}{cccc}
0 & 3 & 6 & 0\\
3 & 9 & 18 & 8
\end{array}
\right].$$
\end{document} |
\begin{document}
\title{Finding Even Cycles Faster via Capped $k$-Walks}
\begin{abstract}
Finding cycles in graphs is a fundamental problem in algorithmic graph
theory. In this paper, we consider the problem of finding and reporting a
cycle of length $2k$ in an undirected graph $G$ with $n$ nodes and $m$ edges for
constant $k\ge 2$. A classic result by Bondy and Simonovits [J.
Combinatorial Theory, 1974] implies that if $m \geq 100k n^{1+1/k}$, then
$G$ contains a $2k$-cycle, further implying that one needs to consider only
graphs with $m = O(n^{1+1/k})$.
Previously the best known algorithms were an $O(n^2)$ algorithm due to
Yuster and Zwick [J. Discrete Math 1997] as well as a
$O(m^{2-(1+\ceil{k/2}^{-1})/(k+1)})$ algorithm by Alon et. al.
[Algorithmica 1997].
We present an algorithm that uses $O\left ( m^{2k/(k+1)} \right )$ time and
finds a $2k$-cycle if one exists. This bound is $O(n^2)$ exactly when $m =
\Theta(n^{1+1/k})$. When finding $4$-cycles our new bound coincides with
Alon et. al., while for every $k>2$ our new bound yields a polynomial
improvement in $m$.
Yuster and Zwick noted that it is ``plausible to conjecture that $O(n^2)$
is the best possible bound in terms of $n$''. We show ``conditional
optimality'': if this hypothesis holds then our $O(m^{2k/(k+1)})$ algorithm
is tight as well. Furthermore, a folklore reduction implies that no
\emph{combinatorial} algorithm can determine if a graph contains a
$6$-cycle in time $O(m^{3/2-\varepsilon})$ for any $\varepsilon>0$ unless boolean matrix
multiplication can be solved combinatorially in time $O(n^{3-\varepsilon'})$ for
some $\varepsilon' > 0$, which is widely believed to be false. Coupled with our
main result, this gives tight bounds for finding $6$-cycles combinatorially
and also separates the complexity of finding $4$- and $6$-cycles giving
evidence that the exponent of $m$ in the running time should indeed
increase with $k$.
The key ingredient in our algorithm is a new notion of \emph{capped
$k$-walks}, which are walks of length $k$ that visit only nodes according
to a fixed ordering. Our main technical contribution is an involved
analysis proving several properties of such walks which may be of
independent interest.
\end{abstract}
\section{Introduction}
We study a basic problem in algorithmic graph theory.
Namely, given an undirected and unweighted graph $G=(V,E)$ and an
integer $\ell$, does $G$ contain a cycle of length exactly $\ell$ (denoted
$C_\ell$)? If a $C_\ell$ exists, we would also like the algorithm to return
such a cycle.
As a special case, when $\ell = n$ is the number of nodes in the graph, we
are faced with the well-known problem of finding a hamiltonian cycle, which was
one of Karp's original 21 NP-complete problems \cite{Karp1972}.
In fact, the problem is NP-complete when $\ell = n^{\Omega(1)}$.
On the other end of the spectrum, when $\ell = O(1)$ is a constant, the
problem is in FPT\footnote{Informally, a problem of size $n$ parameterized
by $k$ is in FPT if it can be solved in time $f(k)\cdot n^{O(1)}$, where $f$ is
a function independent of $n$.}
as first shown by Monien in
1985~\cite{Monien85}, by giving an $O(f(\ell)\cdot m)$ algorithm to
determine if any given node $u$ is contained in a $C_{\ell}$. For $\ell = 3$,
this is the classical problem of triangle-finding, which can be done in
$O(n^\omega)$ time using matrix multiplication, where $\omega < 2.373$ is the
matrix multiplication exponent~\cite{LeGall:2014:PTF:2608628.2608664}. This can
be extended to finding a $C_\ell$ for any constant $\ell=O(1)$ in time
$O(n^\omega)$ expected and $O(n^\omega\log n)$
deterministically~\cite{AlonYZ95}. When $\ell$ is odd, this is the fastest
known algorithm, however for even $\ell = 2k = O(1)$ one can do better.
To appreciate the difference, we must first understand
the following basic graph theoretic result about even cycles: Bondy and
Simonovits~\cite{BONDY197497} showed that if a graph with $n$ nodes has more
than $100k n^{1+1/k}$ edges, then the graph contains a $C_{2k}$.
In contrast, a graph on $n$ nodes can have $\Theta(n^2)$ edges without containing
any odd cycle, e.g. $K_{\floor{n/2},\ceil{n/2}}$. Using this lemma of Bondy
and Simonovits, it was shown by Yuster and Zwick~\cite{YusterZ97} how to find a
$C_{2k}$ for constant $k$ in time $O(n^2)$. They note that \emph{``it seems
plausible to conjecture that $O(n^2)$ is the best possible bound in terms of
$n$''}. Furthermore, when $m\ge 100k\cdot n^{1+1/k}$
we can use the algorithm of Yuster and Zwick~\cite{YusterZ97} to find a $C_{2k}$ in
$O(n)$ expected time.
Given this situation, we seek an algorithm with a running time
$O(m^{c_k})$, which utilizes the sparseness of the graph, when $m$ is less than
$100k\cdot n^{1+1/k}$. By the above discussion, such an algorithm can
be turned into a $O(n^{c_k(1+1/k)})$ time algorithm for finding a
$C_{2k}$.
Therefore, if we believe that $O(n^2)$ indeed is the correct running time in
terms of $n$, we must also believe that the best possible value for $c_k$
is $2 - 2/(k+1)$. This is further discussed in Section~\ref{sec:hard_intro}
below.
Our main result is to present an algorithm which obtains exactly this
running time in terms of $m$ and $k$ for finding a $C_{2k}$. We show the
following.
\begin{theorem}\label{thm:2k_cycle}
Let $G$ be an unweighted and undirected graph with $n$ nodes and $m$ edges, and let $k \geq 2$ be a positive integer.
A $C_{2k}$ in $G$, if one exists, can be found in $O(k^{O(k)}m^{\frac{2k}{k+1}})$ time.
\end{theorem}
Theorem~\ref{thm:2k_cycle} presents the first improvement in more than 20 years
over a result of Alon, et al.~\cite{AlonYZ97}, who gave an algorithm with
$c_k = 2 - (1 + \frac{1}{\ceil{k/2}})/(k+1)$, i.e.,
a running time of $O(m^{4/3})$ for $4$-cycles and $O(m^{13/8})$ for $6$-cycles.
For $4$-cycles we obtain the same bound with Theorem~\ref{thm:2k_cycle}, but
for any $k > 2$ our new bound presents a polynomial improvement. In fact
our algorithm for finding a $C_8$ is faster than the algorithm of Alon, et al.
for finding a $C_6$. A comparison with known algorithms is shown below in
Figure~\ref{fig:comparison}.
\begin{figure}
\caption{Comparisons of running times in terms of graph density. The
illustration shows our algorithm from \Cref{thm:2k_cycle}.}
\end{figure}
We present our algorithm as a black box reduction: Let $A$ be any algorithm
which can determine \emph{for a given node} $u$ if $u$ is contained in a
$C_{2k}$ in $O(f(k)\cdot m)$ time. Then our algorithm can transform $A$ into an
algorithm which finds a $C_{2k}$ in $O(g(k)\cdot m^{2k/(k+1)})$ time. Thus, one
may pick any such algorithm $A$ such as the original algorithm of
Monien~\cite{Monien85} or the seminal color-coding algorithm of Alon et
al.~\cite{AlonYZ95}. Our algorithm is conceptually simple, but the analysis is
technically involved and relies on a new understanding of the relationship
between the number of $k$-walks and the existence of a $C_{2k}$. By
introducing the notion of \emph{capped $k$-walks}, we show that an algorithm
enumerating all such capped $k$-walks starting in nodes with low degree will
either find a $2k$-cycle or spend at most $O(m^{2k/(k+1)})$ time. In some sense
this is a stronger version of the combinatorial lemma by Bondy and Simonovits,
as any graph with many edges must also have many capped $k$-walks.
\subsection{Hardness of finding cycles}\label{sec:hard_intro}
The literature on finding $\ell$-cycles is generally split into two kinds of
algorithms: \emph{combinatorial} and \emph{non-combinatorial} algorithms. Where
combinatorial algorithms (informally) are algorithms, which do not use the
structure of the underlying field and perform Strassen-like cancellation
tricks~\cite{Strassen69}. Interestingly, all known algorithms for finding
cycles of even length efficiently are combinatorial. There are several possible
explanations for this. One is that the hard instance for even cycles are
graphs, which are relatively sparse (i.e. $O(n^{1+1/k})$ edges), and in this
case it is difficult to utilize the power of fast matrix-multiplication.
Another is that matrix-multiplication based methods allows one to solve the
harder problem of directed graphs. Directed graphs are harder because we can no
longer make the guarantee that a $C_{2k}$ can always be found if the graph is
dense. Furthermore, a simple argument shows that the problem of finding a
$C_3$ can be reduced to the problem of finding a directed $C_\ell$ for any
$\ell > 3$. Especially this problem of finding a $C_3$ combinatorially has been
studied thoroughly in the line of work colloquially referred to as
\emph{Hardness in \textbf{P}}. This line of work is concerned with basing
hardness results on widely believed conjectures about problems in \textbf{P}
such as 3-SUM and APSP. One such popular conjecture (see
e.g.~\cite{AbboudW14,WilliamsW10}) is the combinatorial boolean matrix
multiplication (BMM) conjecture stated below.
\begin{conjecture}\label{conj:comb_bmm}
There exists no \emph{combinatorial} algorithm for multiplying two $n\times
n$ boolean matrices in time $O(n^{3-\varepsilon})$ for any $\varepsilon > 0$.
\end{conjecture}
It is known from \cite{WilliamsW10} that Conjecture~\ref{conj:comb_bmm} above
is equivalent to the statement that there exists no truly subcubic\footnote{An
algorithm running polynomially faster than cubic time, i.e. $O(n^{3-\varepsilon})$ for
$\varepsilon > 0$.} \emph{combinatorial} algorithm for finding a $C_3$ in graphs with
$n$ nodes and $\Theta(n^2)$ edges, and a simple reduction shows that this holds
for any odd $\ell \ge 3$. For even cycles, we show that a simple extension
to this folklore reduction gives the following result.
\begin{proposition}\label[proposition]{thm:clbs}
Let $k \ge 3$ be a fixed integer with $k\ne 4$. Then there exists no
\emph{combinatorial} algorithm that can find a $2k$-cycle in graphs with
$n$ nodes and $m$ edges in time $O(m^{3/2-\varepsilon})$ unless
Conjecture~\ref{conj:comb_bmm} is false.
\end{proposition}
As noted, the proof of \Cref{thm:clbs} is a rather simple extension of the
reduction for odd cycles, but for completeness, we include the proof in
\Cref{sec:clb}. In particular, \Cref{thm:clbs} implies that our $O(m^{3/2})$
time algorithm for finding $6$-cycles is optimal among combinatorial
algorithms. Interestingly, \Cref{thm:clbs} also creates a separation between
finding $4$-cycles and finding larger even cycles, as both Alon, et
al.~\cite{AlonYZ97} and Theorem~\ref{thm:2k_cycle} provide an algorithm for
finding $4$-cycles in time $O(m^{4/3})$, which is polynomially smaller than
$O(m^{3/2})$. This gives evidence that a trade-off dependent on $k$ like the
one obtained in Theorem~\ref{thm:2k_cycle} is indeed necessary.
An important point of \Cref{thm:2k_cycle}, as mentioned earlier, is that it
is optimal if we believe that $\Theta(n^2)$ is the correct running time in
terms of $n$. This is formalized in the theorem below. Furthermore, we show
that \Cref{thm:2k_cycle} implies that any hardness result of $n^{2-o(1)}$ would
provide a link between the time complexity of an algorithm and the existence of
dense graphs without $2k$-cycles. A statement, which is reminiscent of the
Erdős Girth Conjecture.
\begin{theorem}\label{thm:n2opt}
Let $k\ge 2$ be some fixed integer. For all $\varepsilon > 0$ there exists
$\delta > 0$ such that if no algorithm exists which
can find a $C_{2k}$ in graphs with $n$ nodes and $m$ edges in time
$O(n^{2-\delta})$, then the following two statements
hold.
\begin{enumerate}
\itemsep-2pt
\item There is no algorithm which can detect if a graph contains a
$C_{2k}$ in time $O(m^{2k/(k+1) - \varepsilon})$.
\item There exists an infinite family of graphs $\mathcal{G}$, such
that each $G\in \mathcal{G}$ has $|E(G)| \ge |V(G)|^{1+1/k-\varepsilon}$
and contains no $C_{2k}$.
\end{enumerate}
\end{theorem}
\subsection{Other results}
A problem related to that of finding a given $C_\ell$ is to determine the girth
(length of shortest cycle) of a graph $G$. In undirected
graphs, finding the shortest cycle in general can be done in time
$O(n^{\omega})$ time due to a seminal paper by Itai and Rodeh
\cite{DBLP:journals/siamcomp/ItaiR78}, and the shortest directed cycle can
be found using an extra factor of $O(\log n)$. In undirected graphs they also
show that a cycle that exceeds the shortest by at most one can be found in
$O(n^2)$ time. It was shown by Vassilevska Williams and
Williams~\cite{WilliamsW10} that computing the girth exactly is essentially as hard as boolean
matrix multiplication, that is, finding a combinatorial, truly subcubic
algorithm for computing the girth of a graph would break
Conjecture~\ref{conj:comb_bmm}. Thus, an interesting question is whether one
can approximate the girth faster, and in particular a main open question as
noted by Roditty and Vassilevska Williams~\cite{RodittyW12} is whether one can
find a $(2-\varepsilon)$-approximation in $O(n^{2-\varepsilon'})$ for any constants
$\varepsilon,\varepsilon'> 0$. They answered this question affirmatively for
\emph{triangle-free graphs} giving a $8/5$-approximation in $O(n^{1.968})$
time~\cite{RodittyW12}. By plugging Theorem~\ref{thm:2k_cycle} into their
framework we obtain the following result.
\begin{theorem}
There exists an algorithm for computing a $8/5$-approximation of the girth
in a triangle-free graph $G$ in time $O(n^{1.942})$.
\end{theorem}
\subsection{Capped $k$-walks}\label{sec:capped_walks}
The main ingredient in our analysis is a notion of \emph{capped $k$-walks}
defined below.
\begin{definition}
Let $G = (V,E)$ be a graph and let $\preceq$ be a total ordering of
$V$.
For a positive integer, $k$, we say that a $(k+1)$-tuple $(x_0,\ldots,x_k) \in
V^{k+1}$ is called a \emph{$\preceq$-capped $k$-walk} if $(x_0,\ldots,x_k)$ is
a walk in $G$ and $x_0 \succeq x_i$ for each $i=1,2,\ldots,k$.
\end{definition}
When clear from the context we will refer to a $\preceq$-capped $k$-walk simply
by a capped $k$-walk. Our algorithm for finding $2k$-cycles essentially works
by enumerating all $\preceq$-capped $k$-walks (with some pruning applied),
where $\preceq$ is given by ordering nodes according to their degree. We will
show that by bounding the number of such $\preceq$-capped $k$-walks in graphs
with a not too large maximum degree, we obtain a bound on the running time of
our algorithm. Specifically, we show the following lemma.
\begin{lemma}\label[lemma]{lem:main_kwalks}
Let $G = (V,E)$ be a graph, let $k$ be a positive integer, and assume that
$G$ has maximum degree at most $m^{2/(k+1)}$. Let $\preceq$ be any ordering
of the nodes in $G$ such that $u \preceq v$ for all pairs of nodes $u,v$
such that $\operatorname{deg}(u) < \operatorname{deg}(v)$. If $G$ contains no $2k$-cycle, then the
number of $\preceq$-capped $k$-walks is at most $f(k) m^{2k/(k+1)}$, where
$f(k) = \left(O(k^2)\right)^{k-1} = k^{O(k)}$.
\end{lemma}
We also present a lower bound on the number of $\preceq$-capped $k$-walks, which
implies that graphs with a large number of edges contain a large number of
$\preceq$-capped $k$-walks.
\begin{lemma}\label[lemma]{lem:kwalks_lower}
Let $G = (V,E)$ be a graph with $n$ nodes and $m$ edges. Let $\preceq$ be
any ordering of $V$. The number of $\preceq$-capped $k$-walks is at least
$n \cdot \left ( \frac{m}{2n} \right )^k.$
\end{lemma}
Lemmas~\ref{lem:main_kwalks} and~\ref{lem:kwalks_lower} imply that
graphs with more than $Ck^2n^{1+1/k}$ edges and maximum degree at most
$m^{2/(k+1)}$ have a $2k$-cycle, for a
sufficiently large constant $C > 0$. Apart from the extra factor of $k$
and the bound on the maximum degree, this shows that \Cref{lem:main_kwalks}
is stronger than the lemma of Bondy and Simonovits, which states that
graphs with more than $100kn^{1+1/k}$ edges contain a $2k$-cycle. Indeed,
a graph with few edges may still contain many capped $k$-walks.
\subsection{Techniques and overview}
\input{techno}
\subsection{Related work}\label{sec:related}
\input{related}
\subsection{Notation}
Let $G=(V,E)$ be a graph. For (not necessarily disjoint) sets of nodes
$A,B\subseteq V$ we let $E(A,B)$ denote the set of edges between $A$ and $B$ in
$G$, i.e. $E\cap (A\times B)$. We use $E(v,A)$ to denote $E(\{v\},A)$.
\section{Finding even cycles}\label{sec:2kcycles}
In this section we describe our algorithm for finding a $C_{2k}$ in an undirected graph
$G=(V,E)$ with $n$ nodes and $m$ edges. In our analysis we will assume
Lemma~\ref{lem:main_kwalks}, but we defer the actual proof of the lemma to
Section~\ref{sec:kwalks}.
Our algorithm works by creating a series of graphs $G_{\le 1}^k, \ldots,
G_{\le n}^k$ guaranteed to contain any $2k$-cycle that may exist. Furthermore,
the total size of these graphs can (essentially) be bounded by the total number
of $\preceq$-capped $k$-walks which is used to bound the running time.
\begin{proof}[Proof of Theorem~\ref{thm:2k_cycle}]
Let $A$ be any algorithm that takes a graph $H$ and a node $u$ in $H$
as input and determines if $u$ is contained in a $2k$-cycle in time
$O(g(k)\cdot |E(H)|)$.
Order the nodes of $G$ as $v_1,\ldots, v_n$ non-decreasingly by degree and
define $G_{\le i}$ to be the subgraph of $G$ induced by $v_1,\ldots, v_i$.
Let $G_{\le i}^k$ denote the subgraph of $G_{\le i}$ containing all
edges (and their endpoints) incident to nodes at distance $<k$ from $v_i$
in $G_{\le i}$. Now for each $i\in \{1,\ldots, n\}$ in increasing order we
create the graph $G_{\le i}^k$, run algorithm $A$ on $G_{\le i}^k$
and $v_i$, and return any $2k$-cycle found (stopping the algorithm).
If no such cycle is found for
any $i$ the algorithm returns that no $2k$-cycle exists in $G$.
For correctness let $C$ be any $2k$-cycle in $G$ and let $v_i$ be the
node in $C$ that is last in the ordering. It then follows from the
definition that $C$ is fully contained in $G_{\le i}^k$ and thus either the
algorithm returns a $2k$-cycle when $A$ is run on $G_{\le i}^k$ or
some other $2k$-cycle when $A$ is run on $G_{\le j}^k$ for $j <
i$. For the running time observe first that creating the graphs $G_{\le
i}^k$ and running algorithm $A$ on these graphs takes time proportional to
the total number of edges in these graphs. Thus what is left is to bound
this number of edges. The number of edges in $G_{\le
i}^k$ is bounded by the number of capped $k$-walks starting in $v_i$ in
$G$. Let $i$ be the largest value such that
$G_{\le i}^k$ does not contain a $2k$-cycle and $\operatorname{deg}(v_i)\le m^{2/(k+1)}$.
It then follows by Lemma~\ref{lem:main_kwalks} that the graphs $G_{\le
1}^k,\ldots, G_{\le i}^k$ contain at most a total number of $O(f(k)\cdot
m^{2k/(k+1)})$ edges. Furthermore, there are at most $m^{1-2/(k+1)}$ nodes
of degree $> m^{2/(k+1)}$, and thus the total number of edges over all the
graphs $G_{\le 1}^k,\ldots, G_{\le n}^k$ is at most $O(f(k)\cdot
m^{2k/(k+1)})$ giving the desired running time.
\end{proof}
As an example, the algorithm $A$ in the above proof could be the algorithm
of Monien~\cite{Monien85} or Alon et al.~\cite{AlonYZ95}.
\section{Bounding the number of capped $k$-walks}\label{sec:kwalks}
In this section we will prove Lemma~\ref{lem:main_kwalks}. Let $G=(V,E)$ be a
given graph. We will denote the nodes of $G$ by $u_1,\ldots, u_n$ or simply
$1,\ldots, n$ if it is clear from the context.
Recall the definition of $\snorm{\cdot}$ from the introduction. We note that
the following basic properties hold.
\begin{lemma}
\label[lemma]{lem:snormBasic}
For all vectors $u,v \in \mathbb{R}^n$ and $c \in \mathbb{R}$ we have:
\begin{align}
\notag
\snorm{u+v} &\le \snorm{u} + \snorm{v},
\\\notag
\snorm{cu} &= \abs{c} \cdot \snorm{u},
\\\notag
\snorm{u} = 0 &\iff u = 0
\, .
\end{align}
\end{lemma}
As mentioned in the introduction, we would like to use the $\snorm{\cdot}$-norm
of $X_G$ to bound the number of $k$-walks starting in a given subset
$S\subseteq V$. We can do this using the following lemma.
\begin{lemma}\label[lemma]{lem:kwalks_set}
Let $G = (V,E)$ be a graph with $n$ nodes and adjacency matrix $X_G$. Let
$S \subseteq V$ be a set of nodes. For any integer $k$ the number of
$k$-walks starting in $S$ is bounded by $\sqrt{\abs{S}}\snorm{X_G^k
\mathbf{1}}$.
\end{lemma}
\begin{proof}
Let $v = X_G^k \mathbf{1}$ and let $w$ be the vector such that $w_i = v_i$ when
$i \in S$ and $w_i = 0$ when $i \notin S$. Then the number of $k$-walks
starting in $S$ is exactly the sum of entries in $w$, i.e. it is
$\norm{w}_1$. So the number of $k$-walks starting in $S$ is bounded by
\begin{align}
\notag
\norm{w}_1 &=
\int_0^\infty
\abs{\set{i \mid w_i \ge x}}
dx
\\\notag &\le
\sqrt{\abs{S}}
\int_0^\infty
\sqrt{\abs{\set{i \mid w_i \ge x}}}
dx
\\\notag &=
\sqrt{\abs{S}} \snorm{w}
\\\notag &\le
\sqrt{\abs{S}} \snorm{v}
\, ,
\end{align}
as desired. Here the first inequality follows because $w$ has at most $|S|$
non-zero entries.
\end{proof}
To prove Lemma~\ref{lem:main_kwalks} we want to bound the quantity
$\snorm{X_G^k}$ for graphs, $G$, which do not contain a $2k$-cycle and
have maximum degree at most $m^{\frac{2}{k+1}}$.
To do this we will need the following lemmas, which are proved
in \Cref{sec:proofs}.
\begin{lemma}
\label[lemma]{lem:matrixSnormOnlyZeroOne}
Let $A$ be a real $n\times n$ matrix. If, for all vectors $v\in \{0,1\}^n$
we have $\snorm{Av} \le C\snorm{v}$ for some value $C$, then $\snorm{A}\le
16C$.
\end{lemma}
\begin{lemma}
\label[lemma]{lem:modifiedBS}
Let $G$ be a graph and let $A$ and $B$ be subsets of nodes in $G$. Let $k \ge 2$
be an integer and assume that $G$ contains no $2k$-cycle. Then
\begin{align}
\abs{E(A,B)} \le
100k \cdot \left(\sqrt{\abs{A} \cdot \abs{B}}^{1+1/k} + \abs{A} + \abs{B} \right )\, .
\end{align}
\end{lemma}
We are now ready to prove the main technical lemma stated below.
\begin{lemma}
\label[lemma]{lem:mainTechnical}
Let $G = (V,E)$ be a graph with $m$ edges and let $k$ be a positive
integer. Assume that $G$ has maximum degree at most $m^{2/(k+1)}$ and does
not contain a $2k$-cycle. Let $X_G$ be the adjacency matrix for $G$, then
\begin{align}
\notag
\snorm{X_G} = O\left(k^2 m^{1/(k+1)} \right)
\, .
\end{align}
\end{lemma}
\begin{proof}
We denote the vertices of $G$ by $1,2,\ldots, n$ for convenience. By
\Cref{lem:matrixSnormOnlyZeroOne} we only need to show that $\snorm{X_G v}
= O\left(k^2 m^{1/(k+1)} \snorm{v}\right)$ for every vector $v$ where each
entry is either $0$ or $1$. Each such vector, $v$, can be viewed as a set
of nodes $A\subseteq V$, where $v_i$ is $1$ whenever $i\in A$ and $0$
otherwise. We will adopt this view and denote $v$ by $\mathbf{1}_A$.
In this case we have $\snorm{\mathbf{1}_A} =
\sqrt{\abs{A}}$. Thus it suffices to show that for all $A\subseteq V$
we have
\begin{align}
\label[equation]{eq:mainTechnicalFirstRewrite}
\snorm{X_G \mathbf{1}_A} = O \left( k^2 m^{1/(k+1)} \sqrt{\abs{A}} \right )
\, .
\end{align}
Now fix an arbitrary $A \subseteq V$. We are going to show that
\eqref{eq:mainTechnicalFirstRewrite} holds. For every non-negative integer
$i$ we let $B_i$ denote the set of nodes in $G$ which have more than
$2^{i-1}$ but at most $2^i$ neighbours in $A$. That is
\begin{align}
\notag
B_i = \set{v \in V \mid \abs{E(v,A)} \in \left(2^{i-1},2^i\right] }
\, .
\end{align}
We note that by the definition of $\snorm{\cdot}$ we have that
\begin{align}
\notag
\snorm{X_G \mathbf{1}_A}
&\le
\sum_{i \ge 0}
2^i \sqrt{\sum_{j \ge i} \abs{B_j}}
\\\notag &\le
\sum_{i \ge 0}
2^i \sum_{j \ge i} \sqrt{\abs{B_j}}
\\\notag &<
2
\cdot
\sum_{i \ge 0}
2^i \sqrt{\abs{B_i}}
\, .
\end{align}
So in order to show \eqref{eq:mainTechnicalFirstRewrite} it suffices to
show \eqref{eq:mainTechnicalSecondRewrite} below
\begin{align}
\label[equation]{eq:mainTechnicalSecondRewrite}
\sum_{i\ge 0} 2^i\sqrt{\abs{B_i}} = O\!\left( k^2 m^{1/(k+1)} \sqrt{|A|} \right )\, ,
\end{align}
or alternatively to show
\begin{align}
\label[equation]{eq:mainTechnicalSecondRewrite2}
\sum_{i\ge 0} 2^i\frac{\sqrt{\abs{B_i}}}{\sqrt{|A|}} = O\!\left( k^2 m^{1/(k+1)}\right )\, .
\end{align}
For an integer $i\ge 0$ let $t_i$ be defined by
\begin{align}
\notag
t_i = 2^i\cdot \frac{\sqrt{\abs{B_i}}}{\sqrt{\abs{A}}}
\, .
\end{align}
We will bound the value $t_i$ by looking at the number of edges between the
sets $B_i$ and $A$. Our plan is to bound the value $t_i$ in several ways,
and then taking a geometric mean will yield the result.
Observe first, that by the definition of $B_i$ we have
at least $2^{i-1} \abs{B_i}$ edges from $B_i$ to $A$, and hence $2^i
\abs{B_i} \le 2\abs{E(B_i,A)} \le 2m$. It follows that $t_i$ is bounded by
\begin{align}
\notag
t_i
= \frac{2^i \sqrt{\abs{B_i}}}{\sqrt{\abs{A}}}
= \frac{2^{i/2} \sqrt{2^i \abs{B_i}}}{\sqrt{\abs{A}}}
\le \frac{2^{i/2} \sqrt{2m}}{\sqrt{\abs{A}}}
\, .
\end{align}
Let $A_i$ be the subset of nodes of $A$ that are adjacent to a node in $B_i$,
then $E(B_i,A) = E(B_i,A_i)$. By \Cref{lem:modifiedBS} it also follows that
\begin{align}
\notag
t_i
&\le \frac{2\abs{E(B_i,A_i)}}{\sqrt{\abs{B_i} \cdot \abs{A}}}
\\\notag &\le
200k \sqrt{\abs{B_i} \cdot \abs{A_i}}^{1/k} +
200k \sqrt{\frac{\abs{B_i}}{\abs{A}}} +
200k \sqrt{\frac{\abs{A_i}}{\abs{B_i}}}
\, .
\end{align}
We also note that $t_i = 0$ whenever $i > d$ where $d$ is the smallest
integer such that $2^{d-1} > m^{2/(k+1)}$, since the maximum degree of the
graph is $m^{2/(k+1)}$. It follows that the sum $\sum_{i\ge 1}t_i$ can
be bounded by:
\begin{align}
\notag
&\quad O\!\left(
\sum_{i=1}^d
\min \set {
\frac{2^i \sqrt{\abs{B_i}}}{\sqrt{\abs{A}}},
k \sqrt{\abs{B_i}\abs{A_i}}^{\frac{1}{k}} +
k\sqrt{\frac{\abs{B_i}}{\abs{A}}} +
k\sqrt{\frac{\abs{A_i}}{\abs{B_i}}}
}
\right)
\\
\notag
&=
O\!\left(
\Sigma_1
+
\sum_{i=1}^d
\min \set {
\frac{2^i \sqrt{\abs{B_i}}}{\sqrt{\abs{A}}},
k\sqrt{\frac{\abs{B_i}}{\abs{A}}} +
k\sqrt{\frac{\abs{A_i}}{\abs{B_i}}}
}
\right)
\\
&=
\label{eq:s1_s2}
O\!\left(
\Sigma_1
+
\sum_{i=1}^d
\left(
k\sqrt{\frac{\abs{B_i}}{\abs{A}}} +
k\cdot2^{i/2}
\right)
\right)\,
\end{align}
where
\[
\Sigma_1 =
\sum_{i=1}^d
\min \set {
\frac{2^{i/2} \sqrt{2m}}{\sqrt{\abs{A}}},
k\sqrt{\abs{B_i} \cdot \abs{A}}^{1/k}
}
\]
Here, we have $\sqrt{\frac{\abs{A_i}}{\abs{B_i}}} \le 2^{i/2}$ because each
node of $B_i$ has at most $2^i$ neighbours in $A$.
Let $\Sigma_1$ and $\Sigma_2$ denote the two sums of \eqref{eq:s1_s2}
above respectively. We will start by bounding $\Sigma_2$. Since, by
definition, every node in $B_i$ has at least $2^{i-1}$ neighbours in $A_i$
and every node in $A_i$ has degree at most $m^{2/(k+1)}$ we see that
$\abs{B_i}2^{i-1} \le \abs{A_i}m^{2/(k+1)}$. Hence we get that:
\begin{align}
\notag
\Sigma_2 \le
\sum_{i=1}^d\left(
km^{1/(k+1)}2^{(1-i)/2} +
k2^{i/2}\right)
=
O \left ( km^{1/(k+1)} \right )\, .
\end{align}
Now we will bound $\Sigma_1$. First we note that $\abs{B_i}2^{i-1} \le m$ and
therefore $\abs{B_i} \le \frac{2m}{2^i}$. Inserting this gives us:
\begin{align}
\notag
\Sigma_1
\le
\sum_{i=1}^d
\min \set {
\frac{2^{i/2} \sqrt{2m}}{\sqrt{\abs{A}}},
k\sqrt{\frac{2m}{2^i} \cdot \abs{A}}^{1/k}
}\, .
\end{align}
Let $d_0$ be the largest integer such that $2^{d_0} \le
\frac{\abs{A}}{(2m)^{(k-1)/(k+1)}}$.
Then:
\begin{align}
\notag
\frac{2^{d_0/2} \sqrt{2m}}{\sqrt{\abs{A}}}
&
=
\Theta \left ( m^{1/(k+1)} \right )
\\
\notag
\sqrt{\frac{2m}{2^{d_0}} \cdot \abs{A}}^{1/k}
&
=
\Theta \left ( m^{1/(k+1)} \right )\, .
\end{align}
Inserting this gives us:
\begin{align}
\notag
\Sigma_1
& \le
k
\sum_{i=1}^d
\min \set {
\frac{2^{i/2} \sqrt{2m}}{\sqrt{\abs{A}}},
\sqrt{\frac{2m}{2^i} \cdot \abs{A}}^{1/k}
}
\\
& \le
k
\sum_{i=-\infty}^{\infty}
\min \set {
\frac{2^{i/2} \sqrt{2m}}{\sqrt{\abs{A}}},
\sqrt{\frac{2m}{2^i} \cdot \abs{A}}^{1/k}
}
\\
& \le
k
\sum_{i=-\infty}^{d_0}
\frac{2^{i/2} \sqrt{2m}}{\sqrt{\abs{A}}}
+
k
\sum_{i=d_0}^{\infty}
\sqrt{\frac{2m}{2^i} \cdot \abs{A}}^{1/k}
\\
& =
O\left ( k m^{1/(k+1)} \right )
\cdot
\left (
\sum_{i = 0}^{\infty} 2^{-i/2}
+
\sum_{i = 0}^{\infty} 2^{-i/k}
\right )
\\
& =
O \left ( k^2 m^{1/(k+1)} \right )\, .
\end{align}
Summarizing, we thus have that
\[
\sum_{i\ge 0} t_i = O\!\left(k^2\cdot m^{\frac{1}{k+1}}\right)\ ,
\]
and combining this with \eqref{eq:mainTechnicalSecondRewrite2},
\eqref{eq:mainTechnicalSecondRewrite} and
\eqref{eq:mainTechnicalFirstRewrite} now gives us the lemma.
\end{proof}
Using Lemma~\ref{lem:mainTechnical} above we are now ready to prove
Lemma~\ref{lem:main_kwalks} which we used to bound the number of
$\preceq$-capped $k$-walks in Section~\ref{sec:2kcycles}. The main idea in the
proof of Lemma~\ref{lem:main_kwalks} is to split the nodes $V$ into different
sets based on their degrees and then use Lemma~\ref{lem:mainTechnical} to bound
the $\snorm{\cdot}$-norm of the graphs induced by these sets individually.
\begin{proof}[Proof of Lemma~\ref{lem:main_kwalks}]
Let $V_i$ be the set of nodes $u$ with $\operatorname{deg}(u) \in
\left(2^{i-1},2^i\right]$, and let $V_{\le i} = \cup_{j \le i} V_j$ be the
set of nodes with $\operatorname{deg}(u) \in (0,2^i]$. Let $G_{\le i} = (V, E \cap V_{\le
i}^2)$ be the subgraph of $G$ induced by
$V_{\le i}$. Note that $G_{\le i}$ here is defined
slightly differently than we did in Section~\ref{sec:2kcycles} as we
consider entire sets of nodes $V_i$. Any $\preceq$-capped $k$-walk starting
from a node $u \in V_{i}$ is contained in $G_{\le i}$. It follows by
Lemma~\ref{lem:kwalks_set} that the total number of $\preceq$-capped
$k$-walks in $G$ is bounded by
\begin{align}
\notag
\sum_{i \ge 0}
\sqrt{\abs{V_i}} \snorm{X_{G_i}^k \mathbf{1}}
&\le
\sum_{i \ge 0}
\snorm{X_{G_i}}^{k-1}
\sqrt{\abs{V_i}} \snorm{X_{G_i} \mathbf{1}}
\\
\label{eq:kwalks_bound}
&\le
\snorm{X_G}^{k-1}
\sum_{i \ge 0}
\sqrt{\abs{V_i}} \snorm{X_{G_i} \mathbf{1}}
\, .
\end{align}
We note that $X_{G_i} \mathbf{1} \le \sum_{j \le i} 2^j \mathbf{1}_{V_j}$, and hence
\begin{align}
\notag
\sum_{i \ge 0}
\sqrt{\abs{V_i}} \snorm{X_{G_i} \mathbf{1}}
&\le
\sum_{i \ge 0}
\sqrt{\abs{V_i}} \sum_{j \le i} \snorm{2^j \mathbf{1}_{V_j}}
\\\notag &=
\sum_{i \ge j \ge 0}
\sqrt{\abs{V_i}}
\cdot \sqrt{\abs{V_j}}
\cdot 2^j\, .
\end{align}
We now note that
\begin{align}
\notag
\sqrt{\abs{V_i}}
\cdot \sqrt{\abs{V_j}}
\cdot 2^j
&=
\sqrt{2^i\abs{V_i}}
\cdot \sqrt{2^j\abs{V_j}}
\cdot 2^{-(i-j)/2}
\\\notag &\le
\frac{2^i\abs{V_i} + 2^j\abs{V_j}}{2}
\cdot
2^{-(i-j)/2}
\, ,
\end{align}
which implies that
\begin{align}
\notag
\sum_{i \ge j \ge 0}
\sqrt{\abs{V_i}}
\cdot \sqrt{\abs{V_j}}
\cdot 2^j
&\le
\sum_{i \ge j \ge 0}
\frac{2^i\abs{V_i} + 2^j\abs{V_j}}{2}
\cdot
2^{-(i-j)/2}
\\\notag &=
\sum_{i \ge 0}
2^i \abs{V_i}
\sum_{\ell \ge 0} 2^{-\ell/2}
\\\notag &=
\frac{\sqrt{2}}{\sqrt{2}-1}
\sum_{i \ge 0}
2^i \abs{V_i}
\, .
\end{align}
Since $\sum_{i \ge 0} 2^i \abs{V_i}$ is at most twice as large as the sum of
degrees of the nodes in $G$ it is bounded by $4m$, and therefore
\begin{align}
\sum_{i \ge j \ge 0}
\sqrt{\abs{V_i}}
\cdot \sqrt{\abs{V_j}}
\cdot 2^j
\le
4 \cdot \frac{\sqrt{2}}{\sqrt{2}-1} m
<
14m
\, .
\end{align}
Combining this with \eqref{eq:kwalks_bound} and
Lemma~\ref{lem:mainTechnical} we get that the number of $\preceq$-capped
$k$-walks is at most
\begin{align}
\notag
14 \snorm{X_G}^{k-1} m = O\!\left((k^2)^{k-1}m^{\frac{2k}{k+1}}\right)
\, ,
\end{align}
which is what we wanted to show.
\end{proof}
Below we prove Lemma~\ref{lem:kwalks_lower}, which gives a lower bound on the number of capped $k$-walks.
\begin{proof}[Proof of Lemma~\ref{lem:kwalks_lower}]
Let $\Delta = \frac{m}{2n}$.
For a subgraph $F$ of $G$ we let $f(F)$ denote the subgraph $F'$ of $F$
obtained in the following way. Initially we let $F' = F$. As long as there
exists a node $v \in F'$ such that $\operatorname{deg}_{F'}(v) < \Delta$ we remove $v$
from $F'$. We continue this process until no node in $F'$ has fewer than
$\Delta$ neighbours and let $f(F) = F'$.
We now construct the sequences $(H_i)_{i \ge 0}, (H_i')_{i \ge 0}$ of subgraphs
of $G$ in the following manner. We let $H_0' = G$, and $H_0 = f(H_0')$. If $H_i$
is non-empty, let $v_i$ be the largest element in $H_i$, i.e. $v_i \succeq v$
for all $v \in H_i$, and define $H_{i+1}' = H_i \setminus \set{v_i}$. If $H_i$
is empty we let $H_{i+1}' = H_i$. In either case we let $H_{i+1} = f(H_{i+1}')$.
For all $i$ such that $H_i$ is non-empty, there exist at least \linebreak$\operatorname{deg}_{H_i}(v_i) \Delta^{k-1}$
capped $k$-walks $(x_1,\ldots,x_k)$ with $x_1 = v_i$. By the definition of $H_{i+1}'$
we have that $\operatorname{deg}_{H_i}(v_i) = \abs{E(H_i)} - \abs{E(H_{i+1}')}$. The total number
of capped $k$-walks in $G$ is therefore at least:
\begin{align}
\label{eq:cappedKWalksNumberOfWalks}
\sum_{i \ge 0}
\left ( \abs{E(H_i)} - \abs{E(H_{i+1}')} \right )
\Delta^{k-1}\, .
\end{align}
Now note that:
\begin{align}
\notag
&\quad \sum_{i \ge 0}
\left ( \abs{E(H_i)} - \abs{E(H_{i+1}')} \right )
\\
\label{eq:cappedKWalksSumOfEdges}
&=
\left (
\sum_{i \ge 0}
\abs{E(H_i')} - \abs{E(H_{i+1}')}
\right )
-
\left (
\sum_{i \ge 0}
\abs{E(H_i')} - \abs{E(H_{i})}
\right )
\, .
\end{align}
The first sum on the right hand side of \eqref{eq:cappedKWalksSumOfEdges} is a
telescoping sum that is equal to $m$. The second sum on the right hand side of
\eqref{eq:cappedKWalksSumOfEdges} can be bounded by noting that
$\abs{E(H_i')} - \abs{E(H_{i})}$ is at most $\Delta \cdot \abs{V(H_i' \setminus f(H_i'))}$,
since applying $f$ to $H_i'$ removes $\abs{V(H_i' \setminus f(H_i'))}$ nodes, and each node
removed had degree at most $\Delta$.
Since at most $n$ nodes are removed in total the sum is bounded by $n\Delta$. Hence
\eqref{eq:cappedKWalksSumOfEdges} is at least $m-n\Delta = \frac{m}{2}$.
Inserting this into \eqref{eq:cappedKWalksNumberOfWalks} gives that the number of
capped $k$-walks is at least
\begin{align}
\notag
\frac{m}{2} \cdot \Delta^{k-1} =
n \cdot \left ( \frac{m}{2n} \right )^k
\, ,
\end{align}
as desired.
\end{proof}
\section{Hardness of finding cycles}\label{sec:clb}
Theorem~\ref{thm:2k_cycle} presents an algorithm with a seemingly natural
running time in terms of $m$ and $k$. A natural question to ask is
whether the exponent of $m$ has to increase with $k$ and,
perhaps more interestingly, what the correct
exponent is. In this section we address the possibility of faster
algorithms, by proving \Cref{thm:n2opt,thm:clbs} discussed in the
introduction.
\begin{proof}[Proof of \Cref{thm:clbs}]
Let $G=(V,E)$ be the graph in which we wish to find a triangle with $|V| =
n$ and $|E| = \Theta(n^2)$. By Conjecture~\ref{conj:comb_bmm} it takes
$n^{3-o(1)}$ time to find a triangle in $G$. Now create the graph $G'$
consisting of three copies, $A$, $B$, and $C$, of $V$. Denote each copy of
$u\in V$ in $A,B,C$ by $u_A,u_B,u_C$, respectively. For each edge $(u,v)\in
E$ add the edges $(u_A,v_B)$, $(u_B,v_C)$, and $(u_C,v_A)$ to $G'$. It now
follows that $G$ contains a triangle $u,v,w$ if and only if $G'$ contains a
triangle $u_A,v_B,w_C$.
Now fix $x = \lceil (2k+1)/4 \rceil$ and note that $2k\ge 3x$ by the
restrictions to $k$. Create the graph $G^e_k$ by taking a copy of $G'$ and
performing the following changes: Replace each edge by a path of length
$x$. If $2k > 3x$ replace each node $u_A$ in $G^e_k$ by a path
$u_A^1,\ldots, u_A^{2k-3x+1}$. Otherwise if $2k=3x$ do nothing. We now
claim that $G^e_k$ contains a $C_{2k}$ if and only if $G$ contains a
triangle. Observe first, that if $G$ contains a triangle $u,v,w$, then
$u_A^1\leadsto v_B\leadsto w_C\leadsto u_A^{2k-3x+1}\leadsto u_A^1$ is a
cycle in $G^e_k$ and has length $3x + 2k-3x = 2k$. Now assume that $G^e_k$
has a cycle of length $2k$. If this cycle contains two nodes $u_A^1$ and
$v_A^1$ it must have length at least $4x > 2k$ and similarly for $B$ and $C$
and $u_A^{2k-3x+1}$ and $v_A^{2k-3x+1}$. Thus, the cycle must exactly be of
the form $u_A^1\leadsto v_B\leadsto w_C\leadsto u_A^{2k-3x+1}\leadsto u_A^1$
and such a cycle can only have length $2k$ if all edges $(u_A,v_B)$,
$(v_B,w_C)$, and $(w_C,u_A)$ are present in $G'$. Now observe that for
constant $k$ the graph $G^e_k$ has $N = \Theta(n^2)$ nodes and $M =
\Theta(n^2)$ edges. It now follows from Conjecture~\ref{conj:comb_bmm} that
no algorithm can detect a $C_{2k}$ in $G^e_k$ in time $O(M^{3/2-\varepsilon}) =
O(n^{3-\varepsilon})$ for any $\varepsilon > 0$.
\end{proof}
The reduction for \Cref{thm:clbs} is shown in Figure~\ref{fig:clbs}
below.
\begin{figure}
\caption{The construction of $G^e_k$ from the proof of \Cref{thm:clbs}.}
\label{fig:clbs}
\end{figure}
Finally, we show the ``conditional optimality'' stated in \Cref{thm:n2opt}. The theorem states that if $O(n^2)$ time is optimal, then our bound is the best that can be achieved.
\begin{proof}[Proof of \Cref{thm:n2opt}]
Let $\varepsilon > 0$ be given and let $\delta = \varepsilon$.
Assume there exists an algorithm
which finds a $2k$-cycle in time $O(m^{2k/(k+1)-\varepsilon})$. Now consider the
following algorithm: If $m\ge 100k\cdot n^{1+1/k}$ answer yes, and
otherwise run the given algorithm. This algorithm has running time
$O(n^{(1+1/k)\cdot(2k/(k+1)-\varepsilon)}) = o(n^{2-\delta})$.
Hence part (1) holds.
Now assume there are only finitely many graphs $G$ containing no $2k$-cycle
such that $\abs{E(G)} \ge \abs{V(G)}^{1+1/k-\varepsilon}$. Then there must exist some constant $n_0$ such
that no graph with $n\ge n_0$ nodes and $m\ge n^{1+1/k-\varepsilon}$ edges
contains a $2k$-cycle. Now consider the following algorithm: Let $G=(V,E)$
be the graph we wish to detect a $C_{2k}$ in. If $|V| < n_0$ we can answer in
constant time. If $|V| \ge n_0$ and $|E| \ge |V|^{1+1/k-\varepsilon}$ answer no,
and otherwise run the algorithm of \Cref{thm:2k_cycle} to detect a $C_{2k}$
in time $O(|V|^{(1+1/k-\varepsilon)\cdot 2k/(k+1)}) = o(|V|^{2-\delta})$.
Hence part (2) holds.
\end{proof}
\section{Omitted proofs}\label{sec:proofs}
This section contains missing proofs from Section~\ref{sec:kwalks}.
\begin{proof}[Proof of \Cref{lem:matrixSnormOnlyZeroOne}]
Let $v \in \mathbb{R}^n$ be a vector such that each entry either is contained in $\left[2^{-1},1\right]$
or is $0$. Let $r = \abs{\operatorname{supp}(v)}$ and write $v$ as $v = \sum_{i=1}^r \lambda_i e_i$ for vectors
$e_i$ such that for each $e_i$ there is a single entry $(e_i)_j = 1$ and all other entries
are $0$.
Let $X_1,\ldots,X_r$ be independent random variables $\in \set{0,1}$ such that $E(X_i) = \lambda_i$.
By the concavity of $\snorm{\cdot}$ we then have
\begin{align}
\notag
\snorm{Av}
&=
\snorm{E \left ( A \sum_{i=1}^r X_i e_i\right )}
\le
E \left ( \snorm{A \sum_{i=1}^r X_i e_i} \right )
\\\notag &\le
E \left ( C\snorm{\sum_{i=1}^r X_i e_i} \right )
\le
C\sqrt{r}
\\
\label{eq:vectorHalfToOneBound}
&\le
2C\snorm{v}
\, .
\end{align}
Since $v$ was arbitrarily chosen \eqref{eq:vectorHalfToOneBound} holds for all vector
$v$ with entries in $\set{0} \cup [2^{-1},1]$.
Let $v \in \mathbb{R}^n$ be a vector where each entry is non-negative. We will show that
$\snorm{Av} \le 8C \snorm{v}$. For each integer $k$ let $v^{(k)} \in \mathbb{R}^n$ be the vector
containing the $i$'th entry of $v_i$ if $v_i \in (2^{k-1},2^k]$ and $0$ otherwise, i.e.
\begin{align}
\notag
v^{(k)}_i = \left [ v_i \in (2^{k-1},2^k] \right ] v_i
\, .
\end{align}
Using the triangle inequality and \eqref{eq:vectorHalfToOneBound} on the vectors
$2^{-k}v^{(k)}$ now gives us
\begin{align}
\notag
\snorm{Av} =
\snorm{\sum_{k} Av^{(k)}}
&\le
\sum_{k} 2^k \snorm{A 2^{-k}v^{(k)}}
\\\notag &\le
\sum_{k} 2^k \cdot 2C\snorm{2^{-k} v^{(k)}}
\\ \label{eq:AvLessThanSum}
&=
2C\sum_{k} \snorm{v^{(k)}}
\, .
\end{align}
Now we have that
\begin{align}
\notag
\sum_{k} \snorm{v^{(k)}}
&=
\sum_{k}
\int_0^{2^k}
\sqrt{\abs{\set{i \mid v^{(k)}_i \ge x}}}
dx
\\\notag & \le
\sum_{k}
2^k
\sqrt{\abs{\set{i \mid v^{(k)}_i \ge 2^{k-1}}}}
\\
\notag
& =
4
\sum_{k}
\int_{2^{k-2}}^{2^{k-1}}
\sqrt{\abs{\set{i \mid v^{(k)}_i \ge x}}}
dx
\\
\label{eq:sumLessThanv}
&
\le
4
\sum_{k}
\int_{2^{k-2}}^{2^{k-1}}
\sqrt{\abs{\set{i \mid v_i \ge x}}}
dx
=
4\snorm{v}\, .
\end{align}
Combining \eqref{eq:AvLessThanSum} and \eqref{eq:sumLessThanv} gives that $\snorm{Av} \le 8C\snorm{v}$
for every non-negative vector $v \in \mathbb{R}^n$ as desired.
Let $v \in \mathbb{R}^n$ be any real vector. Let $v^+$ and $v^-$ be defined by
\begin{align}
\notag
(v^+)_i = \max \set{v_i,0},
\ \
(v^-)_i = \max \set{-v_i,0}\, .
\end{align}
Then $v^+$ and $v^-$ have non-negative coordinates and $v = v^+ - v^-$. It is easy
to see that $\snorm{v} \ge \max\set{\snorm{v^+},\snorm{v^-}}$, and therefore:
$\snorm{v^+} + \snorm{v^-} \le 2\snorm{v}$. Now we get the result by the using the triangle
inequality:
\begin{align}
\notag
\snorm{Av} &=
\snorm{Av^{+} - Av^{-}}
\\\notag &\le
\snorm{Av^{+}} + \snorm{Av^{-}}
\\\notag &\le
8C \left ( \snorm{v^+} + \snorm{v^{-}} \right )
\\\notag &\le
16C \snorm{v}
\, .
\end{align}
It follows that $\snorm{A} \le 16C$.
\end{proof}
Below we show \Cref{lem:modifiedBS}, which can be seen as a modified version of the classic Bondy and Simonovits lemma, as we here argue about edges between any two subsets of the graph, instead of edges in the entire graph as in the original lemma \cite{BONDY197497}.
\begin{proof}[Proof of \Cref{lem:modifiedBS}]
Let $m=|E(A,B)|$ and let $E = E(A,B)$.
We will assume that $m \ge 100k\cdot(|A| + |B|)$ as the statement is
otherwise trivially true. We will assume that the graph contains no
$2k$-cycle and show that then $m\le 100k\cdot \sqrt{|A|\cdot|B|}^{1+1/k}$.
Let $2\alpha = \frac{m}{|A|}$ and let $2\beta =
\frac{m}{|B|}$ be the average degrees of nodes in $A$ and $B$
respectively when restricted to $E$. Recursively remove any node from
$A$ respectively $B$ which does not have at least $\alpha$ respectively
$\beta$ edges in $E$. Then we remove strictly fewer than $\alpha\cdot
|A| + \beta\cdot |B| = m$ edges and thus have a non-empty graph
left.
Now fix some node $u\in A$ and let $L_0 = \{u\}$. Now define $L_{i+1}$ to be
the neighbours of the nodes in $L_i$ using the edges of $E$ for
$i=0,\ldots, k-1$. This gives us the sets $L_0,\ldots, L_k$.
Note that if $A\cap B = \emptyset$ we have $L_i\cap L_{i+1} =
\emptyset$ for each $i = 0,\ldots, k-1$. We will show by induction that
$|L_i| \le |L_{i+1}|$ for each $i=0,\ldots, k-1$. This is clearly true for
$i=0$ since $u$ has degree at least $\alpha\ge 50k$ by assumption. Now fix
some $i \ge 1$ and assume that the statement is true for all $j < i$. We will
assume that $i$ is even (the other case is symmetric). We know
from~\cite{BONDY197497,YusterZ97} that
\[
|E(L_i,L_{i+1})| \le 4k\cdot (|L_i| + |L_{i+1}|)\ ,
\]
as otherwise we can find a $2k$-cycle. By the induction hypothesis this
gives us
\[
|E(L_{i-1},L_i)| \le 8k\cdot |L_i|\ .
\]
Since $i$ is even we also know that
\[
\alpha\cdot |L_i| \le |E(L_{i-1},L_i)| + |E(L_i,L_{i+1})|\ ,
\]
and thus
\[
(\alpha - 8k)\cdot |L_i| \le |E(L_i,L_{i+1})| \le 4k\cdot (|L_i| +
|L_{i+1}|)\ .
\]
This gives us that $(\alpha-12k)\cdot |L_i| \le 4k\cdot |L_{i+1}|$, and it follows
that
\[
|L_{i+1}| \ge \frac{\alpha-12k}{4k}\cdot |L_i|\ .
\]
By our assumption on $\alpha$ this proves that $|L_{i+1}| \ge |L_i|$. When
$i$ is odd the same argument gives us that $|L_{i+1}| \ge
\frac{\beta-12k}{4k}\cdot |L_i|$.
By the above discussion it follows that
\begin{align*}
|L_k| &\ge \left(\frac{\alpha -
12k}{4k}\right)^{\ceil{k/2}}\cdot\left(\frac{\beta-12k}{4k}\right)^{\floor{k/2}}
\\ &\ge \frac{\alpha^{\ceil{k/2}}\beta^{\floor{k/2}}}{(8k)^k}\ ,
\end{align*}
where the last inequality follows by our assumption that $\alpha,\beta \ge
50k$. Assume now that $k$ is odd (as the even case is handled similarly). It
then follows that
\[
|B| \ge |L_k|
\ge \frac{\alpha^{\ceil{k/2}}\beta^{\floor{k/2}}}{(8k)^k}\ ,
\]
and a symmetric argument gives us
\[
|A| \ge \frac{\alpha^{\floor{k/2}}\beta^{\ceil{k/2}}}{(8k)^k}\ ,
\]
implying that
\[
\sqrt{|A|\cdot |B|} \ge \frac{\sqrt{\alpha\beta}^k}{(8k)^k}
= \frac{\sqrt{\frac{m^2}{4|A||B|}}^k}{(8k)^k}\ .
\]
Now taking the $k$th root and isolating $m$ yields exactly the bound we
wanted to show
\[
m\le 16k\cdot\sqrt{|A||B|}^{1+1/k}\ .
\]
In the above proof we assumed that $A$ and $B$ were disjoint in order to
apply the lemma of~\cite{BONDY197497,YusterZ97}. Now observe that if this
is not the case we can pick subsets $A'\subseteq A$ and $B'\subseteq B$
with $A'\cap B' = \emptyset$ and $E(A',B') \ge m/2$ and the argument now
follows through.
\end{proof}
\end{document} |
\begin{document}
\vbox to .5truecm{}
\begin{center}
\cmc
A Genus Two Curve Related\\
to the Class Number One Problem
\end{center}
\vk.3cm
\begin{center}
by
Viet Kh. Nguyen
\footnote{IMC Institute \& Fpt University\\
{\text {\hk.5cm}} 2010 Mathematics Subject Classification. Primary 11G30, 11R29, 11R11.}
\end{center}
\vk.1cm
\hk10.5cm{\it \`a la m\'emoire de V. A. Iskovskikh}
\vk.2cm
\begin{abstract} {We give another solution to the class number one problem by showing that imaginary quadratic fields $\Q(\sqrt{-d})$ with class number $h(-d)=1$ correspond to integral points on a genus two curve $\mscrK_3$. In fact one can find all rational points on $\mscrK_3$. The curve $\mscrK_3$ arises naturally via certain coverings of curves:\ $\mscrK_3\rg\mscrK_6$,\ $\mscrK_1\rg\mscrK_2$\ with $\mscrK_2\colon y^2=2x(x^3-1)$ denoting the Heegner curve, also in connection with the so-called Heegner-Stark covering $\mscrK_1\rg\mscrK_s$.}
\end{abstract}
\vk.5cm
{\bf 1.\ Introduction.}\quad In his famous {\it Disquisitiones Arithmeticae} (Article 303) Gauss hypothetically believed that there are only nine
imaginary quadratic fields $\Q(\sqrt{-d})$ with class number $h(-d)=1$,\ namely with\ $d=1, 2, 3, 7, 11, 19, 43, 67, 163$. It is known nowadays as
the Baker-Heegner-Stark theorem.
\vk.2cm
In the early 1980's V. A. Iskovskikh organized a seminar devoted to a beautiful approach of Heegner who was essentially the first to give a
complete proof of the theorem of Baker-Heegner-Stark based on an approach using modular functions (\cite{Heeg52}, {\it cf.}
also \cite{Bir69}, \cite{Star73}, \cite{Ser89}, \cite{Star07}, \cite{Sche10}, \cite{Sha13}). More precisely, assuming $d\equiv 3\ ({\rm mod}\ 8)$
(the main case), Heegner showed that every such a field corresponds to an integral point on the so-called Heegner curve:
$$y^2=2x(x^3-1)\leqno{(\mscrK_2)}$$
In the higher class number case the statement reads as every field $\Q(\sqrt{-d})$,\ $d\equiv 3\ ({\rm mod}\ 8)$ corresponds to an integral point of
degree $h(-d)$ on $\mscrK_2$, provided $3\nmid d$. One may remark that the ``{\it exceptional}'' field $\Q(\sqrt{-3})$ (corresponding to the
point $(0,0)$ on $\mscrK_2$) is the only case with $3\mid d$ such that $j$-invariant is a cube (namely equal to zero). On the contrary to the case of
higher class numbers one sees, for example if $h(-d)=2$, three fields
$\Q(\sqrt{-51})$,\ $\Q(\sqrt{-123})$ and $\Q(\sqrt{-267})$ have no representatives on $\mscrK_2$ along this line of arguments.
\vk.2cm
The aim of this note is to show that for our purpose one can involve another genus two curve $\mscrK_3$:
$$8 x^8-32 x^6 y+40 x^4 y^2+64 x^5-16 x^2 y^3-128 x^3 y+y^4+48 x y^2+96 x^2-32 y-24=0\leqno{(\mscrK_3)}$$
realized at that time by the author.
The main result of the note is
\vk.2cm
{\bf Theorem A.}\quad {\it The curve $\mscrK_3$ has the following integral points
\vk.2cm
\cen {\quad\ $(3,6)$,\quad\ $(-1,2)$,\qquad $(1,6)$,\qquad $(3,14)$,\qquad $(7,26)$,\quad $(-17,150)$ }
\vk.2cm
\no corresponding to the fields
\vk.2cm
\cen{ $\Q(\sqrt{-3})$,\ $\Q(\sqrt{-11})$,\ $\Q(\sqrt{-19})$,\ $\Q(\sqrt{-43})$,\ $\Q(\sqrt{-67})$,\
$\Q(\sqrt{-163})$}
\vk.2cm
\no respectively. Besides it has three ``extraneous" integral points:\ $(-1,-2)$,\ $(-3,6)$,\ $(1,2)$\ (in fact $(1,2)$\ is a double
point on $\mscrK_3$)\ plus two more rational points\ $\big(-\frac{9}{17},\frac{6}{289}\big)$,\ $\big(-\frac{155}{79},\frac{42486}{6241}\big)$.
All the listed are the only rational points on $\mscrK_3$.}
\vk.2cm
As a corollary one obtains a new solution to Gauss' ``{\it tenth discriminant problem}''.
\vk.2cm
Nowadays with the aid of computational tools one can easily compute the genus of $\mscrK_3$.
A surprising observation is the fact that $\mscrK_3$ is birational to a genus 2 curve $\mscrK_s$ studied independently by Heegner and Stark
(\cite{Heeg52}, \cite{Star73}).
\vk.5cm
{\bf Acknowledgments.}\quad I would like to thank V. A. Abrashkin for a nice talk on the class number problem at the seminar mentioned in the
Introduction. This paper is dedicated to the 75th birthday of my teacher V. A. Iskovskikh with profound admiration and memory.
\vk.5cm
{\bf 2.\ The curve $\mscrK_3$.}\quad Let us first recall some known results from the Weber-Heegner-Birch theory (\cite{Web91},
\cite{Heeg52}, \cite{Bir69}, \cite{Sche10}). We shall use Heegner's notation $\sg$ for the Schl\"afli functions of Stufe $48$ which are defined by
$$\sg(\tau)=q^{-\frac{1}{24}}\ \prod_{n=1}^\iy\ \big( 1+q^{2n-1}\big);\quad
\sg_1(\tau)=q^{-\frac{1}{24}}\ \prod_{n=1}^\iy\ \big( 1-q^{2n-1}\big);$$
$$\sg_2(\tau)=\sqrt{2}\ q^{\frac{1}{12}}\ \prod_{n=1}^\iy\ \big( 1+q^{2n}\big);$$
with $q=e^{i\pi\tau}$,\ Im$(\tau)>0$. Then the 24th powers $\sg^{24}$,\ $-\sg_1^{24}$,\ $-\sg_2^{24}$ are the roots of
$$U^3-48 U^2+(768-j)U-4096=0\eqno{(2.1)}$$
where $j$ is the modular invariant.
\vk.2cm
We assume throughout $d$ is positive $\equiv 3\ ({\rm mod}\ 8)$ and put $\om=\frac{1}{2}(3+\sqrt{-d})$. Then $-\sg_2^{24}(\om)$ is real and positive.
Let $\phi$ denote the 3rd power $\sg_2^3(\om)$ up to a suitable 16th root of unity such that $\phi$ is real and positive. Then $\phi^8$ is an
algebraic integer satisfying equation $(2.1)$ with $j=j(\om)$ and by Weber's results $\phi^4,\ \phi^2$ are algebraic integers of degree $3h(-d)$.
Furthermore Weber's conjecture proved in \cite{Bir69} tells us that indeed $\sqrt{2}\ \phi$ is integral of degree $3h(-d)$, so it satisfies a cubic
equation
$$W^3-2 a_3 W^2+2 b_3 W-8=0\eqno{(2.2)}$$
with $a_3, b_3$ integral of degree $h(-d)$. A computation in conjunction with $(2.1)$ shows that $(a_3,b_3)$ gives rise to an integral point of
degree $h(-d)$ on $\mscrK_3$. In particular if $h(-d)=1$ we get the first statement of Theorem A.
\vk.2cm
If we argue in the same way with a cubic equation for $\phi^2$
$$T^3-2 a_2 T^2+2 b_2 T-8=0\eqno{(2.3)}$$
we get a relation between $a_2, b_2$ describing a curve $\mscrK_6$ which is birationally isomorphic to the Pell conics
$$\Big(\frac{k}{2}-(a_2+1)\Big)^2-2\Big(\frac{a_2+1}{2}\Big)^2=1$$
where\ $b_2=k(a_2-1)+2$.
\vk.2cm
In view of $(2.2)$,\ $(2.3)$ the covering $\mscrK_3\rg\mscrK_6$ can be given in the following explicit form
$$a_2=a_3^2-b_3,\quad 2 b_2=b_3^2-8 a_3\eqno{(2.4)}$$
from which one may find interesting relations with the Pell sequence.
\vk.2cm
{\bf Remark\ 2.1.}\quad By virtue of $(2.4)$ one sees that $a_3$ satisfies the following equation
$$z^4-2 a_2 z^2-8z+(a_2^2-2 b_2)=0\eqno{(2.5)}$$
Therefore $(2.3)$ is nothing but Euler's resolvent for $(2.5)$.
\vk.2cm
{\bf Example\ 2.2.}\quad As noted in the Introduction we illustrate our approach with some values $d$ multiple of $3$. By using the Tables from
\cite{Web91} and \cite{Wat36} one sees that the fields $\Q(\sqrt{-51})$,\ $\Q(\sqrt{-123})$,\ $\Q(\sqrt{-267})$ give rise to three real quadratic
points
$$(-1,8+2\sqrt{17}),\quad (4+\sqrt{41},40+6\sqrt{41}),\quad (-10-\sqrt{89},310+32\sqrt{89})$$
on $\mscrK_3$ respectively.
\vk.5cm
{\bf 3.\ The curve $\mscrK_s$.}\quad If we assume in addition $3\nmid d$, then the $8th$ powers
$\sg^8$,\ $-\sg_1^8$,\ $-\sg_2^8$ are the roots of
$$V^3-\ga_2 V-16=0\eqno{(3.1)}$$
where $\ga_2$ is the Weber function of Stufe $3$ such that $\ga_2^3=j$.
\vk.2cm
In the notation of {\bf 2.} let $\var$ be a cubic root of $\phi$ that is real and positive. Then as above $\var^2$,\ $\sqrt{2}\ \var$ are algebraic
integral of degree $3h(-d)$. From $(3.1)$ and a cubic equation for $\var^2$
$$Z^3-2 \al_2 Z^2+2 \be_2 Z-2=0\eqno{(3.2)}$$
with $\al_2, \be_2$ integral of degree $h(-d)$ we come to the equation ($\mscrK_2$) of Heegner's curve with $x=\al_2$,\ $y=\be_2-2\al_2^2$.
\vk.2cm
Now writing a cubic equation for $\sqrt{2}\ \var$
$$S^3-2 \al_3 S^2+2 \be_3 S-4=0\eqno{(3.3)}$$
with $\al_3, \be_3$ integral of degree $h(-d)$ one gets a covering\ $\mscrK_1\rg\mscrK_2$ considered in \cite{Heeg52}, \cite{Star73} which can be
given by
$$\al_2=\al_3^2-\be_3,\quad 2 \be_2=\be_3^2-4 \al_3\eqno{(3.4)}$$
Hence the defining equation for $\mscrK_1$ is as follows
$$8 x^8-32 x^6 y+40 x^4 y^2+32 x^5-16 x^2 y^3-64 x^3 y+y^4+24 x y^2+24 x^2-8 y=0\leqno{(\mscrK_1)}$$
Despite a similarity with equation $(\mscrK_3)$ this is a curve of genus $9$.
\vk.2cm
{\bf Proposition\ 3.1.}\quad {\it The curve $\mscrK_1$ has exactly 6 integral points $(0,0)$,\ $(1,2)$,\ $(-1,0)$,\ $(0,2)$,\ $(-1,2)$,\ $(2,6)$
which are in $1-1$ correspondence with class number one fields
$\Q(\sqrt{-3})$,\ $\Q(\sqrt{-11})$,\ $\Q(\sqrt{-19})$,\ $\Q(\sqrt{-43})$,\ $\Q(\sqrt{-67})$,\ $\Q(\sqrt{-163})$.}
\vk.2cm
The proof follows immediately from $(3.4)$ and \cite{Heeg52}. The fact that these integral points exhaust all rational points on $\mscrK_1$ follows
from Heegner-Stark results (\cite{Heeg52}, \cite{Star73}) on rational points on curve $\mscrK_s$
$$w^2=2z(z^4+4 z^3-2 z^2+4z+1)\leqno{(\mscrK_s)}$$
The covering $\mscrK_1\rg\mscrK_s$ is given by
$$z=\frac{\be_3}{\al_3^2}-1;\quad w=4(z-2)y-2(3 z^2-2 z-1),\quad y=\frac{1}{\al_3^3}$$
It should be noted that Heegner used a little different normalization ({\it cf.} \cite{Heeg52}, formulae $(151)-(152)$). Furthermore in order to
enumerate all rational points of $\mscrK_s$ one may apply the argument of \cite{Heeg52} in solving equation $(\mscrK_2)$ in integers. A precise
statement is as follows.
\vk.2cm
{\bf Theorem\ 3.2.}\quad {\it The only rational points on the curve $\mscrK_s$ are\
$(0,0)$,\ $(\pm 4,1)$,\ $(\pm 4,-1)$,\ $\big(\pm\frac{7}{4},\frac{1}{2}\big)$,\ $(\pm 14,2)$.
}
\vk.2cm
{\bf Theorem\ 3.3.}\quad {\it The curves $\mscrK_3$ and $\mscrK_s$ are birationally isomorphic.}
\vk.2cm
Since $\mscrK_3$ is hyperelliptic of genus $2$ one may use van Hoeij's algorithm (\cite{vHoe02}) and a computing program ({\it e.g.} Maple) to write
down its Weierstrass normal form which turns out to be the equation $(\mscrK_s)$. The corresponding birational isomorphism $\mscrK_s\rg\mscrK_3$ is
given by
$$x=-\frac{z^4+8 z^3+2 w z+18 z^2+6 w-3}{z^4+4 z^3-2 z^2-12 z+1}$$
$$y=\frac{2\ P_8(z,w)}{(z^4+4 z^3-2 z^2-12 z+1)^2}$$
where
$$P_8(z,w)=(2 z^5+10 z^4+36 z^3+68 z^2+10 z-30)w$$
$$\hk4.5cm -z^8+60 z^6+192 z^5+82 z^4-128 z^3+172 z^2+64 z+7.$$
\vk.2cm
Theorem A now follows from Theorems 3.2 and 3.3.
\vk.2cm
We note that the inverse birational isomorphism $\mscrK_3\rg\mscrK_s$ has more complicated formulae for $(z,w)$. For completeness we include them
here in the following form
$$z=1-\frac{4 x^3-4 x y-y^2+4 x+4}{2 x^4+2 x^3-3 x^2 y-2 x y+6 x-y+2}$$
$$w=-\frac{2\ P_{12}(x,y)}{(x-1)(x^2+1)(x^2-2 x-1)(x^2+2 x+3)(x+1)^5}$$
where
$$\begin{aligned}
P_{12}(x,y)\ =\ \ & 4 x^{12}+252 x^{11}-24 x^{10} y+156 x^{10}-622 x^9 y+15 x^8 y^2+440 x^9-514 x^8 y+\\
+ & 322 x^7 y^2-x^6 y^3+1256 x^8-708 x^7 y+288 x^6 y^2-21 x^5 y^3+1536 x^7-620 x^6 y\\
+ & 310 x^5 y^2-19 x^4 y^3+1344 x^6-716 x^5 y+64 x^4 y^2-20 x^3 y^3+440 x^5-640 x^4 y\\
+ & 22 x^3 y^2-7 x^2 y^3-12 x^4-316 x^3 y-8 x^2 y^2-3 x y^3-124 x^3-140 x^2 y-6 x y^2\\
- & y^3-92 x^2-22 x y+y^2-16 x+2 y.
\end{aligned}$$
\vk.2cm
{\bf Remark\ 3.4.}\quad There is a map $\mscrK_2\rg\mscrK_6$ (\cite{Heeg52}) given by
$$a_2=4\al_2^3-6\al_2\be_2+3;\quad b_2=4\be_2^3-12\al_2\be_2+6$$
and similarly a map $\mscrK_1\rg\mscrK_3$
$$a_3=2\al_3^3-3\al_3\be_3+3;\quad b_3=\be_3^3-6\al_3\be_3+6$$
which turned out quite useful in the computation process.
\vk.5cm
\vk.5cm
\no IMC Institute \& Fpt University
\no Permanent: 8 Ton That Thuyet, My Dinh, Tu Liem, Hanoi, Vietnam
\no e-mail:\ [email protected],\ \ [email protected]
\end{document} |
\begin{document}
\title{Lower Bound for Randomized First Order Convex Optimization}
\rule{6.5in}{0.4pt}
\textbf{Blake Woodworth}
\url{[email protected]}\\
\textbf{Nathan Srebro}
\url{[email protected]}\\
{\small Toyota Technological Institute at Chicago, Chicago, IL 60637, USA}
\abstract{We provide an explicit construction and direct proof for the
lower bound on the number of first order oracle accesses required
for a {\em randomized } algorithm to minimize a convex Lipschitz
function.}
\section{Introduction}
We prove lower bounds for the complexity of first-order optimization using a randomized algorithm for the following problem:
\begin{equation} \label{eq:theproblem}
\min_{x \in \mathbb{R}^d : \norm{x}\leq B} f(x)
\end{equation}
where $f$ is convex and $L$-Lipschitz continuous with respect to the
Euclidean norm. We consider
a standard oracle access model: at each iteration the
algorithm selects, possibly at random, a vector $x \in \mathbb{R}^d, \norm{x}
\leq B$ based on the oracle's responses to previous queries. The oracle then
returns the function value $f(x)$ and some
subgradient $g \in \partial f(x)$ chosen by the oracle. We bound
the expected number of iterations $T$, as a function of $L$,$B$ and
$\epsilon$, needed to ensure that for any convex $L$-Lipschitz
function, any valid first order oracle, and any dimension,
$f(x_T) \leq \min_{x \in \mathbb{R}^d : \norm{x}\leq B} f(x) + \epsilon$.
We are interested in lower bounding dimension-independent performance
(i.e.~what an algorithm can guarantee in an arbitrary dimension)
and in our constructions we allow the dimension to grow as $\epsilon\rightarrow 0$.
\citet{NemirovskiYudin} carefully study both randomized and
deterministic first order optimization algorithms and give matching
upper and lower bounds for both, establishing a tight worst-case
complexity for \eqref{eq:theproblem}, whether using randomized or
deterministic algorithms, of $\Theta(L^2B^2/\epsilon^2)$ oracle
queries.
The lower bound for deterministic algorithms is fairly direct,
well-known and has been reproduced in many forms in books,
tutorials, and lecture notes in the ensuing four decades, as are the
lower bounds for algorithms (whether randomized or deterministic)
where the iterates $x_t$ are constrained to be in the span of previous
oracle responses. When the iterates are constrained to be in this
span, one can ensure the first $t+1$ iterates are spanned by the first
$t$ standard basis vectors $e_1,\ldots,e_t$, and that every point in this
span is at least $\Omega(1/\sqrt{t})$-suboptimal. For deterministic algorithms, even if
the iterates escape this span, one can adversarially rotate the
objective function so that the algorithm only escapes in useless
directions, obtaining the exact same lower bound using a very similar
construction. Either way, a dimensionality of
$d=\Theta(T)=\Theta(L^2B^2/\epsilon^2)$ is sufficient to construct a
function requiring $\Theta(L^2B^2/\epsilon^2)$ queries to optimize.
Analyzing randomized algorithms which are allowed to leave the span of
oracle responses is trickier: the algorithm may guess directions, and
since even if we know the algorithm, we do not know in advance which
directions it will guess, we cannot rotate the function so as to avoid
these directions. \citet{NemirovskiYudin} do provide a detailed and
careful analysis for such randomized algorithms, using a recursive
reduction argument and without a direct construction. To the best of
our knowledge, this lower bound has not since been simplified, and so
lower bounds for randomized algorithms are rarely if ever covered in
books, tutorials and courses. In this note, we provide an explicit
construction establishing the following lower bound:
\begin{theorem} \label{thm:LB} For any $L,B$, $\epsilon \in
(0,\frac{LB}{2})$, dimension $d \geq
\frac{2L^8B^8}{\epsilon^8}\log\frac{L^4B^4}{\epsilon^4}$, and
any randomized optimization algorithm, there exists a convex
$L$-Lipschitz function $f: \set{x \in \mathbb{R}^d : \norm{x}\leq B}
\mapsto \mathbb{R}$ and an appropriate first order oracle such
that the algorithm must make $\Omega(L^2B^2/\epsilon^2)$ queries
to the oracle in expectation in order to find an
$\epsilon$-suboptimal point.
\end{theorem}
Our construction and proof directly captures the following intuition:
if the dimension is large enough, blindly guessing a direction becomes
increasingly difficult, and the algorithm should not gain much by such
random guessing. In the standard construction used for the
deterministic lower bound, guessing a direction actually does provide
information on all useful directions. However, by slightly perturbing the
standard construction, we are able to avoid such information leakage.
To do so, we use a technique we recently developed in order to analyze
finite sum structured optimization problems \cite{Woodworth16}.
In this note we only consider Lipschitz (non-smooth) functions without
an assumption of strong convexity. A reduction or simple modification
to the construction can be used to establish a lower bound for
Lipschitz (non-smooth) strongly convex functions. Applying the same
technique we use here to the standard lower bound construction for
smooth functions leads to lower bounds for randomized
algorithms for smooth non-strongly-convex and smooth strongly
convex first order optimization too. All of these lower bounds would
match those for deterministic optimization, with a polynomial increase
in the dimension required. This polynomial increase can likely
be reduced to a smaller polynomial through more careful
analysis.
Theorem \ref{thm:LB} (which we reiterate also follows from the
more detailed analysis of \citeauthor{NemirovskiYudin}) shows that
randomization cannot help first order optimization. It
is important to emphasize that this should not be taken for granted, and
that in other situations randomization {\em could} be beneficial.
For example, when optimizing finite sum structured objectives, randomization provably
reduces the oracle complexity \cite{Woodworth16}. It is thus
important to specifically and carefully consider randomized algorithms
when proving oracle lower bounds, and we hope this note will aid in
such analysis.
We would like to thank the authors of \cite{Carmon17} Yair Carmon, John Duchi, Oliver Hinder, and Aaron Sidford for pointing out a mistake with our original proof of Lemma \ref{lem:1term}, which has since been corrected.
\section{Proof of Theorem \ref{thm:LB}}
Without loss of generality assume $L = B = 1$. Consider a family of functions $\mathcal{F}$ of the form
\begin{equation}
f(x) = \max_{1\leq j \leq k} \left(\inner{x}{v_j} - j c \right)
\end{equation}
where $k = \frac{1}{4\epsilon^2}$, $c = \frac{\epsilon}{k}$, and the vectors $v_j$ are an orthonormal set in $\mathbb{R}^d$. Each of these functions is the maximum of linear functions thus convex and $1$-Lipschitz.
Drawing the orthonormal set of vectors $v_j$ uniformly at random specifies a distribution over the family of functions $\mathcal{F}$. Our approach will be to show that any \emph{deterministic} optimization algorithm must make at least $\Omega(1/\epsilon^2)$ oracle queries in expectation over the randomness in the choice of $f$. This implies through Yao's minimax principle a lower bound on the expected number of queries needed by a randomized algorithm on the worst-case function in $\mathcal{F}$. Therefore, for the remainder of the proof we need only consider deterministic optimization algorithms and functions drawn from this distribution over $\mathcal{F}$.
First, we show that minimizing a given function $f$ amounts to finding a vector $x$ which has significant negative correlation with \emph{all} of the vectors $v_j$. Consider the unit vector $\hat{x} = -\frac{1}{\sqrt{k}}\sum_{j=1}^k v_j$
\begin{equation}
f(\hat{x})
= \max_{1\leq j \leq k} \left( \inner{\hat{x}}{v_j} - j c \right)
= -\frac{1}{\sqrt{k}} - c
= -2\epsilon - c
\geq f(x^*)
\end{equation}
Therefore, for any $x$ such that $\inner{x}{v_j} > -\frac{c}{2}$ for some $j$,
\begin{equation} \label{eq:small_ip_suboptimal}
f(x)
\geq \inner{x}{v_j} - jc
> -\frac{c}{2} - kc
= -\frac{c}{2} - \epsilon
> f(\hat{x}) + \epsilon
\geq f(x^*) + \epsilon
\end{equation}
Consequently, any such $x$ cannot be $\epsilon$-suboptimal. Therefore, in order to show that the expected number of oracle queries is $\Omega(k) = \Omega(1/\epsilon^2)$, it suffices to show that the following event occurs with constant probability:
\begin{equation}
E = \left\llbracket\ \forall t \leq k\ \forall j \geq t\ \abs{\inner{x^{(t)}}{v_j}} < \frac{c}{2}\ \right\rrbracket
\end{equation}
Let $S_t = \spn{x^{(1)},...,x^{(t)}, v_1, ..., v_{t}}$ and let $S_t^\perp$ be its orthogonal complement. Let $P_t$ and $P^\perp_t$ be (orthogonal) projection operators onto $S_t$ and $S_t^\perp$ respectively. Consider the events
\begin{equation}
G_t = \left\llbracket\ \forall j \geq t\ \abs{\inner{\normalized{P_{t-1}^{\perp}x^{(t)}}}{v_j}} < \frac{c}{2(\sqrt{2}+\sqrt{k-1})}\ \right\rrbracket
\end{equation}
These events are useful because:
\begin{lemma} \label{lem:GimpE}
$\bigcap_{t=1}^k G_t \implies E$
\end{lemma}
\begin{proof}
Let $G_{\leq t}$ denote $\bigcap_{t'=1}^t G_{t'}$.
It suffices to show that for each $t\leq k$, $G_{\leq t} \implies \forall j \geq t\ \abs{\inner{x^{(t)}}{v_j}} < \frac{c}{2}$.
For each $t \leq k$ and $j \geq t$
\begin{equation}
\begin{aligned} \label{eq:GimpE1}
\abs{\inner{x^{(t)}}{v_j}}
&\leq \norm{x^{(t)}}\abs{\inner{\normediter{t}}{P_{t-1}v_j}} + \norm{x^{(t)}}\abs{\inner{\normediter{t}}{P_{t-1}^{\perp}v_j}} \\
&\leq \norm{P_{t-1}v_j} + \abs{\inner{\frac{P_{t-1}^{\perp}x^{(t)}}{\norm{x^{(t)}}}}{v_j}} \\
&\leq \norm{P_{t-1}v_j} + \abs{\inner{\normalized{P_{t-1}^{\perp}x^{(t)}}}{v_j}} \\
&\leq \norm{P_{t-1}v_j} + \frac{c}{2(\sqrt{2} + \sqrt{k-1})}
\end{aligned}
\end{equation}
First, we decomposed $v_j$ into its $S_{t-1}$ and $S_{t-1}^\perp$ components and applied the triangle inequality. Next, we used that $\norm{x^{(t)}} \leq 1$ and that the orthogonal projection operator $P_{t-1}^\perp$ is self-adjoint. Finally, we used that the projection operator is non-expansive and then applied the definition of $G_t$.
Next, we will prove by induction on $t$ that for all $t \leq k$ and all $j \geq t$, $G_{<t} \implies \norm{P_{t-1}v_j}^2 \leq \frac{c^2(t-1)}{2(\sqrt{2}+\sqrt{k-1})^2}$. The case $t=1$ is trivial since $P_0$ projects onto the trivial subspace, so the left hand side is zero.
For the inductive step, fix any $t \leq k$ and $j \geq t$. Let $\hat{P}_t$ project onto $\spn{x^{(1)},...,x^{(t+1)},v_1,...,v_{t}}$ (this includes $x^{(t+1)}$ in contrast with $P_t$) and let $\hat{P}_t^\perp$ be the projection onto the orthogonal subspace.
Since $\set{x^{(1)},...,x^{(t-1)}, v_1,...,v_{t-1}}$ spans $S_{t-1}$, the Gram-Schmidt vectors
\begin{equation}
\normalized{P_0^\perp x^{(1)}}, \normalized{\hat{P}_0^\perp v_1}, \normalized{P_1^\perp x^{(2)}}, \normalized{\hat{P}_1^\perp v_2}, ..., \normalized{P_{t-2}^\perp x^{(t-1)}}, \normalized{\hat{P}_{t-2}^\perp v_{t-1}}
\end{equation}
are an orthonormal basis for $S_{t-1}$ (after ignoring any zero vectors that arise from projection).
We now write $\norm{P_{t-1}v_j}$ in terms of this orthonormal basis:
\begin{equation}
\begin{aligned} \label{eq:GimpE2}
\norm{P_{t-1} v_j}^2
&= \sum_{i=1}^{t-1} \inner{\normalized{P_{i-1}^\perp x^{(i)}}}{v_j}^2
+ \sum_{i=1}^{t-1} \inner{\normalized{\hat{P}_{i-1}^\perp v_i}}{v_j}^2 \\
&\leq \frac{c^2(t-1)}{4(\sqrt{2} + \sqrt{k-1})^2} + \sum_{i=1}^{t-1} \frac{1}{\norm{\hat{P}_{i-1}^\perp v_i}^2}\inner{\hat{P}_{i-1}^\perp v_i}{v_j}^2
\end{aligned}
\end{equation}
The inequality follows from the definition of $G_{<t}$. We must now bound the second term of \eqref{eq:GimpE2}. Focusing on the inner product one individual term in the sum
\begin{equation}\label{eq:rewriteprojection}
\begin{aligned}
\abs{\inner{\hat{P}_{i-1}^\perp v_i}{v_j}}
&= \abs{\inner{v_i}{v_j} - \inner{\hat{P}_{i-1}v_i}{v_j}} \\
&= \abs{\inner{P_{i-1}v_i}{v_j} + \inner{\inner{\normalized{P_{i-1}^\perp x^{(i)}}}{v_i}\normalized{P_{i-1}^\perp x^{(i)}}}{v_j}} \\
&\leq \abs{\inner{P_{i-1}v_i}{P_{i-1}v_j}} + \abs{\inner{\normalized{P_{i-1}^\perp x^{(i)}}}{v_i}\inner{\normalized{P_{i-1}^\perp x^{(i)}}}{v_j}}
\end{aligned}
\end{equation}
By the Cauchy-Schwarz inequality and the inductive hypothesis, the first term is bounded by $\norm{P_{i-1}v_i}\norm{P_{i-1}v_j} \leq \frac{c^2(i-1)}{2(\sqrt{2}+\sqrt{k-1})^2}$. By the definition of $G_{<t}$, the second term is bounded by $\frac{c^2}{4(\sqrt{2}+\sqrt{k-1})^2}$.
Furthermore, by our choice of $c = \frac{\epsilon}{k}$ and $k = \frac{1}{4\epsilon^2}$, $c = \frac{1}{2\sqrt{k}}$ so we conclude that
\begin{equation} \label{eq:GimpE2summand}
\begin{aligned}
\abs{\inner{\hat{P}_{i-1}^\perp v_i}{v_j}}
&\leq \frac{c^2(2i-1)}{4(\sqrt{2}+\sqrt{k-1})^2} \\
&= \frac{c}{4(\sqrt{2}+\sqrt{k-1})} \frac{2i-1}{2\sqrt{k}(\sqrt{2}+\sqrt{k-1})} \\
&\leq \frac{c}{4(\sqrt{2}+\sqrt{k-1})} \frac{2i-1}{2k} \\
&\leq \frac{c}{4(\sqrt{2}+\sqrt{k-1})}
\end{aligned}
\end{equation}
We have now upper bounded the inner products in \eqref{eq:GimpE2}, it remains to lower bound the norm in the denominator. Rewriting the projection $\hat{P}_{i-1}^\perp$ as in \eqref{eq:rewriteprojection}:
\begin{equation}
\begin{aligned}
\norm{\hat{P}_{i-1}^\perp v_i}^2
&= \abs{\inner{\hat{P}_{i-1}^\perp v_i}{v_i}} \\
&= 1 - \norm{P_{i-1}v_i}^2 - \inner{\normalized{P_{i-1}^\perp x^{(i)}}}{v_i}^2 \\
&\geq 1 - \frac{c^2(i-1)}{2(\sqrt{2}+\sqrt{k-1})^2} - \frac{c^2}{4(\sqrt{2}+\sqrt{k-1})^2}
\end{aligned}
\end{equation}
This quantity is at least $\frac{1}{2}$ because $c = \frac{1}{2\sqrt{k}} < 1$ so
\begin{equation}
\begin{aligned}
\frac{c^2(i-1)}{2(\sqrt{2}+\sqrt{k-1})^2} + \frac{c^2}{4(\sqrt{2}+\sqrt{k-1})^2}
&< \frac{2i-1}{(\sqrt{2}+\sqrt{k-1})^2} \\
&= \left(\frac{\sqrt{i-\frac{1}{2}}}{\sqrt{2}+\sqrt{k-1}}\right)^2 \\
&< \frac{1}{2} \left( \sqrt{\frac{i - \frac{1}{2}}{k}} \right)^2\\
&\leq \frac{1}{2}
\end{aligned}
\end{equation}
Combining this with \eqref{eq:GimpE2summand} and returning to \eqref{eq:GimpE2} we have that
\begin{equation}
\begin{aligned}
\norm{P_{t-1} v_j}^2
&\leq \frac{c^2(t-1)}{4(\sqrt{2} + \sqrt{k-1})^2} + \sum_{i=1}^{t-1} \frac{\inner{\hat{P}_{i-1}^\perp v_i}{v_j}^2}{\norm{\hat{P}_{i-1}^\perp v_i}^2} \\
&\leq \frac{c^2(t-1)}{4(\sqrt{2} + \sqrt{k-1})^2} + \sum_{i=1}^{t-1} 4\left(\frac{c}{4(\sqrt{2}+\sqrt{k-1})}\right)^2 \\
&= \frac{c^2(t-1)}{4(\sqrt{2} + \sqrt{k-1})^2} + \frac{c^2(t-1)}{4(\sqrt{2} + \sqrt{k-1})^2} = \frac{c^2(t-1)}{2(\sqrt{2} + \sqrt{k-1})^2}
\end{aligned}
\end{equation}
which completes the inductive step. Finally, we return to \eqref{eq:GimpE1} and conclude that
\begin{equation}
\begin{aligned}
\abs{\inner{x^{(t)}}{v_j}}
&\leq \norm{P_{t-1}v_j} + \frac{c}{2(\sqrt{2} + \sqrt{k-1})} \\
&\leq \sqrt{\frac{c^2(t-1)}{2(\sqrt{2} + \sqrt{k-1})^2}} + \frac{c}{2(\sqrt{2} + \sqrt{k-1})} \\
&= \frac{c(\sqrt{2} + \sqrt{t-1})}{2(\sqrt{2} + \sqrt{k-1})} \leq \frac{c}{2}
\end{aligned}
\end{equation}
This completes the proof.
\end{proof}
In our model of computation, the oracle can provide the algorithm with any subgradient at the query point. We can therefore design a resisting oracle which returns subgradients that are as uninformative as possible. At a given point $x$, the subdifferential of $f$ is
\begin{equation}
\partial f(x) = \textrm{Conv}\set{v_\ell\ :\ \ell \in \argmax_{1\leq j \leq k} \left(\inner{x}{v_j} - j c \right)}
\end{equation}
and our resisting oracle will return as a subgradient
\begin{equation}
v_{\ell}\quad\text{where}\quad \ell = \min\set{\ \argmax_{1\leq j \leq k} \left(\inner{x}{v_j} - j c \right)\ }
\end{equation}
That is, the returned subgradient will always be a single vector $v_\ell$ for the smallest value of $\ell$ that corresponds to a valid subgradient.
\begin{lemma} \label{lem:oracleresponses}
For each $t \leq k$, let $g^{(t)} \in \partial f\left(x^{(t)}\right)$ be the subgradient returned by the oracle. Then $G_{\leq t} \implies g^{(t)} \in \set{v_1,...,v_t}$.
\end{lemma}
\begin{proof}
This follows from the structure of the objective function $f$ and our choice of subgradient oracle. In the proof of Lemma \ref{lem:GimpE}, we established that $G_{\leq t} \implies \forall j \geq t\ \abs{\inner{x^{(t)}}{v_j}} < \frac{c}{2}$. Thus for any $j > t$
\begin{equation}
\inner{x^{(t)}}{v_t} - tc > -\frac{c}{2} - tc = \frac{c}{2} - (t+1)c > \inner{x^{(t)}}{v_j} - jc
\end{equation}
Therefore, no $j > t$ can index a maximizing term in $f$ so $g^{(t)} \in \set{v_1,...,v_t}$.
\end{proof}
By Lemma \ref{lem:GimpE} and the chain rule of probability
\begin{equation} \label{eq:chainrule}
\mathbb{P}\left[ E \right] \geq \mathbb{P}\left[ \bigcap_{t=1}^k G_t \right] = \prod_{t=1}^k \mathbb{P}\left[ G_t\ \middle|\ G_{<t} \right]
\end{equation}
Focusing on a single term in the product:
\begin{lemma} \label{lem:1term}
For any $t \leq k$, $\mathbb{P}\left[ G_t\ \middle|\ G_{<t} \right] > 1 - (k-t+1)\exp\left( \frac{-c^2(d-2t+1)}{8(\sqrt{2}+\sqrt{k-1})^2}\right)$
\end{lemma}
\begin{proof}
The key to lower bounding $\mathbb{P}\left[ G_t\ \middle|\ G_{<t} \right]$ is to show that for $j \geq t$ the vector $\normalized{P_{t-1}^\perp v_j}$ is uniformly distributed on the unit sphere in $S_{t-1}^\perp$ conditioned on $G_{<t}$ and $\set{v_1,...,v_{t-1}}$. If we can show this, then the inner product in the definition of $G_t$ is effectively between a fixed vector and a random unit vector, and the probability that this is large decreases rapidly as the dimension grows.
Fix an arbitrary $t \leq k$ and $j \geq t$. Let $V_{<t} := \set{v_1, ...,v_{t-1}}$ be any set of orthonormal vectors in $\mathbb{R}^d$. We will show that the density $p_{V_{\geq t}}\left(V_{\geq t}\ \middle|\ G_{<t}, V_{<t}\right)$ is invariant under rotations which preserve $\set{x^{(1)},...,x^{(t-1)}, v_1,...,v_{t-1}}$.
Let $R$ be any rotation $R^TR = I_{d\times d}$ such that $\forall w\in\spn{x^{(1)},...,x^{(t-1)}, v_1,...,v_{t-1}}$ $Rw = R^Tw = w$. We will show that $p_{V_{\geq t}}\left(V_{\geq t}\ \middle|\ G_{<t}, V_{<t}\right) = p_{V_{\geq t}}\left(RV_{\geq t}\ \middle|\ G_{<t}, V_{<t}\right)$. To begin
\begin{equation}
p_{V_{\geq t}}\left(V_{\geq t}\ \middle|\ G_{<t}, V_{<t}\right) =
\frac{\mathbb{P}\left( G_{<t}\ \middle|\ V \right)p_V(V)}{\mathbb{P}\left( G_{<t}\ \middle|\ V_{<t} \right)p_{V_{<t}}(V_{<t})}
\end{equation}
and
\begin{equation}
p_{V_{\geq t}}\left(RV_{\geq t}\ \middle|\ G_{<t}, V_{<t}\right) =
\frac{\mathbb{P}\left( G_{<t}\ \middle|\ V \right)p_V(RV)}{\mathbb{P}\left( G_{<t}\ \middle|\ V_{<t} \right)p_{V_{<t}}(V_{<t})}
\end{equation}
Since $V = \set{v_1,...,v_k}$ is marginally distributed uniformly, $p_V(V) = p_V(RV)$, so it only remains to show that $\mathbb{P}\left( G_{<t}\ \middle|\ V \right) = \mathbb{P}\left( G_{<t}\ \middle|\ RV \right)$. Recall that at this time we are considering an arbitrary \emph{deterministic} algorithm minimizing a randomly selected $f$. Thus for any particular $V$, which fixes $f$, either $G_{<t}$ holds or it does not---so the probabilities are either 0 or 1.
We will show by induction that for every $i < t$, if $\mathbb{P}\left( G_{<i}\ \middle|\ V \right) = 1$ then $\mathbb{P}\left( G_{<i}\ \middle|\ RV \right) = 1$ too. The case $i = 1$ is trivial since $G_{<1}$ is independent of $V$. Consider now some $1 < i < t$, and suppose that $\mathbb{P}\left( G_{<i}\ \middle|\ V \right) = 1$. Since $G_{<i} \implies G_{<s}$ for $s < i$, $\mathbb{P}\left( G_{<s}\ \middle|\ V \right) = 1$ for all $s \leq i$ and by the inductive hypothesis $\mathbb{P}\left( G_{<s}\ \middle|\ RV \right) = 1$ for all $s < i$. Thus, it just remains to show that $P(G_{i-1}\ |\ G_{<i-1}, RV) = 1$. Let $P'_{i}$ be the projection operator onto $\set{x'^{(1)},...,x'^{(i)},Rv_1,...,Rv_i}$ where the $x'$ are the oracle queries made by the algorithm when $f$ is determined by $RV$. For any $\ell \geq i-1$, consider
$\abs{\inner{\normalized{{P'}_{i-2}^\perp {x'}^{(i-1)}}}{Rv_\ell}}$.
Since $G_{<i-1}$ holds when $f$ is determined by $V$, by Lemma \ref{lem:oracleresponses} the queries $\set{x^{(1)},...,x^{(i-1)}}$ are determined by $\set{v_1,...,v_{i-2}}$. Since $G_{<i-1}$ holds when $f$ is determined by $RV$ and $\set{v_1,...,v_{i-2}} = \set{Rv_1,...,Rv_{i-2}}$, ${x'}^{(i-1)} = x^{(i-1)}$. Furthermore, since $R$ preserves $\set{x^{(1)},...,x^{(i-2)},v_1,...,v_{i-2}}$, it is also the case that ${P'}_{i-2}^\perp = P_{i-2}^\perp$.
Finally, since $P_{i-2}^\perp x^{(i-1)} = x^{(i-1)} - P_{i-2} x^{(i-1)} \in \spn{x^{(1)},...,x^{(i-1)},v_1,...,v_{i-2}}$, it is unchanged by $R^T$, therefore
\begin{equation}
\abs{\inner{\normalized{{P'}_{i-2}^\perp {x'}^{(i-1)}}}{Rv_\ell}}
= \abs{\inner{R^T\normalized{P_{i-2}^\perp x^{(i-1)}}}{v_\ell}}
= \abs{\inner{\normalized{P_{i-2}^\perp x^{(i-1)}}}{v_\ell}}
< \frac{c}{2(\sqrt{2}+\sqrt{k-1})}
\end{equation}
since $G_{i-1}$ holds when $f$ is determined by $V$. Therefore, we conclude that $p_{V_{\geq t}}\left(V_{\geq t}\ \middle|\ G_{<t}, V_{<t}\right)$ is invariant under rotations that preserve $\set{x^{(1)},...,x^{(t-1)},v_1,...,v_{t-1}}$.
For a given $j \geq t$, the marginal density of $v_j$ conditioned on $G_{<t}, V_{<t}$ is invariant under $R$. By Lemma \ref{lem:oracleresponses}, since the optimization algorithm is deterministic, the queries $\set{x^{(1)},...,x^{(t)}}$ are completely determined given $G_{<t}, V_{<t}$, thus the projection $P_{t-1}^\perp$ is also determined by $G_{<t}, V_{<t}$. Therefore, the random vectors $\normalized{P_{t-1}^\perp v_j}$ and $\normalized{P_{t-1}^\perp Rv_j}$ have the same density. The rotation $R$ preserves $S_{t-1}$ and $R$ preserves length, so $\normalized{P_{t-1}^\perp Rv_j} = R\normalized{P_{t-1}^\perp v_j}$ and we conclude that the distribution of $\normalized{P_{t-1}^\perp v_j}$ conditioned on $G_{<t}, V_{<t}$ is spherically symmetric on $S_{t-1}^\perp$.
We can now lower bound $\mathbb{P}\left[ G_t\ \middle|\ G_{<t} \right] = \mathbb{E}_{V_{<t}}\left[\mathbb{P}\left[ G_t\ \middle|\ G_{<t}, V_{<t} \right]\right] \geq \inf_{V_{<t}} \mathbb{P}\left[ G_t\ \middle|\ G_{<t}, V_{<t} \right]$.
For any $V_{<t}$,
\begin{equation}
\begin{aligned}
\mathbb{P}\left[ G_t\ \middle|\ G_{<t}, V_{<t} \right]
&= \mathbb{P}\left[\forall j \geq t\ \abs{\inner{\normalized{P_{t-1}^{\perp}x^{(t)}}}{v_j}} < \frac{c}{2(\sqrt{2}+\sqrt{k-1})} \ \middle|\ G_{<t}, V_{<t} \right] \\
&\geq 1- \sum_{j=t}^k \mathbb{P}\left[\abs{\inner{\normalized{P_{t-1}^{\perp}x^{(t)}}}{v_j}} \geq \frac{c}{2(\sqrt{2}+\sqrt{k-1})} \ \middle|\ G_{<t}, V_{<t} \right] \\
&\geq 1- \sum_{j=t}^k \mathbb{P}\left[\abs{\inner{\normalized{P_{t-1}^{\perp}x^{(t)}}}{\normalized{P_{t-1}^\perp v_j}}} \geq \frac{c}{2(\sqrt{2}+\sqrt{k-1})} \ \middle|\ G_{<t}, V_{<t} \right]
\end{aligned}
\end{equation}
The first term in the inner product is fixed given $G_{<t}, V_{<t}$, and we showed above that the second term is a unit vector that is distributed spherically symmetrically on the unit sphere in $S_{t-1}^\perp$ given $G_{<t}, V_{<t}$. Therefore, each probability is equal to $\mathbb{P}\left(\inner{u}{e_1} \geq \frac{c}{2(\sqrt{2}+\sqrt{k-1})} \right)$ where $u$ is uniformly random on the unit sphere in $\mathbb{R}^{d'}$ where $d' = \dim(S_{t-1}^\perp) \geq d - 2t + 2$.
Imagining a unit sphere with ``up" and ``down" corresponding to $\pm e_1$, $\mathbb{P}\left(\inner{u}{e_1} \geq \frac{c}{2(\sqrt{2}+\sqrt{k-1})} \right)$ is the surface area of the ``end caps" of the sphere lying above and below circles of radius $R := \sqrt{1 - \left(\frac{c}{2(\sqrt{2}+\sqrt{k-1})}\right)^2}$, which is strictly smaller than the surface area of a full sphere of radius $R$. Therefore,
\begin{equation}
\begin{aligned}
&\mathbb{P}\left[\abs{\inner{\normalized{P_{t-1}^{\perp}x^{(t)}}}{\normalized{P_{t-1}^\perp v_j}}} \geq \frac{c}{2(\sqrt{2}+\sqrt{k-1})} \ \middle|\ G_{<t}, V_{<t} \right] \\
&< \frac{\textrm{SurfaceArea}_{d-2t+2}(R)}{\textrm{SurfaceArea}_{d-2t+2}(1)} \\
&= R^{d-2t + 1} \\
&= \left(1 - \left(\frac{c}{2(\sqrt{2}+\sqrt{k-1})}\right)^2 \right)^{\frac{d-2t+1}{2}} \\
&\leq \exp\left( -\left(\frac{c}{2(\sqrt{2}+\sqrt{k-1})}\right)^2\frac{d-2t+1}{2} \right)
\end{aligned}
\end{equation}
With the final inequality coming from the fact that $1-x \leq \exp(-x)$. This holds for each $j \geq t$, therefore,
\begin{equation}
\begin{aligned}
\mathbb{P}\left[ G_t\ \middle|\ G_{<t} \right]
&\geq \inf_{V_{<t}} \mathbb{P}\left[ G_t\ \middle|\ G_{<t}, V_{<t} \right] \\
&\geq 1 - (k-t+1) \exp\left( -\left(\frac{c}{2(\sqrt{2}+\sqrt{k-1})}\right)^2\frac{d-2t+1}{2} \right) \\
&= 1 - (k-t+1)\exp\left( \frac{-c^2(d-2t+1)}{8(\sqrt{2}+\sqrt{k-1})^2}\right)
\end{aligned}
\end{equation}
\end{proof}
Finally, bringing together Lemma \ref{lem:1term} and \eqref{eq:chainrule}
\begin{lemma} \label{lem:alltogether}
For any $\epsilon \in (0,\frac{1}{2})$, and dimension $d \geq \frac{2}{\epsilon^8}\log\frac{1}{\epsilon^4}$, $\mathbb{P}\left[E\right] > \frac{15}{16}$, where the probability is over the random choice of $\set{v_j}$.
\end{lemma}
\begin{proof}
By Lemma \ref{lem:1term}, for all $t$
\[ \mathbb{P}\left[ G_t\ \middle|\ G_{<t} \right] > 1 - (k-t+1)\exp\left( \frac{-c^2(d-2t+1)}{8(\sqrt{2}+\sqrt{k-1})^2}\right) \]
Combining this with Equation \eqref{eq:chainrule}:
\begin{equation}
\begin{aligned}
\mathbb{P}\left[ E \right] &\geq \prod_{t=1}^k \mathbb{P}\left[ G_t \ \middle|\ G_{<t} \right] \\
&> \prod_{t=1}^k \left( 1 - (k-t+1)\exp\left( \frac{-c^2(d-2t+1)}{8(\sqrt{2}+\sqrt{k-1})^2}\right) \right) \\
&> \left( 1 - k\exp\left( \frac{-c^2(d - 2k + 1)}{40k}\right) \right)^k \\
&\geq 1 - k^2\exp\left( \frac{-c^2(d - 2k + 1)}{40k}\right)
\end{aligned}
\end{equation}
Thus, when $\epsilon < \frac{1}{2}$ and
$d \geq \frac{2}{\epsilon^8}\log\frac{1}{\epsilon^4} \geq \frac{1}{\epsilon^8}\log\frac{1}{\epsilon^4} + 2k -1$
then
$\mathbb{P}\left[ E \right] > \frac{15}{16}$
\end{proof}
Thus $E$ occurs with constant probability when the dimension is sufficiently large, and when $E$ does occur, the algorithm must make at least $k$ queries to the subgradient oracle of $f$ in order to find an $\epsilon$-suboptimal solution. Thus the expected number of oracle queries for any deterministic algorithm on the specified distribution over $\mathcal{F}$ is at least $\frac{15k}{16} = \Omega(1/\epsilon^2)$; applying Yao's minimax principle completes the proof.
{\footnotesize
}
\end{document} |
\begin{document}
\allowdisplaybreaks
\title[Log-majorization and Lie-Trotter formula]{Log-majorization and
Lie-Trotter formula for the Cartan barycenter on probability measure spaces}
\author[Hiai and Lim]{Fumio Hiai and Yongdo Lim}
\address{Tohoku University (Emeritus), Hakusan 3-8-16-303, Abiko 270-1154, Japan}\email{[email protected]}
\address{Department of Mathematics, Sungkyunkwan University, Suwon 440-746, Korea} \email{[email protected]}
\date{\today}
\maketitle
\begin{abstract}
We extend Ando-Hiai's log-majorization for the weighted geometric
mean of positive definite matrices into that for the Cartan
barycenter in the general setting of probability measures on the
Riemannian manifold of positive definite matrices equipped with
trace metric. The main key is the settlement of the
monotonicity problem of the Cartan barycenteric map on the space of
probability measures with finite first moment for the stochastic
order induced by the cone. We also derive a version of Lie-Trotter
formula and related unitarily invariant norm inequalities for the
Cartan barycenter as the main application of log-majorization.
\end{abstract}
\noindent \textit{2010 Mathematics Subject Classification}. 15A42,
47A64, 47B65, 47L07
\noindent \textit{Key words and phrases.} Positive definite matrix,
Cartan barycenter, Wasserstein distance, log-majorization,
Lie-Trotter formula, unitarily invariant norm
\section{Introduction}
Let $A$ be an $m\times m$ positive definite matrix with eigenvalues
$\lambda_{j}(A)$, $1\le j\le m$, arranged in decreasing order, i.e.,
$\lambda_{1}(A)\geq \cdots\geq \lambda_{m}(A)$ with counting multiplicities.
The {\it log-majorization} $A\underset{\log}{\prec} B$ between positive definite
matrices $A$ and $B$ is defined if
$$\prod_{i=1}^{k}\lambda_{i}(A) \leq \prod_{i=1}^{k}\lambda_{i}(B) \quad\mbox{for }
1\le k\le m-1,\mbox{ and } \det A = \det B.
$$
The log-majorization gives rise to powerful devices in deriving
various norm inequalities and has many important applications in
operator means, operator monotone functions, statistical mechanics,
quantum information theory, eigenvalue analysis, etc., see, e.g.,
{\cite{BLP,BG,HiP1}}. For instance,
$A\underset{\log}{\prec} B$ implies $||| A|||\leq |||B|||$
for all unitarily invariant norms $|||\cdot |||$.
As a complementary counterpart of the Golden-Thompson trace inequality,
Ando and Hiai \cite{AH} established the log-majorization on the matrix
geometric mean of two positive definite matrices: for positive
definite matrices $A,B$ and $0\le\alpha\leq 1$,
\begin{eqnarray*}
A^{t}\#_{\alpha}B^{t}\underset{\log}{\prec} (A\#_{\alpha} B)^{t},
\qquad t\geq 1,
\end{eqnarray*}
where $A\#_{\alpha}B:=A^{1/2}(A^{-1/2}BA^{-1/2})^{\alpha}A^{1/2}$,
the {\it $\alpha$-weighted geometric mean} of $A$ and $B$.
This provides various norm inequalities for unitarily invariant norms via
the Lie-Trotter formula $
\lim_{t\to0}(A^{t}\#_{\alpha}B^{t})^{\frac{1}{t}}=e^{(1-\alpha)\log
A+\alpha \log B}$. For instance,
$|||(A^{t}\#_{\alpha}B^{t})^{\frac{1}{t}}|||$ increases to
$|||e^{(1-\alpha)\log A+\alpha \log B}|||$ as $t\searrow 0$ for any
unitarily invariant norm. Ando-Hiai's log-majorization has many
important applications in matrix analysis and inequalities, together
with Araki's log-majorization \cite{Ar} extending the Lieb-Thirring
and the Golden-Thompson trace inequalities.
The matrix geometric mean $A\#_{\alpha}B$, that plays the central
role in Ando-Hiai's log-majorization, appears as the unique (up to
parametrization) geodesic curve $\alpha\in[0,1]\mapsto
A\#_{\alpha}B$ between $A$ and $B$ on the Riemannian manifold
$\mathbb{P}_{m}$ of positive definite matrices of size $m$, an important
example of Cartan-Hadamard Riemannian manifolds. Alternatively, the
geometric mean $A\#_{\alpha}B$ is the Cartan barycenter of the
finitely supported measure $(1-\alpha)\delta_{A}+\alpha \delta_{B}$
on $\mathbb{P}_{m}$, which is defined as the unique minimizer of the
least squares problem with respect to the Riemannian distance $d$
(see Section 2 for definition). Indeed, for a general probability
measure $\mu$ on $\mathbb{P}_m$ with finite first moment, the Cartan
barycenter of $\mu$ is defined as the unique minimizer as follows:
\begin{displaymath}
G(\mu) := \underset{Z \in \mathbb{P}_m}{\argmin} \int_{\mathbb{P}_m}
\bigl[d^{2} (Z, X)-d^2(Y,X)\bigr]d\mu(X)
\end{displaymath}
(see Section 2 for more details).
In particular, when $\mu=\sum_{j=1}^nw_j\delta_{A_j}$ is a discrete probability measure
supported on a finite number of $A_1,\dots,A_n\in\mathbb{P}_m$, the Cartan
barycenter $G(\mu)$ is the {\it Karcher mean} of $A_1,\dots,A_n$, which has extensively
been discussed in these years by many authors as a multivariable extension of the
geometric mean (see \cite{BH,LL1,Ya1} and references therein).
The first aim of this paper is to establish the log-majorization
(Theorem \ref{T:MAIN}) for the Cartan barycenter in the general
setting of probability measures in the Wasserstein space ${\mathcal
P}^{1}(\mathbb{P}_{m})$, the probability measures on $\mathbb{P}_m$ with
finite first moment. In this way, we first establish the monotonicity
of the Cartan barycenteric map on ${\mathcal P}^{1}(\mathbb{P}_{m})$ for
the stochastic order induced by the cone of positive semidefinite matrices,
and then generalize the log-majorization in \cite{AH} (as mentioned above)
and in \cite{HiP} (for the Karcher mean of multivariables) to the setting of
probability measures. Our second aim is to derive the Lie-Trotter
formula (Theorem \ref{T8}) for the Cartan barycenter
\begin{equation*}
\lim_{t\to0}G(\mu^t)^{1\over t}=\exp\int_{\mathbb{P}_m}\log A\,d\mu(A)
\end{equation*}
under a certain integrability assumption on $\mu$, where $\mu^{t}$
is the $t$th power of the measure $\mu$ inherited from the matrix
powers on $\mathbb{P}_{m}$. Moreover, to demonstrate the usefulness of
our log-majorization, we obtain several unitarily invariant norm
inequalities (Corollary \ref{C9}) based on the above Lie-Trotter
formula.
The main tools of the paper involve the theory of nonpositively
curved metric spaces and techniques from probability measures on
metric spaces and the recent combination of the two (see
\cite{St03,AGS,Vi1}). Not only are these tools crucial for our
developments, but also, we believe, significantly enhance the
potential usefulness of the Cartan barycenter of probability
measures in matrix analysis and inequalities. They overcome the
limitation to the multivariable (finite number of matrices) setting,
and provide a new bridge between two different important fields of
studies of matrix analysis and probability measure theory on
nonpositively curved metric spaces.
\section{Cartan barycenters}
Let $\mathbb{H}_{m}$ be the Euclidean space of $m \times m$ Hermitian
matrices equipped with the inner product $\langle X,Y \rangle :=
{\mathrm{tr}}(XY)$. The {\it Frobenius norm} $\|\cdot\|_{2}$ defined
by $\|X\|_{2} = (\tr X^{2})^{1/2}$ for $X \in \mathbb{H}_m$ gives rise
to the Riemannian structure on the open convex cone $\mathbb{P}_{m}$
of $m\times m$ positive definite matrices with the metric
\begin{equation}\label{metric}
\langle X,Y\rangle_{A} := {\mathrm{tr}}(A^{-1} X A^{-1} Y), \qquad
A\in\mathbb{P}_m,\ X,Y\in\mathbb{H}_m,
\end{equation}
where the tangent space of $\mathbb{P}_m$ at any point $A\in\mathbb{P}_m$ is
identified with $\mathbb{H}_m$. The Riemannian exponential
at $A \in \mathbb{P}_{m}$ is given by
\begin{eqnarray*}\exp_{A}(X) =
A^{\frac{1}{2}}\exp(A^{-\frac{1}{2}}XA^{-\frac{1}{2}})A^{\frac{1}{2}}\end{eqnarray*}
and its inverse is
\begin{eqnarray*}\log_{A}(X) =
A^{\frac{1}{2}}\log(A^{-\frac{1}{2}}XA^{-\frac{1}{2}})A^{\frac{1}{2}}.\end{eqnarray*}
Then $\mathbb{P}_m$ is a {\it Cartan-Hadamard Riemannian manifold}, a
simply connected complete Riemannian manifold with nonpositive
sectional curvature (the canonical $2$-tensor is nonnegative). The
{\it Riemannian trace metric} (i.e., the geodesic distance with
respect to \eqref{metric}) on $\mathbb{P}_m$ is given by
$$
d(A,B) := \big\| \log A^{-\frac{1}{2}} B A^{-\frac{1}{2}} \big\|_{2},
$$
and the unique (up to parametrization) geodesic shortest curve
joining $A$ and $B$ is $t\in[0,1] \mapsto A \#_{t} B =
A^{\frac{1}{2}}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}})^{t}A^{\frac{1}{2}}$.
The nonpositively curved property is equivalently stated as
\begin{eqnarray}\label{NP}d^{2}(A\#_{t}B,C)&\leq&
(1-t)d^{2}(A,C)+td^{2}(B,C)-(1-t)td^{2}(A,B).
\end{eqnarray}
See \cite{LL01,Bh} for more about these Riemannian structures.
Let $\mathcal{B}={\mathcal B}(\mathbb{P}_m)$ be the algebra of Borel
sets, the smallest $\sigma$-algebra containing the open sets of $\mathbb{P}_m$. We note that
the Euclidean topology on $\mathbb{P}_m$ coincides with the metric topology of the trace metric
$d$. Let ${\mathcal P}={\mathcal P}(\mathbb{P}_m)$ be the set of all probability measures on
$(\mathbb{P}_m, {\mathcal B})$ and ${\mathcal P}_c={\mathcal P}_c(\mathbb{P}_m)$ the set of
all compactly supported $\mu\in{\mathcal P}$. Let ${\mathcal P}_{0}={\mathcal P}_{0}(\mathbb{P}_m)$
be the set of all $\mu \in {\mathcal P}$ of the form
$\mu = (1/n) \sum_{j=1}^{n} \delta_{A_{j}}$, where $\delta_A$ is the point measure of mass
$1$ at $A \in \mathbb{P}_m$. For $p\in[1,\infty)$ let ${\mathcal P}^{p}={\mathcal P}^{p}(\mathbb{P}_m)$
be the set of probability measures with \emph{finite $p$-moment}, i.e., for some (and hence
all) $Y\in \mathbb{P}_m$,
$$ \int_{\mathbb{P}_m} d^p(X,Y)\,d\mu(X) < \infty. $$
We say that $\omega \in \mathcal P(\mathbb{P}_m
\times \mathbb{P}_m)$ is a \emph{coupling} for $\mu,\nu \in \mathcal P$ if
$\mu,\nu$ are the marginals of $\omega$, i.e., if for all $B \in\mathcal{B}$,
$ \omega(B \times \mathbb{P}_m) = \mu(B)$ and $\omega(\mathbb{P}_m \times
B) = \nu(B)$. We note that one such coupling is the product measure
$\mu \times \nu$. We denote the set of all couplings for $\mu,\nu
\in \mathcal P(\mathbb{P}_m)$ by $\Pi(\mu,\nu)$.
The $p$-\emph{Wasserstein distance} $d_{p}^W$ on ${\mathcal P}^{p}$ is defined by
$$ d_{p}^{W}(\mu,\nu) := \left[ \inf_{\pi \in \Pi(\mu,\nu)}
\int_{\mathbb{P}_m \times \mathbb{P}_m} d^p(X,Y)\,d\pi(X,Y) \right]^{\frac{1}{p}}. $$
It is known that $d_{p}^W$ is a complete metric on ${\mathcal P}^{p}$ and
${\mathcal P}_{0}$ is dense in ${\mathcal P}^{p}$ \cite{St03}.
Note that $\mathcal{P}_{0} \subset \mathcal{P}_c \subset \mathcal{P}^{q} \subset
\mathcal{P}^{p} \subset \mathcal{P}^{1}$ and $d^{W}_{p} \leq d^{W}_{q}$ for
$1 \leq p \leq q < \infty$.
We note that these basic results on probability measure spaces hold
in the general setting of complete metric spaces, in which case a
separability assumption is necessary.
The following result on the Lipschitz property of push-forward maps between metric spaces
appears in \cite{LL5}, where $X,Y$ are metric spaces and the
distances $d_p^W$ on $\mathcal{P}^p(X),\mathcal{P}^p(Y)$ are defined as above.
\begin{lemma}\label{L:lip}
Let $f:X\to Y$ be a Lipschitz map with Lipschitz constant $C$. Then
the push-forward map $f_*:\mathcal{P}^p(X)\to \mathcal{P}^p(Y)$,
$f_{*}(\mu)=\mu\circ f^{-1}$, is Lipschitz with respect to $d_p^W$
with Lipschitz constant $C$ for $1\leq p<\infty$.
\end{lemma}
\begin{definition}
The {\it Cartan barycenter} map $G: {\mathcal P}^1(\mathbb{P}_m)\to \mathbb{P}_m$ is
defined by
\begin{displaymath}
G(\mu) := \underset{Z \in \mathbb{P}_m}{\argmin} \int_{\mathbb{P}_m}
\bigl[d^{2} (Z, X)-d^2(Y,X)\bigr]\,d\mu(X),\qquad\mu\in\mathcal{P}^1(\mathbb{P}_m)
\end{displaymath}
for a fixed $Y$. The uniqueness and existence of the minimizer is well-known and the
unique minimizer is independent of $Y$ (see \cite[Proposition 4.3]{St03}). On
${\mathcal P}^2(\mathbb{P}_m)$, the Cartan barycenter is determined by
\begin{displaymath}
G(\mu) = \underset{Z \in \mathbb{P}_m}{\argmin} \int_{\mathbb{P}_m}
d^{2} (Z, X)\,d\mu(X).
\end{displaymath}
For a discrete measure $\mu=\sum_{j=1}^{n}w_j\delta_{A_{j}}$, $G(\mu)$ is the
Karcher mean of $A_1,\dots,A_n$ with a weight $(w_1,\dots,w_n)$, see, e.g., \cite{LL1,Ya1}.
\end{definition}
The following contraction property appears in \cite{St03}.
\begin{theorem}[Fundamental Contraction Property] \label{T:ft}
For every $\mu,\nu \in {\mathcal P}^p(\mathbb{P}_m)$, $p\ge1$,
$$ d(G(\mu),G(\nu)) \leq d_{1}^{W}(\mu,\nu)\leq d_p^W(\mu,\nu). $$
\end{theorem}
\section{Karcher equations and monotonicity}
A map $g:\mathbb{P}_{m}\to \mathbb{R}$ is called \emph{uniformly convex}
if there is a strictly increasing function $\phi:[0,\infty)\to
[0,\infty)$ such that
$$g(A\#B)\leq \frac{1}{2}(g(A)+g(B))-\phi(\delta(A,B))$$ for all
$A,B\in \mathbb{P}_{m}$. A continuous uniformly convex function
$g$ has a unique minimizer (see \cite{St03}), which
coincides with the unique point at which the (either Riemannian
or Euclidean) gradient vanishes, whenever $g$ is differentiable, see
\cite{LL13}.
By (\ref{NP}), the map
$$Z\mapsto \int_{\mathbb{P}_m}\bigl[d^{2} (Z,
X)-d^2(Y,X)\bigr]\,d\mu(X),\qquad\mu\in\mathcal{P}^1(\mathbb{P}_m)$$ is uniformly
convex. The next theorem is a characterization of $G(\mu)$ in terms
of the unique solution to the Karcher equation.
\begin{theorem}\label{T:kare}
For every $\mu\in\mathcal{P}^1(\mathbb{P}_m)$, $G(\mu)$ is the unique solution
$Z\in\mathbb{P}_m$ to the Karcher equation
$$
\int_{\mathbb{P}_m}\log Z^{-1/2}XZ^{-1/2}\,d\mu(X)=0.
$$
\end{theorem}
\begin{proof} Let $\mu\in\mathcal{P}^1(\mathbb{P}_m)$.
We first show that the Euclidean gradient of the function
$$
Z\in\mathbb{P}_m\ \longmapsto\
\varphi(Z):=\int_{\mathbb{P}_m}\bigl[d^2(Z,X)-d^2(Y,X)\bigr]\,d\mu(X)
$$
is
$$
Z^{-1/2}\biggl(\int_{\mathbb{P}_m}\log Z^{1/2}X^{-1}Z^{1/2}\,d\mu(X)\biggr)Z^{-1/2}.
$$
More precisely, with
$$
F(Z,X):=2Z^{-1/2}\bigl(\log
Z^{1/2}X^{-1}Z^{1/2}\bigr)Z^{-1/2},\qquad Z,X\in\mathbb{P}_m,
$$
we shall prove that
\begin{equation}\label{F-3.1}
\varphi(Z+H)=\varphi(Z)+\int_{\mathbb{P}_m}\tr F(Z,X)H\,d\mu(X)+o(\|H\|_2)
\end{equation}
as $\|H\|_2\to0$ for $H\in\mathbb{H}_m$.
For each fixed $X\in\mathbb{P}_m$, let
$$
\psi(Z):=d^2(Z,X)=\tr\bigl(\log X^{-1/2}ZX^{-1/2}\bigr)^2,\qquad
Z\in\mathbb{P}_m.
$$
It is not difficult to show that the gradient of $\psi(Z)$ is
$F(Z,X)$, i.e.,
$$
\psi(Z+H)=\psi(Z)+\tr F(Z,X)H+o(\|H\|_2)
$$
as $\|H\|_2\to0$ for $H\in\mathbb{H}_m$. Then, for every $Z\in\mathbb{P}_m$ and
$H\in\mathbb{H}_m$, by the Lebesgue convergence theorem one can prove that
\begin{align*}
{d\over dt}\,\varphi(Z+tH)\Big|_{t=0}
&=\lim_{t\to0}{\varphi(Z+tH)-\varphi(Z)\over t} \\
&=\lim_{t\to0}\int_{\mathbb{P}_m}{\psi(Z+tH)-\psi(Z)\over t}\,d\mu(X) \\
&=\int_{\mathbb{P}_m}\tr F(Z,X)H\,d\mu(X).
\end{align*}
This formula for the directional derivative is enough to give
\eqref{F-3.1} (due to the finite dimensionality).
\end{proof}
For any ${\mathcal U}\subset \mathbb{P}_{m}$, we define ${\mathcal
U}^{\ua} = \{B\in \mathbb{P}_{m} : A \leq B \ {\mathrm{for\ some}}\ A
\in {\mathcal U}\}$. A set ${\mathcal U}$ is an \emph{upper set} if
${\mathcal U}^{\ua}={\mathcal U}$. For $\mu,\nu\in \mathcal{P}(\mathbb{P}_{m})$,
we define $\mu\leq \nu$ if $\mu({\mathcal U})\leq
\nu({\mathcal U})$ for all open upper sets $\mathcal U$. This
partial order on $\mathcal{P}(\mathbb{P}_{m})$ is a natural extension
of the usual one; $A_{j}\leq B_{\sigma(j)}$ for some permutation
$\sigma$ and $j=1,\dots, n$ if and only if
$(1/n)\sum_{j=1}^{n}\delta_{A_{j}}\leq
(1/n)\sum_{j=1}^{n}\delta_{B_{j}}$, as seen from the marriage
theorem.
We recall the well-known L\"owner-Heinz
inequality:
$$0<A\leq B \ \ \ {\mathrm{implies}} \ \ \ A^{t}\leq B^{t}, \ \ t\in [0,1].$$
The next theorem is the monotonicity property of the Cartan
barycenter $G$ on $\mathcal{P}^1(\mathbb{P}_m)$ and extends the recent works of
Lawson-Lim \cite{LL1} and Bhatia-Karandikar \cite{BK} on the space
of finitely (and uniformly) supported measures, which can be viewed
as a multivariate L\"owner-Heinz inequality.
\begin{theorem}\label{T:mono}
Let $\mu,\nu\in\mathcal{P}^1(\mathbb{P}_m)$. If $\mu\le\nu$, then $G(\mu)\le
G(\nu)$.
\end{theorem}
\begin{proof}
Assume that $\mu,\nu\in\mathcal{P}^1(\mathbb{P}_m)$ and $\mu\le\nu$. For each
$n\in\mathbb{N}$ let $\Sigma_n:=\{X\in\mathbb{P}_m:(1/n)I\le X\le nI\}$ and
$$
\mu_n:=\mu|_{\Sigma_n}+\mu(\mathbb{P}_m\setminus\Sigma_n)\delta_{(1/n)I},\qquad
\nu_n:=\nu|_{\Sigma_n}+\nu(\mathbb{P}_m\setminus\Sigma_n)\delta_{nI}.
$$
Then, as in the proof of \cite[Section 6]{KL}, we have
$\mu_n\le\nu_n$. Since $\mu_n,\nu_n\in\mathcal{P}_c(\mathbb{P}_m)$, we have
$G(\mu_n)\le G(\nu_n)$ by \cite[Theorem 5.5\,(6)]{KL}. We now prove that
$d_1^W(\mu_n,\mu)\to0$ as $n\to\infty$. From a basic fact on the
convergence in Wasserstein spaces (see \cite[Theorem 7.12]{Vi1}) we may prove
that $\mu_n\to\mu$ weakly and
\begin{equation*}
\lim_{n\to\infty}\int_{\mathbb{P}_m}\|\log
X\|_2\,d\mu_n(X)=\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu(X).
\end{equation*}
Since $\mu(\mathbb{P}_m\setminus\Sigma_n)\to0$, it is obvious that
$\mu_n\to\mu$ weakly. Note that
\begin{align*}
\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu_n(X)
&=\int_{\Sigma_n}\|\log X\|_2\,d\mu(X)+\|\log((1/n)I)\|_2\,\mu(\mathbb{P}_m\setminus\Sigma_n) \\
&=\int_{\Sigma_n}\|\log X\|_2\,d\mu(X)+\sqrt m\,(\log
n)\,\mu(\mathbb{P}_m\setminus\Sigma_n).
\end{align*}
Note also that if $X\in\mathbb{P}_m\setminus\Sigma_n$, then either the largest
eigenvalue of $X$ satisfies $\lambda_1(X)>n$ or the smallest one satisfies $\lambda_m(X)<1/n$,
so we have $\|\log X\|_2\ge\log n$. Therefore, since
$\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu(X)<\infty$, we have
$$
(\log
n)\,\mu(\mathbb{P}_m\setminus\Sigma_n)\le\int_{\mathbb{P}_m\setminus\Sigma_n}\|\log
X\|_2\,d\mu(X) \ \longrightarrow\ 0\quad\mbox{as $n\to\infty$},
$$
so that
$$
\lim_{n\to\infty}\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu(X)
=\lim_{n\to\infty}\int_{\Sigma_n}\|\log
X\|_2\,d\mu(X)=\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu(X).
$$
We thus have $d_1^W(\mu_n,\mu)\to0$, which implies
$d(G(\mu_n),G(\mu))\to0$ by the fundamental contraction
property, so $\|G(\mu_n)-G(\mu)\|_2\to0$. Since
$\|G(\nu_n)-G(\nu)\|_2\to0$ similarly, $G(\mu)\le G(\nu)$ follows by
taking the limit of $G(\mu_n)\le G(\nu_n)$.
\end{proof}
\section{Log-majorization}
For $1\leq k\leq m$ and $A\in \mathbb{P}_{m}$, let $\Lambda^{k}A$ be
the $k$th {\it antisymmetric tensor power} of $A$. See \cite{AH,BK,HiP1}
for basic properties of $\Lambda^{k}$; for instance,
\begin{eqnarray}
\Lambda^{k}(AB)&=& (\Lambda^{k}A)(\Lambda^{k}B), \nonumber\\
\label{step 3}\Lambda^{k}(A^{t})&=&(\Lambda^{k}A)^{t}, \ \ t>0,\\
\label{step2}\lambda_{1}(\Lambda^{k}A)&=&\prod_{j=1}^{k}\lambda_{j}(A).
\end{eqnarray}
The $k$th antisymmetric tensor power map $\Lambda^k$ maps $\mathbb{P}_{m}$
continuously into $\mathbb{P}_{\ell}$ where $\ell:=\binom{m}{k}$.
This induces the push-forward map
$$\Lambda^{k}_{*}: {\mathcal P}(\mathbb{P}_{m})\to {\mathcal P}(\mathbb{P}_{\ell}), \qquad
\Lambda^{k}_{*}(\mu):=\mu\circ (\Lambda^{k})^{-1},$$
that is, $\Lambda^{k}_{*}(\mu)({\mathcal
O})=\mu((\Lambda^{k})^{-1}({\mathcal O}))$ for all Borel sets
${\mathcal O}\subset\mathbb{P}_{\ell}$.
\begin{proposition}\label{P:Lip}
The map $\Lambda^{k}:\mathbb{P}_{m}\to \mathbb{P}_{\ell}$ is
Lipschitzian, that is,
$$d(\Lambda^{k}A,\Lambda^{k}B)\leq \alpha_{m,k}\ d(A,B),\qquad
A,B\in \mathbb{P}_{m},
$$
where
$\alpha_{m,k}:=\sqrt{k\binom{m-1}{k-1}}$.
Furthermore, $\Lambda^{k}_{*}: {\mathcal P}^p(\mathbb{P}_{m})\to
{\mathcal P}^p(\mathbb{P}_{\ell})$ is Lipschitzian for every $p\geq 1$, that is,
$$d_{p}^W(\Lambda^k_{*}(\mu),\Lambda^k_{*}(\nu))\leq
\alpha_{m,k} \ d_{p}^W(\mu,\nu), \ \ \ \ \mu,\nu\in {\mathcal
P}^{p}(\mathbb{P}_{m}).$$
\end{proposition}
\begin{proof}
The eigenvalue list of
$\Lambda^{k}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}})=
(\Lambda^{k}A)^{-\frac{1}{2}}(\Lambda^{k}B)(\Lambda^{k}A)^{-\frac{1}{2}}$
is
$$\prod_{j=1}^{k}\lambda_{i_{j}}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}}),\qquad
1\leq i_{1}<\cdots<i_{k}\leq m.$$ Hence
\begin{eqnarray*}
d^{2}(\Lambda^kA,\Lambda^kB)&=&\big\|\log
\Lambda^k(A^{-\frac{1}{2}}BA^{-\frac{1}{2}})\big\|_{2}^2\\&=&
\sum_{1\leq i_{1}<\cdots<i_{k}\leq m}\log^2
\left(\prod_{j=1}^{k}\lambda_{i_{j}}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}})\right)
\\&=&
\sum_{1\leq i_{1}<\cdots<i_{k}\leq m}\left[\sum_{j=1}^k\log
\lambda_{i_{j}}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}})\right]^2
\\&\le&
\sum_{1\leq i_{1}<\cdots<i_{k}\leq
m}k\sum_{j=1}^k\log^2
\lambda_{i_{j}}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}})\\
&=&k\binom{m-1}{k-1}\sum_{i=1}^m\log^2
\lambda_{i}(A^{-\frac{1}{2}}BA^{-\frac{1}{2}}) \\
&=&k\binom{m-1}{k-1}d^{2}(A,B).
\end{eqnarray*}
The Lipschitz continuity of $\Lambda^{k}_{*}$ follows by Lemma
\ref{L:lip}.
\end{proof}
The following is an extension of the result by Bhatia and Karandikar \cite[Theorem 4.4]{BK}
for finitely supported measures to general probability measures in $\mathcal{P}^1(\mathbb{P}_m)$.
\begin{theorem} For $p\geq 1$, the following diagram commutes:
\[ \begin{CD}
\mathbb{P}_{m} @>\Lambda^{k}>> \mathbb{P}_{\ell} \\
@AGAA @AAGA\\
{\mathcal P}^p(\mathbb{P}_{m}) @>\Lambda^{k}_{*}>> {\mathcal
P}^p(\mathbb{P}_{\ell})
\end{CD},
\]
that is,
\begin{eqnarray}\label{E:comu}G\circ \Lambda^{k}_{*}=\Lambda^{k}\circ G.
\end{eqnarray}
\end{theorem}
\begin{proof}
Let $\mu\in {\mathcal P}^{1}(\mathbb{P}_{m})$.
By Theorem \ref{T:kare}, letting $Z:=G(\mu)$, we may prove that
$$
\int_{\mathbb{P}_{\ell}}\log
\bigl(\Lambda^k(Z)\bigr)^{-1/2}X\bigl(\Lambda^k(Z)\bigr)^{-1/2}\,d(\Lambda^k_*\mu)(X)=0,
$$
i.e., $
\int_{\mathbb{P}_m}\log\bigl[\Lambda^k\bigl(Z^{-1/2}XZ^{-1/2}\bigr)\bigr]\,d\mu(X)=0$. Note that
\begin{eqnarray*}
\log\bigl[\Lambda^k\bigl(Z^{-1/2}XZ^{-1/2}\bigr)\bigr]
&=&\log\bigl(Z^{-1/2}XZ^{-1/2}\bigr)^{\otimes k}\Big|_{(\mathbb{C}^m)^{\Lambda k}} \\
&=&\Biggl(\sum_{j=1}^kI^{\otimes(j-1)}\otimes\bigl(\log
Z^{-1/2}XZ^{-1/2}\bigr) \otimes
I^{\otimes(k-j)}\Biggr)\Bigg|_{(\mathbb{C}^m)^{\Lambda k}},
\end{eqnarray*}
where $(\mathbb{C}^m)^{\Lambda k}$ is the $k$-fold antisymmetric tensor
space of $\mathbb{C}^m$.
Since $\int_{\mathbb{P}_m}\log Z^{-1/2}XZ^{-1/2}\,d\mu(X)=0$, we have
\begin{align*}
&\int_{\mathbb{P}_m}\log\bigl[\Lambda^k\bigl(Z^{-1/2}XZ^{-1/2}\bigr)\bigr]\,d\mu(X) \\
&\qquad=\Biggl(\sum_{j=1}^kI^{\otimes(j-1)}\otimes
\biggl(\int_{\mathbb{P}_m}\log Z^{-1/2}XZ^{-1/2}\,d\mu(X)\biggr) \otimes
I^{\otimes(k-j)}\Biggr)\Bigg|_{(\mathbb{C}^m)^{\Lambda k}}=0.
\end{align*}
\end{proof}
Next, we introduce powers of probability measures on $\mathbb{P}_m$.
\begin{definition}
For $t\in \mathbb{R}\setminus \{0\}$ and
$\mathcal{O}\in {\mathcal B}(\mathbb{P}_m)$, we let $ \mathcal{O}^{t}
:= \{ A^{t}: A \in \mathcal{O} \} $ and
\begin{eqnarray*}
\mu^{t} (\mathcal{O}) := \mu(\mathcal{O}^{\frac{1}{t}}).
\end{eqnarray*}
In terms of push-forward measures, $\mu^{t}=g_{*}\mu$, where $g(X):=X^{t}$. Note that
$\mu^{t}\in {\mathcal P}^p(\mathbb{P}_m)$ if $\mu\in {\mathcal P}^p(\mathbb{P}_m)$.
\end{definition}
By (\ref{step 3}) and the definition of push-forward map, we have
\begin{eqnarray}\label{E:Hiai}
\Lambda^{k}_{*}(\mu^{t})=\Lambda^{k}_{*}(\mu)^t, \qquad
\mu\in {\mathcal P}^p(\mathbb{P}_m), \ t\neq 0.
\end{eqnarray}
In \cite{KLL},
Kim-Lee-Lim established that $\|G(\mu^{t})\|\leq \|G(\mu)^{t}\| $
for $\mu\in {\mathcal P}^2(\mathbb{P}_m)$ and $t\geq 1$, where $\|\cdot\|$
denotes the operator norm. It follows from the monotonicity of the
Cartan barycenter and its characterization via the Karcher equation.
In the present situation, the same method based on Theorems \ref{T:kare}
and \ref{T:mono} proves that
\begin{eqnarray}\label{T:AHI}\|G(\mu^{t})\|\leq \|G(\mu)^{t}\|, \qquad
\mu\in {\mathcal P}^1(\mathbb{P}_m), \ \ t\geq 1.
\end{eqnarray}
The main result of this section is the following:
\begin{theorem}\label{T:MAIN}
For every $\mu\in {\mathcal P}^1(\mathbb{P}_m)$ and $t\geq 1$,
\begin{eqnarray*}
G(\mu^{t})\underset{\log}{\prec}G(\mu)^{t}.
\end{eqnarray*}
In particular, for any unitarily invariant norm $|||\cdot|||$,
$$|||G(\mu^{t})|||\leq |||G(\mu)^{t}|||,\qquad t\ge1.$$
\end{theorem}
\begin{proof}
For $1\le k\le m$ we have
\begin{eqnarray*}
\prod_{j=1}^{k}\lambda_{j}(G(\mu^{t}))&=&\lambda_{1}(\Lambda^{k}G(\mu^{t}))
=\|\Lambda^{k}G(\mu^{t})\|\\
&=&\big\|G\bigl(\Lambda^{k}_{*}(\mu^{t})\bigr)\big\|
=\big\|G\bigl((\Lambda^{k}_{*}(\mu))^{t}\bigr)\big\| \\
&\leq&\big\|G\bigl((\Lambda^{k}_{*}(\mu))\bigr)\big\|^t
=\prod_{j=1}^{k}\lambda_{j}(G(\mu)^{t}),
\end{eqnarray*}
where \eqref{step2}, \eqref{E:comu}, \eqref{E:Hiai}, and
\eqref{T:AHI} have been used. It remains to show that $\det G(\mu^t)=\det
G(\mu)^t$. When $k=m$, since $\Lambda^m=\det$ and
$G\bigl((\Lambda^m_*(\mu))^t\bigr)$ is a positive scalar, the equalities shown
above say that $\det G(\mu^t)=G\bigl((\Lambda^m_*(\mu))^t\bigr)$. In the
one-dimensional case on $\mathbb{P}_1=(0,\infty)$, we find by a direct
computation that
$$
G(\nu)=\exp\int_{(0,\infty)}\log x\,d\nu(x)
$$
for every $\nu\in\mathcal{P}^1((0,\infty))$. Therefore,
\begin{align*}
G\bigl((\Lambda^m_*(\mu))^t\bigr)&=\exp\int_{(0,\infty)}\log
x\,d(\Lambda^m_*(\mu)^t)(x)
=\exp\int_{\mathbb{P}_m}\log({\det}^tA)\,d\mu(A) \\
&=\exp\int_{\mathbb{P}_m}t\tr(\log A)\,d\mu(A)
={\det}^t\biggl(\exp\int_{\mathbb{P}_m}\log A\,d\mu(A)\biggr) \\
&=\det G(\mu)^t,
\end{align*}
implying that $\det G(\mu^t)=\det G(\mu)^t$.
\end{proof}
By a consequence of the preceding theorem, we have the following:
\begin{corollary} For every $\mu\in {\mathcal P}^1(\mathbb{P}_{m})$,
\begin{equation*}
G(\mu^{q})^{\frac{1}{q}}\underset{(\log)}{\prec}G(\mu^{p})^{\frac{1}{p}},
\qquad 0<p\leq q,
\end{equation*}
\begin{equation*}
G(\mu^{p})^{\frac{1}{p}}\underset{(\log)}{\prec}G(\mu)\underset{(\log)}{\prec}
G(\mu^{\frac{1}{p}})^{p},\qquad p\geq 1,
\end{equation*}
and therefore
\begin{equation}\label{E:LL}
|||G(\mu^{q})^{\frac{1}{q}}|||\leq
|||G(\mu^{p})^{\frac{1}{p}}|||,
\qquad 0<p\leq q,
\end{equation}
\begin{equation*}
|||G(\mu^{p})^{\frac{1}{p}}|||\leq |||G(\mu)|||\leq
|||G(\mu^{\frac{1}{p}})^{p}|||, \ \ \ \ p\geq 1
\end{equation*}
for all unitarily invariant norms $|||\cdot |||$.
\end{corollary}
\section{Lie-Trotter formula}
The Lie-Trotter formula for the Cartan (or Karcher) mean of multivariable
positive definite matrices is
\begin{equation*}
{\underset{t\to 0}{\lim}}\, G(
A_1^t,\dots,A_n^t)^{\frac{1}{t}}=\exp\left(\frac{1}{n}
\sum_{j=1}^{n}\log A_{j}\right),
\end{equation*}
see \cite{HiP,BJL,BG}.
In this section we establish the Lie-Trotter formula and associated norm
inequalities for probability measures in a certain sub-class of $\mathcal{P}^1(\mathbb{P}_m)$.
\begin{lemma}\label{L1}
For every $X\in\mathbb{P}_m$,
$$
\|\log X\|\le\log(\|X\|+\|X^{-1}\|).
$$
Moreover, for every $r>0$ there exists a constant $c_r>0$ such that
$$
\|\log X\|_2\le c_r(\|X\|+\|X^{-1}\|)^r,\qquad X\in\mathbb{P}_m.
$$
\end{lemma}
\begin{proof}
Since $\|X^{-1}\|^{-1}I\le X\le\|X\|I$, we have
$(-\log\|X^{-1}\|)I\le \log X\le(\log\|X\|)I$ so that
$$
\|\log X\|=\max\bigl\{\log\|X\|,\log\|X^{-1}\|\bigr\}\le\log(\|X\|+\|X^{-1}\|).
$$
Next, for any $r>0$, since $\lim_{x\to\infty}(\log x)/x^r=0$,
$b_r:=\sup_{x\ge1}(\log x)/x^r<\infty$. Noting that
$\|X\|+\|X^{-1}\|\ge2\sqrt{\|X\|\,\|X^{-1}\|}\ge2$, we have
\begin{align*}
\|\log X\|_2&\le\sqrt m\,\|\log X\|\le\sqrt m\,\log\bigl(\|X\|+\|X^{-1}\|\bigr) \\
&\le\sqrt m\,b_r\bigl(\|X\|+\|X^{-1}\|\bigr)^r,\qquad X\in\mathbb{P}_m.
\end{align*}
\end{proof}
Now, for $\mu\in\mathcal{P}(\mathbb{P}_m)$ we consider the condition
\begin{equation}\label{F-4.1}
\int_{\mathbb{P}_m}\bigl(\|X\|+\|X^{-1}\|\bigr)\,d\mu(X)<\infty.
\end{equation}
\begin{lemma}\label{L3}
If $\mu\in\mathcal{P}(\mathbb{P}_m)$ satisfies \eqref{F-4.1}, then
$\mu\in\mathcal{P}^p(\mathbb{P}_m)$ for every $p\in[1,\infty)$.
\end{lemma}
\begin{proof}
By Lemma \ref{L1} with $r=1/p$ we have
$$
\|\log X\|_2\le c_{1/p}\bigl(\|X\|+\|X^{-1}\|\bigr)^{1/p},\qquad
X\in\mathbb{P}_m.
$$
Therefore,
$$
\int_{\mathbb{P}_m}d^p(X,I)\,d\mu(X)=\int_{\mathbb{P}_m}\|\log X\|_2^p\,d\mu(X)
\le
c_{1/p}^p\int_{\mathbb{P}_m}\bigl(\|X\|+\|X^{-1}\|\bigr)\,d\mu(X)<\infty,
$$
implying $\mu\in\mathcal{P}^p(\mathbb{P}_m)$.
\end{proof}
When $\mu$ satisfies \eqref{F-4.1}, one can define the arithmetic and the
harmonic means of $\mu$ as
$$
\int_{\mathbb{P}_m}X\,d\mu(X),\qquad\biggl(\int_{\mathbb{P}_m}X^{-1}\,d\mu(X)\biggr)^{-1},
$$
respectively. By Lemma \ref{L3} one can also define the Cartan
barycenter $G(\mu)$.
The next lemma will be useful in the proof of our main result of this section.
\begin{lemma}\label{L4}
Assume that $\mu\in\mathcal{P}(\mathbb{P}_m)$ satisfies \eqref{F-4.1}. Then there
exist a sequence $\{\mu_n\}_{n=1}^{\infty}$ in $\mathcal{P}_c(\mathbb{P}_m)$ such that,
as $n\to\infty$,
$$
d_1^W(\mu_n,\mu)\longrightarrow0,
$$
and
$$
\int_{\mathbb{P}_m}X\,d\mu_n(X)\longrightarrow\int_{\mathbb{P}_m}X\,d\mu(X),\quad
\int_{\mathbb{P}_m}X^{-1}\,d\mu_n(X)\longrightarrow\int_{\mathbb{P}_m}X^{-1}\,d\mu(X).
$$
\end{lemma}
\begin{proof}
For each $n\in\mathbb{N}$ let $\Sigma_n$ be as in the proof of Theorem \ref{T:mono} and
define $\mu_n\in\mathcal{P}_c(\mathbb{P}_m)$ as
$$
\mu_n:=\mu|_{\Sigma_n}+\mu(\mathbb{P}_m\setminus\Sigma_n)\delta_{I}.
$$
We then have $d_1^W(\mu_n,\mu)\to0$ as in the proof of Theorem
\ref{T:mono}, since $\mu_n$ converges weakly to $\mu$ and
$$
\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu_n(X)
=\int_{\Sigma_n}\|\log X\|_2\,d\mu(X)\longrightarrow
\int_{\mathbb{P}_m}\|\log X\|_2\,d\mu(X)
$$
as $n\to\infty$. On the other hand, by assumption \eqref{F-4.1} we have
$$
\int_{\mathbb{P}_m}X\,d\mu_n(X)=\int_{\Sigma_n}X\,d\mu(X)+\mu(\mathbb{P}_m\setminus\Sigma_n)I
\longrightarrow\int_{\mathbb{P}_m}X\,d\mu(X),
$$
and similarly $\int_{\mathbb{P}_m}X^{-1}\,d\mu_n(X)\to\int_{\mathbb{P}_m}X^{-1}\,d\mu(X)$.
\end{proof}
The following {\it AGH $($arithmetic-geometric-harmonic$)$ mean
inequalities} were shown for $\mu\in\mathcal{P}_0(\mathbb{P}_m)$ in \cite[Theorem
2]{Ya2} and extended in \cite{KL} to the case of
$\mu\in\mathcal{P}_c(\mathbb{P}_m)$. We further extend it to the case of $\mu$
satisfying \eqref{F-4.1}.
\begin{proposition}[AGH inequalities]\label{P5}
If $\mu\in\mathcal{P}(\mathbb{P}_m)$ satisfies \eqref{F-4.1}, then
\begin{equation}\label{F-4.8}
\biggl(\int_{\mathbb{P}_m}X^{-1}\,d\mu(X)\biggr)^{-1}\le G(\mu)\le
\int_{\mathbb{P}_m}X\,d\mu(X).
\end{equation}
\end{proposition}
\begin{proof}
By Lemma \ref{L4} choose a sequence $\{\mu_n\}$ in $\mathcal{P}_c(\mathbb{P}_m)$
such that $d_1^W(\mu_n,\mu)\to0$ (hence $G(\mu_n)\to G(\mu)$ by
Theorem \ref{T:ft}) and
$$
\int_{\mathbb{P}_m}X\,d\mu_n(X)\longrightarrow\int_{\mathbb{P}_m}X\,d\mu(X),\quad
\int_{\mathbb{P}_m}X^{-1}\,d\mu_n(X)\longrightarrow\int_{\mathbb{P}_m}X^{-1}\,d\mu(X).
$$
Since inequalities \eqref{F-4.8} hold for $\mu_n$, the result follows
by taking the limit of \eqref{F-4.8} for $\mu_n$.
\end{proof}
\begin{remark}\rm
Let $X_{ij}$ and $(X^{-1})_{ij}$ denote the $(i,j)$-entries of
$X,X^{-1}$, respectively. Then it is clear that the functions
$X\in\mathbb{P}_m\mapsto X_{ij},(X^{-1})_{ij}$ are integrable with respect
to $\mu$ for all $i,j=1,\dots,m$ if and only if condition
\eqref{F-4.1} holds. Hence \eqref{F-4.1} is the best possible assumption
for the AGH mean inequalities in Proposition \ref{P5} to make sense.
\end{remark}
\begin{lemma}\label{L7}
For every $\mu\in\mathcal{P}(\mathbb{P}_m)$ with \eqref{F-4.1},
$$
{1\over t}\log\int_{\mathbb{P}_m}X^t\,d\mu(X) \longrightarrow\int_{\mathbb{P}_m}\log X\,d\mu(X),
$$
or equivalently,
$$
\left(\int_{\mathbb{P}_m}X^{t}\,d\mu(X)\right)^{1\over t}\longrightarrow\exp\int_{\mathbb{P}_m}\log X\,d\mu(X)
$$
as $t\to0$ with $|t|\le1$.
\end{lemma}
\begin{proof}
First, note that $\int_{\mathbb{P}_m}\log X\,d\mu(X)$ exists by Lemma \ref{L1}. For any $X\in\mathbb{P}_m$
we write
$$
X^t=e^{t\log X}=I+t\log X+R(t,X),
$$
where
$$
R(t,X):=\sum_{n=2}^\infty{t^n\over n!}(\log X)^n.
$$
Assuming $|t|\le1$ we have
$$
\|R(t,X)\|\le t^2\sum_{n=0}^\infty{1\over n!}\,\|\log X\|^n
=t^2e^{\|\log X\|} \le t^2\bigl(\|X\|+\|X^{-1}\|\bigr)
$$
by Lemma \ref{L1}. Therefore,
$$
\int_{\mathbb{P}_m}\|R(t,X)\|\,d\mu(X)\le
t^2\int_{\mathbb{P}_m}\bigl(\|X\|+\|X^{-1}\|\bigr)\,d\mu(X)
=O(t^2)\quad\mbox{as $t\to0$},
$$
so that we have
$$
\int_{\mathbb{P}_m}X^t\,d\mu(X)=I+t\int_{\mathbb{P}_m}\log X\,d\mu(X)+O(t^2).
$$
This implies that
$$
{1\over t}\log\int_{\mathbb{P}_m}X^t\,d\mu(X) =\int_{\mathbb{P}_m}\log
X\,d\mu(X)+O(t),
$$
and hence
$$
\lim_{t\to0}{1\over t}\log\int_{\mathbb{P}_m}X^t\,d\mu(X)=\int_{\mathbb{P}_m}\log
X\,d\mu(X).
$$
\end{proof}
Finally, for $\mu\in\mathcal{P}(\mathbb{P}_m)$ we consider the condition
\begin{equation}\label{F-4.9}
\int_{\mathbb{P}_m}\bigl(\|X\|+\|X^{-1}\|\bigr)^r\,d\mu(X)<\infty
\end{equation}
for some $r>0$. It is obvious that if \eqref{F-4.9} holds for $r>0$,
then it also holds for any $r'\in(0,r]$. Moreover, for any $r>0$, condition
\eqref{F-4.9} is equivalent to
$$
\int_{\mathbb{P}_m}\bigl(\|X^r\|+\|X^{-r}\|\bigr)\,d\mu(X)<\infty,
$$
so that both $\mu^r$ and $\mu^{-r}$ satisfy \eqref{F-4.1}.
Our main result of this section is the following:
\begin{theorem}[Lie-Trotter formula]\label{T8}
Let $\mu\in\mathcal{P}(\mathbb{P}_m)$ be a probability measure satisfying \eqref{F-4.9} for some $r>0$. Then
we have
\begin{equation}\label{F-4.10}
\lim_{t\to0}G(\mu^t)^{1\over t}=\exp\int_{\mathbb{P}_m}\log X\,d\mu(X).
\end{equation}
\end{theorem}
\begin{proof}
First, assume that $\mu$ satisfies \eqref{F-4.1}. For any
$t\in[-1,1]\setminus\{0\}$, by using Proposition \ref{P5} to $\mu^t$ we have
\begin{align*}
\biggl(\int_{\mathbb{P}_m}X^{-t}\,d\mu(X)\biggr)^{-1}&=
\biggl(\int_{\mathbb{P}_m}X^{-1}\,d\mu^t(X)\biggr)^{-1} \\
&\le G(\mu^t)\le\int_{\mathbb{P}_m}X\,d\mu^t(X)=\int_{\mathbb{P}_m}X^t\,d\mu(X).
\end{align*}
Since $\log x$ is operator monotone on $(0,\infty)$, the above
inequalities give
\begin{align*}
&-{1\over t}\log\int_{\mathbb{P}_m}X^{-t}\,d\mu(X)
\le\log G(\mu^t)^{1\over t}\le{1\over t}\log\int_{\mathbb{P}_m}X^t\,d\mu(X)
\quad\mbox{if $0<t\le1$}, \\
&-{1\over t}\log\int_{\mathbb{P}_m}X^{-t}\,d\mu(X) \ge\log
G(\mu^t)^{1\over t}\ge{1\over t}\log\int_{\mathbb{P}_m}X^t\,d\mu(X)
\quad\mbox{if $-1\le t<0$}.
\end{align*}
From Lemma \ref{L7} this implies that
\begin{equation}\label{F-4.11}
\lim_{t\to0}\log G(\mu^t)^{1\over t}=\int_{\mathbb{P}_m}\log X\,d\mu(X).
\end{equation}
Next, assume that $\mu$ satisfies \eqref{F-4.9} for some $r>0$, that
is, $\mu^r$ satisfies \eqref{F-4.1}. The above case yields
$$
\lim_{t\to0}\log G\bigl((\mu^r)^t\bigr)^{1\over t}=\int_{\mathbb{P}_m}\log X\,d\mu^r(X).
$$
Note that the left-hand side in the above is
$$
\lim_{t\to0}\log G(\mu^{rt})^{1\over t}=r\lim_{t\to0}\log G(\mu^t)^{1\over t},
$$
while the right-hand side is
$$
r\int_{\mathbb{P}_m}\log X\,d\mu(X).
$$
Hence we have \eqref{F-4.11} again, which implies \eqref{F-4.10}.
\end{proof}
The next corollary extends \cite[Corollary 2]{BG} to the case of probability measures
satisfying \eqref{F-4.9}.
\begin{corollary}\label{C9}
Assume that $\mu\in\mathcal{P}(\mathbb{P}_m)$ satisfies \eqref{F-4.9} for an $r>0$
and $|||\cdot|||$ is any unitarily invariant norm. Then
\begin{itemize}
\item[(a)] For every $t>0$,
\begin{equation}\label{F-4.12}
\big|\big|\big|G(\mu^{-t})^{-{1\over t}}\big|\big|\big|
=\big|\big|\big|G(\mu^t)^{1\over t}\big|\big|\big|
\le\bigg|\bigg|\bigg|\exp\int_{\mathbb{P}_m}\log X\,d\mu(X)\bigg|\bigg|\bigg|,
\end{equation}
and $\big|\big|\big|G(\mu^t)^{1\over t}\big|\big|\big|$
increases to $\big|\big|\big|\exp\int_{\mathbb{P}_m}\log X\,d\mu(X)\big|\big|\big|$
as $t\searrow0$.
\item[(b)] If $0<t\le r$, then
\begin{align}
&\bigg|\bigg|\bigg|\left(\int_{\mathbb{P}_m}X^{-t}
\,d\mu(X)\right)^{-{1\over t}}\bigg|\bigg|\bigg|
\le\big|\big|\big|G(\mu^t)^{1\over t}\big|\big|\big| \nonumber\\
&\quad\leq\bigg|\bigg|\bigg|\exp\int_{\mathbb{P}_m}\log
X\,d\mu(X)\bigg|\bigg|\bigg|\leq
\bigg|\bigg|\bigg|\left(\int_{\mathbb{P}_m}X^{t}
\,d\mu(X)\right)^{1\over t}\bigg|\bigg|\bigg|. \label{F-4.13}
\end{align}
Furthermore,
$\big|\big|\big|\bigl(\int_{\mathbb{P}_m}X^{t}\,d\mu(X)\bigr)^{1\over t}\big|\big|\big|$
decreases to $\big|\big|\big|\exp\int_{\mathbb{P}_m}\log X\,d\mu(X)\big|\big|\big|$ and
$\big|\big|\big|\bigl(\int_{\mathbb{P}_m}X^{-t}\,d\mu(X)\bigr)^{-{1\over t}}\big|\big|\big|$
increases to $\big|\big|\big|\exp\int_{\mathbb{P}_m}\log X\,d\mu(X)\big|\big|\big|$ as
$r\ge t\searrow0$.
\end{itemize}
\end{corollary}
\begin{proof}
When $\mu\in\mathcal{P}^1(\mathbb{P}_m)$ (without condition \eqref{F-4.9}), from the invariance
$G(\mu^{-1})=G(\mu)^{-1}$ as immediately seen from Theorem \ref{T:kare},
we find that $G(\mu^{-t})^{-{1\over t}}=G(\mu^t)^{1\over t}$,
implying the equality in \eqref{F-4.12}. It follows from \eqref{E:LL} that
$\big|\big|\big|G(\mu^t)^{1\over t}\big|\big|\big|$ is increasing as $t\searrow0$.
In the rest, assume \eqref{F-4.9} for an $r>0$.
(a)\enspace
The inequality in \eqref{F-4.12} is immediately seen from Theorem \ref{T8} together with
$\big|\big|\big|G(\mu^t)^{1\over t}\big|\big|\big|$ being increasing noted above.
(b)\enspace
Assume that $0<t'<t\le r$ and prove that
\begin{align}
\int_{\mathbb{P}_m}X^{t'}\,d\mu(X)
&\le\biggl(\int_{\mathbb{P}_m}X^t\,d\mu(X)\biggr)^{t'\over t}, \label{F-4.14}\\
\biggl(\int_{\mathbb{P}_m}X^{-t'}\,d\mu(X)\biggr)^{-1}
&\ge\biggl(\int_{\mathbb{P}_m}X^{-t}\,d\mu(X)\biggr)^{-{t'\over t}}. \label{F-4.15}
\end{align}
For each $n\in\mathbb{N}$ let $\Sigma_n$ be as in the proof of Lemma \ref{L4}.
Since $X^t$ and $X^{t'}$ are uniformly continuous on the compact set $\Sigma_n$, one can
choose a sequence of simple functions $\sum_{j=1}^{k_\ell}A_{\ell,j}1_{\mathcal{Q}_{\ell,j}}$,
$\ell\in\mathbb{N}$, with $A_{\ell,j}\in\Sigma_n$ and Borel partitions
$\{\mathcal{Q}_{\ell,j}\}_{j=1}^{k_\ell}$ of $\Sigma_n$ such that, as $\ell\to\infty$,
$$
\sum_{j=1}^{k_\ell}A_{\ell,j}^t\mu(\mathcal{Q}_{\ell,j})\longrightarrow
\int_{\Sigma_n}X^t\,d\mu(X),\qquad
\sum_{j=1}^{k_\ell}A_{\ell,j}^{t'}\mu(\mathcal{Q}_{\ell,j})\longrightarrow
\int_{\Sigma_n}X^{t'}\,d\mu(X).
$$
Due to the operator concavity of $x^{t'/t}$ on $(0,\infty)$, we have
$$
\sum_{j=1}^{k_\ell}\mu(\mathcal{Q}_{\ell,j})A_{\ell,j}^{t'}+\mu(\mathbb{P}_m\setminus\Sigma_n)I
\le\Biggl(\sum_{j=1}^{k_\ell}\mu(\mathcal{Q}_{\ell,j})A_{\ell,j}^t
+\mu(\mathbb{P}_m\setminus\Sigma_n)I\Biggr)^{t'\over t}.
$$
Letting $\ell\to\infty$ gives
$$
\int_{\Sigma_n}X^{t'}\,d\mu(X)+\mu(\mathbb{P}_m\setminus\Sigma_n)I \le
\biggl(\int_{\Sigma_n}X^t\,d\mu(X)+\mu(\mathbb{P}_m\setminus\Sigma_n)I\biggr)^{t'\over t}.
$$
Since $\|X^t\|$ and $\|X^{t'}\|$ are integrable with respect to $\mu$, \eqref{F-4.14}
follows by taking the limit of the above inequality as $n\to\infty$. Then, \eqref{F-4.15}
also follows by replacing $\mu$ with $\mu^{-1}$ in \eqref{F-4.14}. Now, similarly to
the proof of \cite[Theorem~1]{BG} we see that for $1\le j\le m$, as $r\ge t\searrow0$,
the $j$th eigenvalue of $\bigl(\int_{\mathbb{P}_m}X^t\,d\mu(X)\bigr)^{1\over t}$ is decreasing
and that of $\bigl(\int_{\mathbb{P}_m}X^{-t}\,d\mu(X)\bigr)^{-{1\over t}}$ is increasing.
Furthermore, by applying Lemma \ref{L7} to $\mu^r$ we have
$$
\biggl(\int_{\mathbb{P}_m}X^t\,d\mu^r(X)\biggr)^{1\over t}\ \longrightarrow
\ \exp\int_{\mathbb{P}_m}\log X\,d\mu^r(X)\quad\mbox{as $t\to0$ with $|t|\le1$},
$$
which is rephrased as
$$
\biggl(\int_{\mathbb{P}_m}X^t\,d\mu(X)\biggr)^{1\over t}\ \longrightarrow
\ \exp\int_{\mathbb{P}_m}\log X\,d\mu(X)\quad\mbox{as $t\to0$ with $|t|\le r$}.
$$
Hence, as $r\ge t\searrow0$,
$\big|\big|\big|\bigl(\int_{\mathbb{P}_m}X^{t}\,d\mu(X)\bigr)^{1\over t}\big|\big|\big|$
decreases to $\big|\big|\big|\exp\int_{\mathbb{P}_m}\log X\,d\mu(X)\big|\big|\big|$ while
$\big|\big|\big|\bigl(\int_{\mathbb{P}_m}X^{-t}\,d\mu(X)\bigr)^{-{1\over t}}\big|\big|\big|$
increases to the same. In view of (a) it remains to show the first inequality in
\eqref{F-4.13}. But this is immediately seen by applying \eqref{F-4.8} to $\mu^t$ for
$0<t\le r$.
\end{proof}
\begin{remark}\rm
The following example shows that condition \eqref{F-4.9} is not satisfied for any $r>0$
even if we have $\mu\in\mathcal{P}^p(\mathbb{P}_m)$ for all $p>0$. For instance, choose $X_n\in\mathbb{P}_m$
such that $X_n\ge I$ and $\|X_n\|=n^n$, and define
$$
\mu:=\sum_{n=1}^\infty{1\over2^n}\,\delta_{X_n}.
$$
Then, for any $r>0$,
$$
\int_{\mathbb{P}_m}\|X\|^r\,d\mu(X)=\sum_{n=1}^\infty{(n^r)^n\over2^n}=\infty,
$$
while
$$
\int_{\mathbb{P}_m}\|\log X\|_2^p\,d\mu(X)
\le\sum_{n=1}^\infty{\bigl(m\log^2\|X_n\|\bigr)^{p/2}\over2^n}
=m^{p/2}\sum_{n=1}^\infty{(n\log n)^p\over2^n}<\infty
$$
for all $p>0$.
\end{remark}
\begin{problem}\rm
Do Theorem \ref{T8} and part (a) of Corollary \ref{C9} hold for
general $\mu\in\mathcal{P}^1(\mathbb{P}_m)$ without assumption \eqref{F-4.9}? In
part (b) of Corollary \ref{C9}, we cannot define $\int_{\mathbb{P}_m}X^{\pm
t}\,d\mu(X)$ for general $\mu\in\mathcal{P}^1$, while part (a) makes sense
for general $\mu\in\mathcal{P}^1$.
\end{problem}
\subsection*{Acknowledgments}
The
authors thank Hiroyuki Osaka and Takeaki Yamazaki for inviting the
workshop on Quantum Information Theory and Related Topics 2016 in
Ritsumeikan University where this work was initiated. The work of
F.~Hiai was supported by Grant-in-Aid for Scientific Research
(C)26400103. The work of Y.~Lim was supported by the National
Research Foundation of Korea (NRF) grant funded by the Korea
government(MEST) No.2015R1A3A2031159 and 2016R1A5A1008055.
\end{document} |
\begin{document}
\maketitle
\begin{center}
Version of \today
\end{center}
\makeatletter
\renewcommand\tableofcontents{
\subsection*{\contentsname}
\@starttoc{toc}
}
\makeatother
\begin{small}
\setcounter{tocdepth}{3} \tableofcontents
\end{small}
\setcounter{section}{0}
\section*{\textsc{Introduction}}
\addcontentsline{toc}{section}{\textsc{Introduction}}
Let $(K,|.|)$ be a
field of characteristic $0$
which is complete with respect to an
ultrametric absolute value $|.|$, and whose
residual field $k$ has positive characteristic $p>0$.
Denote by $\mathscr{O}_K:=\{x\in K\;|\;|x|\leq 1\}$ its ring of
integers.
The Robba ring $\mathfrak{R}_K$ is the ring of power
series $f(T)=\sum_{i\in\mathbb{Z}}a_iT^i$, $a_i\in K$,
for which there exists an unspecified $\varepsilon<1$
(depending on $f$)
such that $f(T)$ converges on the annulus
$\{\varepsilon<|T|<1\}$.
In a previous work \cite{Rk1} (see also \cite{Rk1-NP})
we described the isomorphism classes of rank one
solvable differential equations over $\mathfrak{R}_K$.
In particular we have obtained a criterion permitting to
read in the coefficients of the differential equation the
solvability.
\if{
under the assumption that $K$ contains the $p^m$-roots
of unity $\boldsymbol{\mu}_{p^m}$, for all $m$ large enough.
Namely, let $\mathscr{C}W$ denote the additive group of Witt
co-vectors, and let $\overline{\mathrm{F}}$ be its Frobenius, then the
group $\mathrm{Pic}^{\mathrm{sol}}(
\mathfrak{R}_K)$, under tensor product, of the
isomorphism classes of rank one solvable differential
equations is given by
\begin{equation}\label{eq : Pic sol}
\mathrm{Pic}^{\mathrm{sol}}(
\mathfrak{R}_K)\;:=\;
\frac{\mathscr{C}W(T^{-1}k[T^{-1}])}{
(\overline{\mathrm{F}}-1)\mathscr{C}W(T^{-1}k[T^{-1}])}
\oplus\frac{\mathbb{Z}_p}{\mathbb{Z}}\;.
\end{equation}
}\fi
In another work \cite{Pu-q-Diff}
(see also \cite{Diff-Gamma}) we studied
the phenomenon of deformation of $q$-difference
equations and we have proved that, under the solvability
condition, the category of differential equations is
equivalent to that of $q$-difference equations (this
generalizes previous works of Yves André
and Lucia Di Vizio
\cite{An-DV}, \cite{DV-Dwork}).
In this paper we are interested in differential and
$q$-difference equations over the Amice ring
$\mathcal{E}_K$. This ring is
formed by formal power series
$f(T)=\sum_{i\in\mathbb{Z}}a_iT^i$, $a_i\in K$, that
are bounded (i.e. $\sup_i|a_i|<+\infty$), and such that
$\lim_{i\to-\infty}|a_i|=0$. It is the ring used by
J.~M.~Fontaine in the theory of $(\phi,\Gamma)$-modules
\cite{Fo}.
A classification of rank one
differential (or $q$-difference) equations over the ring
$\mathcal{E}_K$ is not known, and it seems reasonable
to think that such a classification
will be quite different in nature with respect to that
obtained in \cite{Rk1} for differential
equations over the Robba ring $\mathfrak{R}_K$.
This will not be the goal of
this paper. We here obtain a criterion of solvability
for differential and $q$-difference equations
similar to that in \cite{Rk1}.
We actually describe completely the precise nature of the
solutions of differential and difference equations as
exponentials of Artin-Hasse type.
As a corollary we obtain that every differential equation
over $\mathcal{E}_K$ has a basis in which the
associated operator has coefficients in
$\mathscr{O}_K[[T^{-1}]]$.
This constitutes an analogue of the Katz canonical
extension theorem \cite{Katz-Can} (see also \cite{Matsuda-Unipotent}).\\
The results of this paper have been obtained in 2005,
during our PhD at the university of Paris, under the
supervision of Gilles Christol. \\
\textbf{Acknowledgments}
Step 4 in the proof of Proposition
\ref{division of the problem over Amice} is due to
Gilles Christol;
we want here to express our gratitude to him for helpful
discussions.
\section*{\textsc{First part :
solvability of rank one differential equations over
$\mathcal{E}_K$}}
\addcontentsline{toc}{section}{\textsc{First part :
solvability of rank one differential equations over
$\mathcal{E}_K$}}
\section{Notations}
Let $\mathbb{R}_{\geq 0}$ be the interval of real
numbers that are greater than or equal to $0$.
Let $K$ be a complete valued field of characteristic $0$,
with ring of integers $\mathscr{O}_K:=\{x\in K,|x|\leq 1\}$, and
maximal ideal $\mathfrak{p}_K:=\{x\in K,|x|< 1\}$. We
assume that the residual field
$k:=\mathscr{O}_K/\mathfrak{p}_K$ has positive
characteristic $p>0$.
If $I\subseteq\mathbb{R}_{\geq 0}$ is any interval, we
denote by $\mathcal{A}_{K}(I)$ the ring of analytic functions on
the space $\{|T|\in I\}$. If $0\in I$ this is an open or
closed disk, in this case we have
\begin{equation}
\mathcal{A}_K(I)\;:=\;\{\sum_{i\geq 0}a_iT^i\;,\;a_i\in K,
\lim_{i\to\infty}|a_i|\rho^i=0,\textrm{ for all }
\rho\in I\}\;.
\end{equation}
If $0\notin I$ it is an open, closed, or semi-open
annulus and we have
\begin{equation}
\mathcal{A}_K(I)\;:=\;\{\sum_{i\in\mathbb{Z}}a_iT^i\;,\;
a_i\in K,
\lim_{i\to\pm\infty}|a_i|\rho^i=0,
\textrm{ for all }\rho\in I\}\;.
\end{equation}
For all $\rho\in I$ we have a norm on $\mathcal{A}_K(I)$ given by
$|\sum_{i\in\mathbb{Z}} a_i T^i|_\rho:=\sup_i|a_i|\rho^i$.
And $\mathcal{A}_K(I)$ is complete with respect to the
Fr\'echet
topology defined by the family of norms
$\{|.|_\rho\}_{\rho\in I}$. We define the
\emph{Robba ring} as
\begin{equation}
\mathfrak{R}_K\;:=\;
\cup_{\varepsilon>0}\mathcal{A}_K(]1-\varepsilon,1[)\;.
\end{equation}
The topology of the ring $\mathfrak{R}_K$ is the limit of
the topologies of $\mathcal{A}_K(]1-\varepsilon,1[)$ which are
Fr\'echet spaces. It is hence a $\mathcal{LF}$ topology.
The \emph{Amice ring} $\mathcal{E}_K$ is defined as
\begin{equation}
\mathcal{E}_K\;:=\;\{\sum_{i\in\mathbb{Z}}a_iT^i,\;
a_i\in K,\; \sup_i|a_i|<+\infty,\; \lim_{i\to-\infty}|a_i|
=0\}\;.
\end{equation}
It is a complete valued ring with respect to
the Gauss norm $|\sum a_iT^i|_1:=\sup|a_i|$.
Its ring of integers $\mathscr{O}_{\mathcal{E}_K}=
\{f\in\mathcal{E}_K\;|\;|f|_1\leq 1\}$ is a local
ring, with residual field $k((t))$
(i.e. a field of Laurent power series with coefficients in
$k$).
If $K$ is discretely valued, $\mathcal{E}_K$ is
moreover a field.
We define the \emph{bounded Robba ring} as
$\mathcal{E}^{\dag}_K:=\mathfrak{R}_K\cap\mathcal{E}_K$. If $K$ is
discretely valued, it is a field. $\mathfrak{R}_K$ and
$\mathcal{E}_K$ induce two distinct topologies on
$\mathcal{E}^{\dag}_K$, and this last is dense in $\mathfrak{R}_K$
and in $\mathcal{E}_K$ with respect to the
corresponding topologies.
\subsection{Differential modules and radius of
convergence}
Let $A$ be one of the rings $\mathcal{A}_K(I)$ or
$\mathcal{E}_K$. The $A$-module of continuous
differentials $\Omega^1_{A/K}$ is
free and one dimensional over $A$.
Let $d:A\to A$ be a non trivial derivation corresponding
to a generator of $\Omega^1_{A/K}$.
A differential module over $A$ is a finite free
$A$-module $M$, together with a linear map
$\nabla:M\to M$, called \emph{connection}, satisfying
the Leibniz rule $\nabla(fm)=d(f)m+f\nabla(m)$,
$f\in A$, $m\in M$.
In this paper we will always assume the rank of $M$ to
be $1$. We denote by $\partial_T:= T\frac{d}{dT}$.
If a basis of $M$ is given,
then $\nabla$ becomes an operator of the form
$f\mapsto \partial_T(f)-g\cdot f:A\to A$, where $g\in A$.
We say then that $M$ is defined by the operator $\partial_T-g$.
With respect to another basis $M$ will be represented by
another operator $\partial_T-g_2$, and $g_2$ is related to $g$
by the rule $g_2=g+\frac{\partial_T(h)}{h}$, where
$h\in A^{\times}$ is the
base change matrix.
We denote by $M_1\otimes M_2$ the tensor product of
two differential modules
$(M_1,\nabla_1)$ and $(M_2,\nabla_2)$. This is a
differential module whose underlying $A$-module is
$M_1\otimes_A M_2$, and whose connection
is $\nabla_1\otimes\mathrm{Id}+
\mathrm{Id}\otimes\nabla_2$.
If $\partial_T-g_1$ and $\partial_T-g_2$ are associated operators with
respect to some bases, then $\partial_T-(g_1+g_2)$ will be the
operator of $M_1\otimes M_2$ with respect to the tensor
product of the bases.
Let now $\partial_T-g(T)$ be a differential operator with
$g\in\mathcal{A}_K(I)$,
and let $\Omega/K$ be any complete valued
field extension of $K$. For all $x\in\Omega$, $|x|\in I$,
we look at $\Omega[[T-x]]$ as an $\mathcal{A}_K(I)$-differential
algebra by the Taylor map
\begin{equation}\label{eq : Taylor solution}
f(T)\mapsto\sum_{k\geq
0}(\frac{d}{dT})^k(f)(x)\frac{(T-x)^k}{k!}\;:\;
\mathcal{A}_K(I)\longrightarrow\Omega[[T-x]]\;.
\end{equation}
Define inductively $g_{[k]}(T)$ as $g_{[0]}:=1$,
$g_{[1]}:=g(T)/T$, and for all $k\geq 1$ we set
$g_{[k+1]}:=\frac{d}{dT}(g_{[k]})+g_{[k]}g_{[1]}$.
The Taylor solution of $\partial_T-g(T)$ at $x$ is then
\begin{equation}\label{s_x(T)}
s_x(T):=\sum_{k\geq 0} g_{[k]}(x)\frac{(T-x)^k}{k!}\;.
\end{equation}
\index{Taylor solution} Indeed $\partial_T (s_x(T))=g(T)s_x(T)$. The
radius of convergence of $s_x(T)$ at $x$ is, by the usual
definition,
\begin{equation}\label{eq : liminf}
\index{Ray(M,x)@$Ray(M,x)$}
\liminf_k(|g_{[k]}(x)|/|k!|)^{-\frac{1}{k}}\;.
\end{equation}
\begin{definition}
We set
\begin{equation}
\omega\;:=\;|p|^{\frac{1}{p-1}}\;<\;1\;.
\end{equation}
\end{definition}
\begin{definition}\label{eq:radius}
The radius of convergence of $M$ at $\rho\in I$ is
\begin{eqnarray}\label{eq : radius of conv}
\index{Ray(M,rho)@$Ray(M,\rho)$}
Ray(M,\rho)&:=&\min\Bigl(\rho\;,\;\liminf_k(|g_{[k]}|_\rho/|k!|)^{-1/k}\Bigr)\nonumber\\
&=&
\min\Bigl(\rho\;,\;\omega\Bigl[\limsup_k(|g_{[k]}|_\rho)^{1/k}\Bigr]^{-1}\Bigr)\;.
\end{eqnarray}
We say that $M$ is solvable at $\rho$ if
$Ray(M,\rho)=\rho$.
\end{definition}
This number represents the minimum radius of
convergence of a solution at an unspecified point
$x$ of norm $|x|=\rho$. More precisely there exists a
complete field extension $\Omega/K$
and a point $t_\rho\in\Omega$, with
$|t_\rho|=\rho$, such that for all $g\in\mathcal{A}_K(I)$ one has
$|g|_\rho=|g(t_\rho)|_\Omega$. Such a point is called a
$\rho$-\emph{generic point} (cf. \cite{Ch-Ro}). We deduce that
\begin{equation}
Ray(M,\rho)\;=\;
\min(\;\rho\;,\;\min_{|x|=\rho,\;x\in\Omega}\{\textrm{Radius of }s_x(T)\}\;)\;.
\end{equation}
Indeed this follows from \eqref{eq : liminf}
and from the fact that
$|g_{[k]}|_\rho=
\max_{|x|=\rho,\;x\in\Omega}|g_{[k]}(x)|_\Omega=
|g_{[k]}(t_\rho)|$.
\begin{remark}
The second equality of \eqref{eq : radius of conv}
follows from the fact that the
sequence $|k!|^{1/k}$ is convergent to $\omega$, and
$|g_{[k]}|_\rho^{1/k}$
is bounded by $\max(|g_{[1]}|_\rho,\rho^{-1})$. The presence of $\rho$
in the minimum makes this definition invariant under change of
basis in $M$.
\end{remark}
If now $\partial_T-g(T)$ is a differential operator with
$g(T)\in\mathcal{E}_K$, then \eqref{eq : radius of conv}
has a meaning for $\rho=1$ and it is an invariant by base
changes of $M$.
\begin{remark}\label{remark : properties}
We shall recall the following facts, that will be
systematically used in the sequel:
\begin{enumerate}
\item If $M$ is a differential module over
$\mathcal{E}_K$, then Definition \ref{eq:radius}
has a meaning for $\rho=1$;
\item If $M$ is a differential module over $\mathcal{A}_K(I)$, and
if $I$ is not reduced to a point, then the function
$\rho\mapsto Ray(M,\rho)$ has the following properties
\begin{enumerate}
\item It is continuous on $I$.
\item It is piecewise of the form $\alpha\rho^\beta>0$,
which is usually quoted as the \emph{$\log$-affinity
property} (this means that the function $r\mapsto
\log(Ray(M,\exp(r)))=\log(\alpha)+\beta r$ is affine).
\item The slopes $\beta$ are natural numbers.
\end{enumerate}
\item Recall that for all differential modules $M,N$ one has
\begin{equation}\label{eq : tensor product radius}
Ray(M\otimes N,\rho)\;\geq\;
\min(Ray(M,\rho),Ray(N,\rho))
\end{equation}
and equality holds if
$Ray(M,\rho)\neq Ray(N,\rho)$
(cf. \cite[Remark 1.2]{Rk1}). Notice that
if for a given $\rho$ we have
$Ray(M,\rho)= Ray(N,\rho)$, it
often happens that $Ray(M,\rho')\neq Ray(N,\rho')$
holds in a neighborhood of $\rho$
with the individual exception of $\rho$,
so \emph{by continuity} we deduce that \eqref{eq :
tensor product radius} is an equality also at
$\rho$.
\item The $p$-th ramification $f(T)\mapsto f(T^p)$ is a
$K$-linear ring endomorphism of $\mathcal{E}_K$ and
of $\mathcal{A}_K(I)$ which is called (somehow improperly)
\emph{Frobenius map}. We denote it by $\varphi$.
By extension of scalars one can define an exact
endo-functor which is called pull-back by Frobenius,
denoted by $\varphi^*$
(cf. \cite{Astx}, \cite[1.2.3, 1.2.4]{Rk1}).
The functor associates to a differential equation
$\partial_T-g(T)$ the differential equation
$\partial_T-p\cdot g(T^p)$. This is a technical tool
of the theory used mainly to ``\emph{move the radii}''
of convergence of a differential module. More precisely if
$M$ is a differential module over $\mathcal{A}_K(I^p)$,
then for all $\rho\in I$ one has
\begin{equation*}
Ray(\varphi^*(M),\rho)\geq
\rho\cdot \min\Bigl(\Bigl(\frac{Ray(M,\rho^p)}{\rho^p}\Bigr)^{1/p}\;,\;
|p|^{-1} \frac{Ray(M,\rho^p)}{\rho^p}\Bigr),
\end{equation*}
and equality holds if $Ray(M,\rho^p)\neq\omega^p\rho^p$
(cf. \cite[Thm.7.2]{Astx}, \cite[10.3.2]{Kedlaya-book}).
If $Ray(M,\rho^p)>\omega\rho^p$,
it is known that the functor can
be (improperly speaking) ``\emph{inverted}'', this
means that there exists a differential module $N$ such
that $\varphi^*(N)\cong M$, and that
such a module is unique (for a more precise statement
see \cite[Thm. 7.5]{Astx},
\cite[10.4.2]{Kedlaya-book}).
We say that $N$ is an
\emph{antecedent by Frobenius of $M$}.
\end{enumerate}
We refer to \cite{Rk1}, for the proof of these sentences
and for all further properties and definitions.
\end{remark}
\section{Criterion of solvability for differential equations over \protect{$\mathcal{E}_K$}}
\label{section crit of solv} In this section we obtain a criterion
of solvability for differential equations over $\mathcal{E}_K$.
After a technical part (cf. Proposition
\ref{division of the problem over Amice}),
the main result will be actually an immediate
consequence of the Lemma
\ref{criteria of solvability lemma2}.
\begin{lemma}[Small radius]\label{small radius2}
Let $\partial_T-g(T)$, $g(T)\in\mathcal{E}_K$. Then
$Ray(\partial_T-g(T),1)<\omega$ if and only if $|g(T)|_1>1$.
In this case we have
\begin{equation}
Ray(\partial_T-g(T),1)\;=\;
\omega\cdot|g(T)|_1^{-1}\;.
\end{equation}
\end{lemma}
\begin{proof}
See \cite[Lemma 1.1]{Rk1}.
\end{proof}
\subsection{Technical results}
There is no domain of the affine line where all the power
series in $\mathcal{E}_K$ converge.
If $\mathrm{M}$ is a differential module associated with
the operator $\partial_T-g$, with $g\in\mathcal{E}_K$,
it is useful to have a basis of $\mathrm{M}$
in which $g$ converges on some domain.
For this, for all functions
$g(T)=\sum_{i\in\mathbb{Z}}a_iT^i$
we set
$g^-(T):=\sum_{i\leq -1}a_iT^i$, and
$g^+(T):=\sum_{i\geq 1}a_iT^i$.
The following proposition expresses any solvable $\mathrm{M}$ as
tensor product of some
solvable differential modules defined over a disk
centered at $0$ and a disk centered at $\infty$.
The ``\emph{Step} $4$'' of the proof is due to
G.~Christol.
\begin{proposition}
\label{division of the problem over Amice}
Let $\partial_T-g(T)$, $g(T)\in\mathcal{E}_K$, be an
equation which is solvable at $\rho=1$.
Then $\partial_T-g^-(T)$, $\partial_T-a_0$, and $\partial_T-g^+(T)$ are all
solvable at $\rho=1$.
\end{proposition}
\begin{proof}\emph{---Step 1:}
By \eqref{s_x(T)}, the equation $\partial_T-g^{-}(T)$ (resp.
$\partial_T-g^{+}(T)$) has a convergent solution at $\infty$ (resp. at
$0$), hence $Ray(\partial_T-g^{-}(T),\rho)=\rho$, for large values of
$\rho$ (resp. $Ray(\partial_T-g^{+}(T),\rho)=\rho$, for $\rho$ close to
$0$). On the other hand, a direct computation proves that there is a $R^0>0$ such that
$Ray(\partial_T-a_0,\rho)=R^0\cdot\rho$, for all $\rho$. Let
\begin{eqnarray}
R^-&:=&Ray(\partial_T-g^{-}(T),1)\;,\\
R^+&:=&Ray(\partial_T-g^{+}(T),1)\;,\\
R^0&:=&Ray(\partial_T-a_0,1)\;.
\end{eqnarray}
We have to prove that $R^0=R^-=R^+=1$.\\
\emph{---Step 2:} We begin by proving that
$R^+=R^-$,
and that $R^0\geq R^-=R^+$. In the
following picture $R:=R^-=R^+$, and for all operators $L$,
we let $r:=\log(\rho)$ and
$R(L,r):=\log(Ray(L,\rho)/\rho)$.
\begin{center}
\begin{picture}(300,80)
\put(150,0){\vector(0,1){80}} \put(0,60){\vector(1,0){300}}
\put(260,65){$r=\log(\rho)$} \put(155,75){$R(r)$}
\put(0,62){\begin{tiny}$0\leftarrow\rho$\end{tiny}}
\put(50,60){\line(6,-1){60}}
\put(110,50){\line(2,-1){30}}
\put(140,35){\line(2,-5){10}}
\put(147.5,7.5){$\bullet$}\put(152,7.5){\tiny{$\log(R)$}}
\put(170,55){\line(6,1){30}}
\put(170,55){\line(-1,-1){15}}
\put(155,40){\line(-1,-6){5}}
\put(0,23){\line(1,0){300}}
\put(147.5,57.5){$\bullet$}
\put(83,75){\begin{tiny}$R(\partial_T+g(T),0)$\end{tiny}}
\put(135,75){\vector(1,-1){12}}
\put(80,55){\circle{10}}
\put(60,45){\line(2,1){15.5}}
\put(0,40){\begin{tiny}$R(\partial_T-g^+(T),r)$\end{tiny}}
\put(180,55){\circle{10}}
\put(200,45){\line(-2,1){15.5}}
\put(200,40){\begin{tiny}$R(\partial_T-g^-(T),r)$\end{tiny}}
\put(100,23){\circle{10}}
\put(80,13){\line(2,1){15.5}}
\put(0,10){\begin{tiny}$R(\partial_T-a_0,r)=\log(R^0)$\end{tiny}}
\put(147.5,0){$\bullet$}
\put(152.5,0){\begin{tiny}$\log(\omega)$\end{tiny}}
\qbezier[100](0,2.5)(150,2.5)(300,2.5)
\put(50,-2){\begin{tiny}$\downarrow$small
radius$\downarrow$\end{tiny}}
\end{picture}
\end{center}
Since $\partial_T-g$ is the tensor product of $\partial_T-g^-$,
$\partial_T-g^+$, and $\partial_T-a_0$, we deduce from point iii) of Remark \ref{remark : properties} that if two among
$R^-,R^+,R^0$ are $1$, then the third is also equal to $1$.
Assume now by contrapositive that at least two among
$R^-,R^+,R^0$ are strictly less than $1$. Then either
$R^-<1$ or $R^+<1$. We want to prove that
$R^+=R^-$, and that $R^0\geq R^-=R^+$.
We assume for instance that $R^-<1$, the case
where $R^+<1$ can be proved symmetrically.
The function $r\mapsto R(\partial_T-g^{-}(T),r)$ is concave,
and $Ray(\partial_T-g^-(T),1)=1$ if and only if the
slope of $r\mapsto R(\partial_T-g^{-}(T),r)$ is $0$ for
$r\to 0^+$.
The map $r\mapsto R(\partial_T-g^{-}(T),r)$ for $r\to 0^+$ is
strictly positive and the slope of $R(\partial_T-a_0,r)=\log(R^0)$
is $0$. We deduce from point iii) of Remark \ref{remark : properties} that $Ray(\partial_T-g^-(T),\rho)\neq Ray(\partial_T-a_0,\rho)$
with the possible exception of an isolated $\rho$.
Hence
$Ray(\partial_T-(a_0+g^-(T)),\rho)=
\min(Ray(\partial_T-g^-(T),\rho),\rho R^0)$, for all
$\rho>1$ close to $1$. By continuity, this equality holds
at $\rho=1$, that is
\begin{equation}
Ray(\partial_T-(a_0+g^-(T)),1)\;=\;\min(R^-,R^0)\;.
\end{equation}
Now since
$\partial_T-g(T)$ is the tensor product of $\partial_T-g^+(T)$ and
$\partial_T-(a_0+g^-(T))$, and since $Ray(\partial_T-g(T),1)=1$, we
have again by point iii) of Remark
\ref{remark : properties} that
\begin{equation}
R^+\;:=\;Ray(\partial_T-g^+(T),1) \;=\;
Ray(\partial_T-(a_0+g^-(T)),1)\;=\;\min(R^-,R^0)\;.
\end{equation}
We now claim that $R^0\geq R^-$, so the previous
equality implies $R^+=R^-$. Indeed if $R^->R^0$,
then $R^+=R^0$.
Hence, as above, by concavity we deduce that
for all $\rho<1$ one has $Ray(\partial_T-g^{+}(T),\rho)\neq
Ray(\partial_T-a_0,\rho)$, and that
$Ray(\partial_T-(a_0+g^{+}(T)),1)=R^0<R^-$. This implies
$Ray(\partial_T-g(T),1)=\min(R^0,R^-)=R^0<1$,
contradicting the solvability of $\partial_T-g$. Hence we must have $R^0\geq R^-=R^+$.\\
\emph{---Step 3:} If $R$ denotes the number
$R^-=R^+$, then we have $R\geq \omega$.
Indeed if $R^-<\omega$
or $R^+<\omega$, then, by Lemma \ref{small radius2}, $|g^-(T)|_1>1$ or
$|g^+(T)|_1>1$, hence $|g(T)|_1>1$ which is in contradiction with
the small radius Lemma \ref{small radius2},
since the equation $\partial_T-g(T)$ is solvable.\\
\emph{---Step 4:} We now prove that $R>\omega$. For
this we need two lemmas:
\begin{lemma}[\protect{\cite[4.8.5]{Ch}}]\label{Katz}
Let $\partial_T-g(T)$, $g(T)\in\mathcal{E}_K$, $|g(T)|_1\leq 1$ be some
equations. Then $Ray(\partial_T-g(T),1)>\omega$ if and only if
$|g_{[s]}(T)|_1<1$, for some $s\geq 1$.\footnote{See
Lemma \ref{q-Katz} for the $q$-analogue of this
lemma.}
$\Box$
\end{lemma}
\begin{lemma}\label{|a_i|<1}
If $Ray(\partial_T-g(T),1)>\omega$, where $g(T)=\sum a_iT^i$, then
$|a_i|<1$, for all $i\leq -1$.
\end{lemma}
\mathcal{B}egin{proof} The matrix of $d/dT$ is $g_{[1]}:=g(T)/T$. By definition one
has
\mathcal{B}egin{eqnarray*}
Ray(\partial_T-g(T),1)=Ray(d/dT-g_{[1]}(T),1)&=&
\min\mathcal{B}igl(1,\liminf_s(|g_{[s]}(T)|_1/|s!|)^{-1/s}\mathcal{B}igr)\\
&=&\min\mathcal{B}igl(1,\omega\cdot\liminf_s(|g_{[s]}(T)|_1)^{-1/s}\mathcal{B}igr)\;,
\mathrm{e}nd{eqnarray*}
where $g_{[s]}(T)$ is associated to the derivation
$(\frac{d}{dT})^s$. Since $Ray(\partial_T-g(T),1)>\omega$,
we deduce that $\lim_{s\to\infty}|g_{[s]}(T)|_1=0$. In particular
$|g_{[s]}(T)|_1<1$, for some $s\mathfrak{r}eq 1$. Moreover, by
the small radius lemma \mathfrak{r}ef{small radius2},
we have $|g(T)|_1\leq 1$.
We proceed by contrapositive: let $-d$ be the smallest index such that $|a_{-d}|=1$. The
reduction of $g_{[1]}(T)=g(T)/T$ in $k(\!(t)\!)$ is of the form
$\overline{g_{[1]}(T)}=\overline{a}_{-d}t^{-d-1}+\cdots$. If
$-d\leq -1$, then an induction on the equation
$g_{[s+1]}=\frac{d}{dT}(g_{[s]})+g_{[s]}g_{[1]}$ shows that
$\overline{g_{[s]}(T)}=\overline{a}_{-d}^st^{(-d-1)s}+\cdots\neq
0$. This is in contradiction with the fact that
$|g_{[s]}(T)|_1<1$,
for some $s\mathfrak{r}eq 1$.
\mathrm{e}nd{proof}
Let us show now that $R>\omega$. Since $R^+=R^-=R$, it is
sufficient to show that $R^->\omega$. By Lemma \mathfrak{r}ef{|a_i|<1}, we have
$|a_i|<1$, for all $i\leq -1$. Since $\lim_{i\to-\infty}|a_i|=0$,
it follows that $|g^-(T)|_1<1$. Then Lemma \mathfrak{r}ef{Katz} implies
$R^->\omega$.\\
\mathrm{e}mph{---Step 5:} Since $R>\omega$, then we can take the
antecedent by Frobenius of $\partial_T-g^-(T)$, $\partial_T-g^+(T)$, $\partial_T-a_0$.
More precisely, there exists $f^+(T)=\sum_{i\mathfrak{r}eq
0}b^+_iT^i\in\mathcal{A}([0,1[)^{\times}$, $f^-(T)=\sum_{i\leq
0}b^-_iT^i\in\mathcal{A}([1,\infty])^{\times}$, and there are functions
$g^{(1),-}(T)=\sum_{i\leq 0}a_i^{(1),-}T^i$,
$g^{(1),+}(T)=\sum_{i\mathfrak{r}eq 0}a_i^{(1),+}T^i$, $b_0\in K$ such that
\mathcal{B}egin{eqnarray*}
pb_0&=&a_0+n\;,\quad\textrm{ for some } n\in\mathbb{Z}\;,\\
pg^{(1),-}(T^p)^\sigma&=&g^-(T)+\frac{\partial_T(f^-(T))}{f^-(T)}\;,\\
pg^{(1),+}(T^p)^\sigma&=&g^+(T)+\frac{\partial_T(f^+(T))}{f^+(T)}\;,
\mathrm{e}nd{eqnarray*}
where $\sigma:K\to K$ is a field endomorphism lifting the $p$-th power map of $k$, and
$(\sum a_i T^i)^\sigma$ means $\sum \sigma(a_i) T^i$.
We see immediately that $b_0^+\neq 0$ and $b_0^-\neq 0$, and that
$v_T(\partial_T(f^+)/f^+)\mathfrak{r}eq 1$ and $v_{T^{-1}}(\partial_T(f^-)/f^-)\mathfrak{r}eq 1$,
where $v_T$ is the $T-$adic valuation, and $v_{T^{-1}}$ is the
$T^{-1}-$adic valuation. Since $g^-(T)$ and $g^+(T)$ have no
constant term, we deduce that $a_0^{(1),+}=0$ and
$a_0^{(1),-}=0$. Observe now that both $f^-$ and
$f^+$ belong to $\mathcal{E}_K^{\times}$, hence
$\partial_T-\mathcal{B}igl(g^{(1),-}(T)+b_0+g^{(1),+}(T)\mathcal{B}igr)$ is an
antecedent of Frobenius of $\partial_T-g(T)$, and it is then
solvable.\\
\mathrm{e}mph{--- Step 6:} Steps $1$, $2$, $3$, $4$ are still true for the
antecedent. In particular, if we set
\mathcal{B}egin{eqnarray}
R^-(1)&:=&Ray(\partial_T-g^{(1),-}(T),1)\;,\\
R^+(1)&:=&Ray(\partial_T-g^{(1),+}(T),1)\;,\\
R^0(1)&:=&Ray(\partial_T-b_0,1)\;,
\mathrm{e}nd{eqnarray}
then we must have $R^-(1)=R^{+}(1)>\omega$. Let
$R(1):=R^{-}(1)=R^{+}(1)$; then $R(1)=R^{p}$ by the property of
the antecedent. This implies $R>\omega^{1/p}$.
Now the condition $R(1)>\omega$ guarantees the
existence of the antecedent of the antecedent, and the
process can be iterated indefinitely. This shows that
$R>\omega^{1/p^h}$ for all $h\mathfrak{r}eq 0$, that is $R=1$.
\mathrm{e}nd{proof}
\mathcal{B}egin{corollary}\label{g^+ is trivial}
We have $a_0 \in \mathbb{Z}_p$ and
$\partial_T-g^+(T)$ is trivial.
\mathrm{e}nd{corollary}
\mathcal{B}egin{proof}
By the transfer theorem, the Taylor solution at $0$ of
$\partial_T-g^+(T)$ is convergent in the open unit disk. This
solution is invertible with inverse the solution of the dual
differential module, hence it is bounded and belongs to
$\mathcal{E}_K$.
\mathrm{e}nd{proof}
The following corollary, together with Corollary
\mathfrak{r}ef{canonical ext over amice}, constitute
the analogue of the Katz's
canonical extension functor \cite{Katz-Can}:
\mathcal{B}egin{corollary}[Katz's canonical extension]
Let $M$ be a solvable rank one differential module over
$\mathcal{E}_K$
represented in a basis by the operator $\partial_T-g(T)$, with
$g(T)=\sum_{i\in\mathbb{Z}}a_iT^i\in\mathcal{E}_K$.
Then there exists a basis of $M$ in which the associated
operator is
\mathcal{B}egin{equation}
\partial_T-(a_0+g^-(T))\;.
\mathrm{e}nd{equation}
In particular $M$ comes by scalar extension from a
differential module over the closed unit disk
$D:=\{|T|\mathfrak{r}eq 1\}\cup\{\infty\}$ centered at $\infty$.
It has a regular singularity at $\infty$ if and only if
$a_0\in\mathbb{Z}$, and it has no singularities on $D$
otherwise.
\mathrm{e}nd{corollary}
\subsection{Criterion of solvability}
Following \cite{Rk1} we now introduce an exponential
series which is the solution of our differential equations.
We refer to \cite{Rk1} for all notations and properties.
We set $\mathbb{J}:=\{n\in\mathbb{Z}\;|\; (n,p)=1, n\mathfrak{r}eq 1 \}$.
For every ring $A$ (not necessarily with a unit element) we
denote by $\mathcal{B}s{\mathrm{W}}(A)$ the ring of $p$-typical Witt vectors
of infinite length with coefficients in $A$.
Its elements are sequences $\mathcal{B}s{a}=(a_0,a_1,\ldots)$
of elements of $A$. For all $m\mathfrak{r}eq 0$ we call the $m$-th
\mathrm{e}mph{phantom component} of $\mathcal{B}s{a}$ the element
$\phi_m(\mathcal{B}s{a}):=a_0^{p^m}+pa_1^{p^{m-1}}+\cdots+p^ma_m$.
The map $\mathcal{B}s{\mathrm{W}}(A)\to A^{\mathbb{N}}$ associating to
$\mathcal{B}s{a}$ the tuple $(\phi_0(\mathcal{B}s{a}),\phi_1(\mathcal{B}s{a}),
\ldots)$ is a morphism of functors in rings.
In order to make a more evident distinction between
Witt vectors and phantom components, we denote Witt
vector by the letter $\lambda$ and phantom components
by the letter $\phi$, moreover we also use a bracket
$\ph{\phi_0,\phi_1,\ldots}$ to indicate an element of the
ring $A^{\mathbb{N}}$.
Let now $A=T\mathscr{O}_K[[T]]$. We now recall some notions
from \cite[Section 4.3]{Rk1}.
For all $\mathcal{B}s{\lambda}=(\lambda_0,\lambda_1,
\ldots)\in\mathcal{B}s{\mathrm{W}}(K)$ and all integer $d>0$ we set
\mathcal{B}egin{equation}
\mathcal{B}s{\lambda}T^d\;:=\;
(\lambda_0T^d,\lambda_1T^{pd},\lambda_2T^{p^2d},
\ldots)\;\in\;\mathcal{B}s{\mathrm{W}}(TK[[T]])\;.
\mathrm{e}nd{equation}
To a sum
$\sum_{d>0}\mathcal{B}s{\lambda}_dT^d\in\mathcal{B}s{\mathrm{W}}(TK[[T]])$ we
associate the following exponential of Artin-Hasse type
\mathcal{B}egin{equation}
E(\sum_{d>0}\mathcal{B}s{\lambda}_dT^d,1)\;=\;
\prod_{d>0}\mathrm{e}xp(\sum_{m\mathfrak{r}eq
0}\phi_{d,m}\frac{T^{dp^m}}{p^m})
\mathrm{e}nd{equation}
where for all $d>0$ the tuple $(\phi_{d,0},\phi_{d,1},
\ldots)$ is the phantom vector of $\mathcal{B}s{\mathcal{B}s{\lambda}}_d$.
The map $\mathcal{B}s{\mathrm{W}}(K)\to K^{\mathbb{N}}$ being an
isomorphism, it is easy to prove that any exponential of the
form $\mathrm{e}xp(\sum_{d>0}b_d\frac{T^d}{d})
\in 1+TK[[T]]$
can be uniquely decomposed as
\mathcal{B}egin{equation}
\mathrm{e}xp(\sum_{d>0}b_d\frac{T^d}{d})\;=\;
\mathrm{e}xp(\sum_{n\in\mathbb{J}}
\sum_{m\mathfrak{r}eq 0}b_{np^m}
\frac{T^{np^m}}{np^m})\;=\;
\mathrm{e}xp(\sum_{n\in\mathbb{J}}
\sum_{m\mathfrak{r}eq 0}\phi_{n,m}
\frac{T^{np^m}}{p^m})\;=\;
E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^n,1)\;,
\mathrm{e}nd{equation}
where $\phi_{n,m}=b_{np^m}/n$,
and $\mathcal{B}s{\mathcal{B}s{\lambda}}_n\in\mathcal{B}s{\mathrm{W}}(K)$ is defined as the unique Witt
vector with phantom vector $(\phi_{n,0},\phi_{n,1},
\ldots)$. We refer to \cite[Section 4.3]{Rk1} for further
properties.
The following Lemma asserts that
solutions of rank one solvable differential equations over
the open unit disk are those exponentials as above whose
Witt vectors have coefficients in $\mathscr{O}_K$.
\mathcal{B}egin{lemma}\label{criteria of solvability lemma2}
The differential equation $\partial_T-g^+(T)$, $g^+(T)=\sum_{i\mathfrak{r}eq
1}a_iT^i\in\mathcal{A}([0,1[)$ is solvable if and only if there exists a
family $\{\mathcal{B}s{\lambda}_{n}\}_{n\in\mathbb{J}}$, $\mathcal{B}s{\lambda}_n\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, with phantom
components $\phi_{n}=(\phi_{n,0},\phi_{n,1},\ldots)$ satisfying
\mathcal{B}egin{equation}\label{a_np^m=n phi_n,m}
a_{np^m}=n\phi_{n,m}\;,\qquad\textrm{ for all }n\in\mathbb{J},\; m\mathfrak{r}eq
0\;.
\mathrm{e}nd{equation}
In other words, we have $\mathrm{e}xp(\sum_{i\mathfrak{r}eq 1}a_i
\frac{T^i}{i})=E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^n,1)$, where
\mathcal{B}egin{equation}\label{E(sum_J lb_n T^n,1)}
E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^n,1):=\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{n,m}T^{np^m}/p^m)\;.
\mathrm{e}nd{equation}
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof}
The formal series $E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^{n},1)\in
1+T\mathscr{O}_K[[T]]$ is solution of the equation
$L:=\partial_T-\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq 0}n\phi_{n,m}T^{np^m}$. Since this
exponential converges in the unit disk, then $Ray(L,\mathfrak{r}ho)=\mathfrak{r}ho$,
for all $\mathfrak{r}ho<1$, and $L$ is solvable.
Conversely, assume that $\partial_T-g^+(T)$
is solvable. Then the Witt vectors
$\mathcal{B}s{\lambda}_n=(\lambda_{n,0},\lambda_{n,1},\ldots)$ are defined by the
relation \mathrm{e}qref{a_np^m=n phi_n,m}. For example, for all $n\in\mathbb{J}$ we
have
\mathcal{B}egin{equation}
\label{expliciting lambda_i in function of a_i}
\lambda_{n,0} = \frac{a_{n}}{n} \;\;,\qquad \lambda_{n,1} =
\frac{1}{p}\left(\frac{a_{np}}{n} - \mathcal{B}igl(\frac{a_n}{n}\mathcal{B}igr)^p
\mathfrak{r}ight)\;.
\mathrm{e}nd{equation}
We must show that $|\lambda_{n,m}|\leq 1$, for all $n\in\mathbb{J}$,
$m\mathfrak{r}eq 0$.
\mathrm{e}mph{---Step 1:} By the small radius Lemma \mathfrak{r}ef{small radius2},
we have $|a_i|\leq 1$, for all $i\mathfrak{r}eq 1$. Hence, by
\mathrm{e}qref{expliciting lambda_i in function of a_i}, for all $n\in\mathbb{J}$,
we have $|\lambda_{n,0}|\leq 1$. Then the exponential
$$E(\sum_{n\in\mathbb{J}}(\lambda_{n,0},0,0,\ldots)T^{n},1)=
\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\lambda_{n,0}^{p^m}\frac{T^{np^m}}{p^m})$$ converges in the unit
disk and is solution of the operator $Q^{(0)}:=\partial_T - h^{(0)}(T)$,
where $h^{(0)}(T)=\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}n\lambda_{n,0}^{p^m}T^{np^m}$. So $Q^{(0)}$ is
solvable.
\mathrm{e}mph{---Step 2:} The tensor product operator $\partial_T - (g^+(T) -
h^{(0)}(T))$ is again solvable and satisfies
$g^+(T)-h^{(0)}(T)=p\cdot g^{(1)}(T^p)$, for some $g^{(1)}(T)\in T
K[[T]]$. In other words, the ``antecedent by ramification''
$\varphi_p^*$ of the equation $\partial_T - (g^+(T) - h^{(0)}(T))$ is
given by $\partial_T-g^{(1)}(T)$, which is then solvable.
\mathrm{e}mph{---Step 3: } We observe that $g^{(1)}(T) \!=\!\frac{1}{p}
\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}(a_{np^{m+1}}-n(\frac{a_n}{n})^{p^{m+1}}) T^{np^{m}}$, and again
by the small radius lemma, we have
$|a_{np}\!-n(\frac{a_n}{n})^{p}|\leq |p|$,
which implies $|\lambda_{n,1}|\leq 1$.
The process can be iterated indefinitely. This proves that
$|\lambda_{n,m}|\leq 1$ for all $n,m$.
\mathrm{e}nd{proof}
\mathcal{B}egin{remark}\label{discussion}We shall now consider the general
case of an equation $\partial_T-g(T)$, with
$g(T)=\sum_{i\in\mathbb{Z}}a_iT^i\in\mathcal{E}_K$, and get a
criterion of solvability. Suppose that $\partial_T-g(T)$ is solvable. We
know that $\partial_T-g^-(T)$, $\partial_T-a_0$ and $\partial_T-g^+(T)$ are all solvable
(cf. \mathfrak{r}ef{division of the problem over Amice}). We can then
consider $\partial_T-g^-(T)$ as an operator on $]1,\infty]$ (instead of
$[1,\infty]$), and the preceding Lemma \mathfrak{r}ef{criteria of
solvability lemma2} gives us the existence of a family of Witt
vectors $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}\subset\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, satisfying
$a_{-np^m} = -n\phi_{-n,m}$, for all $n\in\mathbb{J}$, and all $m\mathfrak{r}eq 0$.
Conversely, suppose given two families $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ and
$\{\mathcal{B}s{\lambda}_{n}\}_{n\in\mathbb{J}}$, with $\mathcal{B}s{\lambda}_n\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$. Since the phantom
components of $\mathcal{B}s{\lambda}_n$ are bounded by $1$, then $|a_i|$ is bounded
by $1$, and then $g^+(T)$ belongs to $\mathcal{E}_K$.
What we need now is a condition on the family
$\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$
in order that the series
\mathcal{B}egin{equation}
g^-(T):=\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq 0}-n
\phi_{-n,m}T^{-np^m}
\mathrm{e}nd{equation}
belongs to $\mathcal{E}_K$.
\mathrm{e}nd{remark}
\mathcal{B}egin{proposition}\label{criteria for belong to Amice}
Let $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$, $\mathcal{B}s{\lambda}_{-n}\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, be a family of
Witt vectors. Let $\ph{\phi_{-n,0},\phi_{-n,1},\ldots}$ be the
phantom vector of
$\mathcal{B}s{\lambda}_{-n}:=(\lambda_{-n,0},\lambda_{-n,1},\ldots)$. The series
$$g^{-}(T):=\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq 0}-n\phi_{-n,m}T^{-np^m}\;,$$
belongs to $\mathcal{E}_K$ if and only if
\mathcal{B}egin{equation}\label{lb<1 and lim_n lb_n,m =0}
\left\{\mathcal{B}egin{array}{rcl}
|\lambda_{-n,m}|<1&,& \textrm{ for all }n\in\mathbb{J}\;,\;\textrm{ for all }m\mathfrak{r}eq 0\;;\\
&&\\
\lim_{n\in\mathbb{J},n\to\infty}\lambda_{-n,m}=0&,&\textrm{ for all }
m\mathfrak{r}eq 0\;,
\mathrm{e}nd{array}\mathfrak{r}ight.
\mathrm{e}nd{equation}
as in the picture
\mathcal{B}egin{center}
\mathcal{B}egin{scriptsize}
\mathcal{B}egin{picture}(120,60)
\put(0,10){\vector(0,1){40}} \put(0,10){\vector(1,0){120}}
\put(-10,40){$m$}\put(120,0){$n$}\put(130,0){.}
\put(17.5,8.25){$\mathcal{B}ullet$} \put(37.5,8.25){$\mathcal{B}ullet$}
\put(57.5,8.25){$\mathcal{B}ullet$} \put(77.5,8.25){$\mathcal{B}ullet$}
\put(97.5,8.25){$\mathcal{B}ullet$}
\put(17.5,27.5){$\mathcal{B}ullet$} \put(37.5,27.5){$\mathcal{B}ullet$}
\put(57.5,27.5){$\mathcal{B}ullet$} \put(77.5,27.5){$\mathcal{B}ullet$}
\put(97.5,27.5){$\mathcal{B}ullet$}
\put(17.5,47.5){$\mathcal{B}ullet$} \put(37.5,47.5){$\mathcal{B}ullet$}
\put(57.5,47.5){$\mathcal{B}ullet$} \put(77.5,47.5){$\mathcal{B}ullet$}
\put(97.5,47.5){$\mathcal{B}ullet$}
\put(110,47.5){$\longrightarrow 0$}
\put(110,27.5){$\longrightarrow 0$}
\mathrm{e}nd{picture}
\mathrm{e}nd{scriptsize}
\mathrm{e}nd{center}
\mathrm{e}nd{proposition}
We need the following lemma:
\mathcal{B}egin{lemma}\label{phi_j to 0 Leftrightarrow lambda_j<1}
Let $\mathcal{B}s{\lambda}=(\lambda_{0},\lambda_{1},\ldots)\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$ be a Witt
vector, and let $\ph{\phi_0,\phi_1,\ldots}\in\mathscr{O}_K^{\mathbb{N}}$ be
its phantom vector. Then $\phi_j\to 0$ in $\mathscr{O}_K$ if and only if
$|\lambda_j|<1$, for all $j\mathfrak{r}eq 0$.
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof}
The set of Witt vectors whose phantom components
go to $0$ is clearly an ideal $I\subset\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$
containing
$\mathcal{B}s{\mathrm{W}}(\frak{p}_K)$, where $\frak{p}_K$ is the
maximal ideal of $\mathscr{O}_K$.
Reciprocally, suppose $\phi_j\to 0$, since
$\phi_j=\lambda_0^{p^j}+p\lambda_1^{p^{j-1}}+\cdots+p^j\lambda_j$,
we have $|\lambda_0|<1$. Then
$\mathcal{B}s{\lambda}^{(1)}:=(0,\lambda_1,\lambda_2,\ldots)=\mathcal{B}s{\lambda}-(\lambda_0,0,\ldots)$
lies again in the ideal $I$, and hence $\phi_j(\mathcal{B}s{\lambda}^{(1)})=
p\lambda_1^{p^{j-1}}+ p^{2}\lambda_2^{p^{j-2}}+
\cdots+p^j\lambda_j\to 0$. This shows that
$|\lambda_1|<1$. Proceeding inductively one sees that
$|\lambda_j|<1$, for all $j\mathfrak{r}eq 0$. \mathrm{e}nd{proof}
We now are ready to give the proof of Proposition
\mathfrak{r}ef{criteria for belong to Amice} :
\mathcal{B}egin{proof}
Assume that
$g^-(T)=\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq 0}-n\phi_{-n,m}T^{-np^m}$ lies in
$\mathcal{E}_K$. This happens if and only if $\lim_{np^m\to
\infty}\phi_{-n,m}=0$, and implies $\lim_{m\to
\infty}\phi_{-n,m}=0$ for all $n\in\mathbb{J}$. By Lemma
\mathfrak{r}ef{phi_j to 0
Leftrightarrow lambda_j<1}, we have $|\lambda_{-n,m}|<1$, for all
$n\in\mathbb{J}$ and all $m\mathfrak{r}eq 0$. An easy induction shows that
$\lim_{n\in\mathbb{J},n\to\infty}\lambda_{-n,m}=0$, for all $m\mathfrak{r}eq 0$.
Reciprocally, assume that $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ satisfies the
condition \mathrm{e}qref{lb<1 and lim_n lb_n,m =0}. We must show that
$\lim_{np^m\to\infty}\phi_{-n,m}=0$. For all $\varepsilon>0$, we
choose $k\mathfrak{r}eq 0$ such that $|p^{k+1}|< \varepsilon$. By
assumption, for all $0\leq m\leq k$, there exists $N_m$ such that
$|\lambda_{-n,m}|< \varepsilon$, for all $n\mathfrak{r}eq N_m$. Let
$N:=\max(N_0,\ldots,N_k)$. Then
$$\phi_{-n,m}=\underbrace{\lambda_{-n,0}^{p^m}+
\cdots+p^{k}\lambda_{-n,k}^{p^{m-k}}}_{< \varepsilon\;,\textrm{ if
} n\mathfrak{r}eq N}+
\underbrace{p^{k+1}\lambda_{-n,k+1}^{p^{m-k-1}}+\cdots+
p^m\lambda_{-n,m}}_{<\varepsilon}\;.$$ Hence
$|\phi_{-n,m}|<\varepsilon$, if $n\mathfrak{r}eq N$. On the other hand, by
assumption, there is $\delta<1$ such that
$|\lambda_{-n,m}|\leq\delta<1$, for all $m=0,\ldots,k$,
$n=0,\ldots,N$. Hence there exists $M$ such that
$|\lambda_{-n,0}^{p^m}|,\ldots,|\lambda_{-n,k}^{p^{m-k}}|<\varepsilon$,
for all $m\mathfrak{r}eq M$. Then $|\phi_{-n,m}|\leq \varepsilon$, for all
$n\mathfrak{r}eq N$, $m\mathfrak{r}eq M$. Hence
$\lim_{np^m\to\infty}\phi_{-n,m}=0$.\mathrm{e}nd{proof}
\mathcal{B}egin{definition}\label{Conv}
We denote by $\mathrm{Conv}(\mathcal{E})$
\index{Conv@$\mathrm{Conv}(\mathcal{E})$}the set of families
$\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$, with
$\mathcal{B}s{\lambda}_{-n}=(\lambda_{-n,0},\lambda_{-n,1},\ldots)\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$,
satisfying condition \mathrm{e}qref{lb<1 and lim_n lb_n,m =0}.
\mathrm{e}nd{definition}
\mathcal{B}egin{corollary}[Criterion of solvability]\label{criterion of solv over amice}
The equation $\partial_T-g(T)$,
$g(T)=\sum_{i\in\mathbb{Z}}a_iT^i\in\mathcal{E}_K$, is solvable if
and only if $a_0\in\mathbb{Z}_p$, and there exist two families
$\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ and $\{\mathcal{B}s{\lambda}_{n}\}_{n\in\mathbb{J}}$ with
$\mathcal{B}s{\lambda}_{-n},\mathcal{B}s{\lambda}_{n}\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, for all $n\in\mathbb{J}$, such that
$\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}\in\mathrm{Conv}(\mathcal{E}_K)$, and
moreover
\mathcal{B}egin{equation}
a_{np^m}=n\phi_{n,m}\quad,\quad a_{-np^m}=-n\phi_{-n,m}\;.
\mathrm{e}nd{equation}
In other words, its formal solution
$T^{a_0}\mathrm{e}xp(\sum_{i\leq-1}a_i\frac{T^i}{i}) \mathrm{e}xp(\sum_{i\mathfrak{r}eq
1}a_i\frac{T^i}{i})$ can be represented by the symbol
\mathcal{B}egin{equation}\label{formal solution}
T^{a_0}\cdot\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{-n,m}\frac{T^{-np^m}}{p^m})\cdot\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{n,m}\frac{T^{np^m}}{p^m})\;,
\mathrm{e}nd{equation}
where $(\phi_{-n,0},\phi_{-n,1},\ldots)$ (resp.
$(\phi_{n,0},\phi_{n,1},\ldots)$) is the phantom vector of
$\mathcal{B}s{\lambda}_{-n}$ (resp. $\mathcal{B}s{\lambda}_n$), and hence
\mathcal{B}egin{equation}\label{matrix of a solvable differential equation}
\qquad\qquad
g(T)\;=\;\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}-n\phi_{-n,m}T^{-np^m}+a_0+\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}n\phi_{n,m}T^{np^m}\;.\qquad\qquad\Box
\mathrm{e}nd{equation}
\mathrm{e}nd{corollary}
\mathcal{B}egin{corollary}[Katz's canonical extension]
\label{canonical ext over amice}
Let
$\partial_T\textrm{-}\mathrm{Mod}(\mathcal{A}_K([1,\infty]))^{\mathrm{sol}}_{rk=1}$
be the category of rank one differential modules over
$\mathcal{A}_K([1,\infty])$, solvable at all $\mathfrak{r}ho\mathfrak{r}eq 1$, with a regular
singularity at $\infty$ (i.e. the matrix of $\partial_T$ converge at
$\infty$ and hence belongs to $\mathcal{A}_K([1,\infty])$). The scalar
extension functor
$$\partial_T\textrm{-}\mathrm{Mod}(\mathcal{A}_K([1,\infty]))^{\mathrm{sol}}_{rk=1} \longrightarrow
\partial_T\textrm{-}\mathrm{Mod}(\mathcal{E}_K)^{\mathrm{sol}}_{rk=1}$$ is
an equivalence.
\mathrm{e}nd{corollary}
\mathcal{B}egin{proof}
Corollary \mathfrak{r}ef{criterion of solv over amice} shows that
the functor gives a correspondence between the objects. Indeed, every
differential equation $\partial_T-g(T)$, with $g(T)=g^-(T)+a_0+g^+(T)\in\mathcal{E}_K$,
is isomorphic over $\mathcal{E}_K$ to the equation
$\partial_T-(g^-(T)+a_0)$. On the other hand, let
$M,N\in\partial_T-Mod(\mathcal{A}_K([1,\infty]))^{\mathrm{sol}}_{rk=1}$, and let
$\partial_T-g_M$, $\partial_T-g_N$ be the operators corresponding to a chosen
basis of $M$ and $N$. An element of $\mathrm{Hom}(M,N)\stackrel{\sim}{\to}
M^{\vee}\otimes N$ is then a solution of the operator
$\partial_T-(g_N-g_M)$. This solution will be of the form
$y(T)=T^{a_0}\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{-n,m}T^{-np^m}/p^m)$, for some $\phi$. Since we are
supposing that this solution belongs to $\mathcal{A}_K([1,\infty])$, then
$a_0\in\mathbb{Z}$ and this exponential lies in
$\mathcal{A}_K([1,\infty])$. Since the same argument works for
$\mathrm{Hom}_{\partial_T}(M\otimes\mathcal{E}_K,N\otimes\mathcal{E}_K)$, and
since $\mathcal{A}_K([1,\infty])\subset\mathcal{E}_K$, then the map
$\mathrm{Hom}_{\partial_T}(M,N)\to\mathrm{Hom}_{\partial_T}(M\otimes\mathcal{E}_K,N\otimes\mathcal{E}_K)$
is bijective.
\mathrm{e}nd{proof}
\mathcal{B}egin{remark}\label{missing morphism}
We are not able to obtain a complete description of the
isomorphism class of a given differential equation over
$\mathcal{E}_K$. Namely, over the Robba ring
$\mathfrak{R}_K$, we know that a solution of a
differential equation lies in $\mathfrak{R}_K$ if and only
if the
corresponding Witt vector
(in a convenient basis of $M$) satisfies a certain property
\cite[Theorem 3.1]{Rk1}. But we do not have the
analogous
result over $\mathcal{E}_K$. In other words, we do not
have a
necessary and sufficient condition on the Witt vector
$\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_{-n}T^{-n}$ in order that
$E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_{-n}T^{-n},1)$ belongs to $\mathcal{E}_K$.
\mathrm{e}nd{remark}
\section*{\textsc{Second part :
solvability of rank one $q$-difference equations over
$\mathcal{E}_K$}}
\mathcal{A}ddcontentsline{toc}{section}{\textsc{Second part :
solvability of rank one $q$-difference equations over
$\mathcal{E}_K$}}
We shall establish the $q-$analogue of the results of section
\mathfrak{r}ef{section crit of solv}. In order to do that, we will need some
numerical lemmas (cf. section \mathfrak{r}ef{numerical lemmas}) and a
generalization of the result of E.Motzkin (cf. \cite{Motz}, and
section \mathfrak{r}ef{The motzkin decomposition} below).
As a consequence we will prove that for
$|q-1|<\omega$ we have an equivalence of
$q$-confluence as in \cite{Pu-q-Diff}.
We shall point out that, almost all statements are true for
$|q-1|<1$. The only obstructions to obtain the
confluence in the case $\omega\leq|q-1|<1$ are
\mathcal{B}egin{enumerate}
\item the
existence of the ``antecedent by Frobenius'' (used in
``Step $5$'' of Proposition
\mathfrak{r}ef{q-division of the problem over Amice}), which
is proved in
\cite{DV-Dwork} only for $|q-1|<\omega$;
\item the ``\mathrm{e}mph{Step }$0$'' of Theorem
\mathfrak{r}ef{q-criteria of solvability lemma}.
\mathrm{e}nd{enumerate}
Namely, the existence of an antecedent by Frobenius
holds with $|q-1|<1$ over the Robba Ring, but
the proof uses the Confluence \cite{Pu-q-Diff}. It is
reasonable to conjecture that a more direct proof is
possible generalizing \cite{DV-Dwork} to the case
$|q-1|<1$. The author hopes
that these difficulties will be overcome in the future.
For these reasons the hypothesis $|q-1|<\omega$ will be
introduced systematically starting from
\mathfrak{r}ef{hypothesis q-1<omega}
on. Before Hypothesis \mathfrak{r}ef{hypothesis q-1<omega}
we will suppose
that $|q-1|<1$.
\section{Some numerical Lemmas}\label{numerical lemmas}
\mathcal{B}egin{lemma}\label{coefficients of logarithm}
Let us fix an integer $j\mathfrak{r}eq 0$. If $j\mathfrak{r}eq 1$ we assume
$\omega^{1/p^{j-1}}<\mathfrak{r}ho<\omega^{1/p^{j}}$, and
if $j=0$ we assume $\mathfrak{r}ho<\omega$. Then
\mathcal{B}egin{equation}\label{eq : condition j t}
\frac{\mathfrak{r}ho^{p^j}}{|p^j|}\;>\;
\sup(\mathfrak{r}ho^{r}/|r|\;:\; r\mathfrak{r}eq 1,r\neq p^j) \;.
\mathrm{e}nd{equation}
Moreover, we have
\mathcal{B}egin{equation}
\mathfrak{r}ho<\frac{\mathfrak{r}ho^p}{|p|}<\cdots<\frac{\mathfrak{r}ho^{p^{j-1}}}{|p^{j-1}|}<\frac{\mathfrak{r}ho^{p^j}}{|p^j|}\quad;\quad
\frac{\mathfrak{r}ho^{p^j}}{|p^j|}>\frac{\mathfrak{r}ho^{p^{j+1}}}{|p^{j+1}|}>\frac{\mathfrak{r}ho^{p^{j+2}}}{|p^{j+2}|}>\cdots\;.
\mathrm{e}nd{equation}
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof}
If $r\neq p^k$, for all $k\mathfrak{r}eq 0$, then $|r|=|p|^{v}$,
with $v:=v_p(r)$, hence
$\mathfrak{r}ho^{r}/|r| < \mathfrak{r}ho^{p^{v}}/|p|^{v}$.
This proves \mathrm{e}qref{eq : condition j t}.
Now the condition $\mathfrak{r}ho^{p^{k-1}}/|p^{k-1}|<\mathfrak{r}ho^{p^k}/|p^k|$ is
equivalent to $\mathfrak{r}ho_1<\frac{\mathfrak{r}ho_1^p}{|p|}$, where
$\mathfrak{r}ho_1:=\mathfrak{r}ho^{p^{k-1}}$, and it
is verified if and only if
$\mathfrak{r}ho_1>\omega$, that is $\mathfrak{r}ho>\omega^{\frac{1}{p^{k-1}}}$.
On the other hand,
the inequality $\mathfrak{r}ho^{p^{k-1}}/|p^{k-1}|>\mathfrak{r}ho^{p^k}/|p^k|$ is
equivalent to $\mathfrak{r}ho<\omega^{\frac{1}{p^{k}}}$.
\mathrm{e}nd{proof}
\mathcal{B}egin{lemma}\label{coefficients of exponential}
Let $n\mathfrak{r}eq 1$ be a natural number. Let $l(n):=[\log_p(n)]$, where
$[x]$ denotes the greatest integer smaller than or equal to the real
number $x$. Then for all $k\mathfrak{r}eq n$ we have
\mathcal{B}egin{equation}
\left|\frac{k!}{n!}\mathfrak{r}ight|^{\frac{1}{k-n}}\;\mathfrak{r}eq\;|p|^{l(n)+1}\;.
\mathrm{e}nd{equation}
In particular, if $c\leq |p|^{l(n)+1}$,
then for all $k\mathfrak{r}eq n$ we have
\mathcal{B}egin{equation}
\label{rho^n/n! geq rho^k/k! for all k geq n}
\frac{c^n}{|n!|}\;\mathfrak{r}eq\;
\frac{c^k}{|k!|}\;.
\mathrm{e}nd{equation}
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof}
If $k=n$, the relation is trivial; suppose $k>n$. The
equation \mathrm{e}qref{rho^n/n! geq rho^k/k! for all k geq n} is equivalent
to $c\leq|\frac{k!}{n!}|^{\frac{1}{k-n}}$. Since
$|n!|=\omega^{n-S_n}$, where $S_n$ is the sum of the
digits of the base $p$ expansion of $n$, then
$|\frac{k!}{n!}|^{\frac{1}{k-n}}=\omega^{1+\frac{S_n-S_k}{k-n}}$.
If $n=n_0+n_1p+n_2p^2+\cdots+n_{l(n)}p^{l(n)}$, with $0\leq n_i\leq p-1$,
then $S_n=n_0+n_1+\cdots +n_{l(n)}$, hence $1\leq S_n\leq (p-1)(l(n)+1)$.
This shows that
\mathcal{B}egin{equation}
1+\frac{S_n-S_k}{k-n}\leq 1+\frac{(p-1)(l(n)+1)-1}{k-n}\leq 1+
(p-1)(l(n)+1) -1 =(p-1)(l(n)+1)\;.
\mathrm{e}nd{equation}
Hence $|\frac{k!}{n!}|^{\frac{1}{k-n}}\mathfrak{r}eq
\omega^{(p-1)(l(n)+1)}=|p|^{l(n)+1}$, for all $k>n$.
\mathrm{e}nd{proof}
\mathcal{B}egin{definition}\label{Def : q^a}
Let $q\in K$ be such that $|q-1|<1$.
For all complete
valued field extension $\mathscr{O}mega/K$, and all
$\mathcal{A}lpha\in\mathscr{O}mega$ we define
\mathcal{B}egin{equation}
q^\mathcal{A}lpha:=((q-1)+1)^\mathcal{A}lpha:=
\sum_{k\mathfrak{r}eq
0}\mathcal{B}inom{\mathcal{A}lpha}{k}(q-1)^k\;,
\mathrm{e}nd{equation}
where $\tbinom{\mathcal{A}lpha}{k}:=\frac{\mathcal{A}lpha(\mathcal{A}lpha-1)(\mathcal{A}lpha-2)\cdots(\mathcal{A}lpha-k+1)}{k!}$.
\mathrm{e}nd{definition}
If $|\mathcal{A}lpha|>1$, then
$|\tbinom{\mathcal{A}lpha}{k}|=\frac{|\mathcal{A}lpha|^k}{|k!|}$, hence $q^\mathcal{A}lpha$
converges exactly for $|q-1|<\omega/|\mathcal{A}lpha|$.
If $|\mathcal{A}lpha|\leq 1$, then $q^\mathcal{A}lpha$ converges at least for
$|q-1|<\omega$, in particular if
$\mathcal{A}lpha\in\mathbb{Z}_p$, then $q^{\mathcal{A}lpha}$ converges
at least for $|q-1|<1$. For a detailed
discussion on the radius of convergence of $q^\mathcal{A}lpha$
see \cite[Ch.IV, Prop.7.3]{DGS}.
\mathcal{B}egin{lemma}\label{(q^a-1)/(q-1) --> a}
Let $\mathcal{A}lpha\in\mathscr{O}mega$ and $q\in K$ be as in Definition
\mathfrak{r}ef{Def : q^a}. Then
\mathcal{B}egin{equation}
\lim_{q\to 1}\frac{q^\mathcal{A}lpha-1}{q-1}=\mathcal{A}lpha\;.
\mathrm{e}nd{equation}
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof} Write $\frac{(q^\mathcal{A}lpha-1)}{(q-1)}=\frac{((q-1)+1)^\mathcal{A}lpha-1}{(q-1)}=
\mathcal{A}lpha+\sum_{k\mathfrak{r}eq
2}\tbinom{\mathcal{A}lpha}{k}(q-1)^{k-1}$.
Let $s:=\max(|\mathcal{A}lpha|,1)$, and for all $n\mathfrak{r}eq 1$ let
$l(n):=[\log_p(n)]$.
We now prove that if $|q-1|\leq |p|^{l(2)+1}/s$,
then for all $k\mathfrak{r}eq 2$ we have
$|\tbinom{\mathcal{A}lpha}{k}(q-1)^{k-1}|\leq
|\tbinom{\mathcal{A}lpha}{2}(q-1)|$ which is enough to conclude.
Assume $k\mathfrak{r}eq n\mathfrak{r}eq 1$. The condition
$|\tbinom{\mathcal{A}lpha}{k}(q-1)^{k-1}|\leq
|\tbinom{\mathcal{A}lpha}{n}(q-1)^{n-1}|$ is equivalent to
\mathcal{B}egin{equation}\label{4444}
|q-1|\leq |\tbinom{\mathcal{A}lpha}{n}/\tbinom{\mathcal{A}lpha}{k}|^{\frac{1}{k-n}}=
\left(\frac{|k!|}{|n!|}
\frac{1}{|(\mathcal{A}lpha-n)\cdots(\mathcal{A}lpha-k+1)|}\mathfrak{r}ight)^{\frac{1}{k-n}}\;.
\mathrm{e}nd{equation}
By Lemma \mathfrak{r}ef{coefficients of exponential} we know that $|\frac{k!}{n!}|^{\frac{1}{k-n}}\mathfrak{r}eq
|p|^{l(n)+1}$. On the other hand, it is clear that
$|(\mathcal{A}lpha-n)\cdots(\mathcal{A}lpha-k+1)|\leq s^{k-n}$.
Hence the right hand side of \mathrm{e}qref{4444} is bigger than $|p|^{l(n)+1}/s$. The claim is proved.\mathrm{e}nd{proof}
\mathcal{B}egin{lemma}\label{valuation of q^d-1}
Let $j\mathfrak{r}eq 0$.
If $j=0$, assume that $|q-1|<\omega$, and if $j\mathfrak{r}eq 1$ we assume that $\omega^{1/p^{j-1}}<|q-1|<\omega^{1/p^j}$. Let $d:=\mathcal{A}lpha
p^m\in\mathbb{Z}_p$, with $\mathcal{A}lpha\in\mathbb{Z}_p$ such that $(\mathcal{A}lpha,p)=1$. Let
$i:=\min(m,j)$. Then
\mathcal{B}egin{equation}
|q^d-1|=|d|\cdot\frac{|q-1|^{p^i}}{|p|^i}=|p^{m-i}||q-1|^{p^i}\;.
\mathrm{e}nd{equation}
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof} Since $(\mathcal{A}lpha,p)=1$, we have
$\left|\mathcal{B}inom{\mathcal{A}lpha}{1}\mathfrak{r}ight|=1$. Then
\mathcal{B}egin{equation}
|q^\mathcal{A}lpha-1|=|((q-1)+1)^\mathcal{A}lpha-1|=|
\sum_{k=1}^\infty\tbinom{\mathcal{A}lpha}{k}(q-1)^k|=|q-1|\;.
\mathrm{e}nd{equation}
Moreover, one has
$|q^{\mathcal{A}lpha p^m}-1| = |((q^\mathcal{A}lpha-1)+1)^{p^m}-1| =
|\sum_{k=1}^{p^m}\tbinom{p^m}{k}(q^\mathcal{A}lpha-1)^k|$.
Since for all $k\leq p^m$ one has $|\mathcal{B}inom{p^m}{k}|=\frac{|p|^m}{|k|}$, we deduce
$|\mathcal{B}inom{p^m}{k}(q^\mathcal{A}lpha-1)^k|=|p^m|\frac{\mathfrak{r}ho^k}{|k|}$. The claim follows from
Lemma \mathfrak{r}ef{coefficients of logarithm} applied to
$\mathfrak{r}ho=|q-1|=|q^\mathcal{A}lpha-1|$.
\mathrm{e}nd{proof}
\section{The Motzkin decomposition}\label{The motzkin
decomposition}
In \cite{Motz} a decomposition theorem for analytic
element over
an affino\"id domain of the line
(i.e. a set of type
$\mathbb{P}_K^1-\cup_{i=1,\ldots,n}\mathrm{D}^-_K(a_i,r_i)$) is
proved. In \cite{Ch-Motz} G.Christol generalizes this
decomposition for matrices with coefficients in analytic
functions. We now generalize that theorem for series in
$\mathcal{E}_K$ (cf. \mathfrak{r}ef{motzkin}).\\
Let $I\subseteq\mathbb{R}_{\mathfrak{r}eq 0}$ be any non empty interval.
We set $I_0:=I\cup [0,\mathfrak{r}ho]$ (resp.
$I_\infty:= I\cup[\mathfrak{r}ho,+\infty]$), where $\mathfrak{r}ho\in I$. As
an example if $I=[r_1,r_2[$ then $I_0=[0,r_2[$ and
$I_\infty=[r_1,+\infty]$.
\mathcal{B}egin{theorem}\label{Thm : Motzkin annulus}
Let $I\subseteq\mathbb{R}_{\mathfrak{r}eq 0}$ be any interval.
Then each invertible function $a(T)\in\mathcal{A}_K(I)^\times$
can be uniquely written as
\mathcal{B}egin{equation}\label{eq : deco - interval}
a(T)\;=\;\lambda\cdot T^N \cdot a^-(T)\cdot a^+(T)\;,
\mathrm{e}nd{equation}
where $\lambda\in K$, $N\in\mathbb{Z}$,
$a^+(T)=1+\mathcal{A}lpha_1T+\mathcal{A}lpha_2T^2+\cdots
\in 1+T\mathcal{A}_K(I_0)^\times$ and
$a^-(T)=1+\mathcal{A}lpha_{-1}T^{-1}+\mathcal{A}lpha_{-2}T^{-2}+
\cdots \in 1+T^{-1}\mathcal{A}_K(I_\infty)^\times$.
\mathrm{e}nd{theorem}
Before giving the proof we need two lemmas.
Let $\overline{I}$ be the closure of $I$ in
$\mathbb{R}$. Invertible functions are bounded, so it
makes sense to consider their norm $|.|_\rho$ for all
$\rho\in\overline{I}$.
\begin{lemma}\label{|a_-i|<1}
Let
$a^+(T)=1+\alpha_{1}T+\alpha_{2}T^{2}+\cdots$
be an invertible function in $\mathcal{A}_K(I_0)$.
If $r\in I_0$, for all $i\geq 1$ we have
$|\alpha_i|r^i< 1$.
If $r\in \overline{I_0}$, for all $i\geq 1$ we have
$|\alpha_{i}|r^{i}\leq 1$.
The same claim holds for functions $a^-(T)\in
\mathcal{A}_K(I_\infty)$.
\if{
Let $a^-(T)=1+\alpha_{-1}T^{-1}+\alpha_{-2}T^{-2}+\cdots$
be an invertible function in $1+T^{-1}\mathcal{A}_K([r,\infty])$.
Let $a^+(T)=1+\alpha_{1}T+\alpha_{2}T^{2}+\cdots$
be an invertible function in $\mathcal{A}_K([0,r[)$. Then
for all $i\geq 1$ we have
$|\alpha_i|r^i\leq 1$ and
$|\alpha_{-i}|r^{-i}<1$.
}\fi
\end{lemma}
\begin{proof}
By replacing $T$ with $\gamma_r T$, where
$|\gamma_r|=r$, we
can suppose $r=1$.
Since $a^+$ is invertible, its valuation polygon has no
breaks (cf. \cite[Chapitre 2]{Ch-Ro}), so
for all $\rho\leq 1$ we have
$|a^+|_\rho=|a^+(0)|=1$.
Hence $|\alpha_i|\leq 1$ for all $i\geq 1$.
If now $r=1\in I_0$, and if there exists $i\geq 1$ such that
$|\alpha_{i}|=1$, the reduced series
$\overline{a^+(T)}\in k[T]$ is a non constant polynomial.
The zeros of $\overline{a^+(T)}$ lift to zeros of $a^+(T)$,
which contradicts the fact that $a^+(T)$ is invertible,
hence without zeros in the closed unit disk.
\if{Let now $X=T^{-1}$, and let
$P(X):=a^-(X^{-1})=1+\alpha_{-1}X+\alpha_{-2}X^2+\cdots$.
As for $a^+$, for all $i\geq 1$
we have $|\alpha_{-i}|\leq 1$.
If now there exists $i\geq 1$ such that
$|\alpha_{-i}|=1$, the reduced series
$\overline{P(X)}\in k[X]$ is not constant.
The zeros of $\overline{P(X)}$ lift to zeros of $P(X)$,
which contradicts the fact that $P(X)$ is invertible,
hence without zeros in the closed unit disk.
}\fi
\if{
Then there exists a root $\bar{\alpha}$ of
$\overline{P^-(X)}$ in $k^{\mathrm{alg}}$. This shows that there is
$\zeta$ in $K^{\mathrm{alg}}$, with $|\zeta|=1$ and such that
$|P(\zeta)|<1$. If $\zeta$ is a zero of $P^-(X)$, then $P^-(X)$ is not
invertible, which contradicts the hypothesis. Hence
$\zeta$ is not a zero of $P^-(X)$. Let us write $P(X)=\sum_{i\geq
0}b_i(X-\zeta)^i$. Since $|P^-(\zeta)|<1$, then $|b_0|<1$. Since
$|P^-(X)|_1=1$, then $\lim_{\rho\to 1}\sup_{i\geq 0}|b_i|\rho^i=1$.
This implies that the valuation polygon of $|P^-(X)|$
has a change of slope. Hence $P^-(X)$ has a zero in the closed unit ball
$|T|\leq 1$. This is the desired contradiction.
}\fi
\end{proof}
\begin{lemma}\label{c_0=1}
Let $\rho\in\overline{I}$. Let
$a^-(T)=1+\alpha_{-1}T^{-1}+\alpha_{-2}T^{-2}+\cdots\in
\mathcal{A}_K(I_\infty)^{\times}$, and
$a^+(T)=1+\alpha_{1}T+\alpha_{2}T^{2}+\cdots\in
\mathcal{A}_K(I_0)^{\times}$ be invertible functions.
Then
\begin{equation}
|a^-(T)\cdot a^+(T)-1|_\rho < 1\;.
\end{equation}
\end{lemma}
\if{
\begin{lemma}\label{c_0=1}
Let $r_1\leq r_2$ and $\rho\in [r_1,r_2]$. Let
$a^-(T)=1+\alpha_{-1}T^{-1}+\alpha_{-2}T^{-2}+\cdots\in
\mathcal{A}_K([r_1,\infty])^{\times}$, and
$a^+(T)=1+\alpha_{1}T+\alpha_{2}T^{2}+\cdots\in
\mathcal{A}_K([0,r_2[)^{\times}$ be invertible functions.
Then
\begin{equation}
|a^-(T)\cdot a^+(T)-1|_\rho < 1\;.
\end{equation}
\end{lemma}
}\fi
\begin{proof} Write $a^-(T)a^+(T)=\sum_{n\in\mathbb{Z}}c_nT^n$. If we
define $\alpha_0:=1$, then, for all $n\geq 0$, one has
$c_n=\sum_{k=0}^\infty
\alpha_{n+k}\alpha_{-k}$, and
$c_{-n}=\sum_{k=0}^{\infty}\alpha_{-n-k}\alpha_k$. By
Lemma \ref{|a_-i|<1}, either
for all $k\geq 1$ we have
$|\alpha_{-k}|\rho^{-k}<1$ and $|\alpha_k|\rho^k\leq 1$, or for all $k\geq 1$ we have
$|\alpha_{-k}|\rho^{-k}\leq 1$ and $|\alpha_k|\rho^k< 1$. Since $\lim_{k\to\pm\infty}|\alpha_{k}|\rho^{k}=0$,
for all $n\geq 1$ one has
$|c_{n}|\rho^n<1$,
$|c_{-n}|\rho^{-n}<1$, and $|c_0-1|< 1$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{Thm : Motzkin annulus}] We first prove the claim for a rational
fraction $a=P/Q$, $P,Q\in K[T]$.
Let $Z_0$ and $V_0$ (resp. $Z_\infty$ and $V_\infty$)
be the sets of its zeros and poles respectively whose
valuation belongs to $I_0$ (resp. $I_\infty$). Since
$\mathrm{Gal}(K^{\mathrm{alg}}/K)$ acts by
isometric maps, the polynomials
$P_0 :=\prod_{z\in Z_0-\{0\}}(T-z)$,
$P_\infty :=\prod_{z\in Z_\infty}(T-z)$,
$Q_0 :=\prod_{v\in V_0-\{0\}}(T-v)$,
$Q_\infty:=\prod_{v\in V_\infty}(T-v)$ lie in $K[T]$,
since their coefficients are invariant under Galois.
Now $P=\alpha T^s P_0P_\infty$ and
$Q=\beta T^r Q_0Q_\infty$, for convenient
$\alpha,\beta\in K$, $r,s\in\mathbb{N}$.
We then have $a^+(T)=\alpha' P_\infty/Q_\infty$,
$a^-(T)=\beta'P_0/Q_0$, for convenient constants
$\alpha',\beta'\in K$.
We now deduce \emph{by density}
the case where $I$ is a compact interval. If $\|.\|_I$
is the sup-norm on $\{|T|\in I\}$, the Fr\'echet
topology of $\mathcal{A}_K(I)$ is given by the individual norm
$\|.\|_I$, and $(\mathcal{A}_K(I), \|.\|_I)$ is a Banach algebra.
Let $a(T)=\sum_{i\in\mathbb{Z}}b_iT^i$ be
as in the claim. For all $\rho\in I$ we have
$\lim_{i\to\pm\infty}|b_i|\rho^i=0$, so for all
$\rho\in I$ we can consider the integer
$N_\rho:=\min(i\;|\;|b_i|\rho^i=|a(T)|_\rho)$.
Since $a$ is invertible, the $\log$-function
$r\mapsto\log(|a(T)|_{\exp(r)})$ is affine
on $\overline{I}$ of slope $N\in\mathbb{Z}$.
This means that
$N_\rho=N$ for all $\rho\in\overline{I}-\inf(I)$.
Moreover if
$\inf(I)\in I$ the equality also holds at $\rho=\inf(I)$ by
\cite[Thm. 5.4.7]{Ch-Ro}. Multiplying by
$(b_NT^N)^{-1}$ we can assume $N=0$ and
$|a|_\rho=1$ for all $\rho\in\overline{I}$.
Let $a_n(T)$ be a sequence of rational fractions
convergent to $a(T)$.
Then for $n$ sufficiently large $a_n(T)$ has no poles
nor zeros on $\{|T|\in I\}$, hence $a_n(T)$ admits such
a decomposition:
$a_n(T)=\lambda_nT^{N_n}a^-_n(T)a_n^+(T)$.
Moreover there exists $n_0$
such that for all $n\geq n_0$ we have $N_n=0$, and
$|\lambda_n|=1$.
We now prove that, if $a^+_n=1+h_n^+$ and
$a^-_n=1+h_n^-$, then for all $n,m\geq n_0$ the
norms $|\lambda_n-\lambda_m|$, $\|a_n^+-a_m^+\|_I=\|h_n^+-h_m^+\|_I$, and $\|a_n^--a_m^-\|_I=\|h_n^--h_m^-\|_I$ are all bounded by $\|a_n-a_m\|_I$. Since $T^{-1}\mathcal{A}_K([r_1,\infty])$ and
$T\mathcal{A}_K([0,r_2])$ are closed sets in $\mathcal{A}_K(I)$,
this will be enough to show that the sequences
$n\mapsto\lambda_n$, $n\mapsto h_n^-$, and $n\mapsto h_n^+$ all converge in $K$, $T^{-1}\mathcal{A}_K([r_1,\infty])$ and
$T\mathcal{A}_K([0,r_2])$ respectively.
This will be enough to obtain the desired decomposition
\eqref{eq : deco - interval}.
Let $n,m\geq n_0$. We let
$1+h^-:=\frac{1+h_n^-}{1+h_m^-}$ and
$1+h^+:=\frac{1+h_m^+}{1+h_n^+}$. Then
\begin{eqnarray}
\| a_n-a_m\|_I\;=\;
\| \lambda_na_n^-a_n^+-\lambda_ma_m^-a_m^+
\|_I
&=&\Bigl\| \frac{\lambda_na_n^-a_n^+-
\lambda_ma_m^-a_m^+}{a_n^+a_m^-}\Bigr\|_I\\
&=&\Bigl\|\lambda_n\frac{a_n^-}{a_m^-}-\lambda_m\frac{a_m^+}{a_n^+} \Bigr\|_I\\
&=&\|(\lambda_n-\lambda_m)+
\lambda_nh^--\lambda_mh^+\|_I
\end{eqnarray}
We now notice that $h^-$ (resp. $h^+$)
is a power series of the form
$b_{-1}T^{-1}+b_{-2}T^{-2}+\cdots$
(resp. $b_{1}T+b_{2}T^{2}+\cdots$), hence
for all $\rho\in I$ we have
\begin{eqnarray}
|(\lambda_n-\lambda_m)+\lambda_nh^-
-\lambda_mh^+|_\rho&\;=\;&
\max(|\lambda_n-\lambda_m|,
|\lambda_n|\sup_{i\leq -1}|b_i|\rho^i,
|\lambda_m|\sup_{i\geq 1}|b_i|\rho^i)\\
&\;=\;&
\max(|\lambda_n-\lambda_m|, |h^-|_\rho,
|h^+|_\rho)\;.
\end{eqnarray}
So we find
\begin{eqnarray}
\|a_n-a_m\|_I\;=\;
\sup(|\lambda_n-\lambda_m|,\|h^-\|_I,\|h^+\|_I)\;.
\end{eqnarray}
Now $\|h^+\|_I=\Bigl\|
\frac{1+h_m^+}{1+h_n^+}-1
\Bigr\|_I=\Bigl\|
\frac{h_m^+-h_n^+}{1+h_n^+}
\Bigr\|_I=\|h_m^+-h_n^+\|_I$, and analogously
$\|h^-\|_I=\|h_m^--h_n^-\|_I$. This gives the desired
inequalities.
The case where $I$ is non compact is deduced
by expressing $I$ as an increasing union of compact
intervals $J_n\subset J_{n+1}\subset I$.
The uniqueness of the decomposition shows that the
decomposition over $J_n$ coincides with that over
$J_{n+1}$, and we conclude.
\end{proof}
\begin{theorem}\label{motzkin}
Assume that $K$ is discretely valued.
Let $a(T)\in\mathcal{E}_K$. Then
there exist $\lambda\in K$, $N\in\mathbb{Z}$, $a^-(T)=1+h^-(T)$
invertible in $1+T^{-1}\mathcal{A}_K([1,\infty])$, with $h^-(T)=\sum_{i\leq
-1}\alpha_iT^i$, and $a^+(T)=1+h^+(T)$ invertible in
$1+T\mathcal{A}_K([0,1[)$, with $h^+(T)=\sum_{i\geq 1}\alpha_iT^i$, such
that
$$a(T)=\lambda\cdot T^N \cdot a^-(T)\cdot a^+(T).$$
Moreover, such a decomposition is unique.
\end{theorem}
\begin{proof}
The claim cannot be deduced immediately ``by density''
because rational fractions are not dense in
$\mathcal{E}_K$ with respect to the Gauss norm
$|.|_1$.
However the claim holds for functions in $\mathcal{E}^{\dag}_K$
because they converge on some
annulus.\footnote{Actually rational fractions are dense
in $\mathcal{E}^{\dag}_K$ with respect to the $\mathcal{LF}$ topology
induced by the Robba ring $\mathfrak{R}_K$.}
Now $\mathcal{E}^{\dag}_K$ is dense in
$\mathcal{E}_K$ with respect to the Gauss norm.
The assumption that $K$ is discretely valued arises now to
prove that
$\inf\{i\in\mathbb{Z},
\textrm{ such that }|b_i|=|a(T)|_1\}$
is not equal to $+\infty$. This guarantees the existence of
$N<+\infty$.
We can now reproduce the same proof as
Theorem \ref{Thm : Motzkin annulus} replacing
$\|.\|_I$ by the Gauss norm $|.|_1$. We obtain the
desired decomposition.
\end{proof}
\begin{remark}
As already mentioned, if the functions converge on some
appropriate domains, the above results extend to
matrices \cite{Ch-Motz}, \cite[Thm.6.5]{Astx}.
We do not know whether such a generalization exists for
matrices with coefficients in $\mathcal{E}_K$.
The main application from our point of view would be
the study of differential equations with coefficients in
that ring.
\end{remark}
\section{Criterion of solvability for $q$-difference equations over $\mathcal{E}_K$}
\label{q-crit of solv}
\begin{hypothesis}\label{K is discrete valued}
From now on the valuation on $K$ will be discrete, in
order to have Theorem \ref{motzkin}.
\end{hypothesis}
We denote by
\begin{eqnarray}
&&\sigma_q:f(T)\mapsto
f(qT)\;,\qquad\partial_T:=T\frac{d}{dT}\;,\\
&&d_q:=\frac{\sigma_q-1}{(q-1)T}\;,\;\;\quad\qquad\Delta_q:=\frac{\sigma_q-1}{q-1}
\;.
\end{eqnarray}
Let $A$ be one of the rings $\mathfrak{R}_K$,
$\mathcal{E}_K$, $\mathcal{E}^{\dag}_K$, $\mathcal{A}_K(I)$.
A $q$-difference equation is a finite free $A$-module $M$
together with an automorphism $\sigma_q:M\simto M$
satisfying $\sigma_q(am)=\sigma_q(a)\sigma_q(m)$ for
all $a\in A$, $m\in M$. This corresponds in a basis of
$M$ to an expression of
the form $\sigma_q(Y)=a(q,T)Y$, where
$a(q,T)\in GL_n(A)$.
From the action of $\sigma_q:
M\simto M$ we can define the action of $d_q$ and
$\Delta_q$ on $M$. In a basis of $M$ the action of
$d_q$ amounts to an equation of the form
$d_q(Y)=g_{[1]}(q,T)Y$, with
$g_{[1]}(q,T)=\frac{a(q,T)-\mathrm{Id}}{(q-1)T}\in M_n(A)$. As for differential equations
we can attribute to such a module a radius of
convergence. Namely the formal solution is given by
\begin{equation}
Y_q(T,t)\;:=\;\sum_{s\geq 0}g_{[s]}(q,t)\cdot
\frac{(T-t)_{q,s}}{[s]_q^!}
\end{equation}
where for all natural $n\geq 0$
\begin{equation}
[n]^!_q\;:=\;
\frac{\prod_{i=1}^n(q^i-1)}{(q-1)^n}
\end{equation}
is the $q$-factorial, and $(T-t)_{q,s}:=
(T-t)(T-qt)\cdots(T-q^{s-1}t)$
(cf. \cite{Pu-q-Diff} for more
details), and $g_{[s]}$ is the matrix of the action of
$d_q^s$ on $M$. Namely $g_{[0]}=\mathrm{Id}$,
$g_{[1]}=\frac{a(q,T)-1}{(q-1)T}$, and for all
$s\geq 1$ one has $g_{[s+1]}(q,T)=
d_q(g_{[s]}(q,T))+\sigma_q(g_{[s]}(q,T))\cdot
g_{[1]}(q,T)$.
The radius of convergence of $d_q-g_{[1]}$
is then defined as
\begin{equation}
Ray(d_q-g_{[1]}(q,T),\rho)\;:=\;
\min(\liminf_s(|g_{[s]}(q,T)|_\rho/|[s]_q^!|)^{-1/s},\rho)
\end{equation}
This number is attached to the operator $d_q-g_{[1]}$,
but it is not invariant under base changes of $M$.
The radius is always less than or equal to $\rho$;
if it is equal to $\rho$ we say that $\sigma_q-a(q,T)$ is
\emph{solvable} at $\rho$. If $A=\mathcal{E}_K$ and $\rho=1$ we simply say
\emph{solvable} (without specifying $\rho=1$).
If $A=\mathfrak{R}_K$, we say that the equation is
solvable if
$\lim_{\rho\to 1^-}Ray(\sigma_q-a(q,T),\rho)=1$.
\subsection{Preliminary lemmas}
\begin{lemma}\label{limit of q-factorial}
Assume $|q-1|<1$.
Then the sequence $|[n]^!_q|^{1/n}$ converges to a
real number strictly less than $1$; we
call $\omega_q < 1$ that number.
Moreover, let $\kappa$ be the smallest integer such that
$|q^\kappa-1|<\omega$, then
$$\omega_q=\left\{\begin{array}{ccl}
\omega &\textrm{ if }& \kappa=1\;,\\
(|\frac{q^\kappa-1}{q-1}|\cdot\omega)^{\frac{1}{\kappa}} &\textrm{
if }& \kappa\geq 2\;.
\end{array}\right.$$
\end{lemma}
\begin{proof} \cite[3.5]{DV-Dwork}. \end{proof}
\begin{lemma}\label{d_q(f) leq k! f}
Let $|q-1|<1$. For all $f(T)\in\mathcal{A}_K(I)$, all
$\rho\in I$ and all $k\geq 1$, we have
$|\frac{d_q^k}{[k]^!_q}(f)|_\rho\leq
\rho^{-k}|f|_\rho$. The same
result is true for $f\in\mathcal{E}_K$ and $\rho=1$.
\end{lemma}
\begin{proof}\cite[2.1]{DV-Dwork}.\end{proof}
\begin{lemma}[$q$-Small Radius, $q$-analogue of
Lemma \ref{small radius2}]
\label{q-Young}
Let $q\in K$, $|q-1|<1$, and let
$I\subseteq\mathbb{R}_{\geq 0}$ be any interval.
Let $\sigma_q-a(q,T)$, $a(q,T)\in\mathcal{A}_K(I)$, be some
rank one $q$-difference equation. Let
$R_\rho:=Ray(\sigma_q-a(q,T),\rho)$ be the radius of convergence
of the equation at $\rho\in I$. Then
\begin{equation}
\label{R_rho geq ....}
R_\rho\geq\frac{\omega_q}{\max(|g_{[1]}(q,T)|_\rho,\rho^{-1})}=\frac{\omega_q\cdot\rho\cdot|q-1|}{\max(|a(q,T)-1|_\rho,|q-1|)}
\end{equation}
Moreover $R_\rho<\omega_q\cdot\rho$ if and only if
$|a(q,T)-1|_\rho>|q-1|$, and in this case
\begin{equation}\label{R_rho = .... small radius}
R_\rho = \frac{\omega_q\cdot \rho\cdot |q-1|}{|a(q,T)-1|_\rho}\;.
\end{equation}
The same assertions hold for solvable $q$-difference equations
over $\mathcal{E}_K$, with $\rho=1$.
\end{lemma}
\begin{proof} Let $g_{[s]}(q,T)\in\mathcal{A}_K(I)$ be the $1\times 1$ matrix of
$(d_q)^s$. By definition
\begin{eqnarray}\label{q-radius explicit formula}
Ray(d_q-g_{[1]}(q,T),\rho)&=&
\min\Bigl(\rho\;,\;\liminf_s(|g_{[s]}(q,T)|_\rho/|[s]^!_q|)^{-\frac{1}{s}}\Bigr)\nonumber\\
&=&\min\Bigl(\rho\;,\;\omega_q\cdot\liminf_s(|g_{[s]}(q,T)|_\rho)^{-\frac{1}{s}}\Bigr)\;.\quad\qquad
\end{eqnarray}
One has inductively $|g_{[s]}|_\rho \leq
\max(|g_{[1]}|_\rho,\rho^{-1})^s$, which shows \eqref{R_rho geq
....}. Moreover, if $|g_{[1]}|_\rho>\rho^{-1}$, then
$|g_{[s]}|_\rho = \max(|g_{[1]}|_\rho,\rho^{-1})^s$ and \eqref{R_rho
= .... small radius} holds. Reciprocally, if
$R_\rho<\omega_q\cdot\rho$, then, by \eqref{R_rho geq ....}, one has
$|a(q,T)-1|_\rho> |q-1|$. \end{proof}
\begin{lemma}\label{lambda=1 and N=0}
Let $|q-1|<1$. Let $\sigma_q-a(q,T)$ be a
rank one \emph{solvable} equation such that $a(q,T)\in\mathfrak{R}_K$ or
$a(q,T)\in\mathcal{E}_K$. Let $a(q,T)=\lambda_q
T^{N}a^-(q,T)a^+(q,T)$ be the Motzkin decomposition of $a(q,T)$
(cf. Theorems \ref{Thm : Motzkin annulus}, \ref{motzkin}); then $N=0$ and $|\lambda_q-1|<1$.
\end{lemma}
\begin{proof}
The solvability implies $|a(q,T)-1|_1\leq |q-1|<1$ (cf.
Lemma \ref{q-Young}), hence $|a(q,T)|_1=1$. More precisely, with the
notations as in the proof of Lemma \ref{c_0=1}, one has
$|\lambda_q\sum_{n\in\mathbb{Z}}c_nT^{n+N}-1|_1\leq|q-1|<1$. We
know that $\sup_{n\neq 0}|c_n|<1$ and $|c_0-1|<1$ (cf. Lemma
\ref{c_0=1}). If $N\neq 0$, then $|\lambda_q c_0T^N|_1< 1$ and
$|\lambda_q c_{-N}-1|<1$. The first implies $|\lambda_q|<1$,
which contradicts the second. Hence $N=0$.
We deduce that $|\lambda_q
c_0-1|<1$, which implies $|\lambda_q-1|<1$.
\end{proof}
\begin{lemma}\label{q-radius of constant}
Let $|q-1|<1$. There exists $R_0>0$ such that
$Ray(\sigma_q-\lambda_q,\rho)=R_0\cdot\rho$, for all
$\rho\in[0,\infty[$.
\end{lemma}
\begin{proof}
By \cite[1.2.4]{DV-Dwork}, one has
$$\left|g_{[n]}(T)\right|_\rho^{\frac{1}{n}}=
\frac{|\sum_{j=0}^{n}(-1)^j\binom{n}{j}_{q^{-1}}q^{\frac{-j(j-1)}{2}}
\lambda_q^j|^{1/n}}{|q-1|\cdot\rho}\;.$$ Since the numerator does
not depend on $\rho$, the lemma is proved.
\end{proof}
\subsection{The settings}
\label{the settings}
As for differential equations, we shall find a description of the
formal solution of a given solvable $q$-difference equation
\begin{equation}\label{q-diff equation}
\sigma_q(y_q) = a(q,T)\cdot y_q\;,
\end{equation}
with $a(q,T)\in\mathcal{E}_K$. We will show that solutions of
$q$-difference equations are actually solutions of
differential equations of the form \eqref{formal solution}. By Lemma \ref{lambda=1 and N=0},
we know that
\begin{equation}
a(q,T)=\lambda_q\cdot a^-(q,T)\cdot a^+(q,T)\;,
\end{equation}
with $a^-(q,T):=1+\sum_{i\leq -1} \alpha_iT^i$, and
$a^+(q,T):=1+\sum_{i\geq 1} \alpha_iT^i$. Now write formally
\begin{equation}
a^-(q,T):=\exp(\sum_{i\leq -1}a_iT^i)\;,\qquad
a^+(q,T):=\exp(\sum_{i\geq 1}a_iT^i)\;.
\end{equation}
Then the formal solution of \eqref{q-diff equation} is
\begin{equation}\label{formal solution explicit}
y_q(T):=\exp\Bigl(\sum_{i\leq -1}\frac{a_i}{q^i-1}T^i\Bigr)\cdot
q^{a_0}\cdot\exp\Bigl(\sum_{i\geq 1}\frac{a_i}{q^i-1}T^i\Bigr)\;.
\end{equation}
We are interested in studying this exponential in the case in which
the equation \eqref{q-diff equation} is solvable. The
main result will be the Criterion of solvability
\ref{criterion of solvability for q-difference}.
\subsection{Technical results}
In this section $q\in\mathrm{D}^-(1,1)$ is fixed. We will
omit the index $q$ in the series. The following
proposition is the
$q$-analogue of Proposition
\ref{division of the problem over Amice} for the Robba
ring.
\begin{proposition}
\label{q-division of the problem over Robba} Let
$|q-1|<1$. Let $\sigma_q-a(T)$, $a(T)=\lambda a^-(T)
a^+(T) \in \mathfrak{R}_K$, be a solvable equation. Then $\sigma_q-a^-(T)$,
$\sigma_q-\lambda$, $\sigma_q-a^+(T)$ are all solvable.
\end{proposition}
\begin{proof}
With notations analogous to those of Proposition \ref{division of the problem over
Amice}, we find the following picture:
\begin{center}
\begin{picture}(300,80)
\put(150,0){\vector(0,1){80}} \put(0,60){\vector(1,0){300}}
\put(260,65){$r=\log(\rho)$} \put(155,75){$R(r)$}
\put(0,62){\begin{tiny}$0\leftarrow\rho$\end{tiny}}
\put(50,60){\line(6,-1){60}}
\put(110,50){\line(2,-1){30}}
\put(140,35){\line(2,-5){10}}
\put(170,55){\line(6,1){30}}
\put(170,55){\line(-1,-1){15}}
\put(155,40){\line(-2,-5){12}}
\put(0,23){\line(1,0){300}}
\put(147.5,57.5){$\bullet$}
\put(83,75){\begin{tiny}$R(\sigma_q-a(T),0)$\end{tiny}}
\put(135,75){\vector(1,-1){12}}
\put(140.5,57.5){$\bullet$}
\put(103,67){\begin{tiny}$\log(1\!\!-\!\!\varepsilon)$\end{tiny}}
\put(130,67){\vector(2,-1){10}}
\qbezier[30](143,10)(143,35)(143,60)
\put(80,55){\circle{10}}
\put(60,45){\line(2,1){15.5}}
\put(0,40){\begin{tiny}$R(\sigma_q-a^+(T),r)$\end{tiny}}
\put(180,55){\circle{10}}
\put(200,45){\line(-2,1){15.5}}
\put(200,40){\begin{tiny}$R(\sigma_q-a^-(T),r)$\end{tiny}}
\put(100,23){\circle{10}}
\put(80,13){\line(2,1){15.5}}
\put(0,10){\begin{tiny}$R(\sigma_q-\lambda,r)=\log(R_0)$\end{tiny}}
\put(147.5,0){$\bullet$}
\put(152.5,0){\begin{tiny}$\log(\omega_q)$\end{tiny}}
\qbezier[100](0,2.5)(150,2.5)(300,2.5)
\put(50,-2){\begin{tiny}$\downarrow$small
radius$\downarrow$\end{tiny}}
\end{picture}
\end{center}
Since there exists a common interval
$I:=]1-\varepsilon,1[$ on
which all the operators exist, and since the slope of
$Ray(\sigma_q-a^-,\rho)$
(resp. $Ray(\sigma_q-a^+,\rho)$) is
strictly positive (resp. negative) on $I$, there are at
most $3$ points at which these graphs cross.
Hence, by continuity, for all $\rho\in I$ one has
\begin{equation}\label{ray=min}
Ray(\sigma_q-a,\rho)=\min(\;Ray(\sigma_q-a^-,\rho)\;,\;Ray(\sigma_q-a^+,\rho)\;,\;Ray(\sigma_q-\lambda,\rho)\;)\;.
\end{equation}
By assumption $\lim_{\rho\to
1^-}Ray(\sigma_q-a,\rho)=1$, hence the claim follows. \end{proof}
We now give the
$q$-analogue of Proposition
\ref{division of the problem over Amice} for the ring $\mathcal{E}_K$:
\begin{proposition}
\label{q-division of the problem over Amice}
Let $|q-1|<\omega$. Let $\sigma_q-a(T)$, $a(T)=\lambda a^-(T)
a^+(T) \in \mathcal{E}_K$, be a solvable equation. Then
$\sigma_q-a^-(T)$, $\sigma_q-\lambda$, $\sigma_q-a^+(T)$ are all
solvable.
\end{proposition}
\begin{proof} \label{proof of first reduction} Steps $1$ and $2$ of this
proof coincide with the same steps of the proof of Proposition \ref{division
of the problem over Amice}. We will expose them without
proofs to fix notation. The first part of this proposition
does not use the hypothesis $|q-1|<\omega$, so we will assume this
hypothesis starting from Hypothesis
\ref{hypothesis q-1<omega}.
--- \emph{Step 1 : }By \cite[3.6]{DV-Dwork}, the equation
$\sigma_q-a^{-}(T)$ (resp. $\sigma_q-a^{+}(T)$) has a convergent
solution at $\infty$ (resp. at $0$), hence
$Ray(\sigma_q-a^{-}(T),\rho)=\rho$, for large values of $\rho$
(resp. $Ray(\sigma_q-a^{+}(T),\rho)=\rho$ for $\rho$ close to
$0$). Let $R^0$ be as in Lemma \ref{q-radius of constant},
\begin{eqnarray}
R^-&:=&Ray(\sigma_q-a^{-}(T),1)\;,\\
R^+&:=&Ray(\sigma_q-a^{+}(T),1)\;.
\end{eqnarray}
--- \emph{Step 2 : We have $R^+ = R^-$ and $R^0 \geq R^- = R^+$ (as in
the following picture in which $R := R^- = R^+$)}.\\
We set $r:=\log(\rho)$, and
$R(r):=\log(Ray(\sigma_q-a(T),\rho)/\rho)$.
\begin{center}
\begin{picture}(300,80)
\put(150,0){\vector(0,1){80}} \put(0,60){\vector(1,0){300}}
\put(260,65){$r=\log(\rho)$} \put(155,75){$R(r)$}
\put(0,62){\begin{tiny}$0\leftarrow\rho$\end{tiny}}
\put(50,60){\line(6,-1){60}}
\put(110,50){\line(2,-1){30}}
\put(140,35){\line(2,-5){10}}
\put(147.5,7.5){$\bullet$}\put(152,7.5){\tiny{$\log(R)$}}
\put(170,55){\line(6,1){30}}
\put(170,55){\line(-1,-1){15}}
\put(155,40){\line(-1,-6){5}}
\put(0,23){\line(1,0){300}}
\put(147.5,57.5){$\bullet$}
\put(77,75){\begin{tiny}$R(\sigma_q-a(T),0)$\end{tiny}}
\put(135,75){\vector(1,-1){12}}
\put(80,55){\circle{10}}
\put(60,45){\line(2,1){15.5}}
\put(0,40){\begin{tiny}$R(\sigma_q-a^+(T),r)$\end{tiny}}
\put(180,55){\circle{10}}
\put(200,45){\line(-2,1){15.5}}
\put(200,40){\begin{tiny}$R(\sigma_q-a^-(T),r)$\end{tiny}}
\put(100,23){\circle{10}}
\put(80,13){\line(2,1){15.5}}
\put(0,10){\begin{tiny}$R(\sigma_q-\lambda_q,r)=\log(R_0)$\end{tiny}}
\put(147.5,0){$\bullet$}
\put(152.5,0){\begin{tiny}$\log(\omega_q)$\end{tiny}}
\qbezier[100](0,2.5)(150,2.5)(300,2.5)
\put(50,-2){\begin{tiny}$\downarrow$small
radius$\downarrow$\end{tiny}}
\end{picture}\\\emph{ }\\
\end{center}
--- \emph{Step 3 : We have $R\geq \omega_q$.}\\
Indeed, if $R^-=R^+<\omega_q$, then, by the small radius Lemma \ref{q-Young},
$|a^-(T)-1|_1>|q-1|$ and $|a^+(T)-1|_1>|q-1|$.
We shall now show
that this implies that $|a(T)-1|_1>|q-1|$, which is in
contradiction with the small radius lemma, since the
equation $\sigma_q-a(T)$ is solvable.
\begin{lemma}\label{|x+y+xy|=sup(|x|,|y|)}
Let $(R,|.|)$ be an ultrametric valued ring. Let $h^-,h^+\in R$ be
two elements satisfying $|h^-|<1$, and
$|h^-+h^+|=\sup(|h^-|,|h^+|)$. Then
\begin{equation}
|h^-+h^++h^-h^+|\;=\;\sup(|h^-|,|h^+|)\;.
\end{equation}
\end{lemma}
\begin{proof} If $|h^+|>|h^-|$, then
$|h^-+h^++h^-h^+|=|h^+|$.
If $|h^+|\leq|h^-|<1$, then
$|h^-h^+|<|h^-|=\max(|h^-|,|h^+|)=|h^-+h^+|$.
\end{proof}
\emph{Proof of Step $3$: } Write $a^-(T)=1+h_q^-(T)$ and
$\lambda_q\cdot a^+(T)=1+(\lambda_q-1)+\lambda_q\cdot h_q^+(T)$.
Namely, in the notations of Theorem
\ref{Thm : Motzkin annulus}, we have
$h_q^-(T)=\sum_{i\leq -1}\alpha_iT^i$ and $h^+_q(T)=\sum_{i\geq
1}\alpha_iT^i$. We apply Lemma \ref{|x+y+xy|=sup(|x|,|y|)} to the
field $R:=\mathcal{E}_K$, $h^-:=h^-_q(T)$ and
$h^+:=(\lambda_q-1)+\lambda_qh^+_q(T)$. Indeed
$|h^-+h^+|_1=\sup(|h^-|_1,|h^+|_1)$, and $|h^-|_1<1$ by Lemma \ref{|a_-i|<1}. Lemma
\ref{|x+y+xy|=sup(|x|,|y|)} then implies
\begin{equation}
|a(T)-1|_1\;=\; |(1+h^-)(1+h^+)-1|_1\;=\;
|h^-+h^++h^-h^+|_1
\;=\;
\sup(|h^-|_1,|h^+|_1).
\end{equation}
Now, if $R^-< \omega_q$, then $|a^-(T)-1|_1 > |q-1|$, that is
$|h^-(T)|_1>|q-1|$. Hence $|a(T)-1|_1>|q-1|$, which implies that the
radius of $\sigma_q-a(T)$ is small (cf. Lemma \ref{q-Young}).
Since, by assumption, $Ray(\sigma_q-a(T),1)=1$, this is absurd
and then $R\geq \omega_q$.\\
--- \emph{Step 4 : We have $R>\omega_q$.}
Since $R=R^-$
it is enough to show that $R^->\omega_q$.
By Lemma \ref{q-|a_i|<1} below we have
$|a^--1|_1<|q-1|$. On the other hand Lemma
\ref{q-Katz} proves that this implies
$R^->\omega_q$.
\begin{lemma}[\protect{$q$-analogue of Lemma
\ref{|a_i|<1}}]\label{q-|a_i|<1}
Assume that the Motzkin decomposition of
$a(T)\in\mathcal{E}_K$ is
$a(T):=\lambda_q a^-(T)a^+(T)$, with
$|\lambda_q-1|<1$. If
$Ray(\sigma_q-a(T),1)>\omega_q$, then we have $a^-(T)=1+h^-_q(T)$, where $h^-_q(T)=\sum_{i\leq -1}\alpha_iT^i$ satisfies $|h_q^-|_1<|q-1|$.
\end{lemma}
\begin{proof} Consider the operator $d_q-g_{[1]}(T)$, with
$g_{[1]}(T):=\frac{a(T)-1}{(q-1)T}$, and write
\begin{equation}\label{g_1 of tensor product}
g_{[1]}(T)=\frac{a^-(T)-1}{(q-1)T}+a^{-}(T)\frac{\lambda_qa^+(T)-1}{(q-1)T}
=g_{[1]}^-(T)+a^{-}(T)\frac{\lambda_qa^+(T)-1}{(q-1)T}\;,
\end{equation}
with $g_{[1]}^-(T):=\frac{a^-(T)-1}{(q-1)T}$. Since
$Ray(d_q-g_{[1]}(T),1)>\omega_q$, by
\eqref{q-radius explicit formula}
and Lemma \ref{limit of q-factorial} one has
$\lim_{s\to\infty}|g_{[s]}(T)|_1=0$. In particular
$|g_{[s]}(T)|_1<1$, for some $s\geq 1$. Moreover, by the Small
Radius Lemma \ref{q-Young}, we have $|g_{[1]}(T)|_1\leq 1$. These
facts imply our claim in the following way.
By contradiction, suppose that
$|a^-(T)-1|_1\geq |q-1|$. Our assumption
$Ray(\sigma_q-a(T),1)>\omega_q$ is enough to obtain
Steps 1, 2, 3. In particular Step 3 says $Ray(\sigma_q-a^-(T),1)\geq \omega_q$. Then, by
Lemma \ref{q-Young}, $|g^-_{[1]}(T)|_1\leq 1$, and hence
$|a^-(T)-1|_1=|q-1|$. This means $|g^-_{[1]}(T)|_1 =1$.
We now look at $g_{[1]}$ and get a contradiction
exploiting
\eqref{g_1 of tensor product} and the fact that
$|g_{[1]}^-(T)|_1= 1$. Namely, write as usual
$a^-(T)=1+\sum_{i\geq 1}\alpha_{-i}T^{-i}$.
Let $-d\leq -1$ be the smallest index such that
$|\alpha_{-d}|=|q-1|$. Observing equation
\eqref{g_1 of tensor product} we see that by
Lemma \ref{|a_-i|<1} the reduction of $a^-(T)$ is $1$,
and the reduction of $\frac{\lambda_qa^+(T)-1}{(q-1)T}$ lies in $t^{-1}k[[t]]$, so the
reduction of $g_{[1]}(T)$ in $k(\!(t)\!)$
is of the form
$\overline{g_{[1]}(T)}=
\overline{\alpha}t^{-d-1}+(\textrm{terms of higher degree})$,
where $\overline{\alpha}$ is the reduction of
$\alpha_{-d}/(q-1)$.
A simple
induction on the equation
$g_{[s+1]}=d_q(g_{[s]})+\sigma_q(g_{[s]})g_{[1]}$ shows that
$\overline{g_{[s]}(T)} = \overline{\alpha}^st^{(-d-1)s}+(\textrm{terms of higher degree})$,
which contradicts the fact that $|g_{[s]}(T)|_1<1$,
for some $s\geq 1$.\end{proof}
\begin{lemma}[\protect{$q$-analogue of Lemma
\ref{Katz}}]\label{q-Katz}
Let $q\in\mathrm{D}^-(1,1)$. Let $\Delta_q-g(T)$,
$g(T)\in\mathcal{E}_K$, be some equation. Suppose that
$|g(T)|_1\leq 1$. Then $Ray(\Delta_q-g(T),1)>\omega_q$ if and only if
$|g_{[s]}(T)|_1<1$, for some $s\geq 1$.
\end{lemma}
\mathcal{B}egin{proof}
Condition $|g(T)|_1\leq 1$ guarantee
that
$n\mapsto|g_{[n]}|_1$ is decreasing. Indeed,
$|g_{[1]}|_1=|T^{-1}g(T)|_1\leq 1$ and inductively
$|g_{[n+1]}|_1=
|d_q(g_{[n]})+\sigma_q(g_{[n]})g_{[1]}|_1\leq
\sup(|g_{[n]}|_1,
|g_{[n]}g_{[1]}|_1)=|g_{[n]}|_1\sup(1,|g_{[1]}|_1)=
|g_{[n]}|_1$. So if $Ray(d_q-g(T),1)>\omega_q$, it
follows from \mathrm{e}qref{q-radius explicit formula} that
$\lim_n|g_{[n]}(T)|_1=0$.
Assume now that $|g_{[n]}|_1<1$, for some $n\mathfrak{r}eq 1$.
Since the sequence $n\mapsto|g_{[n]}|_1$
is decreasing, there exists $h>0$
such that $|g_{[p^h]}|_1<1$. We now fix such an $h$,
and we obtain an estimation of $Ray(d_q-g(T),1)$.
By \cite[1.2.2]{DV-Dwork}, one has
$$d_q^{(m+1)p^h}(y)=d_q^{p^h}(d_q^{mp^h}(y))=
\sum_{r=0}^{p^h}\mathcal{B}inom{p^h}{r}_q d_q^{r}(g_{[mp^h]})\cdot
\sigma_q^r(g_{[p^h-r]})\sigma_q^r(y)\;.$$ Then
$g_{[(m+1)p^h]}=\sum_{r=0}^{p^h}\mathcal{B}inom{p^h}{r}_q
d_q^{r}(g_{[mp^h]})\cdot \sigma_q^r(g_{[p^h-r]})a(T)a(qT)\cdots
a(q^{r-1}T)$. Now for all $j\mathfrak{r}eq 0$ one has
$|a(q^jT)|_1=|a(T)|_1=1$, and on the other hand
$|d_q^k(f)|_1\leq |[k]^!_q||f|_1$
(cf. Lemma \mathfrak{r}ef{d_q(f) leq k! f}).
Moreover $|\tbinom{p^h}{r}_q|=|[p^h]_q[p^h-1]_q\cdots
[p^h-r+1]_q|/|[r]^!_q|$, where $[n]_q:=\frac{q^n-1}{q-1}$. Since
$|[p^h]_q|<|[p]_q|$, we obtain
\mathcal{B}egin{equation}\label{..E..}
|g_{[(m+1)p^h]}|_1\;\leq\;
\sup(|[p]_q|,|g_{[p^h]}|_1)\cdot
|g_{[mp^h]}|_1\;.
\mathrm{e}nd{equation}
We deduce that for all $m\mathfrak{r}eq 1$ one has
$|g_{[mp^h]}|_1\leq s^m$, where
$s:=\sup(|[p]_q|,|g_{[p^h]}|_1)<1$.
Now we obtain a similar estimation for all $n\mathfrak{r}eq p^h$.
We let $m(n):=[n/p^h]\mathfrak{r}eq 1$, where $[a]$ is the
greatest integer smaller than or equal to $a$. Then
$m(n)p^h\leq n$ and $|g_{[n]}|_1\leq |g_{[m(n)p^h]}|_1\leq s^{m(n)}$.
Finally we now obtain the required estimation. We have
\mathcal{B}egin{equation}\label{yyyyo}
\left|\frac{g_{[n]}}{[n]_q^!}\mathfrak{r}ight|_1^{\frac{1}{n}}
\;\leq\;
\frac{s^{m(n)/n}}{|[n]_q^!|^{1/n}}
\;\leq\;
\frac{s^{1/p^h}}{
|[n]_q^!|^{1/n}}
\;\xrightarrow[]{\;\;n\;\to\;\infty\;\;}\;
\frac{s^{1/p^h}}{\omega_q}\;.
\mathrm{e}nd{equation}
By \mathrm{e}qref{q-radius explicit formula}, this gives
$Ray(\Delta_q-g_{[1]},1)\mathfrak{r}eq \omega_q/s^{1/p^h}>\omega_q$. \mathrm{e}nd{proof}
\mathcal{B}egin{hypothesis}\label{hypothesis q-1<omega}
From now on we will suppose that
$|q-1|<\omega$. This implies $\omega_q=\omega$.
\mathrm{e}nd{hypothesis}
Hypothesis \mathfrak{r}ef{hypothesis q-1<omega}
is necessary to have Theorem
\cite{DV-Dwork}: the antecedent by Frobenius.\\
--- \mathrm{e}mph{Step 5: Since $|q-1|<1$, and since $R>\omega$, then,
by \cite{DV-Dwork}, we can take the antecedent by Frobenius of
$\sigma_q-a^-(T)$, $\sigma_q-a^+(T)$ and
$\sigma_q-\lambda_q$.}\\
More precisely, there exist a finite extension $K^{(1)}/K$, an
$f^+(T)=\sum_{i\mathfrak{r}eq 0}b^+_iT^i\in\mathcal{A}_{K^{(1)}}([0,1[)^\times$,
$f^-(T)=\sum_{i\leq 0}b^-_iT^i\in\mathcal{A}_{K^{(1)}}([1,\infty])^\times$,
and there are functions $a^{(1),-}(T)=\sum_{i\leq
0}\mathcal{A}lpha_i^{(1),-}T^i\in\mathcal{E}_{K^{(1)}}$,
$a^{(1),+}(T)=\sum_{i\mathfrak{r}eq
0}\mathcal{A}lpha_i^{(1),+}T^i\in\mathcal{E}_{K^{(1)}}$, and
$\lambda_q^{(1)}\in K^{(1)}$ such that
\mathcal{B}egin{eqnarray*}
(\lambda_q^{(1)})^p & = & \lambda_q\;;\\
a^{(1),-}(T^p)^\sigma \cdot a^{(1),-}(q
\cdot T^{p})^\sigma\cdots
a^{(1),-}(q^{p-1}T^{p})^\sigma&=&
a^-(T)\cdot\frac{f^-(q\cdot T)}{f^-(T)}\;;\\
a^{(1),+}(T^p)^\sigma
\cdot a^{(1),+}(q\cdot T^{p})^\sigma\cdots
a^{(1),+}(q^{p-1}T^{p})^\sigma&=&
a^+(T)\cdot
\frac{f^+(q\cdot T)}{f^+(T)}\;,
\mathrm{e}nd{eqnarray*}
where, for all functions
$a(T):=\sum \mathcal{A}lpha_iT^i\in\mathcal{E}_K$,
we let $a(T)^\sigma:=\sum\sigma(\mathcal{A}lpha_i)T^{i}$.
These conditions imply
immediately that $b_0^+\neq 0$, $b_0^-\neq 0$, and that
$f^+(qT)/f^+(T) = 1+u_1T+u_2T^2+\cdots$, and $f^-(qT)/f^-(T) =
1+u_{-1}T^{-1}+u_{-2}T^{-2}+\cdots$. Since
$a^-(T)=1+\mathcal{A}lpha_{-1}T^{-1}+\cdots$, and
$a^+(T)=1+\mathcal{A}lpha_1T+\cdots$, this implies that
$\mathcal{A}lpha_0^{(1),+}=1$ and $\mathcal{A}lpha_0^{(1),-}=1$. Hence the function
\mathcal{B}egin{equation}
a^{(1)}(T)\;:=\;
\lambda_q^{(1)}\cdot a^{(1),-}(T)\cdot a^{(1),+}(T)
\mathrm{e}nd{equation}
lies in $\mathcal{E}_K$, and it is the Motzkin
decomposition of Theorem \mathfrak{r}ef{motzkin}.
Observe now that both $f^-$ and
$f^+$ belong to $\mathcal{E}^{\times}_K$, hence
$\sigma_q-a^{(1)}(T)$ is an antecedent of Frobenius of
$\sigma_q-a(T)$ over $\mathcal{E}_K$, and it is then
solvable.
--- \mathrm{e}mph{Step 6 : } Steps $1$, $2$, $3$, $4$ are still true for the antecedent. In
particular if
\mathcal{B}egin{eqnarray}
R^-(1)&:=&Ray(\partial_T-g^{(1),-}(T),1)\;,\\
R^+(1)&:=&Ray(\partial_T-g^{(1),+}(T),1)\;,\\
R^0(1)&:=&Ray(\partial_T-b_0,1)\;.
\mathrm{e}nd{eqnarray}
we must have $R^-(1)=R^{+}(1)>\omega$. Let
$R(1):=R^{-}(1)=R^{+}(1)$, then $R(1)=R^{1/p}$, by the property of
the antecedent. This implies $R>\omega^{1/p}$.
Now the process can be iterated since
$R(1)>\omega$, and we can again consider
the antecedent. This shows that $R>\omega^{1/p^h}$,
for all $h\mathfrak{r}eq 0$, that is $R=1$.
Proposition \mathfrak{r}ef{q-division of
the problem over Amice} hence follows. \mathrm{e}nd{proof}
\mathcal{B}egin{corollary}[q-analogue of \mathfrak{r}ef{g^+ is trivial}]\label{q-analogue of g^+ is trivial}
Let $q\in\mathrm{D}^-(1,1)$. Let $\sigma_q-a(T)$ be a solvable
$q$-difference equation. Let $a(T)=\lambda\cdot a^{-}(T)\cdot
a^{+}(T)$ be the Motzkin decomposition of $a(T)$. Then
$\lambda=q^{a_0}$, for some $a_0\in K$. Moreover, this operator is
isomorphic to $\sigma_q-\lambda\cdot a^{-}(T)$.
\mathrm{e}nd{corollary}
\mathcal{B}egin{proof} See the proof of \mathfrak{r}ef{g^+ is trivial}. \mathrm{e}nd{proof}
\mathcal{B}egin{remark}
The unique obstruction to generalize Proposition
\mathfrak{r}ef{q-division of the problem over Amice} and Corollary
\mathfrak{r}ef{q-analogue of g^+ is trivial}
to the case $|q-1|<1$ is represented by the so called
Weak Frobenius structure for $q-$difference modules
over a disk with $|q-1|<1$. This is proved in
\cite{DV-Dwork} in the case $|q-1|<\omega$.
The assumption $|q-1|<\omega$ is also used in the
sequel, where we consider logarithms of the
exponentials. E.g. see Step 0 of Lemma
\mathfrak{r}ef{q-criteria of solvability lemma}.
\mathrm{e}nd{remark}
\subsection{Criterion of Solvability}
\mathcal{B}egin{lemma}[q-analogue of
\mathfrak{r}ef{criteria of solvability lemma2}]
\label{q-criteria of solvability lemma}
Let $|q-1|<\omega$. Suppose that $a(T)=a^{+}(T)$ is
the Motzkin decomposition of $a(T)$. Write
$a^+(T)=\mathrm{e}xp(\sum_{i\mathfrak{r}eq 1}a_iT^i)\in\mathcal{A}([0,1[)^{\times}$ (cf. the
settings of \mathfrak{r}ef{the settings}). Then the $q-$difference equation
$\sigma_q-a^+(T)$ is solvable if and only if there exists a family
$\{\mathcal{B}s{\lambda}_{n}\}_{n\in\mathbb{J}}$, where $\mathcal{B}s{\lambda}_n\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$ has phantom
components $\phi_{n}=(\phi_{n,0},\phi_{n,1},\ldots)$
satisfying
\mathcal{B}egin{equation}\label{q-a_np^m=n phi_n,m}
a_{np^m}=\frac{(q^{np^m}-1)}{p^m}\cdot
\phi_{n,m}\;,\qquad\textrm{
for all }n\in\mathbb{J},\; m\mathfrak{r}eq 0\;,
\mathrm{e}nd{equation}
for all $n\in\mathbb{J}$, and all $m\mathfrak{r}eq 0$. In other words, the
formal solution of the equation $\sigma_q(y)=ay$ is
\mathrm{e}qref{formal solution explicit}
\mathcal{B}egin{equation}
y(T)\;=\;
E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^n,1)\;:=\;
\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{n,m}\frac{T^{np^m}}{p^m})\;.
\mathrm{e}nd{equation}
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof}
The formal
series $E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^n,1)$ belongs to
$1+T\cdot\mathfrak{p}_K[[T]]\subset\mathcal{E}_K$, and it is solution
of the equation $L:=\sigma_q-\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{n,m}(q^{np^m}-1)T^{np^m})$.
Since this
exponential
converges in the open unit disk, then
$Ray(L,\mathfrak{r}ho)=\mathfrak{r}ho$, for all
$\mathfrak{r}ho<1$. Hence, by continuity of the radius,
$Ray(L,1)=1$ and $L$
is solvable.
Conversely, suppose that $\sigma_q-a^+(T)$ is
solvable, then the Witt vectors
$\mathcal{B}s{\lambda}_n=(\lambda_{n,0},\lambda_{n,1},\ldots)$ are
defined by the relation \mathrm{e}qref{q-a_np^m=n phi_n,m}. For example, for all $n\in\mathbb{J}$
we have
\mathcal{B}egin{equation}
\lambda_{n,0} = \frac{a_{n}}{(q^{n}-1)} \quad,\qquad \lambda_{n,1}
= \frac{1}{p}\left(\frac{p\cdot a_{np}}{(q^{np}-1)} -
\Bigl(\frac{a_n}{(q^{n}-1)}\Bigr)^{\!p} \mathfrak{r}ight)\;.
\mathrm{e}nd{equation}
We must show that $|\lambda_{n,m}|\leq 1$, for all
$n\in\mathbb{J}$, $m\mathfrak{r}eq 0$.\\
--- \mathrm{e}mph{Step $0$ : We have $|\lambda_{n,0}|=|\phi_{n,0}|\leq 1$ for all
$n\in\mathbb{J}$.}\\
This follows from the small radius Lemma \mathfrak{r}ef{q-Young} as
follows: denote the argument of the
exponential $a^+(T)$ by
$\phi^+_q(T):=\sum_{n\in\mathbb{J}}\sum_{m\geq
0}\phi_{n,m}(q^{np^m}-1)T^{np^m}/p^m$.
By Lemma \mathfrak{r}ef{q-Young}, one has
$|a^+(T)-1|_1=|\mathrm{e}xp(\phi_q^+)-1|_1\leq |q-1|$. Since
$|q-1|<\omega$, then $|\mathrm{e}xp(\phi_q^+)-1|_1<\omega$, hence
$\phi_q^+=\log(\mathrm{e}xp(\phi_{q}^+))$ and
$|\phi_q^+|_1=|\mathrm{e}xp(\phi_q^+)-1|_1\leq|q-1|$. This implies
$|\phi_{n,m}(q^{np^m}-1)/p^m|\leq |q-1|$, for all $n\in\mathbb{J}$ and all
$m\mathfrak{r}eq 0$. In particular, for $m=0$ we have
$|\lambda_{n,0}|=|\phi_{n,0}|\leq 1$, for all $n\in\mathbb{J}$.\\
--- \mathrm{e}mph{Step $1$ :} By Step 0 the exponential
$$E(\sum_{n\in\mathbb{J}}(\lambda_{n,0},0,0,\ldots)T^{n},1)=
\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\lambda_{n,0}^{p^m}\frac{T^{np^m}}{p^m})$$ converges in the unit
disk and is solution of the operator $Q^{(0)}:=\sigma_q-
a^{(0)}(T)$, with $a^{(0)}(T)=\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\lambda_{n,0}^{p^m}(q^{np^m}-1)\frac{T^{np^m}}{p^m})$.
$Q^{(0)}$ is then solvable.\\
--- \mathrm{e}mph{Step 2 :} The tensor product operator $\sigma_q - (a^+(T)/a^{(0)}(T))$
is again solvable. We have explicitly
$$\frac{a^+(T)}{a^{(0)}(T)}=
\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}(\phi_{n,m}-\lambda_{n,0}^{p^m})(q^{np^m}-1)\frac{T^{np^m}}{p^m})\;.$$
This operator corresponds to the family of Witt vectors
$\{\mathcal{B}s{\lambda}_n-(\lambda_{n,0},0,0,\ldots)=(0,\lambda_{n,1},\lambda_{n,2},\ldots)\}_{n\in\mathbb{J}}$.
Observe that the coefficient corresponding to $m=0$ is equal to
$0$, for all $n\in\mathbb{J}$. This leads us to compute easily the
``antecedent by ramification'' of $\sigma_q - a^+(T)/a^{(0)}(T)$,
namely this antecedent is given by $\sigma_q-a^{(1)}(T)$, with
$$a^{(1)}(T):=\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq 0}
(\phi_{n,m}-\lambda_{n,0}^{p^m})(q^{np^m}-1)\frac{(q-1)}{(q^{p}-1)}\frac{T^{np^{m-1}}}{p^m})\;.$$
In other words, we have
$$a^{(1)}(T^p)\cdot a^{(1)}(qT^p)\cdot a^{(1)}(q^2T^p)\cdots
a^{(1)}(q^{p-1}T^p)=\frac{a^+(T)}{a^{(0)}(T)}\;.$$
--- \mathrm{e}mph{Step 3 :} The antecedent
is again solvable, hence, as in Step $0$, we find
$|\phi_{n,1}-\lambda_{n,0}^p|\leq |q^p-1|=|p|$, which implies
$|\lambda_{n,1}|\leq 1$.
The process can be iterated indefinitely.
\mathrm{e}nd{proof}
\if{
\mathcal{B}egin{remark}\label{what happens when q-1>1 ?}
For $|q-1|\mathfrak{r}eq \omega$ we do not know whether there
exist solvable rank one
$q-$difference equations that are not strongly confluent
(cf. terminology of \cite{Pu-q-Diff}). Such an equation is
such that its solution $y(T)$ is an exponential of type
$E(\sum_{n\in\mathbb{J}}\mathcal{B}s{\lambda}_nT^n,1)$, which lies in
$\mathscr{O}_K[[T]]$ but such
that $\mathcal{B}s{\lambda}_n\in \mathcal{B}s{\mathrm{W}}(K)-\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, for some $n\in\mathbb{J}$.
The author does
not know examples of such Witt vectors.
\mathrm{e}nd{remark}
}\fi
\mathcal{B}egin{remark}[q-analogue of Remark
\mathfrak{r}ef{discussion}]\label{q-discussion}
We shall now consider the general
case of an equation $\sigma_q-a(T)$, with
$a(T)=\lambda\cdot
a^{-}(T)a^{+}(T)\in\mathcal{E}_K$, and obtain a criterion
of solvability.
We proceed as in Remark \mathfrak{r}ef{discussion}. Suppose given two
families $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ and $\{\mathcal{B}s{\lambda}_{n}\}_{n\in\mathbb{J}}$, with
$\mathcal{B}s{\lambda}_n\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$. By Lemma
\mathfrak{r}ef{a^+ belong always to E} below, $a^+(T)$ always
belongs to $\mathcal{E}_K$. On the other hand,
we will prove
(cf. Lemma \mathfrak{r}ef{q-criteria for belong to Amice}) that the
series
$a^-(T)$ belongs to $\mathcal{E}_K$ if and only if the
family
$\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ belongs to
$\mathrm{Conv}(\mathcal{E}_K)$
(cf. Definition \mathfrak{r}ef{Conv}).
\mathrm{e}nd{remark}
\mathcal{B}egin{notation}\label{q-notations}
Let $\sigma_q- a(q,T)$, $a(q,T)\in\mathcal{E}_K$, be a solvable
$q$-difference equation. Let $a(q,T):=q^{a_0}\cdot a^-(q,T)\cdot
a^+(q,T)$, $a_0\in\mathbb{Z}_p$, be the Motzkin decomposition of
$a(q,T)$. In the notations of Lemma \mathfrak{r}ef{q-criteria of solvability
lemma} we can write
\mathcal{B}egin{equation} a^-(q,T)=\mathrm{e}xp(\phi_q^-(T))\quad,\quad
a^+(q,T)=\mathrm{e}xp(\phi_q^+(T))\;,
\mathrm{e}nd{equation}
\mathcal{B}egin{eqnarray}\label{gygy}
\phi_q^-(T)&:=&\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{-n,m}(q^{-np^m}-1)\frac{T^{-np^m}}{p^m}\;,\\%\quad,\quad
\label{gygyg}\phi_q^+(T)&:=&\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{n,m}(q^{np^m}-1)\frac{T^{np^m}}{p^m}\;.
\mathrm{e}nd{eqnarray}
For all $n\in\mathbb{J}$ we denote by
$\mathcal{B}s{\lambda}_n,\mathcal{B}s{\lambda}_{-n}\in\mathcal{B}s{\mathrm{W}}(K)$ the
Witt vectors with phantom vectors
$\lr{\phi_{n,0},\phi_{n,1},\ldots}$ and
$\lr{\phi_{-n,0},\phi_{-n,1},\ldots}$ respectively.
In other words, the solution of $\sigma_q-a(q,T)$ can be
represented by the symbol
\mathcal{B}egin{equation}\label{q-formal solution}
y(T):=T^{a_0}\cdot\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{-n,m}\frac{T^{-np^m}}{p^m})\cdot\mathrm{e}xp(\sum_{n\in\mathbb{J}}\sum_{m\mathfrak{r}eq
0}\phi_{n,m}\frac{T^{np^m}}{p^m})\;,
\mathrm{e}nd{equation}
in analogy with the case of differential equations.
\mathrm{e}nd{notation}
\mathcal{B}egin{lemma}\label{a^+ belong always to E}
Let $|q-1|<\omega$. Let $\{\mathcal{B}s{\lambda}_n\}_{n\in\mathbb{J}}$ be a family of Witt
vectors such that $\mathcal{B}s{\lambda}_n\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$. Then $a^+(T)$ belongs to
$\mathcal{E}_K$.
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof} We use the notations of \cite{Rk1}.
Let
$P(X)=(X+1)^p-1$ be the Lubin-Tate series
corresponding to the
formal multiplicative group $\hat{\mathbb{G}}_m$. The
phantom vector of $[q^n-1]_{P}\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$ is then
$\ph{q^n-1,q^{np}-1,q^{np^2}-1,\cdots}$, for all
$n\in\mathbb{Z}$. Then, for all $n\in\mathbb{J}$, the phantom
vector of $[q^n-1]_{P}\cdot \mathcal{B}s{\lambda}_{n}$ is
$$\ph{(q^{n}-1)\phi_{n,0}\;,\;(q^{np}-1)\phi_{n,1}\;,\;(q^{np^2}-1)\phi_{n,2}\;,\;\ldots}\;.$$
Hence we can express $a^+(q,T)$ as a product of Artin-Hasse
exponentials
$$a^+(q,T)=\prod_{n\in\mathbb{J}}E([q^{n}-1]_{P}\cdot \mathcal{B}s{\lambda}_{n}\;,\;T)\;.$$
Since $[q^{n}-1]_{P}\cdot\mathcal{B}s{\lambda}_n\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, then
for all $n\in\mathbb{J}$ the Artin-Hasse exponential $E([q^{n}-1]_{P}\cdot
\mathcal{B}s{\lambda}_{n}\;,\;T)$ belongs to $1+T\mathscr{O}_K[[T]]$, which is contained in
$\mathcal{E}_K$.
\mathrm{e}nd{proof}
\mathcal{B}egin{lemma}[q-analogue of Proposition
\mathfrak{r}ef{criteria for belong to Amice}]\label{q-criteria for belong to Amice}\label{a(T) belong to E_k}
Let $|q-1|<\omega$. Let $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$ be a
family of Witt vectors. Then the
following assertions are equivalent:\\
$(1)$ The series $a^-(T)=\mathrm{e}xp(\phi_q^-(T))$ belongs to $\mathcal{E}_K$;\\
$(2)$ $\phi_q^-(T)$ belongs to $\mathcal{E}_K$;\\
$(3)$ $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}\in\mathrm{Conv}(\mathcal{E})$ (cf. Definition \mathfrak{r}ef{Conv}).
\mathrm{e}nd{lemma}
\mathcal{B}egin{proof}
The equivalence $(2)\Leftrightarrow (3)$ follows from
Proposition \mathfrak{r}ef{criteria for belong to Amice}.
We firstly observe that since by assumption we have
$\mathcal{B}s{\lambda}_{-n}\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, then $|\phi_{-n,m}|\leq 1$ and
so
\mathcal{B}egin{equation}\label{phi_q^-<omega}
|(q^{-np^m}-1)\phi_{-n,m}p^{-m}| =
|q-1|\cdot|\phi_{-n,m}|
\leq|q-1|< \omega\;.
\mathrm{e}nd{equation}
Hence $|\phi_q^-(T)|_1\leq |q-1|<\omega$.
Now assume that
$\phi_q^-(T)\in\mathcal{E}_K$. Since the exponential
series converges in the disk
$D_{\mathcal{E}_K}^-(0,\omega):=
\{f\in\mathcal{E}_K\;|\;|f|_1<\omega\}$,
then $\mathrm{e}xp(\phi_q^-(T))\in\mathcal{E}_K$.
Conversely, assume that
$\mathrm{e}xp(\phi_q^-(T))\in\mathcal{E}_K$. Since, for all $\mathfrak{r}ho>1$,
$|\phi_q^-(T)|_\mathfrak{r}ho<|q-1|$, then
$\phi_q^-(T)\in\mathrm{D}_{\mathcal{A}_K([\mathfrak{r}ho,\infty])}^{-}(0,\omega):=\{f\in\mathcal{A}_K([\mathfrak{r}ho,\infty])\;|\;|f|_\mathfrak{r}ho<\omega\}$,
and hence $\mathrm{e}xp(\phi_q^-(T))$ converges in $\mathcal{A}_K([\mathfrak{r}ho,\infty])$,
for all $\mathfrak{r}ho>1$. Moreover,
$|\mathrm{e}xp(\phi_q^-(T))-1|_\mathfrak{r}ho=|\phi_q^-(T)|_\mathfrak{r}ho\leq|q-1|<\omega$,
for all $\mathfrak{r}ho>1$. By continuity, we have
$|\mathrm{e}xp(\phi_q^-(T))|_1=|\phi_q^-(T)|_1 \leq |q-1|<\omega$. Now the
logarithm converges in the disk
$D_{\mathcal{E}_K}(1,1^-):=\{f\in\mathcal{E}_K\;|\;|f|_1<1\}$,
hence $\phi_q^-(T)=\log\mathrm{e}xp(\phi_q^-(T))$. Then $\phi_q^-(T)$
belongs to $\mathcal{E}_K$.
\mathrm{e}nd{proof}
\mathcal{B}egin{corollary}[Criterion of solvability for $q$-difference
equations]\label{criterion of solvability for q-difference} The
equation $\sigma_q-a(q,T)$, with $a(q,T)=\lambda_q T^N a^-(q,T)
a^+(q,T)$, with
$$a^-(T):=\mathrm{e}xp(\sum_{i\leq -1}a_iT^i)\quad,\quad a^+(T):=\mathrm{e}xp(\sum_{i\mathfrak{r}eq
1}a_iT^i)\;,$$ is solvable if and only if the following conditions
are verified
\mathcal{B}egin{enumerate}
\item $\lambda_q=q^{a_0}$, with $a_0\in\mathbb{Z}_p$\;;
\item $N=0$\;;
\item There exist two families $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ and
$\{\mathcal{B}s{\lambda}_{n}\}_{n\in\mathbb{J}}$, with $\mathcal{B}s{\lambda}_{-n},\mathcal{B}s{\lambda}_{n}\in\mathcal{B}s{\mathrm{W}}(\mathscr{O}_K)$, for
all $n\in\mathbb{J}$, such that
\mathcal{B}egin{equation}
a_{-np^m}=\frac{(q^{-np^m}-1)}{p^m}\cdot\phi_{-n,m}\quad,\quad
a_{np^m}=\frac{(q^{np^m}-1)}{p^m}\cdot\phi_{n,m}\;,
\mathrm{e}nd{equation}
for all $n\in\mathbb{J}$ and all $m\mathfrak{r}eq 0$;
\item $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}\in\mathrm{Conv}(\mathcal{E})$.
\mathrm{e}nd{enumerate}
In other words, the formal solution of this equation can be
represented by the symbol \mathrm{e}qref{q-formal solution} in which the
family $\{\mathcal{B}s{\lambda}_{-n}\}_{n\in\mathbb{J}}$ belongs to
$\mathrm{Conv}(\mathcal{E}_K)$, and $a(T)=\mathrm{e}xp(\phi^-_q(T))\cdot
q^{a_0}\cdot\mathrm{e}xp(\phi^+_q(T))$, where $\phi^-_q(T)$, $\phi^+_q(T)$
are defined in \mathrm{e}qref{gygy} and \mathrm{e}qref{gygyg}.
$\Box$
\mathrm{e}nd{corollary}
\mathcal{B}egin{corollary}[canonical extension for $q-$difference]
Let $\sigma_q\textrm{-}\mathrm{Mod}(\mathcal{A}_K([1,\infty]))^{\mathrm{sol}}_{rk=1}$ be the
category of rank one $\sigma_q-$modules over $\mathcal{A}_K([1,\infty])$,
solvable at all $\mathfrak{r}ho\mathfrak{r}eq 1$. The scalar extension functor
$\sigma_q\textrm{-}\mathrm{Mod}(\mathcal{A}_K([1,\infty]))^{\mathrm{sol}}_{rk=1}
\to
\sigma_q\textrm{-}\mathrm{Mod}(\mathcal{E}_K)^{\mathrm{sol}}_{rk=1}$
is an equivalence.
\mathrm{e}nd{corollary}
\mathcal{B}egin{proof}
The proof is analogous
to the proof of Corollary
\mathfrak{r}ef{canonical ext over
amice}. \mathrm{e}nd{proof}
\mathcal{B}egin{remark}[Strong confluence]
\label{strongly confluence}
The $q$-deformation and $q$-confluence equivalences
of \cite{Pu-q-Diff} do not hold over the ring
$\mathcal{E}_K$. Indeed those equivalences involve
the Taylor solutions, and their convergence locus.
The Taylor solution of a differential equations over
$\mathcal{E}_K$ does not converge anywhere.
However the computations we have obtained show that
the solutions of differential equations and of
$q-$difference equation over $\mathcal{E}_K$ coincide.
Moreover by the canonical extension theorem for
differential and $q-$difference equations one knows that,
if $|q-1|<\omega$, then
every rank one object comes by scalar extension from an
object over the affinoid domain
$A:=\mathbb{P}^1-\mathrm{D}^-(0,1)=
\{|x|\mathfrak{r}eq 1\}$.
In particular, for all $r>1$, every object comes by
scalar extension from an object over the
closed annulus $\{|x|\in[1,r]\}$.
Hence we can apply the deformation and the confluence
to the canonical extensions.
\mathrm{e}nd{remark}
\mathcal{B}ibliographystyle{amsalpha}
\mathcal{B}ibliography{2012-NP-III}
\mathrm{e}nd{document} |
\begin{document}
\makeatletter
\defJournal of the ACM{Journal of the ACM}
\defCommunications of the ACM{Communications of the ACM}
\def\ICALP{International Colloquium on Automata, Languages
and Programming}
\def{\cal{S}}TOC{annual ACM Symp. on the Theory
of Computing}
\def{\cal{F}}OCS{annual IEEE Symp. on Foundations of Computer Science}
\def{\cal{S}}IAM{SIAM Journal on Computing}
\def{\cal{S}}IOPT{SIAM Journal on Optimization}
\defMath. Oper. Res.{Math. Oper. Res.}
\def{\cal{B}}SMF{Bulletin de la Soci\'et\'e Ma\-th\'e\-ma\-tique de France}
\defC. R. Acad. Sci. Paris{C. R. Acad. Sci. Paris}
\defInformation Processing Letters{Information Processing Letters}
\defTheoretical Computer Science{Theoretical Computer Science}
\def{\cal{B}}AMS{Bulletin of the Amer. Math. Soc.}
\defTransactions of the Amer. Math. Soc.{Transactions of the Amer. Math. Soc.}
\defProceedings of the Amer. Math. Soc.{Proceedings of the Amer. Math. Soc.}
\defJournal of the Amer. Math. Soc.{Journal of the Amer. Math. Soc.}
\defLect. Notes in Math.{Lect. Notes in Math.}
\defLect. Notes in Comp. Sci.{Lect. Notes in Comp. Sci.}
\defJournal for Symbolic Logic{Journal for Symbolic Logic}
\defJournal of Symbolic Computation{Journal of Symbolic Computation}
\defJ. Comput. System Sci.{J. Comput. System Sci.}
\defJ. of Complexity{J. of Complexity}
\defMath. Program.{Math. Program.}
\sloppy
\begin{title}
{{\bf \mbox{Solving Linear Programs with Finite Precision:} \\
III. Sharp Expectation bounds}
\thanks{This work has been substantially funded by
a grant from the Research Grants Council of the
Hong Kong SAR (project number CityU 1085/02P).
}}
\end{title}
\author{Dennis Cheung\\
Division of Continuing Professional Education\\
The Hong Kong Institute of Education\\
HONG KONG\\
e-mail: {\tt [email protected]}
\and
Felipe Cucker\\
Department of Mathematics\\
City University of Hong Kong\\
HONG KONG\\
e-mail: {\tt [email protected]}
}
\date{}
\makeatletter
\maketitle
\makeatother
\thispagestyle{empty}
\begin{quote}
{\small {\bf Abstract.\quad}
We give an ${\cal{O}}(\log n)$ bound for the expectation of the
logarithm of the condition number ${\cal{K}}(A,b,c)$ introduced in
``Solving linear programs with finite
precision: I. Condition numbers and random programs.''
\mbox{\it Math. Program.}, 99:175--196, 2004. This bound
improves the previously existing bound, which was of
${\cal{O}}(n)$.
}
\end{quote}
\section{Introduction}
Consider the following linear programming problem
(in standard form),
\begin{align}\label{K3:LP}
\min c^{\rm T} x&\notag\\
{\rm{s.t.}} \ Ax &=b \tag*{(P)}\\
x& \geq 0.\notag
\end{align}
Here $A \in {\mathbb{R}}^{m\times n}, b \in{\mathbb{R}}^m, c \in{\mathbb{R}}^n$,
and $n\geq m\geq 1$.
Assuming this problem is feasible (i.e., the set given by
$Ax=b$, $x\geq 0$, is not empty) and bounded (i.e.,
the function $x\mapsto c^{\rm T} x$ is bounded below
on the feasible set), algorithms solving \ref{K3:LP}
may return an optimizer $x^*\in{\mathbb{R}}^n$ and/or
the optimal value $c^{\rm T} x^*$. Whereas these two
computations are essentially equivalent in the presence
of infinite precision, obtaining an optimizer appears to
be more difficult if only finite precision is available.
Accuracy analyses of interior-point algorithms for
these problems have been done in~\cite{Vera98}
---for the computation of the optimal value---
and in~\cite{ChC03} ---for the computation of
an optimizer. In both cases, accuracy bounds
(as well as complexity bounds) are given in terms
of the dimensions $m$ and $n$, as well as of the
logarithm of a condition number. The bounds in
both analyses are similar. What turns out to be
different is their relevant condition numbers.
In~\cite{Vera98} this is Renegar's condition number
$C(A,b,c)$ which,
roughly speaking, is the relativized inverse of the
size of the smallest perturbation needed to
make~\ref{K3:LP} either infeasible or unbounded.
In~\cite{ChC03} it is the condition number
${\cal{K}}(A,b,c)$ which, following the same idea,
is the relativized inverse of the
size of the smallest perturbation needed to
change the optimal basis of~\ref{K3:LP}
(a detailed definition is in Section~\ref{K3:sec:prelim}
below).
A characteristic of these (and practically all other) condition numbers
is that they cannot be easily computed from the data at hand. Their
computation appears to be at least as difficult as that of the solution for
the problem whose condition they are
measuring (see~\cite{Renegar94} for a discussion on this)
and requires at
least the same amount of precision (see~\cite{ChC05}).
A way out of this dilemma going back to the very beginning
of condition numbers is to randomize the data and to
estimate the expectation of its condition. Indeed,
the first papers on condition are published independently
by Turing~\cite{Turing48} and by Goldstine and
von Neumann~\cite{vNGo47}, both for the condition of
linear equation solving and in a sequel~\cite{vNGo51} to the
latter the matrix $A$ of the input linear system was considered
to be random and some probabilistic estimates on its
condition number were derived. This approach was
subsequently championed
by Demmel~\cite{Demmel88} and Smale~\cite{Smale97}.
A number of probabilistic estimates for Renegar's condition number
(or for a close relative introduced in~\cite{ChC00}) have been
obtained in the last
decade~\cite{CW01,BuCuLo:AoP,DuSpTe:09}.
The overall picture
is that the contribution of the log of this condition number to complexity
and accuracy bounds is, on the average, ${\cal{O}}(\log n)$.
In contrast with this satisfactory state of affairs,
little is known for the condition number ${\cal{K}}$ on random
triples $(A,b,c)$. In~\cite{ChC02} it was shown that
for these triples, conditioned to~\ref{K3:LP} being feasible and
bounded, $\log {\cal{K}}(A,b,c)$ is ${\cal{O}}(n)$ on the average but this
estimate appears to be poor. In the present paper we improve
this result and show an ${\cal{O}}(\log n)$ bound (see
Theorem~\ref{K3:th:mainK} below for a precise statement).
\section{Statement of the Main Result}
\label{K3:sec:prelim}
In this section we fix notations, recall the definition
of ${\cal{K}}(A,b,c)$, and state our main result.
For any subset $B$ of $\{1, 2, \ldots, n\}$, denote by $A_B$ the
submatrix of $A$ obtained by removing from $A$ all the
columns with index not in $B$. If $x\in {\mathbb{R}}^n$,
$x_B$ is defined analogously.
A set $B\subset\{1, 2, \ldots, n\}$ such that $|B| = m$ and $A_B$ is
invertible is said to be a {\em basis} for $A$.
Let $B$ be a basis. Then we may uniquely solve $A_Bx' = b$.
Consider the point $x^*\in {\mathbb{R}}^n$ defined by $x^*_j = 0$ for
$j\not\in B$ and $x^*_B = x'$. Clearly, $Ax^* = b$. We say that
$x^*$ is a {\em primal basic solution}. If, in addition, $x^*\geq 0$,
which is equivalent to $x^*_B\geq 0$, then we say
$x^*$ is a {\em primal basic feasible solution}.
The dual of \ref{K3:LP}, which in the sequel we denote by \ref{K3:LD},
is the following problem,
\begin{align}\label{K3:LD}
\max\ b^{\rm T} y&\tag*{(D)}\\
{\rm{s.t.}}\ A^{\rm T} y&\leq c.\notag
\end{align}
For any basis $B$, we may now uniquely solve
$A^{\rm T} _By^* = c_B$. The point $y^*$ thus
obtained is said to be a {\em dual basic solution}. If,
in addition, $A^{\rm T} y^* \leq c$, $y^*$ is said to be a
{\em dual basic feasible solution}.
Let $B$ be a basis. We say that $B$ is an
{\em optimal basis} (for the pair (P--D))
if both the primal and dual basic solutions are
feasible. In this case the points $x^*$ and $y^*$
above are the {\em optimizers} of~\ref{K3:LP}
and~\ref{K3:LD}, respectively.
We denote by $d$ the input data $(A, b, c)$. We say that $d$
is {\em feasible} when there exist $x\in{\mathbb{R}}^n$, $x\geq 0$,
and $y\in{\mathbb{R}}^m$ such that $Ax=b$ and $A^{\rm T} y\leq c$.
Let
$$
{\cal{U}}=\{d=(A,b,c)\mid\mbox{$d$ has a unique optimal basis}\}.
$$
By definition, triples in ${\cal{U}}$ are feasible.
To define conditioning, we need a norm in the space of data triples.
To do so, we associate to each triple
$d = (A, b, c)\in{\mathbb{R}}^{mn+m+n}$ the matrix
$$
M_d = \left(\begin{array}{cc}
c^{\rm T} &0\\
A& b\\
\end{array}\right)
$$
and we define $\|d\|$ to be the operator norm
$\|M_d\|_{rs}$ of $M_d$ considered as a linear map
from ${\mathbb{R}}^{n+1}$ to ${\mathbb{R}}^{m+1}$. Note that this
requires norms $\|\ \|_r$ and $\|\ \|_s$ in
${\mathbb{R}}^{n+1}$ and ${\mathbb{R}}^{m+1}$, respectively.
Let $\Sigma_U$ be the boundary of ${\cal{U}}$ in
${\mathbb{R}}^{mn+m+n}$. For any data input $d\in{\cal{U}}$,
we define the {\em distance to ill-posedness} and the
{\em condition number} for $d$, respectively, as follows,
$$
\varrho(d) = \min\{ \|\delta d\| :\, d+\delta d\in
\Sigma_U\}\qquad\mbox{ and }
\qquad {\cal{K}}(d) = \frac{\|d\|}{\varrho(d)}.
$$
We next state our main result, after making precise the
underlying probability model.
\begin{definition}
We say that $d=(A,b,c)$ is {\em Gaussian}, and
we write $d\sim N(0,\mathsf{Id})$, when
all entries of $A, b$ and $c$ are i.i.d. with standard
normal distribution.
\end{definition}
\begin{theorem}\label{K3:th:mainK}
For the $\|\ \|_{12}$ norm we have
$$
\underset {d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}
\left(\ln{\cal{K}}(d)\mid d\in{\cal{U}}\right) \leq
\frac{5}{4}\ln(m+1)+\frac{3}{2}\ln(n+1)+\ln(12).
$$
\end{theorem}
\begin{remark}
The use of the $\|\ \|_{12}$ norm in Theorem~\ref{K3:th:mainK} is
convenient but inessential. Well known norm equivalences
yield ${\cal{O}}(\log n)$ bounds for any of the usually
considered matrix norms.
\end{remark}
\section{Proof of the Main Result}
\subsection{A useful characterization}
Write ${\cal{D}}={\mathbb{R}}^{mn+m+n}$ for the space of data inputs, and
$$
{\cal{B}} = \{B\subset\{1,2,\ldots,n\}|\, |B| = m\}
$$
for the family of possible bases.
For any $B\in{\cal{B}}$ and any triple $d\in{\cal{D}}$,
let ${\cal{S}}_1$ be the set of all $m$ by $m$
submatrices of $[A_B, b]$, ${\cal{S}}_2$ the set of all $m + 1$ by
$m + 1$ submatrices of $\left(A^{\rm T} , c \right)^{\rm T} $
containing $A_B$, and ${\cal{S}}_B(d) = {\cal{S}}_1 \bigcup {\cal{S}}_2$.
Note that $|{\cal{S}}_1| = m + 1$ and
$|{\cal{S}}_2| = n-m$, so ${\cal{S}}_B(d)$ has $n + 1$ elements.
Let $\mathrm{Sing}$ be the set of singular matrices. For any square matrix
$S$, we define the distance to singularity as follows.
$$
\rho_{\mathrm{Sing}}(S) := \min\{ \|\delta S\| :\, (S + \delta S) \in\mathrm{Sing}\}.
$$
For any $B\in{\cal{B}}$ consider the function
\begin{eqnarray*}
h_B:{\cal{D}} &\to& [0,+\infty)\\
d&\mapsto & \min_{S\in{\cal{S}}_B(d)} \rho_{\mathrm{Sing}}(S).
\end{eqnarray*}
The following characterization of $\varrho(d)$
is Theorem~2 in~\cite{ChC02}.
\begin{theorem}\label{K3:the1}
For any $d\in{\cal{U}}$,
$$
\varrho(d) = h_B(d)
$$
where $B$ is the optimal basis of $d$.
{\mbox{}
\qed}
\end{theorem}
\subsection{The group action}
We consider the group (with respect to componentwise
multiplication) ${\mathfrak{G}}_n=\{-1,1\}^n$. This group acts
on ${\cal{D}}$ as follows. For ${\mathsf{u}}\in{\mathfrak{G}}_n$ let
$D_{{\mathsf{u}}}$ be the diagonal matrix having ${\mathsf{u}}_j$
as its $j$th diagonal entry, and
\begin{eqnarray*}
{\mathsf{u}}(A)&:=&AD_{{\mathsf{u}}} = ({\mathsf{u}}_1a_1,{\mathsf{u}}_2a_2,\ldots,
{\mathsf{u}}_{n}a_{n}),\\
{\mathsf{u}}(c)&:=&D_{{\mathsf{u}}}c =
({\mathsf{u}}_1c_1,{\mathsf{u}}_2c_2,\ldots, {\mathsf{u}}_{n}c_{n}),
\end{eqnarray*}
where $a_i$ denotes the $i$th column of $A$.
We define ${\mathsf{u}}(d) := ({\mathsf{u}}(A), b, {\mathsf{u}}(c))$. The group ${\mathfrak{G}}_n$ also acts
on ${\mathbb{R}}^n$ by ${\mathsf{u}}(x):=({\mathsf{u}}_1x_1,\ldots,{\mathsf{u}}_nx_n)$. It is immediate
to verify that for all $A\in{\mathbb{R}}^{m\times n}$, all $x\in{\mathbb{R}}^n$, and
all ${\mathsf{u}}\in{\mathfrak{G}}_n$ we have ${\mathsf{u}}(A){\mathsf{u}}(x)=Ax$.
\begin{lemma}\label{K3:l1}
The functions $h_B$ are ${\mathfrak{G}}_n$-invariant. That is,
for any $d\in{\cal{D}}$, $B\in{\cal{B}}$ and ${\mathsf{u}}\in{\mathfrak{G}}_n$,
$$
h_B(d)=h_B({\mathsf{u}}(d)).
$$
\end{lemma}
{\noindent\sc Proof. \quad}
Let $S^*$ be any matrix in ${\cal{S}}_B(d)$ such that
\begin{equation}\label{K3:eq0}
\rho_{\mathrm{Sing}}(S^*) = \min_{S\in{\cal{S}}_B(d)}\rho_{\mathrm{Sing}}(S).
\end{equation}
Let $k$ be the number of rows (or columns) of $S^*$ and
$E$ be any matrix in ${\mathbb{R}}^{k\times k}$ such that
$S^*+ E\in \mathrm{Sing}$ and
\begin{equation}
\|E\|=\rho_{\mathrm{Sing}}(S^*)\label{K3:eq1}.
\end{equation}
Then, there exists $z\in{\mathbb{R}}^k$ such that
\begin{equation}\label{K3:eq11}
(S^* + E)z =0.
\end{equation}
Suppose $S^*$ consists of the $j_1, j_2, \ldots, j_k$
columns of $M_d$ and let
$\bar{{\mathsf{u}}}=({\mathsf{u}}_{j_1}, {\mathsf{u}}_{j_2},\ldots,{\mathsf{u}}_{j_k})\in{\mathfrak{G}}_k$.
Then, by the definition of ${\cal{S}}_B(d)$ and ${\cal{S}}_B({\mathsf{u}}(d))$, we
have $\bar{{\mathsf{u}}}(S^*)\in{\cal{S}}_B({\mathsf{u}}(d))$.
Furthermore,
$$
(\bar{{\mathsf{u}}}(S^*) +\bar{{\mathsf{u}}}(E))\bar{{\mathsf{u}}}(z)
= \bar{{\mathsf{u}}}(S^*+E)\bar{{\mathsf{u}}}(z)
= (S^*+E)(z) =0,
$$
the last by Equation~\eqref{K3:eq11}.
That is, $(\bar{{\mathsf{u}}}(S^*)+\bar{{\mathsf{u}}}(E))$ is also singular. By the
definition of $\rho_{\mathrm{Sing}}$,
\begin{equation}
\rho_{\mathrm{Sing}}(\bar{{\mathsf{u}}}(S^*))\leq \|\bar{{\mathsf{u}}}(E)\|.\label{K3:eq2}
\end{equation}
Since operator norms are invariant under
multiplication of arbitrary matrix columns by $-1$
we have $\|E\|= \|\bar{{\mathsf{u}}}(E)\|$.
Combining this equality with Equations (\ref{K3:eq0}), (\ref{K3:eq1}),
and (\ref{K3:eq2}) we obtain
\begin{equation*}
\rho_{\mathrm{Sing}}(\bar{{\mathsf{u}}}(S^*))\leq \min_{S\in{\cal{S}}_B(d)}\rho_{\mathrm{Sing}}(S).
\end{equation*}
Since $\bar{{\mathsf{u}}}(S^*)\in{\cal{S}}_B({\mathsf{u}}(d))$ we obtain
\begin{equation*}\label{K3:eq4}
\min_{S\in{\cal{S}}_B({\mathsf{u}}(d))}\rho_{\mathrm{Sing}}(S)\leq
\min_{S\in{\cal{S}}_B(d)}\rho_{\mathrm{Sing}}(S).
\end{equation*}
The reversed inequality follows by exchanging
the roles of ${\mathsf{u}}(d)$ and $d$.
{\mbox{}
\qed}
For any $B\in{\cal{B}}$, let
$$
{\cal{U}}_B=\{d\in{\cal{D}}\mid B \mbox{ is the only optimal basis for $d$}\}.
$$
The set ${\cal{U}}$ of well-posed feasible triples is thus partitioned
by the sets $\{{\cal{U}}_B\mid B\in{\cal{B}}\}$.
\begin{lemma}\label{K3:l2}
Let $d\in{\cal{D}}$ and $B\in{\cal{B}}$.
If $h_B(d)>0$, then there exists a unique ${\mathsf{u}}\in{\mathfrak{G}}_n$
such that ${\mathsf{u}}(d)\in{\cal{U}}_B$.
\end{lemma}
{\noindent\sc Proof. \quad}
First observe that, since $\min_{S\in{\cal{S}}_B(d)}\rho_{\mathrm{Sing}}(S)>0$,
we have $A_B$ invertible and therefore $B$ is a basis for $A$.
Let $y^*$ and $x^*$ be the dual and primal basic solutions of
$d$ for the basis $B$, i.e.
\begin{equation}\label{K3:eq6}
y^*=A_B^{-{\rm T}} c_B,\quad x^*_B
=A_B^{-1}b,\quad x^*_j = 0,\, \forall j\not\in B.
\end{equation}
Similarly, let $y^{{\mathsf{u}}}$ and $x^{{\mathsf{u}}}$ be the dual and
primal basic solutions of ${\mathsf{u}}(d)$ for the same basis. Then,
using that ${\mathsf{u}}(A)=AD_{{\mathsf{u}}}$ and ${\mathsf{u}}(c)=D_{{\mathsf{u}}}c$,
\begin{equation}\label{K3:eq7y}
y^{{\mathsf{u}}}={\mathsf{u}}(A)_B^{-{\rm T}}\,{\mathsf{u}}(c)_B
=A_B^{-{\rm T}} (D_{\mathsf{u}})_B^{-{\rm T}} (D_{\mathsf{u}})_Bc_B
=A_B^{-{\rm T}} c_B =y^*
\end{equation}
the third equality by the definition of $(D_{\mathsf{u}})_B$.
Similarly,
\begin{equation}\label{K3:eq7x}
x^{{\mathsf{u}}}_B
={\mathsf{u}}(A)_B^{-1} \,b
=(D_{\mathsf{u}})_B^{-1}A_B^{-1} \,b
=(D_{\mathsf{u}})_B A_B^{-1}\,b
=(D_{\mathsf{u}})_B x^*_B
\end{equation}
and $x^{{\mathsf{u}}}_j = 0$ for all $j\not\in B$.
Therefore,
\begin{eqnarray}\label{K3:eq9}
B \mbox { is optimal for ${\mathsf{u}}(d)$}
&\Leftrightarrow&\mbox{$x^{{\mathsf{u}}}$ and $y^{{\mathsf{u}}}$
are both feasible}\notag\\
&\Leftrightarrow&\left\{\begin{array}{l} x^{{\mathsf{u}}}_B\geq 0\\[3pt]
{\mathsf{u}}(A)_j^{\rm T} y^{\mathsf{u}}\leq {\mathsf{u}}(c)_j,\ \mbox{for }j\not\in B
\end{array}\right.\notag\\
&\Leftrightarrow&\left\{\begin{array}{l} (D_{\mathsf{u}})_Bx^*_B\geq 0\\[3pt]
{\mathsf{u}}_j(a_j)^{\rm T} y\leq {\mathsf{u}}(c)_j,\ \mbox{for }j\not\in B
\end{array}\right.\qquad\mbox{(by (\ref{K3:eq7y}) and (\ref{K3:eq7x}))}\notag\\
&\Leftrightarrow&
\left\{\begin{array}{l} {\mathsf{u}}_jx^*_j\geq 0,\ \mbox{for }j\in B\\[3pt]
{\mathsf{u}}_j(c_j-a_j^{\rm T} y)\geq 0,\ \mbox{for }j\not\in B.
\end{array}\right.
\end{eqnarray}
Since by hypothesis $\min_{S\in{\cal{S}}_B(d)}\rho_{\mathrm{Sing}}(S)>0 $,
\begin{equation}\label{K3:eq10}
x^*_j\neq 0,\,\forall j\in B\qquad\mbox{ and }\qquad
a_j^{\rm T} y\neq
c_j,\,\forall j\not\in B.
\end{equation}
Combining Equations (\ref{K3:eq9}) and (\ref{K3:eq10}),
the statement follows for ${\mathsf{u}}\in{\mathfrak{G}}_n$ given by
${\mathsf{u}}_j=\mathsf{sign}(x^*_j)$ if $j\in B$ and
${\mathsf{u}}_j=\mathsf{sign}(c_j-a_j^{\rm T} y)$ otherwise. Clearly, this
${\mathsf{u}}$ is unique.
{\mbox{}
\qed}
For $B\in {\cal{B}}$ let
$$
\Sigma_B:=\Big\{d\in{\cal{D}}\mid h_B(d)=0 \Big\}
$$
and ${\cal{D}}_B:={\cal{D}}\setminus \Sigma_B$. Lemma~\ref{K3:l1}
implies that, for all $B\in{\cal{B}}$, $\Sigma_B$ and ${\cal{D}}_B$ are
${\mathfrak{G}}_n$-invariant. Lemma~\ref{K3:l2} immediately
implies the following
corollary.
\begin{corollary}\label{cor:K3_partition}
For all $B\in{\cal{B}}$ the sets
$$
{\cal{D}}_{{\mathsf{u}}}:=\{d\in {\cal{D}}_B\mid {\mathsf{u}}(d)\in {\cal{U}}_B\},\qquad
\mbox{for ${\mathsf{u}}\in{\mathfrak{G}}_n$}
$$
are a partition of ${\cal{D}}_B$. {\mbox{}
\qed}
\end{corollary}
\subsection{Probabilities}
\begin{definition}\label{K3:def:invariance}
We say that a distribution $\mathscr D$ on the set of triples $d=(A,b,c)$
is {\em ${\mathfrak{G}}_n$-invariant} when
\begin{description}
\item[(i)]
if $d\sim \mathscr D$ then ${\mathsf{u}}(d)\sim\mathscr D$ for all ${\mathsf{u}}\in{\mathfrak{G}}_n$.
\item[(ii)]
for all $B\in{\cal{B}}$,
$\displaystyle \underset{d\sim \mathscr D}{{\rm{Prob}}}
\big\{h_B(d)=0\big\}=0.$
\end{description}
\end{definition}
Note that Gaussianity is a special case of
${\mathfrak{G}}_n$-invariance. Consequently, all results true for a
${\mathfrak{G}}_n$-invariant distribution also hold for Gaussian data.
\noindent
{\bf Note:\ }
From now on we fix a ${\mathfrak{G}}_n$-invariant
distribution $\mathscr D$ with density function~$f$.
\begin{lemma}\label{K3:l3}
For any ${\mathsf{u}}\in{\mathfrak{G}}_n$ and $B\in{\cal{B}}$,
$$
\underset{d\sim\mathscr D}{{\rm{Prob}}}\{{\mathsf{u}}(d)\in{\cal{U}}_B\}
= \underset{d\sim\mathscr D}{{\rm{Prob}}}\{d\in{\cal{U}}_B\}
=\frac{1}{2^n}.
$$
\end{lemma}
{\noindent\sc Proof. \quad}
The equality between probabilities follows from~(i)
in Definition~\ref{K3:def:invariance}.
Therefore, by Corollary~\ref{cor:K3_partition}
and Definition~\ref{K3:def:invariance}(ii),
the probability of each of
them is $2^{-n}$.
{\mbox{}
\qed}
The following lemma tells us
that, for all $B\in{\cal{B}}$, the random variable
$h_B(d)$ is independent of the event ``$d\in{\cal{U}}_B$."
\begin{lemma}\label{K3:l4}
For all measurable $g:{\mathbb{R}}\rightarrow{\mathbb{R}}$ and $B\in{\cal{B}}$,
$$
\underset{d\sim\mathscr D}{{\mathbb{E}}}
\big(g\big(h_B(d)\big)\big| \,d\in{\cal{U}}_B\big) =
\underset{d\sim\mathscr D}{{\mathbb{E}}}
\big(g\big(h_B(d)\big)\big).
$$
\end{lemma}
{\noindent\sc Proof. \quad}
From the definition of conditional expectation
and Lemma~\ref{K3:l3} we have
\begin{equation}\label{K3:eq:cond_ex}
\underset{d\sim\mathscr D}{{\mathbb{E}}}
\big(g(h_B(d))\big| \,d\in{\cal{U}}_B\big) =
\frac{\displaystyle\int_{d\in{\cal{U}}_B}g(h_B(d))f(d)}
{\underset{d\sim\mathscr D}{{\rm{Prob}}}\{d\in{\cal{U}}_B\}}
=\; 2^n \int_{d\in{\cal{D}}}\mbox{1\hspace*{-2.5pt}l}_B(d) g(h_B(d))f(d)
\end{equation}
where $\mbox{1\hspace*{-2.5pt}l}_B$ denotes the indicator function of ${\cal{U}}_B$.
Now, for any ${\mathsf{u}}\in{\mathfrak{G}}_n$, the map $d\mapsto {\mathsf{u}}(d)$
is a linear isometry on ${\cal{D}}$. Therefore
$$
\int_{d\in{\cal{D}}}\mbox{1\hspace*{-2.5pt}l}_B(d) g(h_B(d))f(d)=
\int_{d\in{\cal{D}}}\mbox{1\hspace*{-2.5pt}l}_B({\mathsf{u}}(d)) g(h_B({\mathsf{u}}(d)))f({\mathsf{u}}(d)).
$$
Using that
$h_B(d)=h_B({\mathsf{u}}(d))$ (by Lemma~\ref{K3:l1})
and $f(d) = f({\mathsf{u}}(d))$ (by the ${\mathfrak{G}}_n$-invariance of $\mathscr D$),
it follows that
\begin{align*}
\underset{d\sim\mathscr D}{{\mathbb{E}}}
\big(g(h_B(d))&\,\big| \,d\in{\cal{U}}_B\big)
= 2^n \int_{d\in{\cal{D}}}\mbox{1\hspace*{-2.5pt}l}_B(d)g(h_B(d))f(d)\\
= &\sum_{{\mathsf{u}}\in{\mathfrak{G}}_n} \int_{d\in{\cal{D}}}\mbox{1\hspace*{-2.5pt}l}_B({\mathsf{u}}(d))
g(h_B({\mathsf{u}}(d))) f({\mathsf{u}}(d))\\
= &\sum_{{\mathsf{u}}\in{\mathfrak{G}}_n} \int_{d\in{\cal{D}}}\mbox{1\hspace*{-2.5pt}l}_B({\mathsf{u}}(d))
g(h_B(d)) f(d)\\
= &\int_{d\in{\cal{D}}} g(h_B(d))f(d)\;=\;
\underset{d\sim\mathscr D}{{\mathbb{E}}}
\big(g\big(h_B(d)\big)\big),
\end{align*}
the last line by Corollary~\ref{cor:K3_partition}.
{\mbox{}
\qed}
Let $B^*=\{1,2,\ldots,m\}$.
\begin{lemma}\label{K3:l5}
For all measurable $g:{\mathbb{R}}\rightarrow{\mathbb{R}}$
$$
\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}(g(\varrho(d))\mid d\in{\cal{U}})
=\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}\big(g\big(h_{B^*}(d)\big)\big).
$$
\end{lemma}
{\noindent\sc Proof. \quad}
Let $\varphi$ be the probability density function of $N(0,\mathsf{Id})$.
\begin{equation}\label{K3:eq12}
\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}(g(\varrho(d))|d\in{\cal{U}})
=\frac{\int_{d\in{\cal{U}}}g(\varrho(d))\varphi(d)\mathsf{d}(d)}
{\underset{d\sim N(0,\mathsf{Id})}{{\rm{Prob}}}\{d\in{\cal{U}}\}}.
\end{equation}
Since $d$ is Gaussian, the probability
that $d$ has two optimal bases is $0$. Using this and
Lemma \ref{K3:l3} we see that
\begin{equation}\label{K3:eq13}
\underset{d\sim N(0,\mathsf{Id})}{{\rm{Prob}}}\{d\in{\cal{U}}\}
=\sum_{B\in{\cal{B}}}\underset{d\sim N(0,\mathsf{Id})}{{\rm{Prob}}}\{d\in{\cal{U}}_B\}
= \sum_{B\in{\cal{B}}}\frac{1}{2^n}
=\left(\begin{array}{c}n\\m\end{array}\right)
\left(\frac{1}{2^n}\right).
\end{equation}
Combining Equations (\ref{K3:eq12}) and (\ref{K3:eq13}), we have
\begin{eqnarray*}
\left(\begin{array}{c}n\\m\end{array}\right)
\left(\frac{1}{2^n}\right)\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}(g(\varrho(d))|d\in{\cal{U}})
&=&\int_{d\in{\cal{U}}}g(\varrho(d))\varphi(d)\mathsf{d}(d)\\
&=&\sum_{B\in{\cal{B}}}\int_{d\in{\cal{U}}_B}g(\varrho(d))\varphi(d)\mathsf{d}(d)
\end{eqnarray*}
the last since the probability that $d$ has two optimal bases is
$0$. Using now that the entries of $d$ are i.i.d.
and Theorem~\ref{K3:the1} we obtain
\begin{eqnarray*}
\left(\frac{1}{2^n}\right)\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}
(g(\varrho(d))\mid d\in{\cal{U}})
&=&\int_{d\in{\cal{U}}_{B^*}}g(\varrho(d))\varphi(d)\mathsf{d}(d)\\
&=&\int_{d\in{\cal{U}}_{B^*}}g\big(h_{B^*}(d)\big)\varphi(d)\mathsf{d}(d).
\end{eqnarray*}
Therefore, by Lemma \ref{K3:l3} with $B=B^*$,
\begin{equation*}
\underset{d\sim N(0,\mathsf{Id})}{{\rm{Prob}}}\{d\in{\cal{U}}_{B^*}\}\,
\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}(g(\varrho(d))\mid d\in{\cal{U}})
=\int_{d\in{\cal{U}}_{B^*}}
g\big(h_{B^*}(d)\big)\varphi(d)\mathsf{d}(d).
\end{equation*}
We conclude since, by the definition of conditional expectation
and Lemma \ref{K3:l4},
\begin{align}
\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}(g(\varrho(d))\mid d\in{\cal{U}})
\;&=\;\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}} \big(g\big(h_{B^*}(d)
\big)\mid d\in{\cal{U}}_{B^*}\big)\notag\\
&=\;\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}
\big(g\big(h_{B^*}(d)\big)\big).\tag*{\qed}
\end{align}
The following is Lemma~11 in~\cite{ChC02}.
\begin{lemma}\label{K3:l6}
For the $\|\ \|_{12}$ in the definition of
$\rho_{\mathrm{Sing}}$ we have
$$
\underset{S\sim N(0,\mathsf{Id})}{{\mathbb{E}}} \left(\sqrt{\frac{1}{\rho_{\mathrm{Sing}}(S)}}\right)
\leq 2m^{5/4}
$$
where $N(0,\mathsf{Id})$ is the Gaussian distribution in the set of
$m\times m$ real matrices.
\end{lemma}
\begin{lemma}\label{K3:l7}
Let $B\in{\cal{B}}$ be fixed. Then, for the $\|\ \|_{12}$
in the definition of $\rho_{\mathrm{Sing}}$ we have
$$
\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}\left(\sqrt\frac{1}
{h_{B}(d)}\right)\leq2(m+1)^{5/4}(n+1).
$$
\end{lemma}
{\noindent\sc Proof. \quad}
For any fixed $d\in{\cal{D}}$,
$$
\sum_{S\in{\cal{S}}_{B}}\sqrt{\frac{1}{\rho_{\mathrm{Sing}}(S)}}
\,\geq\,\max_{S\in{\cal{S}}_{B}}\sqrt\frac{1}{\rho_{\mathrm{Sing}}(S)}
\,=\,\sqrt\frac{1}{h_{B}(d)}.
$$
Take average on both sides,
\begin{align}
\underset{d\sim\mathscr D}{{\mathbb{E}}}\left(\sqrt\frac{1}{h_{B}(d)}\right)
\leq&\underset{d\sim\mathscr D}{{\mathbb{E}}}\left(\sum_{S\in{\cal{S}}_{B}}
\sqrt{\frac{1}{\rho_{\mathrm{Sing}}(S)}}\right)
\,\leq\,\sum_{S\in{\cal{S}}_{B}}\underset{d\sim\mathscr D}{{\mathbb{E}}}
\left(\sqrt{\frac{1}{\rho_{\mathrm{Sing}}(S)}}\right)\notag\\
\leq&\sum_{S\in{\cal{S}}_{B}}2(m+1)^{5/4}
\qquad\mbox{by Lemma \ref{K3:l6}}\notag\\
\leq&\;2(m+1)^{5/4}(n+1).\tag*{\qed}
\end{align}
The following lemma is proved as Lemma \ref{K3:l4}.
\begin{lemma}\label{K3:l8}
For all $r,s\geq 1$ we have
\begin{equation}\tag*{\qed}
\underset{d\sim\mathscr D}{{\mathbb{E}}}\left(\|d\|_{rs}\mid d\in{\cal{U}}\right)
=\underset{d\sim\mathscr D}{{\mathbb{E}}}\left(\|d\|_{rs}\right).
\end{equation}
\end{lemma}
\begin{lemma}\label{K3:l9}
We have
$$
\underset{d\sim N(0,\mathsf{Id})}{{\mathbb{E}}}\left(\|d\|_{12}\right)\leq 6\sqrt{n+1}.
$$
\end{lemma}
{\noindent\sc Proof. \quad}
Recall that $\|d\|_{12}=\|M_d\|_{12}$. It is well known that
$\|M_d\|_{12}\leq \|M_d\|$ where the latter is spectral
norm. The statement now follows from the fact that,
for a random Gaussian $A\in{\mathbb{R}}^{(m+1)\times (n+1)}$
we have ${\mathbb{E}}(\|A\|)\leq 6\sqrt{n+1}$~\cite[Lemma~2.4]{BuCu:10}.
{\mbox{}
\qed}
{\noindent\sc Proof of Theorem~\ref{K3:th:mainK}. \quad}
By Jensen's inequality and Lemma~\ref{K3:l9},
\begin{equation}\label{K3:eq14}
\underset{d}{{\mathbb{E}}}\left(\ln\|d\|_{12}\right)
\leq\ln \underset{d}{{\mathbb{E}}}\left(\|d\|_{12}\right)
\leq \frac12\ln (n+1) +\ln 6.
\end{equation}
In addition, using now Lemma~\ref{K3:l7},
\begin{eqnarray}\label{K3:eq15}
\underset{d}{{\mathbb{E}}}\left(\ln\left(h_{B^*}(d)\right)\right)
&=&
-2\underset{d}{{\mathbb{E}}}\left(\ln\sqrt\frac{1}{h_{B^*}(d)}\right)
\,\geq\,
-2\ln\underset{d}{{\mathbb{E}}}\left(\sqrt\frac{1}{h_{B^*}(d)}\right)\notag\\
&\geq& -\ln(2(m+1)^\frac{5}{4}(n+1)).
\end{eqnarray}
By the definition of ${\cal{K}}(d)$ and Lemmas \ref{K3:l8} and \ref{K3:l5},
\begin{eqnarray}\label{K3:eq16}
\underset{d}{{\mathbb{E}}}\left(\left.\ln{\cal{K}}(d)\right|\,d\in{\cal{U}}\right)
&=&\underset{d}{{\mathbb{E}}}\left(\left.\ln\|d\|_{12}\right|\,d\in{\cal{U}}\right)
-\underset{d}{{\mathbb{E}}}\left(\left.\ln\varrho(d)\right|\,d\in{\cal{U}}\right)\notag\\
&=&\underset{d}{{\mathbb{E}}}\left(\ln\|d\|_{12}\right)
-\underset{d}{{\mathbb{E}}}\left(\ln\left(h_{B^*}(d)\right)\right).
\end{eqnarray}
Combining Equations (\ref{K3:eq14}), (\ref{K3:eq15}), and
(\ref{K3:eq16}),
the proof is done.
{\mbox{}
\qed}
{\small
}
\end{document}
\begin{document}
\title{Random and free observables saturate the Tsirelson bound for CHSH inequality}
\author{Z. Yin$^{1,2}$, A. W. Harrow$^{3}$, M. Horodecki$^{1}$, M. Marciniak$^{1}$ and A. Rutkowski$^{1}$}
\affiliation{$^1$
Faculty of Mathematics, Physics and Informatics, University of Gda\'{n}sk, 80-952 Gda\'{n}sk, Institute of Theoretical Physics and Astrophysics and National Quantum Information Centre in Gda\'{n}sk, 81-824 Sopot, Poland \\
$^2$ Institute Of Advanced Study In Mathematics, Harbin Institute Of Technology, Harbin 150006, China\\
$^3$ Center for Theoretical Physics, Massachusetts Institute of Technology, Cambridge MA 02139, USA.}
\begin{abstract}
Maximal violation of the CHSH-Bell inequality is usually said to be a feature of
anticommuting observables. In this work we show that even random observables exhibit
near-maximal violations of the CHSH-Bell inequality. To do this, we use the tools of
free probability theory to analyze the commutators of large random matrices. Along the
way, we introduce the notion of ``free observables'' which can be thought of as
infinite-dimensional operators that reproduce the statistics of random matrices as their
dimension tends towards infinity. We also study the fine-grained uncertainty of a
sequence of free or random observables, and use this to construct a steering inequality
with a large violation.
\end{abstract}
\maketitle
\section{Introduction}
The notion of quantum mechanics violating local realism was first raised by the work of
A.~Einstein, B.~Podolsky and N.~Rosen \cite{EPR}. This was put on a rigorous and general
footing by the revolutionary 1964 paper of J.~S.~Bell~\cite{Bell}, which derived an
inequality (now known as the Bell inequality) involving correlations of two
observables. Bell showed that there is a constraint on any possible correlations obtained
from local hidden variable models which can be violated by quantum measurements of
entangled states. Later on, another Bell-type inequality which is more experimentally
feasible was derived by J.~F.~Clauser, M.~A.~Horne, A. Shimony and R. A. Holt
\cite{CHSH1969}. Since then, Bell inequalities have played a fundamental role in quantum
theory and have had applications in quantum information science including
cryptography, distributed computing, randomness generation and many others (see
\cite{BCPSW2014} for a review).
In this paper, we mainly focus on the maximal violation of CHSH-Bell inequality
\cite{CHSH1969}. It is well known that the {\it Tsirelson bound} $2\sqrt{2}$ for CHSH-Bell inequality was first obtained by Tsirelson \cite{Tsirelson1980}. And he also proved that the bound can be realized by using proper Pauli observables. Apart from the above qubit case, it is possible to find dichotomic
observables in high dimension \cite{GP1992,BMR1992}, as well as in the continuous-variable
(infinite dimension) case \cite{CPHZ2002}, to obtain the Tsirelson bound. Recently,
Y.~C.~Liang et~al.~\cite{LHBR2010} have studied the possibility of violation of the CHSH-Bell
inequality by random observables. For the bipartite qubit case, they showed that if two observers share a Bell
state, random measurement settings lead to a violation with probability $\approx
0.283$. However, for two qubits, the probability of the maximal
violation is zero, and the probability of near-maximal violation is negligible.
Contrary to the case of qubits, our results show that near-maximal violations are achieved with high probability by random high-dimensional observables. Previous methods of showing maximal violation were based on specific algebraic relations,
namely, anti-commuting, and indeed there is a sense in which maximal violations imply
anti-commutation on some subspace~\cite{MYS2012}.
However, this random approach reveals that there is another type of
algebraic relations between observables which might lead to the Tsirelson bound of
CHSH-Bell inequality. We call the observables, which satisfy those relations, {\it free
observables.} This terminology is from a mathematical theory called free probability
\cite{VDN1992,NS2006}. As we explain below, those free
observables are freely independent in some quantum probability space, which is a quantum analogue of the classical probability space (see the section IV for the definition). A crucial point is
that free observables can only exist in infinite dimension, and thus are experimentally
infeasible. We also discuss finite-dimensional approximations (section IV.B) which are more
experimentally plausible and for which
the Tsirelson bound can be approximately obtained.
In another part of this work we study the fine-grained uncertainty relations of free or
random observables, which was introduced by J. Oppenheim and S. Wehner \cite{OW2010}. It
is more fundamental than the usual entropic uncertainty relations and it relates to the
degree of violation of Bell inequalities (non-local games) \cite{OW2010,RGMH2015}. For a
pair of free (random) observables, we can show that the degree of their uncertainty is
0. On the other hand, it is interesting that for a sequence of free (random) observables
$A_1,\ldots,A_n$ with $n>4,$ the fine-grained uncertainty is upper bounded by $\frac{1}{2}
+ \frac{1}{\sqrt{n}},$ which is the same as the one given by the anti-commuting
observables. Therefore as a byproduct of above results, by using free (random) observables
we can obtain one type of steering inequality with large violation that recently was
studied in \cite{MRYHH2015}.
\section{Preliminaries}
First, we introduce terminology. For a bipartite dichotomic Bell scenario, there are two
space-like separated observers, say, Alice and Bob. Each of them is described by a
$N$-dimensional Hilbert space $H_N,$ and Alice (resp.~Bob) chooses one of $n$ dichotomic
(i.e.~two-outcome) observables $A_i$ (resp. $B_j$) that will take results $\alpha_i$
(resp. $\beta_j$) from set $\{1, -1\}.$ Thus the observables are self-adjoint unitaries.
Next, recall the famous CHSH-Bell inequality \cite{CHSH1969}. If
$\alpha_1,\alpha_2,\beta_1,\beta_2$ are classically correlated random variables then
\begin{equation}
\left| \langle \alpha_1 \beta_1 \rangle + \langle \alpha_1 \beta_2 \rangle + \langle \alpha_2 \beta_1 \rangle - \langle \alpha_2 \beta_2 \rangle \right| \leq 2,
\end{equation}
so we say that 2 is the largest {\it classical value} obtained by any local hidden
variable model. In \cite{Tsirelson1980}, Tsirelson first proved that if the correlations
are obtained by quantum theory then the {\it quantum value} of the CHSH-Bell inequality is
$2\sqrt{2}$ (i.e., the Tsirelson bound). To see this, consider the following CHSH-Bell operator
\begin{equation}
B_{\text{CHSH}} = A_1 \otimes B_1 + A_1 \otimes B_2 + A_2 \otimes B_1 - A_2 \otimes B_2,
\end{equation}
where $A_i, B_j, i,j =1,2$ are dichotomic observables. By choosing proper observables,
e.g. $A_1 = \sigma_x, A_2 = \sigma_z, B_1 = (\sigma_x + \sigma_z) /\sqrt{2}, B_2 =
(\sigma_x - \sigma_z)/\sqrt{2},$ the norm (largest singular value) of the CHSH-Bell
operator is $2\sqrt{2}.$
If $\mathcal{B} = B_{\text{CHSH}}^2,$ then
\begin{equation}\label{eq:B^2}
\mathcal{B} = 4 1\mkern -4mu{\rm l} - [A_1, A_2] \otimes [B_1, B_2].
\end{equation}
If both parties choose compatible (commutative) observables, then $\mathcal{B} = 4 1\mkern -4mu{\rm l}.$
Hence incompatible (non-commutative) observables are necessary for the violation of
CHSH-Bell inequality \cite{BMR1992}. The Tsirelson bound is also determined by
the eigenvalues of the commutators $[A_1, A_2]$ and $[B_1, B_2].$ More precisely, suppose
the local dimension for each party is $N,$ and the eigenvalues of $[A_1, A_2]$
(resp. $[B_1, B_2]$) are $s_1,\ldots, s_N$ (resp. $t_1, \ldots, t_N$). Then we have
\cite{BMR1992}:
\begin{equation}
\|\mathcal{B} \| = \max_{i,j} \{ 4 - s_i t_j \}.
\end{equation}
It is clear that if there exist eigenstates such that the eigenvalues of $[A_1, A_2]$
(resp. $[B_1, B_2]$) are $\pm2,$ then $\|B_{\text{CHSH}}\| = 2\sqrt{2}.$ In particular,
anti-commuting dichotomic local observables, such as $\sigma_x$ and $\sigma_z$, will
saturate the Tsirelson bound.
\section{A random approach to the Tsirelson bound}\label{sec-random}
Suppose $D$ is an $N\times N$ deterministic diagonal matrix, where the diagonal terms of
$D$ are either $1$ or $-1$ and $\Tr (D) =0,$ where $\Tr$ is the usual trace for matrices. It is easy to see that $D^2 = 1\mkern -4mu{\rm l}.$ Suppose
unitaries $U_i, i=1,\ldots, n$ are independent Haar-random matrices in the group of
unitary matrices $U(N).$
Define the following random dichotomic observables:
\begin{equation}
A_i = U_i D U_i^\dagger, \; i=1, \ldots, n.
\end{equation}
We would like to establish results that hold with ``high
probability'' over some natural distribution.
Recall that we call a sequence of random variables $\{X_N\}_N$ convergent
to $X$ almost surely in a probability space $(\Omega, P)$ if $P \left( \lim_{N\to \infty}
X_N = X \right) =1.$ With these notions, we claim that the Tsirelson bound of the CHSH-Bell
inequality can be obtained with high probability by using random dichotomic observables in
sufficiently large dimension. More precisely, we have the following theorem:
\begin{theorem}\label{thm:Main}
Let $A_i = U_i D U_i^\dagger$ and $B_i = V_i D V_i^\dagger, i=1,2,$ where $U_i, V_i$ are
independent Haar-random unitaries in $U (N).$ Then we have
\begin{equation}
\lim_{N \to \infty} \|B_{\text{CHSH}}\| = 2\sqrt{2}, \;\; \text{almost surely}.
\end{equation}
\end{theorem}
The above theorem can be understood as follows: in sufficiently large dimension, random dichotomic observables may saturate the Tsirelson bound of the CHSH-Bell inequality. We note here that in this approximate scenario, the shared state for Alice and Bob should not be fixed; otherwise one may not obtain any violation at all.
To prove this theorem, we first need the following lemma from \cite{NS2006}:
\begin{lem}[\cite{NS2006}]\label{lem:c-norm}
Let $\mathcal{M}_N$ be the set of $N \times N$ matrices. Then for every $ A \in \mathcal{M}_N,$
\begin{equation}
\|A\| = \lim_{k\rightarrow \infty} \left( \tr_N \left((A^{\dagger} A)^k \right)\right)^{\frac{1}{2k}},
\end{equation}
where $\tr_N = \Tr/N.$
\end{lem}
Now denote $A= [A_1, A_2]$ and $B = [B_1, B_2].$ For any $k \in \mathbb{N}_0,$ we can
use the binomial formula and equation \eqref{eq:B^2} to obtain
\begin{equation}
\begin{split}
\tr_{N^2} (\mathcal{B}^k) & = \tr_{N^2}(4\, 1\mkern -4mu{\rm l} - A \otimes B)^k \\
& =\sum_{j=0}^{k}\binom{k}{j} 4^{k-j} (-1)^{j} \tr_N (A^j) \cdot \tr_N (B^j).
\end{split}
\end{equation}
Let us consider the term $\tr_N (A^j).$ Since $A_1A_2$ and $A_2A_1$ commute, again by the binomial formula, we have
\begin{equation}
\tr_N (A^j) = \sum_{l=0}^j \binom{j}{l} (-1)^{j-l} \tr_N ((A_1A_2)^{|2l-j|}).
\end{equation}
Now we need the second key lemma (see Appendix B for the details of proof).
\begin{lem}\label{lem:key}
Let $A_i = U_i D U_i^\dagger,$ where $U_i, i=1,\ldots, n \in U(N)$ are independent Haar
random unitaries. Consider a sequence
$i(1), \ldots, i(k) \in [n]$ satisfying $i(1) \neq i(2), i(2) \neq i(3), \ldots, i(k-1) \neq i(k).$
Then
\begin{equation}\label{eq:asym-free}
\lim_{N \to \infty} \tr_N (A_{i(1)}A_{i(2)}\cdots A_{i(k)}) = 0, \;\; \text{almost surely}.
\end{equation}
\end{lem}
This lemma is mostly due to the work of B. Collins~\cite{Collins2002,CS2004}, where he
and other co-authors developed a method to calculate the moments of polynomial random
variables on unitary groups. This method is called the Weingarten calculus and is in
turn based on \cite{Weingarten1978}. As we will see in the next section, this lemma can
be thought of as establishing the ``asymptotic freeness''
of these random matrices. Thus by Lemma \ref{lem:key}, we have (almost surely)
\begin{equation}
\begin{split}
\lim_{N \to \infty} \tr_N (A^j)& = \sum_{l=0}^j \binom{j}{l} (-1)^{j-l} \lim_{N \to \infty} \tr_N ((A_1A_2)^{|2l-j|})\\
& = \left \{ \begin{split}
& (-1)^{j/2} \binom{j}{j/2} , \;\; \text{$j$ is even},\\
& 0, \;\; \text{otherwise}.
\end{split} \right.
\end{split}
\end{equation}
A similar estimate is also valid for the term $\tr_N (B^j).$ Therefore
\begin{equation}\label{eq:fact}
\lim_{N \to \infty} \tr_{N^2} (\mathcal{B}^k) = \sum_{j=0, \; \text{$j$ even}}^{k}\binom{k}{j} 4^{k-j} \binom{j}{j/2}^2 =: Q_k, \;\; \text{almost surely}.
\end{equation}
By Stirling's formula, we have $\lim_{k \to \infty} (Q_{2k})^{1/2k} =8.$
In other words, for any $\epsilon>0,$ we can choose $k \in \mathbb{N},$ such that
$(Q_{2k})^{1/2k} > 8- \epsilon.$
Since $( \tr_{N^2} \mathcal{B}^{k})^{1/k} \leq \|\mathcal{B}\|$ for all $k \geq 1,$ we have
\begin{equation}
\liminf_{N \to \infty} \|\mathcal{B}\| \geq (Q_{2k})^{1/2k} > 8- \epsilon, \;\; \text{almost surely.}
\end{equation}
On the other hand, due to Tsirelson's inequality~\cite{Tsirelson1980} we have $\|\mathcal{B}\| \leq 8$. Thus we complete our proof of Theorem \ref{thm:Main}.
\section{A free approach to the Tsirelson bound}
The random dichotomic observables do not satisfy the anti-commuting relations. In fact, random dichotomic observables are ``asymptotically'' freely independent, which
was first established by Voiculescu \cite{V1991} in the case of the Gaussian unitary ensemble
(GUE). That result builds a gorgeous bridge across two distinct mathematical
branches---random matrix theory and free probability. In free probability theory, we will
treat observables $A_i, B_j$ as elements of a $C^*$-algebra $\mathcal{A},$ equipped with
a unital (faithful) state $\phi$, where ``state'' means a positive linear map from $\mathcal{A}$
to $\mathbb{C}$, unital means $\phi(1\mkern -4mu{\rm l}) =1$ and faithful means
$\phi(AA^*)=0 \Rightarrow A =0.$ The pair $(\mathcal{A}, \phi)$ is called a
$C^*$-probability space, which is a quantum analogue of a classical probability space and we can call it a ``quantum'' probability space. For
example, $(\mathcal{M}_N, \tr_N)$ is a $C^*$-probability space, where $\mathcal{M}_N$ is the
set of $N \times N$ matrices. We refer to \cite{NS2006} for more details of quantum probability.
Lemma \ref{lem:key} inspires us to consider the following adaptation of definition of freeness to the case of dichotomic observables.
\begin{definition}\label{def:freeness}
For given $C^*$-probability space $(\mathcal{A}, \phi),$ dichotomic observables $A_i, i\in I$ are called freely independent, if
\begin{equation}
\phi (A_{i(1)}A_{i(2)}\cdots A_{i(k)}) =0
\end{equation}
whenever we have following:
\begin{enumerate}[{\rm (i)}]
\item $k$ is positive; $i(1), i(2), \ldots, i(k) \in I$;
\item $\phi(A_{i(j)}) =0$ for all $j = 1, \ldots, k$;
\item $i(1) \neq i(2), i(2) \neq i(3), \ldots, i(k-1) \neq i(k).$
\end{enumerate}
\end{definition}
For the special case $I= \{1,2\},$ the above conditions are equivalent to
\begin{equation}\label{eq:two-free}
\phi (A_1) = \phi(A_2) = \phi (A_1A_2) = \phi (A_2A_1) = \phi(A_1A_2A_1) = \phi(A_2A_1A_2) = \cdots = 0.
\end{equation}
However, finite-dimensional
observables cannot be freely independent.
In other words, for fixed $N,$ the $C^*$-probability space $(M_N, \tr_N)$ is too small to
talk about freeness, and Definition \ref{def:freeness} refers to an empty set.
Fortunately if we consider the observables in infinite dimensional Hilbert space, it is
possible for them to be freely independent in some $C^*$-probability space $(\mathcal{A},
\phi).$ Furthermore, the derivations in Section~\ref{sec-random} do not depend on
the dimension. In order to use an infinite dimensional
$C^*$-probability space $(\mathcal{A},\phi)$ instead of $(M_N, \tr_N),$ we need only update
Lemma \ref{lem:c-norm} with an appropriate formula, which is achieved by \eqref{eq:norm}
below. We conclude as follows.
\begin{theorem}\label{thm:main}
For the CHSH-Bell inequality, the Tsirelson bound can be obtained by using observables which are freely independent in their respective local system. More precisely, if $A_1, A_2$ and $B_1, B_2$ are freely independent in some $C^*$-probability space $(\mathcal{A}, \phi),$ then we have
$ \|B_{\text{CHSH}}\| = 2\sqrt{2}.$
\end{theorem}
This result is rather abstract, but in the next subsection, we will provide a concrete
example which satisfies the conditions in this theorem.
\subsection{A concrete example in infinite dimension}\label{sub-example}
For infinite-dimensional $C^*$-probability space, Definition \ref{def:freeness} is
meaningful. Now consider a group $G= \ast_n \mathbb{Z}_2$ and its associated Hilbert space
$\ell_2(G).$ This notation refers to the $n$-fold free product of $\mathbb{Z}_2$ with
itself; i.e.~the infinite group $G$ with the following elements: $g_{i_1}, g_{i_1}g_{i_2}, \ldots, g_{i_1}g_{i_2}\cdots g_{i_n}, i_1, \ldots, i_n =1, \ldots, n,$ where $g_1, \ldots, g_n$ are the generators of the group $G$ whose only
relations are $g_i^2=1$.
The set $\{|g\rangle: g\in G\}$ forms an orthonormal basis of $\ell_2(G),$
thus the dimension of $\ell_2(G)$ is infinite. Let $\lambda : G \rightarrow
B(\ell^2(G))$ be the left regular group representation, which is defined as:
\begin{equation}
\lambda(g) |h \rangle = |gh\rangle, \; \forall h \in G.
\end{equation}
The reduced $C^*$-algebra $C^*_{\text{red}}(G)$ is defined as the norm closure of the linear span
$\{\lambda(g), g\in G\},$ where the norm is the operator norm of $B(\ell_2(G)).$ There is
a faithful trace state $\phi$ on $C^*_{\text{red}}(G)$ defined as
\begin{equation}\label{eq:state}
\phi\Big(\sum_g \alpha_g \lambda(g)\Big) := \alpha_e.
\end{equation}
Obviously $\phi(1\mkern -4mu{\rm l}) =1.$ Hence $(C^*_{\text{red}}(G),\phi)$ is a $C^*$-probability space. If
$g_i$ is the generator of the $i$-th copy of $\ast_n \mathbb{Z}_2, i=1,2,\ldots, n,$ then define
\begin{equation}
A_i = \lambda(g_i), \;\; i=1, \ldots, n.
\end{equation}
It is easy to check $A_i, i=1,\ldots, n$ are self-adjoint unitaries and freely independent
in $(C^*_{\text{red}}(G), \phi)$. We will choose the local Hilbert spaces of Alice and Bob to
be $\ell_2(G),$ where $n=2.$ By using those free observables, we can obtain the quantum
value $2\sqrt{2}$ for CHSH-Bell inequality.
Note that conjugating by a unitary preserves
freeness of observables, i.e., if $A_1, A_2$ are freely independent, then $U A_1 U^\dagger,
U A_2 U^\dagger$ are still freely independent for any unitary $U$. Since the norm of Bell
operator does not change under the local unitary operation, we can simply assume $A_1=B_1
= \lambda(g_1), A_2 = B_2 = \lambda(g_2).$
\subsection{Truncated free observables in finite dimension}\label{sub-free-for-poor}
In order to see how the freeness behaves in a simple and direct way, we will truncate the
free observables given by last subsection to finite dimension. Denote the elements in
$\ell_2(\ast_2 \mathbb{Z}_2)$ as follows:
\begin{equation}
\begin{array}{ccccccccc} \cdots & \left|g_2g_1g_2\right\rangle & \left|g_2g_1\right\rangle & \left|g_2\right\rangle & \left|e\right\rangle & \left|g_1\right\rangle & \left|g_1g_2\right\rangle & \left|g_1g_2g_1\right\rangle & \cdots \\ \updownarrow & \updownarrow & \updownarrow & \updownarrow & \updownarrow & \updownarrow & \updownarrow & \updownarrow & \updownarrow \\ \cdots & \left|-3\right\rangle & \left|-2\right\rangle & \left|-1\right\rangle & \left|0\right\rangle & \left|1\right\rangle & \left|2\right\rangle & \left|3\right\rangle & \cdots \end{array}.
\end{equation}
With the above notation, we have
\begin{equation}
\left \{ \begin{split}
& \lambda(g_1)|i\rangle = |j\rangle, \; i+j=1,\\
& \lambda(g_2)|i\rangle = |j\rangle, \; i+j=-1,
\end{split} \right.
\end{equation}
where $i, j = \cdots, -1, 0, 1, \cdots$.
Now define $A_1^{(N)}$ and $A_2^{(N)}$ to be the truncations of the free observables to dimension $N = 2l+1$ (i.e., we truncate the operators $\lambda(g_1), \lambda(g_2)$ to operators acting on an $N$-dimensional Hilbert space). Then we have (see Figure 1):
\begin{subequations}\label{eq:cut1}\begin{align}
A_1^{(N)}|i\rangle &= |1-i\rangle, & i=-l+1, \ldots, l\\
A_1^{(N)}|-l\rangle &= |-l\rangle, &
\end{align}
\end{subequations}
and
\begin{subequations}\label{eq:cut2}\begin{align}
A_2^{(N)}|i\rangle &= |-1-i\rangle, & i=-l, \ldots, l-1\\
A_2^{(N)}|l\rangle &= |l\rangle,
\end{align}
\end{subequations}
where $|i \rangle, i= -l, \ldots, l$ denotes the basis of the $N$-dimensional Hilbert space.
\begin{figure}
\caption{The solid line stands for $A_1^{(N)}$.}% NOTE(review): caption text appears truncated in the source; verify the intended full caption.
\end{figure}
It is clear that $A_1^{(N)}$ and $A_2^{(N)}$ are self-adjoint unitaries. Thus they can be
treated as a pair of dichotomic observables in an $N$-dimensional Hilbert space. Denote
$S= A_2^{(N)} \circ A_1^{(N)},$ so that
\begin{subequations}\label{eq:cut3}\begin{align}
S |j\rangle &= |j-2\rangle, & j=-l+2,\ldots, l\\
S |-l+1\rangle& = |l\rangle,\\
S |-l\rangle& = |l-1\rangle.
\end{align}\end{subequations}
By the following diagram it is easy to see that $S$ is a cycle in the permutation group $S_{N}.$
\begin{equation}
\begin{array}{ccccccccccc}
|l\rangle & \longrightarrow & |l-2\rangle & \longrightarrow & |l-4\rangle & \longrightarrow & \cdots & \longrightarrow & |-l+2\rangle & \longrightarrow & |-l\rangle \\
\nwarrow &&&&&&&&&& \swarrow \\
& |-l+1\rangle & \longleftarrow & |-l+3\rangle & \longleftarrow & \cdots & \longleftarrow & |l-3\rangle & \longleftarrow & |l-1\rangle. &
\end{array}
\end{equation}
Now for the CHSH-Bell operator $B_{\text{CHSH}},$ by using those truncated free observables, we can show that the quantum value tends to $2\sqrt{2}$ as $N \rightarrow \infty.$ Indeed, using the fact that the eigenvalues of $S$ are $\lambda_j = e^{2 \pi i j/N}, j=0, \ldots, N-1,$ we have
\begin{equation}
\begin{split}
\|B_{\text{CHSH}}^2\| & = \left\| 4 1\mkern -4mu{\rm l} - [A_1^{(N)}, A_2^{(N)}] \otimes [A_1^{(N)}, A_2^{(N)}]\right\|\\
& = \left\| 4 1\mkern -4mu{\rm l} - (S^\dagger - S) \otimes (S^\dagger -S) \right\| \\
& = \max_j \{ 4 + 4 (\Im(\lambda_j))^2 \} = \max_j \left\{ 4+ 4 \sin^2{\frac{2\pi j}{N}} \right\}\\
& \thickapprox 4 + 4 \left(1- O\left(\frac{1}{N^2}\right)\right) = 8 - O\left( \frac{1}{N^2} \right).
\end{split}
\end{equation}
Here for simplicity, we have assumed that Alice and Bob take the same measurements. Therefore, we have the following proposition:
\begin{proposition}\label{prop:cut}
By using truncated free observables $A_1^{(N)}, A_2^{(N)}, N= 2l+1,$ we can asymptotically attain the Tsirelson bound for the CHSH-Bell inequality, i.e.,
$\|B_{\text{CHSH}}\| = 2\sqrt{2} - O(1/N^2).$
\end{proposition}
This result suggests the speed of the convergence mentioned in Theorem \ref{thm:Main}, namely, the Tsirelson bound will be saturated with the speed of $O(1/N)$ by using the random observables. However, the rigorous proof would need very careful and subtle analysis of Weingarten calculus.
\section{Fine-grained uncertainty relations for random (free) observables}
The uncertainty principle and non-locality are two fundamental and intrinsic concepts of
quantum theory which were quantitatively linked by J. Oppenheim and S. Wehner's work
\cite{OW2010}. There they introduced a notion called ``fine-grained uncertainty relations''
to quantify the ``amount of uncertainty'' in a particular physical theory. Suppose we have $n$ dichotomic observables $A_i, i=1,\ldots,n,$
corresponding to measurement settings $P_i^a = \frac{1\mkern -4mu{\rm l} + (-1)^a A_i}{2}, i=1,\ldots,n;
a=0,1.$ The uncertainty of measurement settings $P_i^0, i=1, \ldots, n$ is defined as:
\begin{equation}
\xi_{\vec{0}} = \sup_{\rho} \left \{ \frac{1}{n} \sum_{i=1}^n \Tr (P_i^0 \rho)\right\} = \frac{1}{2} + \frac{1}{2n} \sup_{\rho} \Tr \left( \sum_{i=1}^n A_i \rho \right).
\end{equation}
Similarly, the uncertainty of $P_i^1, i=1,\ldots,n$ is
\begin{equation}
\xi_{\vec{1}} = \sup_{\rho} \left \{\frac{1}{n} \sum_{i=1}^n \Tr (P_i^1 \rho)\right\} = \frac{1}{2} - \frac{1}{2n} \sup_{\rho} \Tr \left( \sum_{i=1}^n A_i \rho \right).
\end{equation}
Notice that
\begin{equation}
\sup_{\rho} \left| \Tr \left( \sum_{i=1}^n A_i \rho \right) \right| = \left\| \sum_{i=1}^n A_i \right\|.
\end{equation}
Hence $\xi_{\vec{0}}= \xi_{\vec{1}} = \frac{1}{2} + \frac{1}{2n} \left\| \sum_{i=1}^n A_i \right\|.$ The state $\rho$ which attains $\xi_{\vec{x}}$ is called the maximally certain state for those measurement settings. If we assume $A_i$ are freely independent observables, then we have the following proposition (see Appendix C and D for the proof):
\begin{proposition}\label{prop:uncertainty-free}
The fine-grained uncertainty for free observables $A_i, i=1, \ldots, n, n>4$ is
\begin{equation}
\xi_{\vec{0}}= \xi_{\vec{1}} \leq \frac{1}{2} + \frac{1}{\sqrt{n}}< 1.
\end{equation}
The same results approximately hold for random observables $A_i = U_i D U_i^\dagger,
i=1,\ldots, n, n>4$ with high probability.
\end{proposition}
For the special case $n=2,$ we have $\|A_1 + A_2 \| =2.$
Thus for $n=2,$ $\xi_{\vec{0}}= \xi_{\vec{1}} =1$ (see Appendix D for random observables and Appendix E for free observables). Interestingly, for truncated free observables we have
\begin{equation}
\begin{split}
\|A_1^{(N)} + A_2^{(N)}\|^2 & = \left\| 2\, 1\mkern -4mu{\rm l} + S + S^\dagger \right\| = \max_j \{2 + 2 \Re(\lambda_j)\} \\
& = \max_j \left\{2 + 2 \cos{\frac{2\pi j}{N}} \right\} = 4.
\end{split}
\end{equation}
Thus for the truncated free observables, we always have $\xi_{\vec{0}} =\xi_{\vec{1}} =1$ regardless of what dimension we truncate to.
In a recent work, some of us show that there is a tight relationship between fine-grained
uncertainty and violation of one specific steering inequality, called the linear steering
inequality, which was first used in \cite{SJWP2010} to verify steering by experiment. It
has the following form:
\begin{equation}\label{eq:steering-ineq}
S_n = \sum_{i=1}^n \langle \alpha_i A_i \rangle \leq C_n,
\end{equation}
where $C_n$ is called the {\it local hidden state} bound of $S_n.$ This bound can be calculated easily as follows \cite{SJWP2010}:
\begin{equation}\label{eq:LHS-bound}
C_n = \sup_{\alpha_i= \pm 1} \left\| \sum_{i=1}^n \alpha_i A_i \right\|.
\end{equation}
If the observables $A_i$ are chosen to be operators of a Clifford algebra, which are
anti-commutative, a large (unbounded) violation can be obtained \cite{MRYHH2015}. Because
the degree of the fine-grained uncertainty of free or random observables is the same
order as that of anti-commuting observables, we find:
\begin{corollary}
If $A_i, i=1, \ldots, n$ are chosen to be free observables, then the local hidden state bound of the steering inequality $S_n = \sum_{i=1}^n \langle \alpha_i A_i \rangle \leq C_n$ is upper bounded by $2\sqrt{n}.$ A similar result holds for random observables with high probability.
\end{corollary}
Here we note that for the free case, we should also care about the quantum values of
steering inequalities. Due to M. Navascu\'{e}s and D. P\'{e}rez-Garc\'{i}a's work, there
are two different ways to define them~\cite{NG2012}. One is in a commuting way that means
the system described by a total Hilbert space, and the other one is the total system
described in a tensor form. As a matter of fact, they also used the free observables
$\lambda(g_i), i=1, \ldots, n$ to define the linear steering inequality. They showed the
quantum value defined in the commuting sense is $n,$ while in the tensor scenario is upper
bounded by $2\sqrt{n-1}.$ So by their work, we can easily see that the local hidden state
bound is upper bounded by $2\sqrt{n-1}$ for free observables. Their bound is even sharper
than ours. However, we have provided another proof which is more focussed on the freeness
property and is applicable to random observables.
\section{Conclusions}
In this paper, we show that random dichotomic observables generically achieve near-maximal violation
of the CHSH-Bell inequality, approaching the Tsirelson bound in the limit of large
dimension.
This is despite the fact that these observables are not anti-commuting.
Instead, due to Voiculescu's theory, they are asymptotically freely independent. This means that
as the dimension increases, their behavior tends to that of free observables in
some quantum probability space. However, the quantum state that is optimal for the random observables is random as well, as it in general will depend on the observables. For a fixed state, random observables might not lead to any violation. Another main result of this paper is that we have considered the fine-grained uncertainty of a sequence of free or random observables. The
degree of their uncertainty is of the same order as the one which is given by the
anti-commuting observables. As a byproduct of this result, we can construct a linear
steering inequality with large violation by using free or random observables. For further
applications, free observables may be used for studying the quantum value of other type of
Bell inequalities. Thus a natural question arises: {\it Do free observables always
maximally violate any Bell inequalities?} Unfortunately, a quick answer is that we can
consider the linear Bell operator $ \sum_{i=1}^n A_i\otimes A_i.$ It is trivial since
its quantum and classical values are both $n,$ while the quantum value given by free
observables is upper bounded by $2\sqrt{n}$. However, it seems promising when considering
other specific Bell inequalities. Since the free observables and their truncated ones are
deterministic (constructive), another possible application is that this may be a new
source of constructive examples of Bell inequality violations where previously only random
ones were known.
{\it Acknowledgments}---We would like to thank Pawe{\l} Horodecki, Marek Bozejko, Mikael de la Salle, Yanqi Qiu and Junghee Ryu for valuable discussions. We also would like to thank the anonymous referee for her/his useful comments.
This work is supported by ERC AdG QOLAPS, EU grant RAQUEL and the Foundation for
Polish Science TEAM project cofinanced by the EU European Regional Development Fund. Z. Yin was
partly supported by NSFC under Grant No.11301401. A. Rutkowski was supported by a postdoc internship decision number DEC\textendash{} 2012/04/S/ST2/00002 and grant No. 2014/14/M/ST2/00818 from the Polish National Science Center. M. Marciniak was supported by EU project BRISQ2. Harrow was funded by NSF grants CCF-1111382 and CCF-1452616, ARO contract
W911NF-12-1-0486 and a Leverhulme Trust Visiting Professorship VP2-2013-041.
\begin{thebibliography}{1}
\bibitem{EPR} A. Einstein, B. Podolsky and N. Rosen, Phys. Rev. \textbf{47}, 777, (1935).
\bibitem{Bell} J. S. Bell, Physics, \textbf{1}, 195 (1964).
\bibitem{CHSH1969} J. F. Clauser, M. A. Horne, A. Shimony and R. A. Holt,
Phys. Rev. Lett. \textbf{23}, 880 (1969).
\bibitem{BCPSW2014} N. Brunner, D. Cavalcanti, S. Pironio, V. Scarani, S. Wehner, Rev. Mod. Phys. \textbf{86}, 419 (2014).
\bibitem{Tsirelson1980} B. S. Tsirelson, Lett. Math. Phys. \textbf{4}, 93 (1980).
\bibitem{GP1992} N. Gisin and A. Peres, Phys. Lett. A \textbf{162}, 15-17 (1992).
\bibitem{BMR1992}S. L. Braunstein, A. Mann, and M. Revzen, Phys. Rev. Lett. \textbf{68}, 3259 (1992).
\bibitem{CPHZ2002} Z. B. Chen, J. W. Pan, G. Hou, and Y. D. Zhang,
Phys. Rev. Lett. \textbf{88}, 040406 (2002).
\bibitem{LHBR2010}Y. C. Liang, N. Harrigan, S. D. Bartlett, and T. Rudolph,
Phys. Rev. Lett. \textbf{104}, 050401 (2010).
\bibitem{MYS2012}
M. McKague, T. H. Yang, V. Scarani. J. Phys. A: Math. Theor. 45 455304 (2012)
\bibitem{VDN1992}D. Voiculescu, K. J. Dykema, and A. Nica,
{\it Free Random Variables,} CRM Monograph Series 1, AMS (1992).
\bibitem{NS2006} A. Nica and R. Speicher,
{\it Lectures on the Combinatorics of Free Probability}.
\bibitem{OW2010}J. Oppenheim and S. Wehner,
Science. \textbf{330}, 1072 (2010).
\bibitem{RGMH2015} R. Ramanathan, D. Goyeneche, P. Mironowicz, and P. Horodecki,
Arxiv 1506.05100 (2015).
\bibitem{MRYHH2015}M. Marciniak, A. Rutkowski, Z. Yin, M. Horodecki, and R. Horodecki,
Phys. Rev. Lett. \textbf{115}, 170401 (2015).
\bibitem{Collins2002} B. Collins,
Int. Math. Res. Not. \textbf{17}, 953-982 (2002).
\bibitem{CS2004} B. Collins and P. \'{S}niady,
Comm. Math. Phys. \textbf{264} (3), 773-795 (2004).
\bibitem{Weingarten1978} D. Weingarten, J. Math. Phys. \textbf{19}, 999 (1978).
\bibitem{V1991}D. Voiculescu, Invent. Math. \textbf{104}, 201-220 (1991).
\bibitem{SJWP2010} D. J. Saunders, S. J. Jones, H. M. Wiseman and G. J. Pryde,
Nature. Phys. \textbf{6}, 845 (2010).
\bibitem{NG2012}M. Navascu\'{e}s and D. P\'{e}rez-Garc\'{i}a,
Phys. Rev. Lett. \textbf{109}, 160405 (2012).
\bibitem{BM2013}B. Collins and C. Male, To appear in Annales Scientifique de l'ENS. ArXiv:1105.4345 (2013).
\bibitem{BO2008} N. Brown and N. Ozawa,
{\it Graduate Studies in Mathematics}, vol. \textbf{88}, AMS (2008).
\bibitem{EKB2013} M. Epping, H. Kampermann, and D. Bru{\ss},
Phys. Rev. Lett. \textbf{111}, 240404 (2013).
\bibitem{WW2001} R. F. Werner and M. M. Wolf,
Phys. Rev. A. \textbf{64}, 032112 (2001).
\bibitem{ZB2002} M. Zukowski and C. Brukner,
Phys. Rev. Lett. \textbf{88}, 210401 (2002).
\end{thebibliography}
\section*{Appendix}
\subsection{$C^*$-probability space and freely independent}
\begin{definition}
A $\ast$-probability space $(\mathcal{A}, \phi)$ consists of a unital $\ast$-algebra $\mathcal{A}$ over $\mathbb{C}$ and a unital positive linear functional
\begin{equation}
\phi: \mathcal{A} \rightarrow \mathbb{C} ; \;\;\; \phi(1_{\mathcal{A}}) = 1.
\end{equation}
The elements $a \in \mathcal{A}$ are called non-commutative random variables
in $(\mathcal{A}, \phi).$ A $C^*$-probability space is a $\ast$-probability space
$(\mathcal{A}, \phi)$ where $\mathcal{A}$ is a unital $C^*$-algebra.
\end{definition}
If additionally we assume $\phi$ is faithful, we have for any $a \in \mathcal{A},$
\begin{equation}\label{eq:norm}
\|a\| = \lim_{k\rightarrow \infty} \left( \phi \left((a^* a)^k \right)\right)^{\frac{1}{2k}}.
\end{equation}
\begin{definition}\cite{NS2006,BM2013}
For given $C^*$-probability space $(\mathcal{A}, \phi),$ let $\mathcal{A}_1, \ldots, \mathcal{A}_n$ be $\ast$-subalgebras of $\mathcal{A}$. They are said to be free if for all $a_i \in \mathcal{A}_{j(i)}, i = 1,\ldots, n,\; j(i) \in \{1, \ldots, n\}$ such that $\phi(a_i) = 0$, one has
\begin{equation}
\phi (a_1a_2\cdots a_n) =0
\end{equation}
whenever $j(1) \neq j(2), j(2) \neq j(3), \ldots, j(n-1) \neq j(n).$ A sequence of random variables are said to be free if
the unital subalgebras they generate are free.
\end{definition}
\subsection{Proofs for Lemma \ref{lem:key}}
Lemma \ref{lem:key} is a direct corollary of the work of B. Collins \cite{Collins2002}. A random variable $u \in (\mathcal{A}, \phi)$ is called a Haar unitary when it is unitary and
\begin{equation}\label{eq:Haar}
\phi (u^j) = \left \{ \begin{split}
& 1 , \;\; j=0,\\
& 0, \;\; \text{otherwise}.
\end{split} \right.
\end{equation}
Since we have
\begin{equation}
\lim_{N \to \infty} \tr (D^j)= \left \{ \begin{split}
& 1 , \;\; \text{$j$ is even},\\
& 0, \;\; \text{$j$ is odd},
\end{split} \right.
\end{equation}
there exist a $C^*$-probability space $(\mathcal{A}, \phi)$ and a random variable $d \in \mathcal{A},$ such that
\begin{equation}\label{eq:D}
\lim_{N \to \infty} \tr (D^j) = \phi (d^j), \;\; \text{for all}\; j\geq 0.
\end{equation}
Let $u_1, \ldots, u_n$ be a sequence of Haar unitaries in $(\mathcal{A}, \phi)$ which are freely independent together with $d$. We will give a concrete example of $u_1, \ldots, u_n, d$ at the end of this subsection. Let $E (\cdot)= \int \cdot \; d\mu,$ where $d\mu$ is the Haar measure on $U(N),$ then by \cite[Theorem 3.1]{Collins2002}, we have the following:
\begin{equation}
\begin{split}
\lim_{N \to \infty} E \; \tr (A_{i(1)}A_{i(2)} \cdots A_{i(k)}) &= \phi (u_{i(1)}d u_{i(1)}^* \cdots u_{i(k)}d u_{i(k)}^*)\\
& = 0,
\end{split}
\end{equation}
where the second equation comes from the freeness of $d, u_1, \ldots, u_n.$ Moreover, by \cite[Theorem 3.5]{Collins2002},
\begin{equation}
P \left( \left| \tr (A_{i(1)}A_{i(2)} \cdots A_{i(k)}) \right| \geq \epsilon \right) = O(N^{-2}).
\end{equation}
Then by the Borel-Cantelli Lemma, for any $\epsilon>0,$
\begin{equation}
\limsup_{N \to \infty} \left| \tr (A_{i(1)}A_{i(2)} \cdots A_{i(k)}) \right| \leq \epsilon, \;\; \text{almost surely}.
\end{equation}
Hence
\begin{equation}
\lim_{N \to \infty} \tr (A_{i(1)}A_{i(2)} \cdots A_{i(k)}) =0, \;\; \text{almost surely}.
\end{equation}
{\bf A concrete example of $u_1, \ldots, u_n$ and $d$.}
Let $G= \ast_{2n+1} \mathbb{Z}_2$ and $g_i, i=1, \ldots, 2n+1$ be the generator of the
$i$-th copy. Let $u_i= \lambda(g_{2i-1} g_{2i}), i=1,\ldots,n$ and $d=\lambda(g_{2n+1}).$
Then the $C^*$-probability we consider is $(C^*_{\text{red}}(G), \phi)$ which was defined
in Subsection \ref{sub-example}. It is easy to check that equations \eqref{eq:Haar} and \eqref{eq:D} hold. Thus $u_i, i=1, \ldots, n$ are Haar unitaries in $(C^*_{\text{red}}(G), \phi).$ Moreover $u_1, \ldots, u_n, d$ are freely independent in $(C^*_{\text{red}}(G), \phi).$
\subsection{Proof of Proposition \ref{prop:uncertainty-free}}
Suppose the dichotomic observables $A_i, i=1,\ldots, n$ are freely independent in some $C^*$-probability space $(\mathcal{A}, \phi)$. Then by equation \eqref{eq:norm},
\begin{equation}\label{eq:free-LHS-bound}
\begin{split}
\left\| \sum_{i=1}^n A_i \right\| &= \lim_{k\rightarrow \infty} \left(\phi \left( \sum_{i=1}^n A_i \right)^{2k} \right)^{\frac{1}{2k}}\\
& = \lim_{k\rightarrow \infty} \left(\phi \left( \sum_{i(1), \ldots, i(2k)=1}^n A_{i(1)} \cdots A_{i(2k)} \right)\right)^{\frac{1}{2k}}.
\end{split}
\end{equation}
To estimate the above equation we need the following definitions and facts from
combinatorics~\cite{NS2006}. For a given set $\{1, \ldots, 2k\},$ there is a partition $\pi
= \{V_1, \ldots , V_s\}$ of this set. $\pi$ is determined as follows: Two numbers $p$ and
$q$ belong to the same block $V_k$ of $\pi$ if and only if $i(p) = i(q).$ There is a
particular partition called pair partition, in which every block only contains two
elements. A pair partition of $\{1, \ldots , 2k\}$ is called non-crossing if there does
not exist $1\leq p_1 < q_1 < p_2 < q_2 \leq 2k$ such that $p_1$ is paired with $p_2$ and
$q_1$ is paired with $q_2.$ The number of non-crossing pair partitions of the set $\{1,
\ldots , 2k\}$ is given by the Catalan number $C_k= \frac{1}{k+1}\binom{2k}{k}$.
Now for the indices $i(1), \ldots, i(2k),$ if there exists a pair of adjacent indices belonging to the same block, e.g. $i(s-1) = i(s),$ then we shrink the indices $i(1), \ldots, i(2k)$ to $i(1), \ldots, i(s-2), \emptyset, i(s+1), \ldots, i(2k),$ since obviously $A_{i(s-1)} A_{i(s)} = 1\mkern -4mu{\rm l}.$ According to this rule, we can shrink $\pi$ to a new partition $\tilde{\pi}$ on $\{1, \ldots, 2t\},$ where $t \leq k.$ Hence we can divide $\pi$ into two groups:
{\it Case 1.} $\tilde{\pi}= \emptyset$.
{\it Case $2.$} The indices in $\tilde{\pi}$ satisfy condition (iii) in Definition \ref{def:freeness}, i.e., the adjacent indices are not equal.
We decompose $ \phi \left( \sum_{i=1}^n A_i \right)^{2k}$ into two terms:
\begin{equation}
\phi \left( \sum_{i=1}^n A_i \right)^{2k} = \phi \sum_{\pi \in \Pi_1} \cdot + \phi \sum_{\pi \in \Pi_2} \cdot =: II_1 + II_2,
\end{equation}
where the set of partitions $\Pi_1$ and $\Pi_2$ is defined as follows: Partition $\pi \in \Pi_1$ if and only if $\pi$ belongs to Case 1. And $\pi \in \Pi_2$ if and only if $\pi$ belongs to Case $2$.
By our assumption, i.e., freeness of $A_i,$ $II_2 =0.$ For the term $II_1,$ it is easy to see that $II_1$ is equal to the cardinality of the set $\Pi_1.$ Due to the shrinking process, $\pi \in \Pi_1$ only if every block contains an even number of elements. Those partitions with an even number of elements in every block can be realized by the following process: first choose an arbitrary non-crossing pair partition, then combine some proper blocks into one block. Hence the number of $\pi \in \Pi_1$ is upper bounded by $C_k n^k.$ Thus
\begin{equation}
\phi \left( \sum_{i=1}^n A_i \right)^{2k} \leq C_k n^k.
\end{equation}
Therefore under our assumption,
\begin{equation}
\left\| \sum_{i=1}^n A_i \right\| = \lim_{k \rightarrow \infty} \left( \phi \left( \sum_{i=1}^n A_i \right)^{2k} \right)^{\frac{1}{2k}} \leq 2 \sqrt{n}.
\end{equation}
{\it Note}: For the local hidden state bound $C_n$ of the steering inequality $S_n$ in equation \eqref{eq:LHS-bound}, the variables $\alpha_i$ do not affect the derivation. Thus $C_n$ is also upper bounded by $2\sqrt{n}.$
\subsection{Fine-grained uncertainty for random observables}\label{sub:random-steering}
In fact, the statement is a corollary of the work of B. Collins and C. Male \cite{BM2013}. Here we restate their result as follows:
Let $A_i = U_i D U_i^\dagger,$ then there exist $C^*$-probability space $(\mathcal{A}, \phi)$ and Haar unitaries $u_1, \ldots, u_n$ which are freely independent of element $d\in \mathcal{A},$ such that
\begin{equation}
\lim_{N \to \infty} \left\| \sum_{i=1}^n A_i \right\| = \left\| \sum_{i=1}^n u_i d u_i^* \right\|, \;\; \text{almost surely}.
\end{equation}
Denote $a_i = u_i d u_i^*;$ it is easy to see that $a_1, \ldots, a_n$ are freely independent in $(\mathcal{A}, \phi).$ Hence by a similar argument to that in Appendix C, we have $\left\| \sum_{i=1}^n u_i d u_i^* \right\| \leq 2 \sqrt{n}.$ Therefore we have the following corollary:
\begin{corollary}
Let $A_i = U_i D U_i^\dagger, i=1,\ldots, n,$ where $U_i$ are independent Haar-random unitaries in $U (N).$ Then we have
\begin{equation}
\lim_{N \to \infty} \left\| \sum_{i=1}^n A_i \right\| \leq 2 \sqrt{n}, \;\; \text{almost surely.}
\end{equation}
\end{corollary}
For the special case $n=2,$ we have the following corollary:
\begin{corollary}
Let $A_i = U_i D U_i^\dagger, i=1,2,$ where $U_i$ are independent Haar-random unitaries in $U (N).$ Then we have
\begin{equation}
\lim_{N \to \infty} \left\|\sum_{i=1}^2 A_i \right\| = 2, \;\; \text{almost surely}.
\end{equation}
\end{corollary}
{\it Proof.}
For all $k \in \mathbb{N}_0,$ almost surely we have
\begin{equation}
\begin{split}
\lim_{N \to \infty} \tr \left( A_1 + A_2 \right)^{2k} & = \sum_{j=0}^k \binom{k}{j} 2^{k-j} \sum_{l=0}^j \binom{j}{l} \lim_{N \to \infty} \tr \left( A_1 A_2\right)^{|2l-j|}\\
& = \sum_{j=0, \; \text{even}}^k \binom{k}{j} 2^{k-j} \binom{j}{j/2}.
\end{split}
\end{equation}
Since $\lim_{k \to \infty} \left( \sum_{j=0, \; \text{even}}^k \binom{k}{j} 2^{k-j} \binom{j}{j/2} \right)^{\frac{1}{2k}} =2,$
by the standard argument used above, for any $\epsilon > 0$ we have
\begin{equation}
\liminf_{N \to \infty} \Big\|\sum_{i=1}^2 A_i \Big\| \geq 2 -\epsilon, \; \text{almost surely}.
\end{equation}
On the other hand, $\Big\|\sum_{i=1}^2 A_i \Big\| \leq 2$ is obvious.
\subsection{Maximally certain states for $\xi_{\vec{0}}$ and $\xi_{\vec{1}}$ in the case $n=2$}\label{sub:uncertainty}
Let $A_1= \lambda(g_1), A_2 = \lambda(g_2),$ where $g_1, g_2$ are generator of group $\ast_2 \mathbb{Z}_2.$ We need following notions.
\begin{definition}
A group G is amenable if there exists a state $\mu$ on $\ell_\infty(G)$ which is invariant
under the left translation action: i.e. for all $s\in G$ and $f \in \ell_\infty (G), \mu(s \cdot f ) = \mu (f).$
\end{definition}
\begin{definition}
Let G be a group, a F{\o}lner net (sequence) is a net of non-empty finite subsets $F_n \subset G$ such that $|F_n \cap g F_n|/|F_n| \to 1$ for all $g\in G.$ Where $g F_n$ denotes the subset $\{g h: h\in F_n\}.$
\end{definition}
For any $g \in G,$ there exists $N,$ such that for all $n \geq N,$ $g \in F_n.$ There are many characterizations of amenable groups.
\begin{proposition}\cite{BO2008}
Let G be a discrete group. The following are equivalent:
\begin{enumerate}[{\rm i)}]
\item G is amenable;
\item G has a F{\o}lner net (sequence);
\item For any finite subset $E\subset G$, we have $\frac{1}{|E|} \left\|\sum_{g\in E} \lambda(g) \right\| =1.$
\end{enumerate}
\end{proposition}
For instance, group $\ast_2\mathbb{Z}_2$ is amenable. Hence by above proposition, $\|\lambda(g_1)+ \lambda(g_2)\|=2.$ With above notions, we can formally define a state
\begin{equation}
\rho_n = \frac{1}{|F_n|} \sum_{g,h \in F_n} |g \rangle \langle h|,
\end{equation}
where $F_n$ is a F{\o}lner sequence of $G= \ast_2 \mathbb{Z}_2.$ Now we have:
\begin{equation}
\begin{split}
\lim_{n \rightarrow \infty} \Tr ((\lambda(g_1)+ \lambda(g_2)) \rho_n) & = \lim_{n \to \infty} \frac{1}{|F_n|} \left( \sum_{g,h \in F_n} \langle h| g_1 g\rangle + \sum_{g,h \in F_n} \langle h| g_2 g \rangle \right)\\
& = \lim_{n\to \infty} \frac{1}{|F_n|} (|F_n \cap g_1 F_n|+ |F_n \cap g_2 F_n|) =2,
\end{split}
\end{equation}
where for the second equation we have used the property of F{\o}lner sequence. Thus in this approximate sense, the fine-grained uncertainty of
$A_1^0$ and $A_2^0$ is 1. Technically we can construct $\tilde{\rho}_n$ to approximate $\xi_{\vec{1}}.$ Firstly we define two subsets of $G = \ast_2{\mathbb{Z}_2}:$
\begin{equation}
G_1 = \{g_1, g_1g_2, g_1g_2g_1, \cdots\} \;\; \text{and} \;\; G_2 = \{g_2, g_2g_1, g_2g_1g_2, \cdots\}.
\end{equation}
In fact, $G_1$ (resp. $G_2$) is the subset of words that begin with $g_1$ (resp. $g_2$). It is easy to see $G_1 \cup G_2 \cup \{e\} = G.$ Now we define a state:
\begin{equation}
|\tilde{\phi}_n \rangle = \frac{1}{\sqrt{|F_n|}} \sum_{g \in F_n} e^{i \theta_g} |g\rangle,
\end{equation}
where $F_n$ is still a F{\o}lner sequence of $G$ and
\begin{equation}\label{eq:theta}
\theta_g = \left \{ \begin{split}
& \pi/2 & \quad \quad g \in G_1,\\
& -\pi/2 & \quad \quad g \in G_2,\\
& 0 & \quad \quad g=e.
\end{split} \right.\end{equation}
Let $\tilde{\rho}_n = |\tilde{\phi}_n\rangle\langle\tilde{\phi}_n|,$ then we have
\begin{equation}\label{eq:g}
\begin{split}
\lim_{n \to \infty} \Tr ((\lambda(g_1) + \lambda(g_2))\tilde{\rho}_n) & = \lim_{n \to \infty} \frac{1}{|F_n|} \sum_{g,h \in F_n} e^{i (\theta_g-\theta_h)} \left( \langle h| g_1 g\rangle + \langle h| g_2 g \rangle \right)\\
& = \lim_{n \to \infty} \frac{1}{|F_n|} \left( \sum_{g \in F_n \cap g_1 F_n} e^{i (\theta_g-\theta_{g_1g})} + \sum_{g \in F_n \cap g_2 F_n} e^{i (\theta_g-\theta_{g_2g})} \right).
\end{split}
\end{equation}
For the first term of the right-hand side, note that for large enough $n$ we have $e, g_1 \in F_n.$ Therefore $e, g_1 \in F_n \cap g_1 F_n$ for large enough $n.$ Then we have
\begin{equation}
\begin{split}
\frac{1}{|F_n|} \sum_{g \in F_n \cap g_1 F_n} e^{i (\theta_g-\theta_{g_1g})} & = \frac{1}{|F_n|} \sum_{g \in F_n \cap g_1 F_n, g\neq \{e, g_1\}} e^{i (\theta_g-\theta_{g_1g})}+ \frac{1}{|F_n|} e^{i (\theta_e-\theta_{g_1e})}+ \frac{1}{|F_n|} e^{i (\theta_{g_1}-\theta_{g_1g_1})}\\
& = \frac{1}{|F_n|} \sum_{g \in F_n \cap g_1 F_n, g\neq \{e, g_1\}} e^{i \pi} = -\frac{|F_n \cap g_1 F_n|-2}{|F_n|},
\end{split}
\end{equation}
where for the second equation we have used \eqref{eq:theta}. A similar argument is valid for the second term of right hand side of \eqref{eq:g}. Thus finally we have
\begin{equation}
\lim_{n \to \infty} \Tr ((\lambda(g_1) + \lambda(g_2))\tilde{\rho}_n) = -2.
\end{equation}
\subsection{Quantum value of complex CHSH-Bell inequality}
In this appendix we will consider a Bell inequality which has a similar form to the CHSH-Bell inequality. The Bell operator is defined as follows:
\begin{equation}
\mathcal{B} = A_1 \otimes B_1 + A_1 \otimes B_2 + A_2 \otimes B_1 + \omega A_2 \otimes B_2,
\end{equation}
where $\omega = e^{\frac{2\pi i}{3}}.$ Here the observables are not dichotomic. Instead, there are three possible outcomes: $1, \omega, \omega^2.$ Thus $A_i, B_j$ are required to be unitaries and satisfy $A_i^3 = B_j^3 = 1\mkern -4mu{\rm l}$ for any $i,j =1,2.$ The classical value of this Bell functional is $\sqrt{7}.$
Now for the quantum value, we can assume $A_1 = B_1, A_2 = B_2$ and $A_1, A_2$ are freely independent in some $C^*$-probability space. Hence we have
\begin{equation}
\mathcal{B} \mathcal{B}^\dagger = 3 1\mkern -4mu{\rm l} \otimes 1\mkern -4mu{\rm l} + (1\mkern -4mu{\rm l} - \omega A) \otimes (1\mkern -4mu{\rm l}- \omega A),
\end{equation}
where $A= A_1 A_2^\dagger + \omega A_2 A_1^\dagger.$ By binomial formula we have
\begin{equation}
\tr (\mathcal{B} \mathcal{B}^\dagger)^k = \sum_{j=0}^k \binom{k}{j} 3^{k-j} \left( \sum_{l=0, \text{l is even}}^{j} \binom{j}{l}\binom{l}{l/2} \right)^2:= Q_k.
\end{equation}
On one hand, by Stirling's formula, for even $l,$ $\binom{l}{l/2} \leq 2^l,$ thus
\begin{equation}
Q_k \leq \sum_{j=0}^k \binom{k}{j} 3^{k-j} \left( \sum_{l=0}^{j} \binom{j}{l} 2^l \right)^2 = 12^k.
\end{equation}
By Lemma \ref{lem:c-norm}, we have $\|\mathcal{B}\| \leq 2 \sqrt{3}.$ By a slightly adaption of the results in \cite{EKB2013}, where they provided a method to estimate the quantum value for given dichotomic Bell inequalities, we can conclude that $2\sqrt{3}$ is an upper bound for the quantum value of complex CHSH Bell inequality. In fact, this upper bound can be obtained by choosing:
\begin{equation}
A_1 = B_1 = \begin{pmatrix} & 0 & 0 & 1 \\ & \omega^2 & 0 & 0 \\ & 0 & \omega & 0 \end{pmatrix}, \; A_2 = B_2 = \begin{pmatrix} & 0 & 0 & -\omega \\ & 0 & 1 & 0 \\ & \omega^2 & 0 & 0 \end{pmatrix}.
\end{equation}
On the other hand,
\begin{equation}
\begin{split}
Q_{2k} & = \sum_{j=0}^{k} \binom{2k}{2j} 3^{2k-2j} \left( \sum_{l=0}^j \binom{2j}{2l}\binom{2l}{l} \right)^2 + \sum_{j=1}^{k} \binom{2k}{2j-1} 3^{2k-2j+1} \left( \sum_{l=0}^{j-1} \binom{2j-1}{2l}\binom{2l}{l} \right)^2\\
& \approx \sum_{j=0}^{k} \binom{2k}{2j} 3^{2k-2j} \left( \sum_{l=0}^j \binom{2j}{2l} 2^{2l} \right)^2 + \sum_{j=1}^{k} \binom{2k}{2j-1} 3^{2k-2j+1} \left( \sum_{l=0}^{j-1} \binom{2j-1}{2l} 2^{2l} \right)^2\\
& \gtrsim \sum_{j=0}^{k} \binom{2k}{2j} 3^{2k-2j} \left( \sum_{l=0}^j \binom{j}{l} 2^{2l} \right)^2 + \sum_{j=1}^{k} \binom{2k}{2j-1} 3^{2k-2j+1} \left( \sum_{l=0}^{j-1} \binom{j-1}{l} 2^{2l} \right)^2\\
& \approx \sum_{j=0}^{2k} \binom{2k}{j} 3^{2k-j} 5^{j} = 8^{2k}.
\end{split}
\end{equation}
Therefore $\|\mathcal{B}\| \geq 2\sqrt{2}> \sqrt{7}.$
This method is also promising for the famous MABK Bell inequalities \cite{WW2001,ZB2002}.
\end{document} |
\begin{document}
\title{Supplement to ``Bayesian causal inference in probit graphical models''}
\section{Proof of Proposition 3.1}
\label{subsec:Appendix:causal}
In this section we give a proof of Proposition 3.1. To this end we first introduce the following two lemmata.
\begin{lemma}
\label{corollary_1}
Let $\bm{x},\bm{b} \in Re^d$ be two vectors and $\bm{M}\in Re^{d\times d}$ a symmetric and invertible matrix. Then
\begin{eqnarray*}
\bm{x}^\top\bm{M}\bm{x} - 2\bm{b}^\top\bm{x}=(\bm{x}-\bm{M}^{-1}\bm{b})^\top\bm{M}(\bm{x}-\bm{M}^{-1}\bm{b})-\bm{b}^\top\bm{M}^{-1}\bm{b}.
\end{eqnarray*}
\end{lemma}
\begin{proof}
Simply expand the right-hand side of the equation.
\end{proof}
\begin{lemma}
\label{corollary_2}
Let $V\,\vert\, (\boldsymbol{U}=\bm{u}) \sim \mathcal{N}(\mu+\bm{\gamma}^\top\bm{u},\delta^2)$, $\boldsymbol{U}\sim\mathcal{N}_d(\bm{0},\bm{\Sigma})$. Then
\begin{eqnarray*}
V &\sim& \mathcal{N}\left(\mu, \frac{\delta^2}{1-(\bm{\gamma}^\top\bm{T}^{-1}\bm{\gamma})/\delta^2}\right),
\end{eqnarray*}
where $\bm{T}=\bm{\Sigma}^{-1}+\bm{\gamma}\bm{\gamma}^\top/\delta^2$.
\begin{proof}
We can write by definition
\begin{eqnarray*}
f(v)&=&\int f(v\,\vert\,\bm{u})f(\bm{u}) \, d\bm{u} \nonumber \\
&=& \int_{Re^d}\frac{1}{\sqrt{2\pi}\delta}
\exp\left\{-\frac{1}{2\delta^2}(v-\mu-\bm{\gamma}^\top\bm{u})^2\right\} \\
&\cdot&
\frac{1}{(2\pi)^{d/2}|\bm{\Sigma}|^{1/2}}\exp\left\{-\frac{1}{2}\bm{u}^\top\bm{\Sigma}^{-1}\bm{u}\right\} \, d\bm{u}.
\end{eqnarray*}
Next, observe that
\begin{eqnarray*}
-\frac{1}{2\delta^2}(v-\mu-\bm{\gamma}^\top\bm{u})^2 &=& -\frac{1}{2\delta^2}\left[(v-\mu)^2+(\bm{\gamma}^\top\bm{u})^2-2(v-\mu)\bm{\gamma}^\top\bm{u}\right] \\
&=&
-\frac{1}{2\delta^2}\left[(v-\mu)^2+\bm{u}^\top\bm{\gamma}\bm{\gamma}^\top\bm{u}-2(v-\mu)\bm{\gamma}^\top\bm{u}\right] \\
&=&
-\frac{1}{2\delta^2}\left[(v-\mu)^2+\bm{u}^\top\bm{\Gamma}\,\bm{u}-2(v-\mu)\bm{\gamma}^\top\bm{u}\right],
\end{eqnarray*}
being $\bm{\Gamma}=\bm{\gamma}\bm{\gamma}^\top$.
Therefore we can write
\begin{eqnarray*}
f(v) &=& \frac{1}{\sqrt{2\pi}\delta} \exp\left\{-\frac{1}{2}\left(\frac{v-\mu}{\delta}\right)^2\right\} \\
&\cdot&
\frac{1}{|\bm{\Sigma}|^{1/2}}
\int_{Re^{d}}\frac{1}{(2\pi)^{d/2}}
\exp\left\{-\frac{1}{2}\left[\bm{u}^\top\left(\bm{S}igma^{-1}+\frac{\bm{\Gamma}}{\delta^2}\right)\bm{u}
-2\frac{(v-\mu)}{\delta^2}\bm{\gamma}^\top\bm{u}\right]\right\} \, d\bm{u}.
\end{eqnarray*}
\noindent
Let now $\bm{T}=\bm{\Sigma}^{-1}+\bm{\Gamma}/\delta^2$. By Lemma \ref{corollary_1} we obtain
\begin{eqnarray*}
\bm{u}^\top\bm{T}\,\bm{u}-2\left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}^\top\right]\bm{u}
&=& \left[\bm{u}-\bm{T}^{-1}\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right]^\top\bm{T}
\left[\bm{u}-\bm{T}^{-1}\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right] \\
&-& \left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}^\top\right]\bm{T}^{-1}\left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right].
\end{eqnarray*}
Therefore,
\begin{eqnarray*}
f(v) &=& \frac{1}{\sqrt{2\pi}\delta} \exp\left\{-\frac{1}{2}\left(\frac{v-\mu}{\delta}\right)^2\right\} \\
&\cdot&
\frac{1}{|\bm{\Sigma}|^{1/2}}
\int_{Re^{d}}\frac{1}{(2\pi)^{d/2}}
\exp\left\{-\frac{1}{2}\left[\left(\bm{u}-\bm{T}^{-1}\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right)^\top\bm{T}
\left(\bm{u}-\bm{T}^{-1}\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right)\right]\right\} \\
&\cdot&
\exp \left\{\,\frac{1}{2}\left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}^\top\right]\bm{T}^{-1}\left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right]\right\}
\, d\bm{u}.
\end{eqnarray*}
Moreover we can write
\begin{eqnarray*}
f(v) &=&
\frac{1}{\sqrt{2\pi}\delta} \exp\left\{-\frac{1}{2}\left(\frac{v-\mu}{\delta}\right)^2\right\} \cdot
\frac{1}{|\bm{\Sigma}|^{1/2}}
\exp \left\{\,\frac{1}{2}\left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}^\top\right]\bm{T}^{-1}\left[\frac{(v-\mu)}{\delta^2}\bm{\gamma}\right]\right\} \\
&\cdot&
\frac{1}{|\bm{T}|^{1/2}}
\int_{Re^d}\frac{|\bm{T}|^{1/2}}{(2\pi)^{d/2}}
\exp\left\{-\frac{1}{2}\left(\bm{u}-\frac{(v-\mu)}{\delta^2}\,\bm{T}^{-1}\bm{\gamma}\right)^\top
\bm{T}\left(\bm{u}-\frac{(v-\mu)}{\delta^2}\,\bm{T}^{-1}\bm{\gamma}\right)\right\} \, d\bm{u}.
\end{eqnarray*}
Hence,
\begin{eqnarray*}
f(v) &=&
\frac{1}{\sqrt{2\pi}\delta}
\frac{1}{|\bm{\Sigma}|^{1/2}|\bm{T}|^{1/2}}
\exp\left\{-\frac{1}{2\delta^2}
\left[1-\frac{1}{\delta^2}\bm{\gamma}^\top\bm{T}^{-1}\bm{\gamma}\right](v-\mu)^2\right\},
\end{eqnarray*}
and so
\begin{eqnarray*}
V &\sim& \mathcal{N}\left(\mu, \frac{\delta^2}{1-(\bm{\gamma}^\top\bm{T}^{-1}\bm{\gamma})/\delta^2}\right).
\end{eqnarray*}
\end{proof}
\end{lemma}
\begin{proof}[Proof of Proposition 3.1.]
Let $(X_1,X_2,\dots,X_q) \,\vert\, \bm{\Sigma} \sim \mathcal{N}_q(\bm{0}, \bm{\Sigma})$ and
consider the do operator $\textnormal{do}(X_s=\tilde{x})$ where $s\in\{2,\dots,q\}$ is the intervened node.
The post-intervention distribution of $X_1$ is given by (Section 3)
\begin{eqnarray*}
f(x_1\,\vert\, \textnormal{do}(X_s=\tilde x), \bm{\Sigma}) = \int \mathcal{N}(x_1\,\vert\,\gamma_s \tilde x+\bm{\gamma}^\top\bm{x}_{\mathrm{pa}(s)},\delta^2)\cdot \mathcal{N}(\bm{x}_{\mathrm{pa}(s)}\,\vert\,\bm{0},\bm{\Sigma}_{\mathrm{pa}(s),\mathrm{pa}(s)}) \, d\bm{x}_{\mathrm{pa}(s)},
\end{eqnarray*}
where
\begin{eqnarray*}
X_1\,\vert\, \bm{X}_{\mathrm{pa}(s)}=\bm{x}_{\mathrm{pa}(s)} &\sim&\mathcal{N}(\gamma_s \tilde x+\bm{\gamma}^\top\bm{x}_{\mathrm{pa}(s)},\delta^2), \\
\bm{X}_{\mathrm{pa}(s)} &\sim& \mathcal{N}(\bm{0}, \bm{\Sigma}_{\mathrm{pa}(s),\mathrm{pa}(s)}).
\end{eqnarray*}
Applying Lemma \ref{corollary_2} with
$V = X_1$ and $\boldsymbol{U} = \bm{X}_{\mathrm{pa}(s)}$ we obtain
\begin{eqnarray*}
X_1\,\vert\, \textnormal{do}(X_s=\tilde x), \bm{\Sigma} &\sim& \mathcal{N}\left(\gamma_s \tilde x, \frac{\delta^2}{1-(\bm{\gamma}^\top\bm{T}^{-1}\bm{\gamma})/\delta^2}\right),
\end{eqnarray*}
where
\begin{eqnarray}
\delta^2 &=& \bm{\Sigma}_{1\,\vert\,\mathrm{fa}(s)}, \nonumber \\
(\gamma_s,\bm{\gamma}^\top)^\top &=& \bm{\Sigma}_{1,\mathrm{fa}(s)}\left(\bm{\Sigma}_{\mathrm{fa}(s),\mathrm{fa}(s)}\right)^{-1}, \nonumber \\
\bm{T} &=& \left(\bm{\Sigma}_{\mathrm{pa}(s),\mathrm{pa}(s)}\right)^{-1} + \frac{1}{\delta^2}\bm{\gamma}\bm{\gamma}^\top. \nonumber
\end{eqnarray}
\end{proof}
\section{PAS algorithm}
\label{subsec:Appendix:PAS}
We first summarize the main features of a \emph{Partial Analytic Structure} (PAS) algorithm \citep{Godsill:2012, wang:li:2012}.
Let $X_1,\dots,X_q$ be a collection of variables, $\bm{X}$ a $(n,q)$ data matrix collecting $n$ i.i.d. multivariate observations from $X_1,\dots,X_q$. Consider $K$ distinct models, $\mathcal{M}_1,\dots, \mathcal{M}_K$, each one indexed by a parameter $\bm{\theta}_k\in\bm{T}heta_k$ and assume model uncertainty, so that the true data generating model is one of the $K$ models. Under each model $\mathcal{M}_k$ the likelihood function is $f(\bm{X}\,\vert\,\bm{\theta}_k,\mathcal{M}_k)$.
Consider now two models $\mathcal{M}_h$, $\mathcal{M}_k$, $h,k\in\{1,\dots,K\}$, $h\ne k$, with parameters $\bm{\theta}_h, \bm{\theta}_k$ and let $(\bm{\theta}_h)_u$, $(\bm{\theta}_k)_u$ be two sub-vectors of $\bm{\theta}_h, \bm{\theta}_k$ respectively.
A general PAS algorithm relies on the following two assumptions:
\begin{enumerate}
\item the full conditional distribution of $(\bm{\theta}_k)_u$, $p\left((\bm{\theta}_k)_u\,\vert\,(\bm{\theta}_k)_{-u},\mathcal{M}_k,\bm{X}\right)$ is available in closed form;
\item in $\mathcal{M}_h$ there exists an ``equivalent" set of parameters $(\bm{\theta}_h)_u$ with same dimension of $(\bm{\theta}_k)_u$;
\end{enumerate}
see also \citet[Section 5.2]{wang:li:2012}.
A PAS reversible jump algorithm adopts a proposal distribution which sets $(\bm{\theta}_h)_{-u} = (\bm{\theta}_k)_{-u}$, draws $\mathcal{M}_k$ from $q(\mathcal{M}_k\,\vert\,\mathcal{M}_h)$ and $(\bm{\theta}_k)_u$ from $p\left((\bm{\theta}_k)_u\,\vert\,(\bm{\theta}_k)_{-u},\mathcal{M}_k,\bm{X}\right)$.
Specifically, the update of model $\mathcal{M}_k$ and model parameter $\bm{\theta}_k$ is performed in two steps.
The first step concerns the model update and can be summarized as follows:
\begin{enumerate}
\item propose $\mathcal{M}_k\sim q(\mathcal{M}_k\,\vert\,\mathcal{M}_h)$ and set $(\bm{\theta}_h)_{-u} = (\bm{\theta}_k)_{-u}$;
\item accept $\mathcal{M}_k$ with probability $\alpha=\min\{1,r_{k}\}$,
\begin{eqnarray}
\label{eq:PAS:accept}
r_k = \frac{p(\mathcal{M}_k\,\vert\,(\bm{\theta}_k)_{-u},\bm{X})}{p(\mathcal{M}_h\,\vert\,(\bm{\theta}_h)_{-u},\bm{X})}
\frac{q(\mathcal{M}_h\,\vert\,\mathcal{M}_k)}{q(\mathcal{M}_k\,\vert\,\mathcal{M}_h)},
\end{eqnarray}
where
\begin{eqnarray}
p(\mathcal{M}_k\,\vert\,(\bm{\theta}_k)_{-u},\bm{X}) = \int p(\mathcal{M}_k, (\bm{\theta}_k)_u \,\vert\, (\bm{\theta}_k)_{-u},\bm{X})\, d(\bm{\theta}_k)_u
\end{eqnarray}
\item if $\mathcal{M}_k$ is accepted, generate
$(\bm{\theta}_k)_u \sim p\left((\bm{\theta}_k)_u\,\vert\,(\bm{\theta}_k)_{-u},\mathcal{M}_k,\bm{X}\right)$, \\otherwise
$(\bm{\theta}_h)_u \sim p\left((\bm{\theta}_h)_u\,\vert\,(\bm{\theta}_h)_{-u},\mathcal{M}_h,\bm{X}\right)$.
\end{enumerate}
In the second step we then update parameter $\bm{\theta}_k$ (if $\mathcal{M}_k$ is accepted in the model update step) or $\bm{\theta}_h$ (otherwise) from its full conditional distribution using standard MCMC steps. Notice that in the case of Gaussian undirected graphical models with G-Wishart priors this requires specific MCMC techniques to avoid the computation of prior normalizing constants which are not available in closed form; see \cite[Section 2.4]{wang:li:2012}.
We now apply the PAS algorithm to our DAG setting.
We start defining a suitable proposal distribution, $q(\mathcal{D}'\,\vert\,\mathcal{D})$ on the DAG space.
To this end we consider three types of operators that locally modify a given DAG $\mathcal{D}$: insert a directed edge (InsertD $a\rightarrow b$ for short), delete a directed edge (DeleteD $a\rightarrow b$) and reverse a directed edge (ReverseD $a\rightarrow b$).
For each $\mathcal{D}\in\mathcal{S}_q$, being $\mathcal{S}_q$ the set of all DAGs on $q$ nodes, we then construct the set of \textit{valid} operators $\mathcal{O}_{\mathcal{D}}$, that is operators whose resulting graph is a DAG.
Therefore, given the current $\mathcal{D}$ we propose $\mathcal{D}'$ by uniformly sampling a DAG in $\mathcal{O}_\mathcal{D}$.
Because there is a one-to-one correspondence between each operator and resulting DAG $\mathcal{D}'$,
the probability of transition is given by
$q(\mathcal{D}'\,\vert\,\mathcal{D}) = 1/|\mathcal{O}_{\mathcal{D}}|$.
Such a proposal can be easily adapted to account for sparsity constraints on the DAG space, for instance by fixing a maximum number of edges and limiting the model space to those DAGs having at most a given number of edges, typically a small multiple of the number of nodes.
Because of the structure of our proposal, at each step of our MCMC algorithm we will need to compare two DAGs $\mathcal{D}$, $\mathcal{D}'$ which differ by one edge only. Notice that the ReverseD $a\rightarrow b$ operator can also be reduced to the same case, since it is equivalent to the consecutive application of the operators DeleteD $a\rightarrow b$ and InsertD $b\rightarrow a$.
Therefore, consider two DAGs $\mathcal{D}=(V,E)$, $\mathcal{D}'=(V,E')$ such that $E'=E\setminus\{(h,j)\}$.
Notice that if a parent ordering is valid for $\mathcal{D}$, it is also valid for $\mathcal{D}'$, and we adopt this convention to simplify the analysis.
At this stage, for better clarity of notation, we index each parameter with its own DAG-model and write accordingly
$(\bm{D}^{\mathcal{D}}, \bm{L}^{\mathcal{D}})$ and $(\bm{D}^{\mathcal{D}'}, \bm{L}^{\mathcal{D}'})$ and $\bm{\Omega}^{\mathcal{D}}$ and $\bm{\Omega}^{\mathcal{D}'}$ when interest centers on the covariance matrix.
Notice that the Cholesky parameters under the two DAGs differ only with regard to their $j$-th component
$((\sigma_j^{\mathcal{D} })^2, \bm{L}^{\mathcal{D}}_{\prec j\,]})$, and $((\sigma_j^{\mathcal{D}'})^2, \bm{L}^{\mathcal{D}'}_{\prec j\,]})$ respectively.
Moreover the remaining parameters
$\{ (\sigma_r^{\mathcal{D}})^2, \bm{L}^{\mathcal{D}}_{\prec r\,]}; \, r \neq j \}$ and
$\{ (\sigma_r^{\mathcal{D}'})^2, \bm{L}^{\mathcal{D}'}_{\prec r\,]}; \, r \neq j \}$
are componentwise \emph{equivalent} between the two graphs because they refer to structurally equivalent conditional models; see
(6).
This is crucial for the correct application of the PAS algorithm.
The acceptance probability for $\mathcal{D}'$ under a PAS algorithm is given by
$\alpha_{\mathcal{D}'}=\min\{1;r_{\mathcal{D}'}\}$
where
\begin{eqnarray}
\label{eq:ratio:PAS:DAG}
r_{\mathcal{D}'}
&=&\frac{p(\mathcal{D}'\,\vert\,\bm{D}^{\mathcal{D}'}\setminus (\sigma_j^{\mathcal{D}'})^2,\bm{L}^{\mathcal{D}'}\setminus \bm{L}^{\mathcal{D}'}_{\prec j\,]},\bm{X})}
{p(\mathcal{D}\,\vert\,\bm{D}^{\mathcal{D}}\setminus (\sigma_j^{\mathcal{D}})^2,\bm{L}^{\mathcal{D}}\setminus \bm{L}^{\mathcal{D}}_{\prec j\,]},\bm{X})}
\cdot
\frac{q(\mathcal{D}\,\vert\,\mathcal{D}')}{q(\mathcal{D}'\,\vert\,\mathcal{D})}
\nonumber \\
&=&
\frac{p(\bm{X},\bm{D}^{\mathcal{D}'}\setminus (\sigma_j^{\mathcal{D}'})^2,\bm{L}^{\mathcal{D}'}\setminus \bm{L}^{\mathcal{D}'}_{\prec j\,]}\,\vert\, \mathcal{D}')}
{p(\bm{X},\bm{D}^{\mathcal{D}}\setminus (\sigma_j^{\mathcal{D}})^2,\bm{L}^{\mathcal{D}}\setminus \bm{L}^{\mathcal{D}}_{\prec j\,]}\,\vert\, \mathcal{D})}
\cdot\frac{p(\mathcal{D}')}{p(\mathcal{D})}
\cdot\frac{q(\mathcal{D}\,\vert\,\mathcal{D}')}{q(\mathcal{D}'\,\vert\,\mathcal{D})}.
\end{eqnarray}
Therefore we require to evaluate for DAG $\mathcal{D}$
\begin{eqnarray*}
p(\bm{X},\bm{D}\setminus \sigma_j^2,\bm{L}\setminus \bm{L}_{\prec j\,]}\,\vert\, \mathcal{D})
&=&
\int_0^{\infty}\int_{Re^{|\mathrm{pa}(j)|}}
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D})p(\bm{D},\bm{L}\,\vert\,\mathcal{D}) \, d \bm{L}_{\prec j\,]} d\sigma_j^2
\end{eqnarray*}
(and similarly for $\mathcal{D}'$) where
we removed for simplicity the super-script $\mathcal{D}$ from the Cholesky parameters, now emphasizing the dependence on $\mathcal{D}$ through the conditioning sets.
Moreover, because of the likelihood and prior factorization in (8) and (16) we can write
\begin{eqnarray}
\label{eq:PAS:joint:DAG}
p(\bm{X},\bm{D}\setminus \sigma_j^2,\bm{L}\setminus \bm{L}_{\prec j\,]}\,\vert\, \mathcal{D})
&=&
\prod_{r\ne j}f(\bm{X}_r\,\vert\,\bm{X}_{\mathrm{pa}(r)},\sigma_r^2,\bm{L}_{\prec r\,]},\mathcal{D})p(\bm{L}_{\prec r\,]}\,\vert\,\sigma_r^2,\mathcal{D})p(\sigma_r^2\,\vert\,\mathcal{D}) \nonumber \\
&\cdot&
\int_0^{\infty}\int_{Re^{|\mathrm{pa}(j)|}}
f(\bm{X}_j\,\vert\,\bm{X}_{\mathrm{pa}(j)},\sigma_j^2,\bm{L}_{\prec j\,]},\mathcal{D}) \\
&\cdot&
p(\bm{L}_{\prec j\,]}\,\vert\,\sigma_j^2,\mathcal{D})p(\sigma_j^2\,\vert\,\mathcal{D})
\, d \bm{L}_{\prec j\,]} d\sigma_j^2. \nonumber
\end{eqnarray}
In particular, because of conjugacy of the Normal density with the Normal-Inverse-Gamma prior, the integral in \eqref{eq:PAS:joint:DAG} can be obtained in closed form as
\begin{eqnarray}
\label{eq:marg:like:j:suppl}
m_{}(\bm{X}_j\,\vert\,\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)}, \mathcal{D}) =
(2\pi)^{-\frac{n}{2}}
\frac{\big|\bm{T}_j\big|^{1/2}}
{\big|\bar{\bm{T}}_j\big|^{1/2}}\cdot
\frac{\Gamma\left(a_j^*+\frac{n}{2}\right)}{\Gamma\left(a_j^*\right)}
\left[\frac{1}{2}g\right]^{a_j^*}
\left[\frac{1}{2}\big(g+\bm{X}_j^\top\bm{X}_j - \hat{\bm{L}}_j^\top\bar{\bm{T}}_j\hat{\bm{L}}_j\big)\right]^{-(a_j^*+n/2)}
\end{eqnarray}
where
\begin{eqnarray*}
\bm{T}_j &=& g\bm{I}_{|\mathrm{pa}_{\mathcal{D}}(j)|} \\
\bar{\bm{T}}_j &=& g\bm{I}_{|\mathrm{pa}_{\mathcal{D}}(j)|}+\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)}^\top\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)} \\
\hat{\bm{L}}_j
&=& \big(g\bm{I}_{|\mathrm{pa}_{\mathcal{D}}(j)|}+\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)}^\top\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)}\big)^{-1}\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)}^{\top}\bm{X}_j,
\end{eqnarray*}
and $a_j^*=\frac{a_j}{2}-\frac{|\mathrm{pa}_{\mathcal{D}}(j)|}{2}-1$. For $j=1$, because we fixed $\sigma_1^2=1$ we instead obtain
\begin{eqnarray}
m_{}(\bm{X}_1\,\vert\,\bm{X}_{\mathrm{pa}_{\mathcal{D}}(1)}, \mathcal{D}) &=&
(2\pi)^{-\frac{n}{2}} \frac{\big|\bm{T}_1\big|^{1/2}}
{\big|\bar{\bm{T}}_1\big|^{1/2}}\cdot
\exp\left\{-\frac{1}{2}\big(\bm{X}_1^\top\bm{X}_1-
\hat{\bm{L}}_1^\top\bar{\bm{T}}_1\hat{\bm{L}}_1\big)\right\}.
\end{eqnarray}
Therefore, the PAS ratio in \eqref{eq:ratio:PAS:DAG} reduces to
\begin{eqnarray}
r_{\mathcal{D}'}&=&
\frac{m_{}(\bm{X}_j\,\vert\,\bm{X}_{\mathrm{pa}_{\mathcal{D}'}(j)}, \mathcal{D}')}
{m_{}(\bm{X}_j\,\vert\,\bm{X}_{\mathrm{pa}_{\mathcal{D}}(j)}, \mathcal{D})}
\cdot\frac{p(\mathcal{D}')}{p(\mathcal{D})}
\cdot\frac{q(\mathcal{D}\,\vert\,\mathcal{D}')}{q(\mathcal{D}'\,\vert\,\mathcal{D})}.
\end{eqnarray}
\section{Proof of Proposition 4.1}
In this section we prove that the posterior distribution $p(\bm{D},\bm{L},\mathcal{D},\theta_0,\bm{X}_1\,\vert\, \bm{y}, \bm{X}_{-1})$ is proper. To this end we factorize it as
\begin{eqnarray*}
p(\bm{D},\bm{L},\mathcal{D},\theta_0,\bm{X}_1 \,\vert\, \bm{y}, \bm{X}_{-1}) &\propto&
p(\theta_0\,\vert\, \bm{y}, \bm{X}_{-1})
p(\bm{X}_1 \,\vert\, \theta_0,\bm{y}, \bm{X}_{-1}) \\
&\cdot& p(\bm{D},\bm{L} \,\vert\, \theta_0,\bm{X}_1, \bm{y}, \bm{X}_{-1})
p(\mathcal{D} \,\vert\, \bm{D},\bm{L},\theta_0,\bm{X}_1, \bm{y}, \bm{X}_{-1}).
\end{eqnarray*}
Also, because $\bm{y}$ is deterministically set once $\bm{X}_1$ and $\theta_0$ are given, the latter simplifies to
\begin{eqnarray*}
p(\bm{D},\bm{L},\mathcal{D},\theta_0,\bm{X}_1 \,\vert\, \bm{y}, \bm{X}_{-1}) &\propto&
p(\theta_0\,\vert\, \bm{y}, \bm{X}_{-1})
p(\bm{X}_1 \,\vert\, \theta_0,\bm{y}, \bm{X}_{-1}) \\
&\cdot& p(\bm{D},\bm{L} \,\vert\, \theta_0,\bm{X})
p(\mathcal{D} \,\vert\, \bm{D},\bm{L},\theta_0,\bm{X}),
\end{eqnarray*}
where $\bm{X}$ is the $(n,q)$ augmented data matrix, column binding of $\bm{X}_1$ and $\bm{X}_{-1}$.
To prove the propriety of the joint posterior we will show that each of the above terms corresponds to a proper distribution.
\subsection{Propriety of \boldmath$p(\theta_0\,\vert\, y, X_{-1})$}
First, notice that $p(\theta_0\,\vert\, \bm{y}, \bm{X}_{-1}) \propto p(\bm{y},\bm{X}_{-1}\,\vert\, \theta_0)$ since $p(\theta_0)\propto 1$.
Remember the \textit{augmented} likelihood in Equation (9) of our manuscript
\begin{eqnarray*}
f(\bm{y},\bm{X}\,\vert\, \bm{D},\bm{L},\theta_0,\mathcal{D}) =
\prod_{j=1}^{q} d\,\mathcal{N}_n(\bm{X}_j\,\vert\, -\bm{X}_{\mathrm{pa}(j)} \bm{L}_{ \prec j\,]}, \sigma_j^2 \bm{I}_n)
\cdot
\left\{
\prod_{i=1}^{n}
\mathbbm{1}(\theta_{y_i-1} < x_{i,1} \le \theta_{y_i})
\right\},
\end{eqnarray*}
now emphasizing the dependence on DAG $\mathcal{D}$, where we set the $(j,j)$-element of $\bm{D}$ equal to $\sigma_j^2$.
We first integrate out $\bm{X}_1$ to obtain the likelihood
\begin{eqnarray*}
f(\bm{y},\bm{X}_{-1}\,\vert\, \bm{D},\bm{L},\theta_0, \mathcal{D}) &=&
\prod_{j=2}^{q} d\,\mathcal{N}_n(\bm{X}_j\,\vert\, -\bm{X}_{\mathrm{pa}(j)} \bm{L}_{ \prec j\,]}, \sigma_j^2 \bm{I}_n) \\
&\cdot&
\int_{Re^n}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\, -\bm{X}_{\mathrm{pa}(1)} \bm{L}_{ \prec 1\,]}, \sigma_1^2 \bm{I}_n)
\prod_{i=1}^{n}
\mathbbm{1}(\theta_{y_i-1} < x_{i,1} \le \theta_{y_i}) \,
d\bm{X}_1 \\
&=&
f(\bm{X}_{-1}\,\vert\,\bm{D},\bm{L}) \cdot
\int\limits_{O^n(\bm{y},\theta_0)}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\, -\bm{X}_{\mathrm{pa}(1)} \bm{L}_{ \prec 1\,]}, \sigma_1^2 \bm{I}_n) \,
d\bm{X}_1,
\end{eqnarray*}
where $\bm{X}_{-1}=(\bm{X}_2,\dots,\bm{X}_q)$, $O^n(\bm{y},\theta_0)$ is the \textit{n}-dimensional orthant $\bigtimes\limits_{i=1}^n(\theta_{y_i-1}, \theta_{y_i}]$ and $\theta_{-1}=-\infty, \theta_{1}=+\infty$.
To obtain the marginal posterior of $\theta_0$ we
first compute the \textit{integrated likelihood} for $\theta_0$ by
integrating the likelihood w.r.t. $(\bm{D},\bm{L})$. Because of prior parameter independence across $(\sigma_j^2,\bm{L}_{ \prec j\,]})$'s, we obtain
\begin{eqnarray*}
f(\bm{y},\bm{X}_{-1}\,\vert\,\theta_0) &=&
\int f(\bm{y},\bm{X}_{-1}\,\vert\, \bm{D},\bm{L},\theta_0) \, p(\bm{D},\bm{L})\, d \bm{D} \, d\bm{L} \\
&=& \prod_{j=2}^{q} \left\{\, \int
d\,\mathcal{N}_n(\bm{X}_j\,\vert\, -\bm{X}_{\mathrm{pa}(j)} \bm{L}_{ \prec j\,]}, \sigma_j^2 \bm{I}_n)
\,p(\sigma_j^2,\bm{L}_{ \prec j\,]})
\, d\sigma_j^2 \, d\bm{L}_{ \prec j\,]} \right\} \\
&\cdot&
\int\limits_{Re^{|\mathrm{pa}(1)|}}
\int\limits_{O^n(\bm{y},\theta_0)}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\, -\bm{X}_{\mathrm{pa}(1)} \bm{L}_{ \prec 1\,]}, \sigma_1^2 \bm{I}_n)
\, p(\bm{L}_{ \prec 1\,]})\, d\bm{X}_1 \, d\bm{L}_{ \prec 1\,]},
\end{eqnarray*}
since $\sigma_1^2=1$.
Notice that the first term in the integrated likelihood $f(\bm{y},\bm{X}_{-1}\,\vert\,\theta_0)$ does not depend on $\theta_0$. We then write
\begin{eqnarray*}
f(\bm{y},\bm{X}_{-1}\,\vert\,\theta_0) =
K(\bm{X}_{-1}) \cdot
\int\limits_{O^n(\bm{y},\theta_0)}
\int\limits_{Re^{|\mathrm{pa}(1)|}}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\, -\bm{X}_{\mathrm{pa}(1)} \bm{L}_{ \prec 1\,]}, \sigma_1^2 \bm{I}_n)
\, p(\bm{L}_{ \prec 1\,]}) \, d\bm{L}_{ \prec 1\,]} \, d\bm{X}_1,
\end{eqnarray*}
upon interchanging the order of integration.
Consider now the inner integral where $p(\bm{L}_{ \prec 1\,]})= d\,\mathcal{N}_{|\mathrm{pa}(1)|}(\bm{0},g^{-1}\bm{I}_{\mathrm{pa}(1)})$; see also Equation (15) in our manuscript.
Because of conjugacy with the normal density $d\,\mathcal{N}_n(\bm{X}_1\,\vert\,\cdot)$ we obtain
\begin{eqnarray*}
\int\limits_{Re^{|\mathrm{pa}(1)|}}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\, -\bm{X}_{\mathrm{pa}(1)} \bm{L}_{ \prec 1\,]}, \sigma_1^2 \bm{I}_n)
\, p(\bm{L}_{ \prec 1\,]}) \, d\bm{L}_{ \prec 1\,]} =
d\,\mathcal{N}_n(\bm{X}_1\,\vert\,\bm{0},\bm{\Sigma}_1),
\end{eqnarray*}
where $|\mathrm{pa}(1)|$ is the number of parents,
$
\bm{\Sigma}_1 = g^{-1} \bm{X}_{\mathrm{pa}(1)}\bm{X}_{\mathrm{pa}(1)}^\top + \bm{I}_n.
$
The integrated likelihood thus becomes
\begin{eqnarray}
\label{eq:integrated:likelihood}
f(\bm{y},\bm{X}_{-1}\,\vert\,\theta_0) =
K(\bm{X}_{-1}) \cdot
\int\limits_{O^n(\bm{y},\theta_0)}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\,\bm{0},\bm{\Sigma}_1)
\, d\bm{X}_1.
\end{eqnarray}
Since we assumed $p(\theta_0)\propto 1$, the marginal posterior of $\theta_0$ is proportional to \eqref{eq:integrated:likelihood} and also
\begin{eqnarray*}
\label{eq:marginal:posterior}
p(\theta_0\,\vert\,\bm{y},\bm{X}_{-1})
&\propto& \int\limits_{O^n(\bm{y},\theta_0)}
d\,\mathcal{N}_n(\bm{X}_1\,\vert\,\bm{0},\bm{\Sigma}_1)\, d\bm{X}_1 \\
&\coloneqq& I(\theta_0,n),
\end{eqnarray*}
where we emphasize the dependence on $n$. Therefore, to verify the propriety of $p(\theta_0\,\vert\,\bm{y},\bm{X}_{-1})$ we need to prove that $I(\theta_0,n)$ is integrable over $\theta_0\in(-\infty,\infty)$.
\noindent Let now for simplicity $\bm{X}_1 = \bm{U}=(U_1,\dots,U_n)^\top$ and recall that
\begin{eqnarray*}
U_i \in (-\infty,\theta_0] \iff U_i \le \theta_0 \iff U_i - \theta_0 \le 0, \\
U_i \in (\theta_0,\infty) \iff U_i > \theta_0 \iff \theta_0 - U_i < 0.
\end{eqnarray*}
Hence, if we let
\begin{eqnarray*}
U_i^* =
\begin{cases}
\,\, U_i-\theta_0 & \text{if $y_i=0$},\\
\,\, \theta_0-U_i & \text{if $y_i=1$},
\end{cases}
\end{eqnarray*}
we obtain that $\bm{U}^* \sim \mathcal{N}_n(\bm{\mu}^*,\bm{\Sigma}^*)$, where $\bm{\mu}^*=(\mu^*_1,\dots,\mu^*_n)^\top$,
\begin{eqnarray*}
\mu_i^* =
\begin{cases}
\,\, -\theta_0 & \text{if $y_i=0$},\\
\,\, \theta_0 & \text{if $y_i=1$},
\end{cases} \quad \quad
\bm{\Sigma}^*(h,k) =
\begin{cases}
\,\, \bm{\Sigma}_1(h,k) & \text{if $y_h = y_k$},\\
\,\, -\bm{\Sigma}_1(h,k) & \text{if $y_h \ne y_k$},
\end{cases}
\end{eqnarray*}
and $\bm{\Sigma}^*(h,k)$ denotes the element at position $(h,k)$ in $\bm{\Sigma}^*$. Therefore, we can write
\begin{eqnarray}
\label{eq:integral:n}
I(\theta_0,n) &=&
\int\limits_{0}^{\infty}
\cdots
\int\limits_{0}^{\infty}
d\,\mathcal{N}_n(\bm{U}^*\,\vert\,\bm{\mu}^*,\bm{\Sigma}^*) \, d U_1^* \cdots \, d U_n^*.
\end{eqnarray}
\noindent If $n=1$, then \eqref{eq:integral:n} is not integrable, so that the posterior of $\theta_0$ is improper.
To see why, notice that
\begin{eqnarray*}
I(\theta_0,1)
&=&
\int\limits_{0}^{\infty}
d\,\mathcal{N}(U_1^*\,\vert\, \mu^*,\sigma^{2*}) \, d U_1^* \\
&=&
\begin{cases}
\,\, \Phi\left(\frac{\theta_0}{\sigma^{2*}}\right)\ & \text{if $y_i=0$},\\
\,\, \Phi\left(-\frac{\theta_0}{\sigma^{2*}}\right) & \text{if $y_i=1$},
\end{cases}
\end{eqnarray*}
which is not integrable over $(-\infty,\infty)$ in either case.
\noindent Consider now $n=2$ and assume that the two observations have different values for $Y$, say $y_1=1$ and $y_2=0$.
We then obtain
\begin{eqnarray*}
\begin{pmatrix}
U_1^*\\
U_2^*
\end{pmatrix} & \sim & \mathcal{N}\left[\left(\begin{array}{c}
\theta_0\\
-\theta_0
\end{array}\right),\left(\begin{array}{cc}
\sigma_1^2 & \rho\sigma_1\sigma_2 \\
\rho\sigma_1\sigma_2 & \sigma_2^2
\end{array}\right)\right],
\end{eqnarray*}
where $\rho=\textnormal{Corr}(U_1^*,U_2^*)$ and
\begin{eqnarray*}
\label{eq:integral}
I(\theta_0,2) &=&
\int\limits_{0}^{\infty}
\int\limits_{0}^{\infty}
d\,\mathcal{N}_n(\bm{U}^*\,\vert\,\bm{\mu}^*,\bm{\Sigma}^*) \, dU_1^* \, dU_2^* \\
&=&
P\left\{U_1^*>0,U_2^*>0\right\}.
\end{eqnarray*}
The posterior for $\theta_0$ is proper
if and only if $\int_{-\infty}^{\infty}I(\theta_0,2)\, d\theta_0 < \infty$.
We will show this result by providing an upper bound
$I(\theta_0,2) \leq G(\theta_0,2)$ with $G(\theta_0,2)$ integrable over the real line.
To this end, we first standardize $U_1^*$ and $U_2^*$
\begin{eqnarray*}
\label{eq:U:standardized}
V_1^*=
\frac{U_1^*-\theta_0}{\sigma_1} \\
V_2^*=\frac{U_2^*+\theta_0}{\sigma_2},
\end{eqnarray*}
so that
\begin{eqnarray*}
\begin{pmatrix}
V_1^*\\
V_2^*
\end{pmatrix} & \sim & \mathcal{N}\left[\left(\begin{array}{c}
0 \\
0
\end{array}\right),\left(\begin{array}{cc}
1 & \rho \\
\rho & 1
\end{array}\right)\right]
\end{eqnarray*}
and
\begin{eqnarray}
\label{eq:integral:U:V}
P\left\{U_1^*>0,U_2^*>0\right\} = P\left\{V_1^*>-\frac{\theta_0}{\sigma_1},V_2^*>\frac{\theta_0}{\sigma_2}\right\}.
\end{eqnarray}
We now distinguish two cases: $\rho \ge 0$ and $\rho < 0$.
\paragraph{Case 1}
Consider first $0 \le \rho < 1 $. Applying Equation (C.8) in \citet{Beck:Melchers:2017} to the right-hand-side of
\eqref{eq:integral:U:V}
we obtain
\begin{eqnarray*}
P\left\{U_1^*>0,U_2^*>0\right\}
\le
\Phi(-b)\Phi\left(\frac{\theta_0}{\sigma_1}\right) + \Phi(-a)\Phi\left(-\frac{\theta_0}{\sigma_2}\right)
\coloneqq H(\theta_0,2),
\end{eqnarray*}
where
\begin{eqnarray*}
a=\frac{-\theta_0\left[\frac{1}{\sigma_1}+\frac{\rho}{\sigma_2}\right]}{(1-\rho^2)^{1/2}}, \quad \quad
b=\frac{\theta_0\left[\frac{1}{\sigma_2}+\frac{\rho}{\sigma_1}\right]}{(1-\rho^2)^{1/2}},
\end{eqnarray*}
and $\Phi(t)$ is the c.d.f. of a standard normal distribution evaluated at $t$.
It follows that $H(\theta_0,2)$ is continuous, bounded and
\begin{eqnarray*}
\lim_{\theta_0\rightarrow -\infty} H(\theta_0,2) = 0, \quad \quad
\lim_{\theta_0\rightarrow \infty} H(\theta_0,2) = 0.
\end{eqnarray*}
Now we verify integrability of $H(\theta_0,2)$ in a right and left neighborhood of $-\infty$ and $\infty$ respectively.
Consider first $\theta_0\rightarrow-\infty$ and notice that
\begin{eqnarray*}
H(\theta_0,2) &\le&
\Phi\left(\frac{\theta_0}{\sigma_1}\right) +
\Phi\left(-a\right) \\
&=&
\Phi\left(\theta_0 A\right) +
\Phi\left(\theta_0 B\right) \\
&=&
P\left\{Z\ge-\theta_0 A\right\} + P\left\{Z \ge -\theta_0 B\right\}.
\end{eqnarray*}
where $Z\sim\mathcal{N}(0,1)$ and
\begin{eqnarray*}
A = \frac{1}{\sigma_1}>0 \quad \quad
B = \frac{\frac{1}{\sigma_1}+\frac{\rho}{\sigma_2}}{(1-\rho^2)^{1/2}} > 0.
\end{eqnarray*}
Because $\theta_0 \rightarrow -\infty$, we have $-\theta_0 A>0, -\theta_0 B > 0$. Applying the following inequality for the upper tail of $Z$
\begin{eqnarray*}
P\{Z > z\} \le \frac{\exp\{-z^2/2\}}{z\sqrt{2\pi}}, \, z>0,
\end{eqnarray*}
one gets
\begin{eqnarray*}
P\left\{Z\ge-\theta_0 A\right\} + P\left\{Z \ge -\theta_0 B\right\}
&\le&
\frac{\exp\{-\theta_0^2 A^2/2\}}{-\theta_0 A \sqrt{2\pi}} +
\frac{\exp\{-\theta_0^2 B^2/2\}}{-\theta_0 B \sqrt{2\pi}} \\
&\le&
\frac{\exp\{-\theta_0^2 A^2/2\}}{A \sqrt{2\pi}} +
\frac{\exp\{-\theta_0^2 B^2/2\}}{B \sqrt{2\pi}} \\
&\coloneqq&
G(\theta_0,2).
\end{eqnarray*}
Clearly $\int_{-\infty}^0 G(\theta_0,2) d\theta_0 < \infty$, whence
$I(\theta_0,2)$ is integrable in a right neighborhood of $-\infty$.
Consider now $\theta_0\rightarrow\infty$. Similarly as before notice that
\begin{eqnarray*}
H(\theta_0,2) &\le&
\Phi\left(-b\right) +
\Phi\left(-\frac{\theta_0}{\sigma_2}\right) \\
&=&
\Phi\left(-\theta_0 C\right) +
\Phi\left(-\theta_0 D\right) \\
&=&
P\left\{Z\ge \theta_0 C\right\} + P\left\{Z \ge \theta_0 D\right\}.
\end{eqnarray*}
where
\begin{eqnarray*}
C = \frac{\frac{1}{\sigma_2}+\frac{\rho}{\sigma_1}}{(1-\rho^2)^{1/2}} > 0, \quad \quad
D = \frac{1}{\sigma_2}>0
\end{eqnarray*}
It follows that an upper bound for $I(\theta_0,2)$ is now
\begin{eqnarray*}
G(\theta_0,2) =
\frac{\exp\{-\theta_0^2 C^2/2\}}{C \sqrt{2\pi}} +
\frac{\exp\{-\theta_0^2 D^2/2\}}{D \sqrt{2\pi}}.
\end{eqnarray*}
Thus for $\rho \geq 0$ the posterior for $\theta_0$ is proper.
\paragraph{Case 2}
Consider now the case $-1<\rho<0$.
We rewrite \eqref{eq:integral:U:V} using $P_{\rho<0}\left\{\cdot \right\}$ to indicate that the probability is evaluated w.r.t. the joint distribution when the correlation coefficient is less than zero, and use $P_{\rho \ge 0}\left\{\cdot \right\}$ otherwise.
Letting $v_1=-\frac{\theta_0}{\sigma_1}, v_2=\frac{\theta_0}{\sigma_2}$ we have
\begin{eqnarray*}
P_{\rho<0}\left\{U_1^*>0,U_2^*>0\right\} &=&
P_{\rho<0}\left\{V_1^*>v_1,V_2^*>v_2\right\} \\
&=&
P_{\rho<0}\left\{V_1^*\le v_1,V_2^* \le v_2\right\} +
P\left\{V_1^*> v_1\right\} +
P\left\{V_2^*> v_2\right\} - 1 \\
&\le&
P_{\rho \ge 0}\left\{V_1^*\le v_1,V_2^* \le v_2\right\} +
P\left\{V_1^*> v_1\right\} +
P\left\{V_2^*> v_2\right\} - 1 \\
&=&
P_{\rho \ge 0}\left\{V_1^*> v_1,V_2^* > v_2\right\} \\
&=&
P_{\rho \ge 0}\left\{U_1^*> 0, U_2^* > 0 \right\};
\end{eqnarray*}
see Equation (C.16) in \citet{Beck:Melchers:2017} for the inequality in step 3.
It follows that $I(\theta_0,2)$ for $-1<\rho<0$ is bounded above by the corresponding function for the case $0 \le \rho <1$.
Hence $I(\theta_0,2)$ is integrable also for $\rho<0$ and propriety of the posterior of $\theta_0$ holds in this case too.
Having established that $p(\theta_0\,\vert\,\bm{y},\bm{X}_{-1})$ is proper when $n=2$ and $y_1=1, y_2=0$, it follows that propriety will hold for any sample size $n\ge2$ provided that there exist at least two observations with distinct values for $Y$. To see why, assume without loss of generality that the first two observations have distinct values; then
\begin{eqnarray*}
p\left(\theta_0\,\vert\,\bm{y}^{(1:n)}, \bm{X}_{-1}^{(1:n)}\right) \propto
p\left(\theta_0\,\vert\,\bm{y}^{(1:2)}, \bm{X}_{-1}^{(1:2)}\right) \cdot
f\left(\bm{y}^{(3:n)}, \bm{X}_{-1}^{(3:n)} \,\vert\, \bm{y}^{(1:2)}, \bm{X}_{-1}^{(1:2)}, \theta_0 \right),
\end{eqnarray*}
which is proper because
$p\left(\theta_0\,\vert\,\bm{y}^{(1:2)}, \bm{X}_{-1}^{(1:2)}\right)$ is proper and
the conditional integrated likelihood $f(\cdot\,\vert\,\cdot)$ is not degenerate.
\noindent
Results presented in the next sections rely on the following proposition.
\begin{prop}
\label{prop:integrated:like:proper}
Let $p(\bm{X}\,\vert\, \boldsymbol{\vartheta}, \boldsymbol{\lambda})$ be a proper statistical model for the data $\bm{X}$ where $\boldsymbol{\vartheta}$, $\boldsymbol{\lambda}$ are continuous parameters with joint prior
$p(\boldsymbol{\vartheta}, \boldsymbol{\lambda})=
p(\boldsymbol{\lambda}\,\vert\,\boldsymbol{\vartheta})p(\boldsymbol{\vartheta})$.
If $p(\boldsymbol{\lambda}\,\vert\,\boldsymbol{\vartheta})$ is proper, then
\begin{eqnarray*}
p(\bm{X} \,\vert\, \boldsymbol{\vartheta}) =
\int p(\bm{X}\,\vert\, \boldsymbol{\vartheta}, \boldsymbol{\lambda})
p(\boldsymbol{\lambda}\,\vert\,\boldsymbol{\vartheta}) \, d\boldsymbol{\lambda}
\end{eqnarray*}
is also a proper statistical model.
If viewed as a function of $\boldsymbol{\vartheta}$, $p(\bm{X} \,\vert\, \boldsymbol{\vartheta})$ is called \textit{integrated likelihood}.
\begin{proof}
The proof is immediate if one can interchange the order of integration between $\bm{X}$ and $\boldsymbol{\lambda}$.
\end{proof}
\noindent
In addition, it follows that if $p(\boldsymbol{\vartheta})$ is also proper, then the posterior of $\boldsymbol{\vartheta}$, $p(\boldsymbol{\vartheta}\,\vert\,\bm{X})$, will also be proper.
\end{prop}
\subsection{Propriety of \boldmath $p(X_1 \,\vert\, \theta_0,y, X_{-1})$}
We can first write
\begin{eqnarray*}
p(\bm{X}_1 \,\vert\, \theta_0, \bm{y}, \bm{X}_{-1}) &\propto&
p(\bm{X}_{-1}\,\vert\, \bm{y},\theta_0,\bm{X}_1)
p(\bm{y}\,\vert\, \theta_0,\bm{X}_1)
p(\bm{X}_1\,\vert\,\theta_0) \\
&=&
p(\bm{X}_{-1}\,\vert\, \theta_0,\bm{X}_1)
p(\bm{y}\,\vert\, \theta_0,\bm{X}_1)
p(\bm{X}_1\,\vert\,\theta_0)
\end{eqnarray*}
since $p(\bm{X}_{-1}\,\vert\, \bm{y},\theta_0,\bm{X}_1) = p(\bm{X}_{-1}\,\vert\, \theta_0,\bm{X}_1)$ and $p(\theta_0)\propto 1$.
The first term can be written as
\begin{eqnarray*}
p(\bm{X}_{-1}\,\vert\, \theta_0,\bm{X}_1) =
\sum_{\mathcal{D}}\left\{
\int \int p(\bm{X}_{-1}\,\vert\, \bm{D}, \bm{L}, \mathcal{D},\theta_0,\bm{X}_1)
p(\bm{D},\bm{L}\,\vert\,\mathcal{D}, \theta_0,\bm{X}_1)
\, d\bm{D} \, d\bm{L}
\right\}
p(\mathcal{D}\,\vert\,\theta_0,\bm{X}_1).
\end{eqnarray*}
Now notice that
\begin{eqnarray*}
p(\bm{D},\bm{L}\,\vert\,\mathcal{D}, \theta_0,\bm{X}_1)
&\propto&
p(\bm{X}_1\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0) p(\bm{D},\bm{L}\,\vert\, \mathcal{D},\theta_0) \\
&\propto&
p(\bm{X}_1\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0) p(\bm{D},\bm{L}\,\vert\, \mathcal{D}),
\end{eqnarray*}
and that we can write
\begin{eqnarray*}
p(\bm{X}_1\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0)
&=&
\sum_{\bm{y}\in\{0,1\}^n}
\left\{
\int
f(\bm{y},\bm{X}\,\vert\, \bm{D},\bm{L},\theta_0)
\, d\bm{X}_{-1}
\right\} \\
&=&
\sum_{\bm{y}\in\{0,1\}^n}
\left\{
\int
f(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D})
\, d \bm{X}_{-1}
\prod_{i=1}^{n}
\mathbbm{1}(\theta_{y_i-1} < x_{i,1} \le \theta_{y_i})
\right\} \\
&=&
d\mathcal{N}_n(\bm{X}_1\,\vert\,\bm{0},\tau_1^2\bm{I}_n)
\sum_{\bm{y}\in\{0,1\}^n}
\left\{
\prod_{i=1}^{n}
\mathbbm{1}(\theta_{y_i-1} < x_{i,1} \le \theta_{y_i})
\right\} \\
&=&
d\mathcal{N}_n(\bm{X}_1\,\vert\,\bm{0},\tau_1^2\bm{I}_n),
\end{eqnarray*}
being $\tau_1^2=[(\bm{L}^\top)^{-1}\bm{D} \bm{L}^{-1}]_{11}$, because for each fixed $\bm{X}_1=(x_{1,1},\dots,x_{n,1})^\top$ only one of the $2^n$ product of indicators will hold true and therefore $\sum_{y\in\{0,1\}^n}\{\dots\}=1$.
Hence, $p(\bm{X}_1\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0)$ corresponds to a (proper) multivariate normal density.
Also, $p(\bm{D},\bm{L}\,\vert\,\mathcal{D})$ is a proper (prior) distribution by assumption.
It follows that $p(\bm{X}_{-1}\,\vert\, \theta_0,\bm{X}_1)$ is an integrated likelihood because it is obtained with proper priors $p(\bm{D},\bm{L}\,\vert\,\mathcal{D}, \theta_0,\bm{X}_1)$ and $p(\mathcal{D}\,\vert\, \theta_0, \bm{X}_1)$.
Moreover, $p(\bm{y}\,\vert\,\theta_0,\bm{X}_1)$ is also proper because
\begin{eqnarray}
p(\bm{y}\,\vert\,\theta_0,\bm{X}_1)=
\begin{cases}
1 & \text{if $y_i = \mathbbm{1}(x_{i,1}>\theta_0)$ for each $i=1,\dots,n$},\\
0
& \text{otherwise}.
\end{cases}
\end{eqnarray}
Finally we have
\begin{eqnarray*}
p(\bm{X}_1\,\vert\,\theta_0) &=&
\sum_{\mathcal{D}}\left\{
\int \int
p(\bm{X}_1\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0)
p(\bm{D},\bm{L}\,\vert\,\theta_0,\mathcal{D})
\, d\bm{D} \, d \bm{L}
\right\}
p(\mathcal{D}\,\vert\,\theta_0) \\
&=&
\sum_{\mathcal{D}}\left\{
\int
d\mathcal{N}_n(\bm{X}_1\,\vert\,\bm{0},\tau_1^2\bm{I}_n)
p(\tau_1^2\,\vert\,\mathcal{D})
\, d\tau_1^2
\right\}
p(\mathcal{D}),
\end{eqnarray*}
which is also proper because the family of densities for $\bm{X}_1$ is integrated w.r.t. proper priors
$p(\tau_1^2\,\vert\,\mathcal{D})$ (induced by the proper prior on $(\bm{D},\bm{L})$) and $p(\mathcal{D})$.
\color{black}
\subsection{Propriety of \boldmath $p(D,L \,\vert\, \theta_0,X)$}
Consider now
\begin{eqnarray*}
p(\bm{D},\bm{L} \,\vert\, \theta_0,\bm{X}) =
\sum_{\mathcal{D}}
p(\bm{D},\bm{L} \,\vert\, \mathcal{D}, \theta_0,\bm{X})
p(\mathcal{D}\,\vert\, \theta_0,\bm{X}).
\end{eqnarray*}
First notice that
\begin{eqnarray*}
p(\bm{D},\bm{L} \,\vert\, \mathcal{D}, \theta_0,\bm{X})
&\propto&
p(\bm{X}\,\vert\, \bm{D},\bm{L},\mathcal{D},\theta_0)
p(\bm{D},\bm{L}\,\vert\,\mathcal{D},\theta_0) \\
&=&
p(\bm{X}\,\vert\, \bm{D},\bm{L},\mathcal{D},\theta_0)
p(\bm{D},\bm{L}\,\vert\,\mathcal{D}),
\end{eqnarray*}
where
\begin{eqnarray*}
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0)
&=&
\sum_{\bm{y}\in\{0,1\}^n}
p(\bm{y},\bm{X}\,\vert\, \bm{D},\bm{L},\theta_0)
\\
&=&
\sum_{\bm{y}\in\{0,1\}^n}
\left\{
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D})
\prod_{i=1}^{n}
\mathbbm{1}(\theta_{y_i-1} < x_{i,1} \le \theta_{y_i})
\right\} \\
&=&
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D}),
\end{eqnarray*}
arguing as in Section 3.2. Therefore we can write $p(\bm{D},\bm{L}\,\vert\,\mathcal{D},\theta_0,\bm{X}) = p(\bm{D},\bm{L}\,\vert\,\mathcal{D},\bm{X})$ which is a proper DAG Wishart distribution.
In addition,
$
p(\mathcal{D}\,\vert\,\theta_0,\bm{X})
\propto
p(\bm{X}\,\vert\,\mathcal{D},\theta_0) p(\mathcal{D}\,\vert\,\theta_0) =
p(\bm{X}\,\vert\,\mathcal{D},\theta_0) p(\mathcal{D})
$
where
\begin{eqnarray*}
p(\bm{X}\,\vert\,\theta_0,\mathcal{D}) &=&
\int \int
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0) p(\bm{D},\bm{L}\,\vert\,\mathcal{D},\theta_0)
\, d\bm{D} \, d\bm{L} \\
&=&
\int \int
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D}) p(\bm{D},\bm{L}\,\vert\,\mathcal{D})
\, d\bm{D} \, d\bm{L}
\end{eqnarray*}
is again an integrated (augmented) likelihood with a proper prior $p(\bm{D},\bm{L}\,\vert\,\mathcal{D})$ and therefore proper.
Hence, $p(\mathcal{D}\,\vert\,\theta_0,\bm{X})$ is proper according to Proposition \ref{prop:integrated:like:proper} because $p(\mathcal{D})$ is also a proper prior.
Therefore, $p(\bm{D},\bm{L} \,\vert\, \theta_0,\bm{X})$ is proper because it corresponds to a finite mixture of proper priors.
\subsection{Propriety of \boldmath $p(\mathcal{D} \,\vert\, D,L,\theta_0,X)$}
Finally, we can write
\begin{eqnarray*}
p(\mathcal{D} \,\vert\, \bm{D},\bm{L},\theta_0,\bm{X}) &\propto&
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D},\theta_0)p(\mathcal{D}\,\vert\,\theta_0) \\
&=&
p(\bm{X}\,\vert\,\bm{D},\bm{L},\mathcal{D})p(\mathcal{D}),
\end{eqnarray*}
which will be proper because $\mathcal{D}$ belongs to a finite space.
\section{MCMC convergence diagnostics}
In order to fix the number of MCMC iterations in our simulation study we have run a few pilot simulations that we used to perform diagnostics of convergence.
Specifically, for each pilot simulation we ran two independent chains of length $T_1$ and $T_2$. We fixed $T_1=25000$ and $T_2=50000$ in the $q=20$ setting, while $T_1=50000$ and $T_2=100000$ for $q=40$.
We compare the BMA causal effect estimates (as defined in Equation (27) of our paper) obtained under the two independent chains.
Figure \ref{fig:conv_causal_effect} shows the scatter plots of the BMA estimates obtained from the two chains for four randomly chosen intervened nodes in one of the pilot simulations for $q=20$.
By inspection, one can see that the agreement between the results is highly satisfactory since points are clustered around the main diagonal of the plot.
Therefore, $T_1$ iterations are sufficient to reach convergence. Similar results, not reported for brevity, were obtained in the $q=40$ scenarios.
\begin{figure}
\begin{center}
\begin{tabular}{cc}
\includegraphics[scale=0.22]{figures/causal_conv_1.png} &
\includegraphics[scale=0.22]{figures/causal_conv_2.png} \\
\includegraphics[scale=0.22]{figures/causal_conv_3.png} &
\includegraphics[scale=0.22]{figures/causal_conv_4.png}
\end{tabular}
\caption{\small Scatter plots of the BMA estimates for four randomly chosen intervened nodes, obtained from two chains of length $T_1=25000$ and $T_2=50000$ (pilot simulation for the $q=20$ setting).}
\label{fig:conv_causal_effect}
\end{center}
\end{figure}
\section{Simulation results and comparisons}
To emphasize the crucial role played by the set $fa(s)$ in causal effect estimation, we compare our method with an alternative setup which does not adjust for confounders.
Specifically, we perform inference of causal effects under a fixed \textit{naive} DAG wherein all variables, save for the response, are disjoint and each is a parent of the response; see Figure \ref{fig:regr:dag} for an example on $q=4$ nodes.
The resulting DAG depicts a situation similar to that of a standard (Bayesian) probit regression model, wherein the covariates are not fixed but rather random and jointly independent with marginal normal distributions, $X_j\sim\mathcal{N}(0,\sigma_j^2)$, $j=2,\dots,q$.
\begin{figure}
\begin{center}
\begin{tabular}{c}
{\normalsize
\tikz \graph [no placement, math nodes, nodes={circle}]
{
{X_2[x=0,y=-0.5], X_3[x=2,y=0], X_4[x=4,y=-0.5]}-> X_1[x=2,y=-2]};
}
\end{tabular}
\end{center}
\caption{\small An instance of \textit{naive} DAG with $q=4$ nodes used in the alternative method under comparison.}
\label{fig:regr:dag}
\end{figure}
We then construct simulation scenarios as detailed in Section 6 of our paper.
Results are summarized in the box-plots of Figure \ref{fig:simulations:cfr}, where we report the distribution of the Mean Absolute Error (MAE, constructed across the $40$ DAGs and nodes $s=2,\dots,q$) under the two strategies as a function of $n$.
As expected, our original method which fully accounts for the uncertainty on the DAG generating model, outperforms the alternative approach which is instead based on a fixed DAG not accounting for dependencies among variables.
Indeed, it appears that the causal effect estimate significantly deviates from the true causal effect even for large sample sizes.
\begin{figure}
\begin{center}
\begin{tabular}{cc}
$\quad \quad q=20$ & $\quad \quad q=40$ \\
\includegraphics[scale=0.24]{figures/q20_MAE_causal_cfr.png} &
\includegraphics[scale=0.24]{figures/q40_MAE_causal_cfr.png}
\end{tabular}
\caption{\small Distribution over $40$ datasets and nodes $s \in \{2,\dots,q\}$ of the mean absolute error (MAE) of BMA estimates of true causal effects. Results are presented for two methods under comparison, our original DAG-probit method (light grey) and the \textit{naive} DAG-based approach (dark grey), for each combination of number of nodes $q\in\{20,40\}$ and sample size $n\in\{100,200,500\}$.}
\label{fig:simulations:cfr}
\end{center}
\end{figure}
\section{Computational time}
In this section we investigate the computational time of our method as a function of the number of variables $q$ and sample size $n$.
The following plots summarize the behavior of the running time (averaged over 40 replicates) \textit{per} iteration, as a function of $q \in \{5,10,20,50,100\}$ for $n = 500$, and as a function of $n\in\{50,100,200,500,1000\}$ for $q=50$.
Results were obtained on a PC Intel(R) Core(TM) i7-8550U 1,80 GHz.
\begin{figure}
\begin{center}
\begin{tabular}{c}
\includegraphics[scale=0.27]{figures/time_q.png} \\
\includegraphics[scale=0.27]{figures/time_n.png}
\end{tabular}
\caption{\small Computational time (in seconds) \textit{per} iteration, as a function of the number of variables $q$ for fixed $n=500$ (upper plot) and as a function of the sample size $n$ for fixed $q=50$ (lower plot), averaged over 40 simulated datasets.}
\label{fig:computational_time}
\end{center}
\end{figure}
\end{document} |
\begin{document}
\date{}
\title{An Improved Distributed Algorithm for \\Maximal Independent Set}
\author{
Mohsen Ghaffari\\
\small MIT \\
\small [email protected]
}
\maketitle
\begin{abstract}
The Maximal Independent Set (MIS) problem is one of the basics in the study of \emph{locality} in distributed graph algorithms. This paper
presents an extremely simple randomized algorithm providing a near-optimal \emph{local complexity} for this problem, which incidentally, when combined with some known techniques, also leads to a near-optimal \emph{global complexity}.
Classical MIS algorithms of Luby [STOC'85] and Alon, Babai and Itai [JALG'86] provide the \emph{global complexity} guarantee that, with high probability\footnote{As standard, we use the phrase \emph{with high probability} to indicate that an event has probability at least $1-1/n$.}, \emph{all nodes} terminate after $O(\log n)$ rounds. In contrast, our initial focus is on the \emph{local complexity}, and our main contribution is to provide a very simple algorithm guaranteeing that \emph{each} particular node $v$ terminates after $O(\log \mathsf{deg}(v)+\log 1/\varepsilon)$ rounds, with probability at least $1-\varepsilon$.
The guarantee holds even if the randomness outside $2$-hops neighborhood of $v$ is determined adversarially.
This degree-dependency is optimal, due to a lower bound of Kuhn, Moscibroda, and Wattenhofer [PODC'04].
Interestingly, this local complexity smoothly transitions to a global complexity: by adding techniques of Barenboim, Elkin, Pettie, and Schneider [FOCS'12; arXiv: 1202.1983v3], we\footnote{\emph{quasi nanos, gigantium humeris insidentes}} get a randomized MIS algorithm with a high probability global complexity of $O(\log \Delta) + 2^{O(\sqrt{\log \log n})}$, where $\Delta$ denotes the maximum degree. This improves over the $O(\log^2 \Delta) + 2^{O(\sqrt{\log \log n})}$ result of Barenboim et al., and gets close to the $\Omega(\min\{\log \Delta, \sqrt{\log n}\})$ lower bound of Kuhn et al.
Corollaries include improved algorithms for MIS in graphs of upper-bounded arboricity, or lower-bounded girth, for Ruling Sets, for MIS in the Local Computation Algorithms (LCA) model, and a faster distributed algorithm for the Lov\'{a}sz Local Lemma.
\end{abstract}
\vspace*{-7mm}
\setcounter{page}{0}
\thispagestyle{empty}
\section{Introduction and Related Work}\label{sec:intro}
Locality sits at the heart of distributed computing theory and is studied in the medium of problems such as Maximal Independent Set (MIS), Maximal Matching (MM), and Coloring. Over time, MIS has been of special interest as the others reduce to it. The story can be traced back to the surveys of Valiant\cite{valiant1983parallel} and Cook\cite{cook1983overview} in the early 80's which mentioned MIS as an interesting problem in non-centralized computation, shortly after followed by (poly-)logarithmic algorithms of Karp and Wigderson\cite{KarpWigderson}, Luby\cite{luby1985simple}, and Alon, Babai, and Itai\cite{alon1986fast}. Since then, this problem has been studied extensively. We refer the interested reader to \cite[Section 1.1]{barenboim2012locality}, which provides a thorough and up to date review of the state of the art.
In this article, we work with the standard distributed computation model called $\mathsf{LOCAL}$\cite{peleg:2000}: the network is abstracted as a graph $G=(V, E)$ where $|V|=n$; initially each node only knows its neighbors; communications occur in synchronous rounds, where in each round nodes can exchange information only with their graph neighbors.
In the $\mathsf{LOCAL}$ model, besides its practical application, the distributed computation time-bound has an intriguing purely graph-theoretic meaning: it identifies the radius up to which one needs to look to determine the output of each node, e.g., its color in a coloring. For instance, results of \cite{luby1985simple, alon1986fast} imply that looking only at the $O(\log n)$-hop neighborhood suffices, w.h.p.
\subsection{Local Complexity}
\label{subsec:localIntro}
Despite the local nature of the problem, classically the main focus has been on the global complexity, i.e., the time till all nodes terminate. Moreover, somewhat strikingly, the majority of the standard analyses also take a non-local approach: often one considers the whole graph and shows guarantees on how the algorithm makes a \emph{global progress} towards its \emph{local objectives}. A prominent example is the results of \cite{luby1985simple, alon1986fast} where the analysis shows that per round, in expectation, half of the edges of the whole network get removed\footnote{These analyses do not provide any uniformity guarantee for the removed edges.}, hence leading to the \emph{global complexity} guarantee that after $O(\log n)$ rounds, with high probability, the algorithm terminates everywhere.
See \Cref{warmup}.
This issue seemingly suggests a gap in our understanding of \emph{locality}. The starting point in this paper is to question whether this global mentality is necessary for obtaining the \emph{tight} bound\footnote{Without insisting on tightness, many straightforward (but weak) complexities can be given using local analysis.}. That is, can we instead provide a tight bound using \emph{local analysis}, i.e., an analysis that only looks at a node and some small neighborhood of it? To make the difference more sensible, let us imagine $n\rightarrow \infty$ and seek time-guarantees independent of $n$.
Of course this brings to mind locality-based lower bounds which at first glance can seem to imply a negative answer: Linial\cite{linial1992locality} shows that even in a simple \emph{cycle} graph, MIS needs $\Omega(\log^* n)$ rounds, and Kuhn, Moscibroda and Wattenhofer\cite{kuhn2004localLB} prove that it requires $\Omega(\sqrt{\log n})$ rounds in some well-crafted graphs. But there is a catch: these lower bounds state that the time till all nodes terminate is at least so much. One can still ask, what if we want a time-guarantee for \emph{each single node} instead of \emph{all nodes}? While in the deterministic case these time-guarantees, called respectively \emph{local} and \emph{global} complexities, are equivalent, they can differ when the guarantee that is to be given is probabilistic, as is usual in randomized algorithms. Note that, the local complexity is quite a useful guarantee, even on its own. For instance, the fact that in a cycle, despite Linial's beautiful $\Omega(\log^* n)$ lower bound, the vast majority of nodes are done within $O(1)$ rounds is a meaningful property and should not be ignored. To be concrete, our starting question now is:
\begin{mdframed}[hidealllines=false,backgroundcolor=gray!10]
\textbf{Local Complexity Question}: How long does it take till each particular node $v$ terminates, and knows whether it is in the (computed) MIS or not, with probability at least $1-\varepsilon$?
\end{mdframed}
Using $\Delta$ to denote the maximum degree, one can obtain answers such as $O(\log^{2} \Delta+ \log 1/\varepsilon)$ rounds for Luby's algorithm, or $O(\log \Delta \log \log \Delta$ $+ \log \Delta \log 1/\varepsilon)$ rounds for the variant of Luby's used by Barenboim, Elkin, Pettie, and Schneider\cite{barenboim2012locality} and Chung, Pettie, and Su~\cite{chung2014LLL}. However, both of these bounds seem to be off from the right answer; e.g., one cannot recover from these the standard $O(\log n)$ high probability global complexity bound. In the first bound, the first term is troublesome and in the latter, the second term becomes the bottleneck. In both, the high probability bound becomes $O(\log^2 n)$ when one sets $\Delta=n^\delta$ for a constant $\delta>0$.
We present an extremely simple algorithm that overcomes this problem and provides a local complexity of $O(\log \Delta+ \log 1/\varepsilon)$. More formally, we prove that:
\begin{theorem}
\label{thm:local} There is a randomized distributed MIS algorithm for which, for each node $v$, the probability that $v$ has not made its decision after the first $O(\log \mathsf{deg}(v) + \log 1/\varepsilon)$ rounds is at most $\varepsilon$. Furthermore, this holds even if the bits of randomness outside the $2$-hops neighborhood of $v$ are determined adversarially.
\end{theorem}
The perhaps surprising fact that the bound only depends on the degree of node $v$, even allowing its neighbors to have infinite degree, demonstrates the \emph{truly local} nature of this algorithm.
The logarithmic degree-dependency in the bound is optimal, following a lower bound of Kuhn, Moscibroda and Wattenhofer~\cite{kuhn2004localLB}: As indicated by \cite{Fabian}, with minor changes in the arguments of~\cite{kuhn2004localLB}, one can prove that there are graphs in which, the time till each node $v$ can know if it is in MIS or not with constant probability is at least $\Omega(\log \Delta)$ rounds.
Finally, we note that the fact that \emph{the proof has a locality of $2$-hops}---meaning that the analysis only looks at the $2$-hops neighborhood and particularly, that the guarantee relies only on the coin tosses within the $2$-hops neighborhood of node $v$---will prove vital as we move to global complexity. This might be interesting for practical purposes as well.
\subsection{Global Complexity}
\label{subsec:global}
Notice that \Cref{thm:local} easily recovers the standard result that after $O(\log n)$ rounds, w.h.p., all nodes have terminated, but now with a local analysis. In light of the $\Omega(\min\{\log \Delta, \sqrt{\log n}\})$ lower bound of Kuhn et al.~\cite{kuhn2004localLB}, it is interesting to find the best possible upper bound, specially when $\log \Delta = o(\log n)$. The best known bound prior to this work was $O(\log^2 \Delta) + 2^{O(\sqrt{\log \log n})}$ rounds, due to Barenboim et al.\cite{barenboim2012locality}.
The overall plan is based on the following nice and natural intuition, which was used in the MIS results of Alon et al.\cite{alon2012LCA} and Barenboim et al.\cite{barenboim2012locality}. We note that this general strategy is often attributed to Beck, as he used it first in his breakthrough algorithmic version of the Lov{\'a}sz Local Lemma\cite{beck1991LLL}. Applied to MIS, the intuition is that, when we run any of the usual randomized MIS algorithms, nodes get removed probabilistically more and more over time. If we run this \emph{base algorithm} for a certain number of rounds, a \emph{graph shattering} type of phenomena occurs. That is, after a certain time, what remains of the graph is a number of ``small'' components, where small might be in regard to size, (weak) diameter, the maximum size of some specially defined independent sets, or some other measure. Once the graph is shattered, one switches to a deterministic algorithm to \emph{finish off} the problem in these remaining small components.
Since we are considering graphs with max degree $\Delta$, even ignoring the troubling probabilistic dependencies (which are actually rather important), a simplistic intuition based on \emph{Galton-Watson branching processes} tells us that the graph shattering phenomena starts to show up around the time that the probability $\varepsilon$ of each node being left falls below $1/\Delta$\footnote{In truth, the probability threshold is $1/\operatorname{\text{{\rm poly}}}(\Delta)$, because of some unavoidable dependencies. But due to the exponential concentration, the time to reach the $1/\operatorname{\text{{\rm poly}}}(\Delta)$ threshold is within a constant factor of that of the $1/\Delta$ threshold. We will also need to establish some independence, which is not discussed here. See \Cref{sec:global}.}. Alon et al.\cite{alon2012LCA} used an argument of Parnas and Ron~\cite{parnas2007approximating}, showing that Luby's algorithm reaches this threshold after $O(\Delta \log \Delta)$ rounds. Barenboim et al.\cite{barenboim2012locality} used a variant of Luby's, with a small but clever modification, and showed that it reaches the threshold after $O(\log^2 \Delta)$ rounds. As Barenboim et al.\cite{barenboim2012locality} show, after the shattering, the remaining pieces can be solved deterministically, via the help of known deterministic MIS algorithms (and some other ideas), in $\log \Delta \cdot 2^{O(\sqrt{\log \log n})}$ rounds. Thus, the overall complexity of \cite{barenboim2012locality} is $O(\log^2 \Delta) + \log \Delta \cdot 2^{O(\sqrt{\log \log n})} = O(\log^2 \Delta) + 2^{O(\sqrt{\log \log n})}$.
To improve this, instead of Luby's, we use our new MIS algorithm as the base, which as \Cref{thm:local} suggests, reaches the shattering threshold after only $O(\log \Delta)$ rounds. This will be formalized in \Cref{sec:global}. We will also use some minor modifications for the \emph{post-shattering} phase to reduce its complexity from $\log \Delta \cdot 2^{O(\sqrt{\log \log n})}$ to $2^{O(\sqrt{\log \log n})}$. The overall result thus becomes:
\begin{theorem}
\label{thm:global} There is a randomized distributed MIS algorithm that terminates after $O(\log\Delta) + 2^{O(\sqrt{\log \log n})}$ rounds, with probability at least $1-1/n$.
\end{theorem}
This improves the best-known bound for MIS and gets close to the $\Omega(\min\{\log \Delta, \sqrt{\log n}\})$ lower bound of Kuhn et al.\cite{kuhn2004localLB}, which at the very least, shows that the upper bound is provably optimal when $\log \Delta \in [2^{\sqrt{\log\log n}}, \sqrt{\log n}]$. Besides that, the new result matches the lower bound in a stronger and much more instructive sense: as we will discuss in point (C2) below, it perfectly pinpoints why the current lower bound techniques cannot prove a lower bound better than $\Omega(\min\{\log \Delta, \sqrt{\log n}\})$.
\subsection{Other Implications}
\label{subsec:implications}
Despite its extreme simplicity, the new algorithm turns out to lead to several implications, when combined with some known results and/or techniques:
\begin{enumerate}
\item[(C1)] Combined with the finish-off phase results of Barenboim et al.\cite{barenboim2012locality}, we get MIS algorithms with complexity $O(\log \Delta) + O(\min\{\lambda^{1+\varepsilon}+\log \lambda \log\log n, \lambda+ \lambda^{\varepsilon} \log\log n, \lambda+ (\log \log n)^{1+\varepsilon} \})$ for graphs with arboricity $\lambda$. Moreover, combined with the low-arboricity to low-degree reduction of Barenboim et al.\cite{barenboim2012locality}, we get an MIS algorithm with complexity $O(\log \lambda + \sqrt{\log n})$. These bounds improve over some results of \cite{barenboim2012locality}, Barenboim and Elkin\cite{barenboim2010sublogarithmic}, and Lenzen and Wattenhofer\cite{lenzen2011mis}.
\item[(C2)] \label{tightness} The new results highlight the barrier of the current lower bound techniques. In the known locality-based lower bound arguments, including that of~\cite{kuhn2004localLB}, to establish a $T$-round lower bound, it is necessary that within $T$ rounds, each node sees only a tree. That is, each $T$-hops neighborhood must induce a tree, which implies that the girth must be at least $2T+1$. Since any $g$-girth graph has arboricity $\lambda \leq O(n^{\frac{2}{g-2}})$, from (C1), we get an $O(\sqrt{\log n})$-round MIS algorithm when $g=\Omega(\sqrt{\log n})$. More precisely, for any graph with girth $g=\Omega(\min\{\log \Delta, \sqrt{\log n}\})$, we get an $O(\min\{\log \Delta + 2^{O(\sqrt{\log \log n})}, \sqrt{\log n}\})$-round algorithm. Hence, the $\Omega(\min\{\log \Delta, \sqrt{\log n}\})$ lower bound of~\cite{kuhn2004localLB} is essentially the best-possible when the topology seen by each node within the allowed time must be a tree. This means, to prove a better lower bound, one has to part with these \emph{``tree local-views''} topologies. However, that gives rise to intricate challenges and actually, to the best of our knowledge, there is no distributed locality-based lower bound, in fact for any (local) problem, that does not rely on \emph{tree local-views}.
\item[(C3)] We get an $O(\sqrt{\log n})$-round MIS algorithm for Erd\H{o}s-R\'{e}nyi random graphs $G(n, p)$. This is because, if $p =\Omega(\frac{2^{\sqrt{\log n}}}{n})$, then with high probability the graph has diameter $O(\sqrt{\log n})$ hops (see e.g. \cite{chung2001diameter}) and when $p =O(\frac{2^{\sqrt{\log n}}}{n})$, with high probability, $\Delta = O(2^{\sqrt{\log n}})$ and thus, the algorithm of \Cref{thm:global} runs in at most $O(\sqrt{\log n})$ rounds.
\item[(C4)] Combined with a recursive sparsification method of Bisht et al.\cite{bisht2014brief}, we get a $(2, \beta)$-ruling-set algorithm with complexity $O(\beta \log^{1/\beta} \Delta) + 2^{O(\sqrt{\log\log n})}$, improving on the complexities of \cite{barenboim2012locality} and \cite{bisht2014brief}. An $(\alpha, \beta)$-ruling set $S$ is a set where each two nodes in $S$ are at distance at least $\alpha$, and each node $v\in V\setminus S$ has a node in $S$ within its $\beta$-hops. So, a $(2, 1)$-ruling-set is simply an MIS. The term $O(\beta \log^{1/\beta} \Delta)$ is arguably (and even \emph{provably}, as \cite{Fabian} indicated,) best-possible for the current method, which roughly speaking works by computing the ruling set iteratively using $\beta$ successive reductions of the degree.
\item[(C5)] In the Local Computation Algorithms (LCA) model of Rubinfeld et al.\cite{rubinfeld2011fast} and Alon et al.\cite{alon2012LCA}, we get improved bounds for computing MIS. Namely, the best-known time and space complexity improve from, respectively, $2^{O(\log^3 \Delta)} \log^3 n$ and $2^{O(\log^3 \Delta)} \log^2 n$ bounds of Levi, Rubinfeld and Yodpinyanee~\cite{levi2015local} to $2^{O(\log^2 \Delta)} \log^3 n$ and $2^{O(\log^2 \Delta)} \log^2 n$.
\item[(C6)] We get a Weak-MIS algorithm with complexity $O(\log \Delta)$, which thus improves the round complexity of the distributed algorithmic version of the Lov\'{a}sz Local Lemma presented by Chung, Pettie, and Su~\cite{chung2014LLL} from $O(\log_{\frac{1}{ep(\Delta+1)}} n \cdot \log^2 \Delta)$ to $O(\log_{\frac{1}{ep(\Delta+1)}} n \cdot \log \Delta)$. Roughly speaking, a Weak-MIS computation should produce an independent set $S$ such that for each node $v$, with probability at least $1-1/\operatorname{\text{{\rm poly}}}(\Delta)$, $v$ is either in $S$ or has a neighbor in $S$.
\item[(C7)] We get an $O(\log \Delta + \log \log \log n)$-round MIS algorithm for the $\mathsf{CONGESTED}$-$\mathsf{CLIQUE}$ model where per round, each node can send $O(\log n)$-bits to each of the other nodes (even those non-adjacent to it): After running the MIS algorithm of \Cref{thm:local} for $O(\log \Delta)$ rounds, w.h.p., if $\Delta \geq n^{0.1}$, we are already done, and otherwise, as \Cref{lem:shattering} shows, all leftover components have size $o(n^{0.5})$. In the latter case, using the algorithm of \cite{fastMST-congestclique}, we can make all nodes know the \emph{leader} of their component in $O(\log \log \log n)$ rounds, and using Lenzen's routing\cite{lenzen2013route}, we can make each leader learn the topology of its whole component, solve the related MIS problem locally, and send back the answers, all in $O(1)$ rounds.
\end{enumerate}
\section{Warm Up: Local Analysis of Luby's Algorithm}
\label{warmup}
As a warm up for the MIS algorithm of the next section, here, we briefly review Luby's algorithm and present some local analysis for it. The main purpose is to point out the challenge in (tightly) analyzing the local complexity of Luby's, which the algorithm of the next section tries to bypass.
\paragraph{Luby's Algorithm} The algorithm of \cite{luby1985simple, alon1986fast} is as simple and clean as this:
{\centering
\begin{enumerate}
\item[] ``\textit{In each \texttt{round}, each node picks a random number\footnote{One can easily see that a precision of $O(\log \Delta)$ bits suffices.} uniformly from $[0, 1]$; strict local \\minima join the MIS, and get removed from the graph along with their neighbors.}''
\end{enumerate}
}
\noindent Note that each round of the algorithm can be easily implemented in $2$ communication rounds on $G$, one for exchanging the random numbers and the other for informing neighbors of newly joined MIS nodes. Ignoring this 2 factor, in the sequel, by \emph{round} we mean one round of the algorithm.
\paragraph{Global Analysis} The standard method for analyzing Luby's algorithm goes via looking at the whole graph, i.e., using a \emph{global view}. See \cite[Section 12.3]{motwani-raghavan}, \cite[Section 8.4]{peleg:2000}, \cite[Section 4.5]{lynch1996distributed} for textbook treatments. We note that this is the only known way for proving that this algorithm terminates everywhere in $O(\log n)$ rounds with high probability. The base of the analysis is to show that per iteration, in expectation, at least half of the edges (of the whole remaining graph) get removed. Although the initial arguments in \cite{luby1985simple, alon1986fast} were more lengthy, Yves et al.\cite{YvesMIS} pointed out a much simpler argument for this. See \Cref{Yves}, which describes (a paraphrased version of) their argument. By Markov's inequality, this per-round halving implies that after $O(\log n)$ rounds, the algorithm terminates everywhere, with high probability.
\subsection{Local Analysis: Take 1}
To analyze the algorithm in a local way, and to bound its local complexity, the natural idea is to say that over time, each local neighborhood gets ``simplified". Particularly, the first-order realization of this intuition would be to look at the degrees and argue that they shrink with time. The following standard observation is the base tool in this argument:
\begin{claim}\label{clm:degree-drop} Consider a node $u$ at a particular round, let $d(u)$ be its degree and $d_{max}$ be the maximum degree among the nodes in the inclusive neighborhood $N^{+}(u)$ of $u$. The probability that $u$ is removed in this round is at least $\frac{d(u)+1}{d(u)+d_{max}}$.
\end{claim}
\begin{proof} Let $u^*$ be the node in $N^{+}(u)$ that draws the smallest random number. If $u^*$ actually has the smallest in its own neighborhood, then it will join MIS which means $u$ gets removed. Since all numbers are iid random variables, and as $u^*$ is the smallest number of $d(u)+1$ of them, the probability that it is the smallest both in its own neighborhood and the neighborhood of $u$ is at least $\frac{d(u)+1}{d(u)+d_{max}}$. This is because, the latter is a set of size at most $d(u)+d_{max}$.
\end{proof}
From the claim, we get that if the degree of a node $u$ is at least half of that of the max of its neighbors, then in one round, with probability at least $1/3$, $u$ gets removed. Thus, in $\alpha=O(1)$ rounds from the start, either $u$ is removed or its degree falls below $\Delta/2$, with probability at least $1/2$. We would like to continue this argument and say that every $O(1)$ rounds, $u$'s degree shrinks by another 2 factor, thus getting a bound of $O(\log \Delta)$. However, this is not straightforward as $u$'s degree drops might get delayed because of delays in the degree drops of $u$'s neighbors. The issue seems rather severe as the degree drops of different nodes can be positively correlated.
Next, we explain a simple argument giving a weak but still local complexity of $O(\log^{2.5} \Delta + \log \Delta \log 1/\varepsilon)$ rounds: For the purpose of this paragraph, let us say a removed node has degree $0$. From above, we get that after $10\alpha\log^{1.5} \Delta$ rounds, the probability that $u$ still has degree at least $\Delta/2$ is at most $2^{-10\log^{1.5} \Delta}$. Thus, using a union bound, we can say that with probability at least $1-(\Delta+1) 2^{-10\log^{1.5} \Delta}$, after $10\alpha\log^{1.5} \Delta$ rounds, $u$ and all its neighbors have degree at most $\Delta/2$. Hence, with probability at least $1-(\Delta+2) 2^{-10\log^{1.5} \Delta}$, after $20\alpha\log^{1.5} \Delta$ rounds, node $u$ has another drop and its degree is at most $\Delta/4$. Continuing this argument pattern recursively for $\log^{0.5} \Delta$ iterations, we get that with probability at least $1-(\Delta+2)^{\log^{0.5} \Delta} \;\cdot\; 2^{-10\log^{1.5} \Delta} \geq 1-2^{-5\log^{1.5} \Delta}$, after $10\alpha\log^{2} \Delta$ rounds, node $u$'s degree has dropped to $\Delta/2^{\log^{0.5} \Delta}$. Now, we can repeat a similar argument, but in blocks of $10\alpha\log^{2} \Delta$ rounds, and each time expecting a degree drop of $2^{\log^{0.5} \Delta}$ factor. We will be able to afford to continue this for $\log^{0.5} \Delta$ iterations and say that, after $10\alpha\log^{2.5} \Delta$ rounds, with probability at least $1-(\Delta+2)^{\log^{0.5} \Delta} \;\cdot\; 2^{-5\log^{1.5} \Delta} \geq 1-2^{-\log^{1.5} \Delta}$, the degree of $u$ has dropped to $1/2$. Since a degree less than $1/2$ means degree $0$, which in turn implies that $u$ is removed, we get that $u$ is removed after at most $O(\log^{2.5} \Delta)$ rounds with probability at least $1-2^{-\Omega(\log^{1.5} \Delta)}$. A simple repetition argument proves that this generalizes to show that after $O(\log^{2.5} \Delta + \log \Delta \log 1/\varepsilon)$ rounds, node $u$ is removed with probability at least $1-\varepsilon$.
In the full version of this paper, we will present a stronger (but also much more complex) argument which proves a local complexity of $O(\log^2 \Delta + \log 1/\varepsilon)$ for the same algorithm. This bound has the desirable additive $\log 1/\varepsilon$ dependency on $\varepsilon$ but it is still far from the best possible bound, due to the first term.
\subsection{Local Analysis: Take 2}
Here, we briefly explain the modification of Luby's algorithm that Barenboim et al.\cite{barenboim2012locality} use. The key is the following clever idea: they \emph{manually} circumvent the problem of nodes having a lag in their degree drops; that is, they \emph{kick out} of the algorithm those nodes whose degree drops are lagging significantly, as these nodes can create trouble for other nodes in their vicinity.
Formally, they divide time into phases of $\Theta(\log \log \Delta +\log 1/\varepsilon)$ rounds and require that by the end of phase $k$, each node has degree at most $\Delta/2^{k}$. At the end of each phase, each node that has a degree higher than the allowed threshold is \emph{kicked out}. The algorithm is run for $\log \Delta$ phases. From \Cref{clm:degree-drop}, we can see that the probability that a node that has survived up to phase $i-1$ gets \emph{kicked out} in phase $i$ is at most $2^{- \Theta(\log \log \Delta +\log 1/\varepsilon)} = \frac{\varepsilon}{\log \Delta}$. Hence, the probability that a given node $v$ gets kicked out in one of the $\log \Delta$ phases is at most $\varepsilon$. This means, by the end of $\Theta(\log \Delta \log \log \Delta + \log \Delta \log 1/\varepsilon)$ rounds, with probability $1-\varepsilon$, node $v$ is not kicked out and is thus removed because of having degree $0$. That is, it joined or has a neighbor in the MIS.
This $\Theta(\log \Delta \log \log \Delta + \log \Delta \log 1/\varepsilon)$ local complexity has an improved $\Delta$-dependency (and the guarantee has some nice independence type of properties). However, as mentioned in \Cref{subsec:localIntro}, its $\varepsilon$-dependency is not desirable, due to the $\log \Delta$ factor. Note that this is exactly the reason that the \emph{shattering threshold} in the result of Barenboim et al.\cite{barenboim2012locality} is $O(\log^2 \Delta)$ rounds.
\section{The New Algorithm and Its Local Complexity}
Here we present a very simple and clean algorithm that guarantees for each node $v$ that after $O(\log \Delta + \log 1/\varepsilon)$ rounds, with probability at least $1-\varepsilon$, node $v$ has terminated and it knows whether it is in the (computed) MIS or it has a neighbor in the (computed) MIS.
\paragraph{The Intuition} Recall that the difficulty in locally analyzing Luby's algorithm was the fact that the degree-dropping progresses of a node can be delayed by those of its neighbors, which in turn can be delayed by their own neighbors, and so on (up to $\log \Delta$ hops). To bypass this issue, the algorithm presented here tries to completely disentangle the ``progress'' of node $v$ from that of nodes that are far away, say those at distance above $3$.
The intuitive base of the algorithm is as follows. There are two scenarios in which a node $v$ has a good chance of being removed: either (1) $v$ is trying to join MIS and it does not have too many competing neighbors, in which case $v$ has a shot at joining MIS, or (2) a large enough number of neighbors of $v$ are trying to join MIS and each of them does not have too much competition, in which case it is likely that one of these neighbors of $v$ joins the MIS and thus $v$ gets removed. These two cases also depend only on $v$'s 2-neighborhood. Our key idea is to create an essentially deterministic \emph{dynamic} which has these two scenarios as its (more) stable points and makes each node $v$ spend a significant amount of time in these two scenarios, unless it has been removed already.
\begin{mdframed}[hidealllines=false,backgroundcolor=gray!30]
\paragraph{The Algorithm}
In each round $t$, each node $v$ has a \emph{desire-level} $p_t(v)$ for joining MIS, which initially is set to $p_0(v)=1/2$. We call the total sum of the desire-levels of neighbors of $v$ its \emph{effective-degree} $d_{t}(v)$, i.e., $d_t(v)=\sum_{u \in N(v)} p_{t}(u)$. The desire-levels change over time as follows: $$p_{t+1}(v)=
\begin{cases}
p_{t}(v)/2, & \text{if } d_{t}(v)\geq 2\\
\min\{2p_{t}(v), 1/2\}, &\text{if } d_{t}(v)< 2.
\end{cases}
$$
The desire-levels are used as follows: In each round, node $v$ gets \emph{marked} with probability $p_{t}(v)$ and if no neighbor of $v$ is marked, $v$ joins the MIS and gets removed along with its neighbors\footnotemark.
\end{mdframed}
\addtocounter{footnote}{0}\footnotetext{There is a version of Luby's algorithm which also uses a similar marking process. However, at each round, letting $deg(v)$ denote the number of the neighbors of $v$ remaining at that time, Luby's sets the marking probability of each node $v$ to be $\frac{1}{deg(v)+1}$, which by the way is the same as the probability of $v$ being a local minima in the variant described in \Cref{warmup}. Notice that this is a very strict fixing of the marking probability, whereas in our algorithm, we change the probability dynamically/flexibly over time, trying to push towards the two desirable scenarios mentioned in the intuition, and in fact, this simple dynamic is the key ingredient of the new algorithm.}
Again, each round of the algorithm can be implemented in $2$ communication rounds on $G$, one for exchanging the desire-levels and the marks, and the other for informing neighbors of newly joined MIS nodes. Ignoring this 2 factor, in the sequel, each round means a round of the algorithm.
\paragraph{The Analysis} The algorithm is clearly correct meaning that the set of nodes that join the MIS is indeed an independent set and the algorithm terminates at a node only if the node is either in MIS or adjacent to a node in MIS. We next argue that each node $v$ is likely to terminate quickly.
\begin{theorem} \label{thm:local-restate} For each node $v$, the probability that $v$ has not made its decision within the first $\beta(\log \mathsf{deg} + \log 1/\varepsilon)$ rounds, for a large enough constant $\beta$ and where $\mathsf{deg}$ denotes $v$'s degree at the start of the algorithm, is at most $\varepsilon$. Furthermore, this holds even if the outcome of the coin tosses outside $N^{+}_{2}(v)$ are determined adversarially.
\end{theorem}
Let us say that a node $u$ is \emph{low-degree} if $d_t(u)<2$, and \emph{high-degree} otherwise. Considering the intuition discussed above, we define two types of \emph{golden rounds} for a node $v$: (1) rounds in which $d_t(v)<2$ and $p_{t}(v)= 1/2$, (2) rounds in which $d_{t}(v)\geq 1$ and at least $d_{t}(v)/10$ of it is contributed by low-degree neighbors. These are called golden rounds because, as we will see, in the first type, $v$ has a constant chance of joining MIS and in the second type there is a constant chance that one of those low-degree neighbors of $v$ joins the MIS and thus $v$ gets removed. For the sake of analysis, let us imagine that node $v$ keeps track of the number of golden rounds of each type it has been in.
\begin{lemma}\label{lem:goldCount} By the end of round $\beta(\log \mathsf{deg} + \log 1/\varepsilon)$, either $v$ has joined, or has a neighbor in, the (computed) MIS, or at least one of its golden round counts reached $100 (\log \mathsf{deg} + \log 1/\varepsilon)$.
\end{lemma}
\begin{proof} We focus only on the first $\beta(\log \mathsf{deg} + \log 1/\varepsilon)$ rounds. Let $g_1$ and $g_2$ respectively be the number of golden rounds of types 1 and 2 for $v$, during this period. We assume that by the end of round $\beta(\log \mathsf{deg}+ \log 1/\varepsilon)$, node $v$ is not removed and $g_1 \leq 100 (\log \mathsf{deg} + \log 1/\varepsilon)$, and we conclude that, then it must have been the case that $g_2 > 100 (\log \mathsf{deg} + \log 1/\varepsilon)$.
Let $h$ be the number of rounds during which $d_{t}(v)\geq 2$. Notice that the changes in $p_{t}(v)$ are governed by the condition $d_{t}(v)\geq 2$ and the rounds with $d_{t}(v)\geq 2$ are exactly the ones in which $p_{t}(v)$ decreases by a $2$ factor. Since the number of $2$ factor increases in $p_{t}(v)$ can be at most equal to the number of $2$ factor decreases in it, we get that there are at least $\beta(\log \mathsf{deg} + \log 1/\varepsilon) -2h$ rounds in which $p_{t}(v)=1/2$. Now out of these rounds, at most $h$ of them can be when $d_{t}(v)\geq 2$. Hence, $g_1 \geq \beta(\log \mathsf{deg} + \log 1/\varepsilon) -3h$. As we have assumed $g_1 \leq 100 (\log \mathsf{deg} + \log 1/\varepsilon)$, we get that $\beta(\log \mathsf{deg} + \log 1/\varepsilon) -3h \leq 100 (\log \mathsf{deg} + \log 1/\varepsilon)$. Since $\beta\geq 1300$, we get $h\geq 400 (\log \mathsf{deg} + \log 1/\varepsilon)$.
Let us consider the changes in the effective-degree $d_{t}(v)$ of $v$ over time. If $d_{t}(v) \geq 1$ and this is not a golden round of type-2, then we have $$d_{t+1}(v) \leq 2 \frac{1}{10} d_{t}(v)+ \frac{1}{2} \frac{9}{10} d_{t}(v) < \frac{2}{3} d_{t}(v).$$ There are $g_2$ golden rounds of type-2. Except for these, whenever $d_{t}(v)\geq 1$, the effective-degree $d_{t}(v)$ shrinks by at least a $2/3$ factor. In those exceptions, it increases by at most a $2$ factor. Each of these exception rounds cancels the effect of at most $2$ shrinkage rounds, as $(2/3)^2 \times 2 <1$. Thus, ignoring the total of at most $3g_2$ rounds lost due to type-2 golden rounds and their cancellation effects, every other round with $d_{t}(v)\geq 2$ pushes the effective-degree down by a $2/3$ factor\footnote{Notice the switch to $d_{t}(v)\geq 2$, instead of $d_{t}(v)> 1$. We need to allow a small slack here, as done by switching to threshold $d_{t}(v)\geq 2$, in order to avoid the possible zigzag behaviors on the boundary. This is because, the above argument does not bound the number of $2$-factor increases in $d_{t}(v)$ that start when $d_{t}(v)\in (1/2, 1)$ but these would lead $d_{t}(v)$ to go above $1$. This can continue to happen even for an unlimited time if $d_{t}(v)$ keeps zigzagging around $1$ (unless we give further arguments of the same flavor showing that this is not possible). However, for $d_{t}(v)$ to go/stay above $2$, it takes increases that start when $d_{t}(v)>1$, and the number of these is upper bounded by $g_2$.}. This cannot (continue to) happen more than $\log_{3/2} \mathsf{deg}$ often as that would lead the effective degree to exit the $d_{t}(v)\geq 2$ region. Hence, the number of rounds in which $d_{t}(v)\geq 2$ is at most $\log_{3/2} \mathsf{deg} + 3g_2$. That is, $h \leq \log_{3/2} \mathsf{deg} + 3g_2$. Since $h\geq 400 (\log \mathsf{deg} + \log 1/\varepsilon)$, we get $g_2 > 100 (\log \mathsf{deg} + \log 1/\varepsilon)$.
\end{proof}
\begin{lemma} In each type-1 golden round, with probability at least $1/100$, $v$ joins the MIS. Moreover, in each type-2 golden round, with probability at least $1/100$, a neighbor of $v$ joins the MIS. Hence, the probability that $v$ has not been removed (due to joining or having a neighbor in MIS) during the first $\beta(\log \mathsf{deg} + \log 1/\varepsilon)$ rounds is at most $\varepsilon$. These statements hold even if the coin tosses outside $N^+_{2}(v)$ are determined adversarially.
\end{lemma}
\begin{proof}
In each type-1 golden round, node $v$ gets marked with probability $1/2$. The probability that no neighbor of $v$ is marked is $\prod_{u \in N(v)} (1-p_t(u)) \geq 4^{-\sum_{u \in N(v)} p_t(u)} = 4^{-d_{t}(v)} > 4^{-2}=1/16$. Hence, $v$ joins the MIS with probability at least $1/32>1/100$.
Now consider a type-2 golden round. Suppose we walk over the set $L$ of low-degree neighbors of $v$ one by one and expose their randomness until we reach a node that is marked. We will find a marked node with probability at least $$1-\prod_{u \in \textit{L}} (1-p_{t}(u)) \geq 1- e^{-\sum_{u \in \textit{L}} p_{t}(u)} \geq 1-e^{-d_{t}(v)/10} \geq 1-e^{-1/10} >0.08.$$ When we reach the first low-degree neighbor $u$ that is marked, the probability that no neighbor of $u$ gets marked is at least $\prod_{w\in N(u)} (1-p_{t}(w)) \geq 4^{-\sum_{w \in N(u)} p_t(w)} \geq 4^{-d_{t}(u)} > 1/16$. Hence, with probability at least $0.08/16=1/100$, one of the neighbors of $v$ joins the MIS.
We now know that in each golden round, $v$ gets removed with probability at least $1/100$, due to joining MIS or having a neighbor join the MIS. Thus, using \Cref{lem:goldCount}, we get that the probability that $v$ does not get removed is at most $(1-1/100)^{100(\log \mathsf{deg} + \log 1/\varepsilon)} \leq \varepsilon/\mathsf{deg} \leq \varepsilon$.
\end{proof}
\section{Improved Global Complexity}
\label{sec:global}
In this section, we explain how combining the algorithm of the previous section with some known techniques leads to a randomized MIS algorithm with a high probability global complexity of $O(\log \Delta) + 2^{O(\sqrt{\log \log n})}$ rounds.
As explained in \Cref{subsec:global}, the starting point is to run the algorithm of the previous section for $\Theta(\log \Delta)$ rounds. Thanks to the local complexity of this base algorithm, as we will show, we reach the \emph{shattering threshold} after $O(\log \Delta)$ rounds. The $2$-hops \emph{randomness locality} of \Cref{thm:local-restate}, the fact that it only relies on the randomness bits within $2$-hops neighborhood, plays a vital role in establishing this shattering phenomena. The precise statement of the shattering property achieved is given in \Cref{lem:shattering}, but we first need to establish a helping lemma:
\begin{lemma}\label{lem:smallComp} Let $c>0$ be an arbitrary constant. For any $5$-independent set of nodes $S$---that is, a set in which the pairwise distances are at least $5$---the probability that all nodes of $S$ remain undecided after $\Theta(c\log \Delta)$ rounds of the MIS algorithm of the previous section is at most $\Delta^{-c|S|}$.
\end{lemma}
\begin{proof} We walk over the nodes of $S$ one by one: when considering node $v\in S$, we know from \Cref{thm:local-restate} that the probability that $v$ stays undecided after $\Theta(c\log \Delta)$ rounds is at most $\Delta^{-c}$, and more importantly, this only relies on the coin tosses within distance $2$ of $v$. Because of the $5$-independence of set $S$, the coin tosses we rely on for different nodes of $S$ are non-overlapping and hence, the probability that the whole set $S$ stays undecided is at most $\Delta^{-c|S|}$.
\end{proof}
From this lemma, we can get the following \emph{shattering} guarantee. Since the proof is similar to that of \cite[Lemma 3.3]{barenboim2012locality}, or those of \cite[Main Lemma]{beck1991LLL}, \cite[Lemma 4.6]{alon2012LCA}, and \cite[Theorem 3]{levi2015local}, we only provide a brief sketch:
\begin{lemma}
\label{lem:shattering} Let $c$ be a large enough constant and $B$ be the set of nodes remaining undecided after $\Theta(c\log \Delta)$ rounds of the MIS algorithm of the previous section on a graph $G$. Then, with probability at least $1-1/n^{c}$, we have the following two properties:
\begin{enumerate}
\item[(P1)] There is no $(G^{4^-})$-independent $(G^{9^-})$-connected subset $S \subseteq B$ s.t. $|S|\geq \log_{\Delta} n$. Here $G^{x^-}$ denotes the graph where we put edges between each two nodes with $G$-distance at most $x$.
\item[(P2)] All connected components of $G[B]$, that is the subgraph of $G$ induced by nodes in $B$, have each at most $O(\log_{\Delta} n \cdot \Delta^4)$ nodes.
\end{enumerate}
\end{lemma}
\begin{proof}[Proof Sketch] Let $H=G^{9^-} \setminus G^{4^-}$, i.e., the result of removing $G^{4^-}$ edges from $G^{9^-}$. For (P1), note that the existence of any such set $S$ would mean $H[B]$ contains a $(\log_{\Delta} n)$-node tree subgraph. There are at most $4^{\log_{\Delta} n}$ different $(\log_{\Delta} n)$-node tree topologies and for each of them, less than $n \Delta^{\log_{\Delta} n}$ ways to embed it in $H$. For each of these trees, by \Cref{lem:smallComp}, the probability that all of its nodes stay is at most $\Delta^{-c(\log_{\Delta} n)}$. By a union bound over all trees, we conclude that with probability $1-n (4\Delta)^{ \log_{\Delta} n} \Delta^{-c(\log_{\Delta} n)} \geq 1-1/n^{c}$, no such set $S$ exists. For (P2), note that if $G[B]$ has a component with more than $\Theta(\log_{\Delta} n \cdot \Delta^4)$ nodes, then we can find a set $S$ violating (P1): greedily add nodes to the candidate $S$ one-by-one, and each time discard all nodes within $4$-hops of the newly added node, which are at most $O(\Delta^4)$ many.
\end{proof}
From property (P2) of \Cref{lem:shattering}, it follows that running the deterministic MIS algorithm of Panconesi and Srinivasan\cite{panconesi1992improved}, which works in $2^{O(\sqrt{\log n'})}$ rounds in graphs of size $n'$, in each of the remaining components finishes our MIS problem in $2^{O(\sqrt{\log \Delta + \log \log n})}$ rounds. However, the appearance of the $\log \Delta$ in the exponent is undesirable, as we seek a complexity of $O(\log \Delta) + 2^{O(\sqrt{\log \log n})}$. To remedy this problem, we use an idea similar to \cite[Section 3.2]{barenboim2012locality}, which tries to leverage the (P1) property.
In a very rough sense, the (P1) property of \Cref{lem:shattering} tells us that if we ``contract nodes that are closer than 5-hops'' (this is to be made precise), the left over components would have size at most $\log_{\Delta} n$, which would thus avoid the undesirable $\log \Delta$ term in the exponent. We will see that, while running the deterministic MIS algorithm, we will be able to expand back these contractions and solve their local problems. We next formalize this intuition.
The \emph{finish-off} algorithm is as follows: We consider each connected component $C$ of the remaining nodes separately; the algorithm runs in parallel for all the components. First compute a $(5, h)$-ruling set $R_{C}$ in each connected component $C$ of the set $B$ of the remaining nodes, for an $h=\Theta(\log \log n)$. Recall that a $(5, h)$-ruling set $R_{C}$ means each two nodes of $R_{C}$ have distance at least $5$ while for each node in $C$, there is at least one node in $R_C$ within its $h$-hops. This $(5, h)$-ruling set $R_C$ can be computed in $O(\log \log n)$ rounds using the algorithm\footnote{This is different than what Barenboim et al. did. They could afford to use the more standard ruling set algorithm, particularly computing a $(5, 32\log \Delta+O(1))$-ruling set for their purposes, because the fact that this $32 \log \Delta$ ends up multiplying the complexity of their finish-off phase did not change (the asymptotics of) their overall complexity.} of Schneider, Elkin and Wattenhofer\cite{schneider2013symmetry}. See also\cite[Table 4]{barenboim2012locality}. Form clusters around $R_{C}$-nodes by letting each node $v\in C$ join the cluster of the nearest $R_{C}$-node, breaking ties arbitrarily by IDs. Then, contract each cluster to a new node. Thus, we get a new graph $G'_C$ on these new nodes, where in reality, each of these new nodes has radius $h=O(\log \log n)$ and thus, a communication round on $G'_C$ can be simulated by $O(h)$ communication rounds on $G$.
From (P1) of \Cref{lem:shattering}, we can infer that $G'_C$ has at most $\log_\Delta n$ nodes, w.h.p., as follows: even though $R_C$ might be disconnected in $G^{9^-}$, by greedily adding more nodes of $C$ to it, one by one, we can make it connected in $G^{9^-}$ but still keep it $5$-independent. We note that this is done only for the analysis. See also \cite[Page 19, Steps 3 and 4]{barenboim2012locality} for a more precise description. Since by (P1) of \Cref{lem:shattering}, the end result should have size at most $\log_\Delta n$, with high probability, we conclude $G'_C$ has at most $\log_\Delta n$ nodes, with high probability.
We can now compute an MIS of $C$, via almost the standard deterministic way of using network decompositions. We run the network decomposition algorithm of Panconesi and Srinivasan\cite{panconesi1992improved} on $G'_C$. This takes $2^{O(\sqrt{\log \log_\Delta n})} $ rounds and gives $G'_C$-clusters of radius at most $2^{O(\sqrt{\log \log_\Delta n})} $, colored with $2^{O(\sqrt{\log \log_\Delta n})}$ colors such that adjacent clusters do not have the same color. We will walk over the colors one by one and compute the MIS of the clusters of that color, given the solutions of the previous colors. Each time, we can (mentally) expand each of these $G'_C$ clusters to all the $C$-nodes of the related cluster, which means these $C$-clusters have radius at most $\log \log n \cdot 2^{O(\sqrt{\log \log_\Delta n})}$. While solving the problem of color-$j$ clusters, we make a node in each of these clusters gather the whole topology of its cluster and also the adjacent MIS nodes of the previous colors. Then, this cluster-center solves the MIS problem locally, and reports it back. Since each cluster has radius $\log \log n \cdot 2^{O(\sqrt{\log \log_\Delta n})}$, this takes $\log \log n \cdot 2^{O(\sqrt{\log \log_\Delta n})}$ rounds per color. Thus, over all the colors, the complexity becomes $2^{O(\sqrt{\log\log_{\Delta} n})} \cdot \log \log n \cdot 2^{O(\sqrt{\log \log_\Delta n})} = 2^{O(\sqrt{\log \log n})}$ rounds. Including the $O(\log \log n)$ ruling-set computation rounds and the $O(\log \Delta)$ pre-shattering rounds, this gives the promised global complexity of $O(\log \Delta) + 2^{O(\sqrt{\log \log n})}$, hence proving \Cref{thm:global}.
\section{Concluding Remarks}
This paper presented an extremely simple randomized distributed MIS algorithm, which exhibits many interesting \emph{local} characteristics, including a \emph{local complexity} guarantee of each node $v$ terminating in $O(\log \mathsf{deg}(v)+\log 1/\varepsilon)$ rounds, with probability at least $1-\varepsilon$. We also showed that combined with known techniques, this leads to an improved high probability global complexity of $O(\log \Delta) + 2^{O(\sqrt{\log \log n})}$ rounds, and several other important implications, as described in \Cref{subsec:implications}.
For open questions, the gap between the upper and lower bounds, which shows up when $\log \Delta = \omega (\sqrt{\log n})$, is perhaps the most interesting. We saw in (C2) of \Cref{subsec:implications} that if the lower-bound is the one that should be improved, we need to go away from ``tree local-views" topologies. Another longstanding open problem is to find a $\operatorname{\text{{\rm poly}}}(\log n)$ deterministic distributed MIS algorithm. Combined with the results of this paper, that can potentially get us to an $O(\log \Delta) + \operatorname{\text{{\rm poly}}}(\log \log n)$ randomized algorithm.
\paragraph{Acknowledgment}
I thank Eli Gafni, Bernhard Haeupler, Stephan Holzer, Fabian Kuhn, Nancy Lynch, and Seth Pettie for valuable discussions. I am also grateful to Fabian Kuhn and Nancy Lynch for carefully reading the paper and many helpful comments. The point (C2) in \Cref{subsec:implications} was brought to my attention by Fabian Kuhn. The idea of highlighting the \emph{local complexity} is rooted in conversations with Stephan Holzer and Nancy Lynch, and also in Eli Gafni's serious insistence\footnote{This is a paraphrased version of his comment during a lecture on Linial's $\Omega(\log^* n)$ lower bound, in the Fall 2014 Distributed Graph Algorithms (DGA) course at MIT.} that \emph{the (true) complexity of a local problem should not depend on $n$}.
\appendix
\section{Simplified Global Analysis of Luby's, due to Yves et al.}
\label{Yves}
We here explain (a slightly paraphrased version of) the clever approach of Yves et al.~\cite{YvesMIS} for bounding Luby's global time complexity:
\begin{lemma} \label{lem:Main} Let $G[V_t]$ be the graph induced by nodes that are alive in round $t$, and let $m_t$ denote the number of edges of $G[V_t]$. For each round $t$, we have $\mathbb{E}[m_{t+1}] \leq \frac{m_{t}}{2}$, where the expectation is on the randomness of round $t$.
\end{lemma}
\begin{proof}
Consider an edge $e=(u, v)$ that is alive at the start of a round $t$, i.e., that is in $G[V_t]$. Note that edge $e$ will not be in $G[V_{t+1}]$---in which case we say $e$ \emph{died}---if, in the random numbers drawn in round $t$, there is a node $w$ that is adjacent to $v$ or $u$ (or both) and $w$ has the strictly smallest random number in its own neighborhood. In this case, we say node $w$ \emph{killed} edge $e$.
Note that the probability that $w$ kills $e$ is $\frac{1}{d(w)+1}$, where $d(w)$ denotes the degree of $w$. The difficulty comes when we want to compute the probability that \emph{there exists} a $w$ that kills $e$. This is mainly because, the events of different $w$ killing $e$ are not disjoint, and hence we cannot easily sum over them. Fortunately, there is a simple and elegant change in the definition, due to Yves et al.\cite{YvesMIS}, which saves us from tedious calculations:
Without loss of generality, suppose that $w$ is adjacent to $v$. We say $w$ \emph{strongly kills} $e$ from the side of node $v$---and use notation $w\stackrel{v}{\rightarrow}e$ to denote it---if $w$ has the (strictly) minimum random number in $\Gamma_w\cup \Gamma_v$. Note that this is a stronger requirement and thanks to this definition, at most one node $w$ can strongly kill $e$ from the side of $v$. Thus, in a sense, we now have events that are disjoint which means the probability that any of them happens is the summation of the probabilities of each of them happening. The only catch is, we might \emph{double count} an edge dying, because it gets (strongly) killed from both endpoints, but that is easy to handle; we just lose a $2$-factor. In the following, with a slight abuse of notation, by $E$ we mean the alive edges, i.e., those of $G[V_t]$, and by $\Gamma(v)$, we mean the neighbors of $v$ in $G[V_t]$. We have
\begin{align*}
\mathbb{E}[\textit{Number of edges that die}] &\geq \sum_{e=(u, v)\in E} \Pr[e \textit{ gets strongly killed}]\\
&\geq \sum_{e=(u, v)\in E} \bigg(\sum_{w\in \Gamma(v)}\Pr[w\stackrel{v}{\rightarrow}e] + \sum_{w'\in \Gamma(u)}\Pr[w'\stackrel{u}{\rightarrow}e]\bigg)\bigg/2 \\
&\geq \sum_{e=(u, v)\in E} \bigg(\sum_{w\in \Gamma(v)}\frac{1}{d(w)+d(v)} + \sum_{w'\in \Gamma(u)}\frac{1}{d(w')+d(u)}\bigg)\bigg/2 \\
&= \frac{1}{2}\sum_{v\in V} \sum_{u \in \Gamma(v)} \bigg(\sum_{w\in \Gamma(v)}\frac{1}{d(w)+d(v)} + \sum_{w'\in \Gamma(u)}\frac{1}{d(w')+d(u)}\bigg)\bigg/2 \\
&= \bigg(\sum_{v\in V} \sum_{w\in \Gamma(v)} \sum_{u \in \Gamma(v)} \frac{1}{d(w)+d(v)} \\
&\qquad + \sum_{u\in V} \sum_{w'\in \Gamma(u)} \sum_{v \in \Gamma(u)} \frac{1}{d(w')+d(u)}\bigg)\bigg/4\\
&= \bigg(\sum_{v\in V} \sum_{w\in \Gamma(v)} \frac{d(v)}{d(w)+d(v)}+\sum_{u\in V} \sum_{w'\in \Gamma(u)} \frac{d(u)}{d(w')+d(u)} \bigg)\bigg/4\\
&= \bigg(\sum_{v\in V} \sum_{w\in \Gamma(v)} \frac{d(v)}{d(w)+d(v)}+\sum_{v\in V} \sum_{w\in \Gamma(v)} \frac{d(w)}{d(w)+d(v)} \bigg)\bigg/4 \\
&= \bigg(\sum_{v\in V} \sum_{w\in \Gamma(v)} 1\bigg)\bigg/4 = \frac{2m_{t}}{4} = \frac{m_{t}}{2}.
\end{align*}
\end{proof}
It follows from Lemma \ref{lem:Main} that the expected number of the edges that are alive after $4\log n$ rounds is at most $\frac{n^2 /2}{2^{4\log n}} < \frac{1}{n^2}$. Therefore, using Markov's inequality, we conclude that the probability that there is at least $1$ edge that is left alive is at most $\frac{1}{n^2}$. Hence, with probability at least $1-\frac{1}{n^2}$, all nodes have terminated by the end of round $4\log n$.
\end{document} |
\begin{document}
\title{Automorphisms and derivations of affine commutative and PI-algebras}
\author{Oksana Bezushchak}
\address{Faculty of Mechanics and Mathematics,
Taras Shevchenko National University of Kyiv, 60, Volodymyrska street, 01033 Kyiv, Ukraine}
\email{[email protected]}
\thanks{The first author was supported by the PAUSE program (France), partly supported by UMR 5208 du CNRS; and partly supported by MES of Ukraine: Grant for the perspective development of the scientific direction "Mathematical sciences and natural sciences" at TSNUK}
\author{Anatoliy Petravchuk}
\address{Faculty of Mechanics and Mathematics,
Taras Shevchenko National University of Kyiv, 60, Volodymyrska street, 01033 Kyiv, Ukraine}
\email{[email protected], [email protected]}
\thanks{The second author was supported by MES of Ukraine: Grant for the perspective development of the scientific direction "Mathematical sciences and natural sciences" at TSNUK}
\author{Efim Zelmanov}
\address{SICM, Southern University of Science and Technology, 518055 Shenzhen, China}
\email{[email protected]}
\subjclass[2020]{Primary 13N15, 16R99, 16W20; Secondary 16R40, 16W25, 17B40, 17B66}
\date{\today}
\keywords{PI-algebra, Lie algebra, automorphism, derivation, affine commutative algebra}
\begin{abstract}
We prove analogs of A.~Selberg's result for finitely generated subgroups of $\text{Aut}(A)$ and of Engel's theorem for subalgebras of $\text{Der}(A)$ for a finitely generated associative commutative algebra $A$ over an associative commutative ring. We prove also an analog of the theorem of W.~Burnside and I.~Schur about locally finiteness of torsion subgroups of $\text{Aut}(A)$.
\end{abstract}
\maketitle
\section{Introduction}
Let $\textbf{A}$ be the algebra of regular (polynomial) functions on an affine algebraic variety $V$ over an associative commutative ring $\Phi$ with $1.$
The group of $\Phi$-linear automorphisms $\text{Aut}(\textbf{A})$ and the Lie algebra of $\Phi$-linear derivations $\text{Der} (\textbf{A})$ are referred to as the group of polynomial automorphisms of $V$ and the Lie algebra of vector fields on $V$, respectively.
When the variety $V$ is irreducible, i.e. the ring $\textbf{A}$ is a domain, the group $\text{Aut}(K)$ of automorphisms of the field $K$ of fractions of $\textbf{A}$ is called the group of birational automorphisms of $V$; and the Lie algebra $\text{Der} (K)$ of derivations of $K$ is called the Lie algebra of rational vector fields on $V$.
Let $\mathbb{F}$ be a field. Then $\mathbb{F}[x_1,\ldots, x_n]$ and $\mathbb{F}(x_1,\ldots, x_n)$ are the polynomial algebra and the field of rational functions in $n$ variables over $\mathbb{F}$, respectively. The group $\text{Aut}(\mathbb{F}(x_1,\ldots, x_n))$ and the algebra $\text{Der}(\mathbb{F}(x_1,\ldots, x_n))$ (resp. $\text{Aut}(\mathbb{F}[x_1,\ldots, x_n])$ and $\text{Der}(\mathbb{F}[x_1,\ldots, x_n])$) are called the \emph{Cremona group} and the \emph{Cremona Lie algebra} (resp. \emph{polynomial Cremona group} and \emph{polynomial Cremona Lie algebra}).
Recall that a group is called \emph{linear} if it is embeddable into a group of invertible matrices over an associative commutative ring. Groups $\text{Aut}(\textbf{A})$ are, generally speaking, not linear. It has been an ongoing effort of many years to understand:
\begin{center}\emph{which properties of linear groups can be carried over to \\ automorphisms groups $\emph{\text{Aut}(\textbf{A})}$ and to Cremona groups}?\end{center}
J.-P.~Serre \cite{Serre38,Serre37} studied finite subgroups of Cremona groups. V.~L.~Popov \cite{Popov28} initiated the study of the question of whether the celebrated Jordan's theorem on finite subgroups of linear groups carries over to the groups $\text{Aut}(A).$ For some important results in this direction see \cite{Bandman_Zarhin5,Birkar8,Deserti11,Popov28,Popov29,Prokhorov_Shramov31}.
S.~Cantat \cite{Cantat10} proved the Tits Alternative for Cremona groups of rank $2$.
In this paper, we prove analogs of A.~Selberg's result \cite{Selberg36} (see also \cite{Alperin2}) for finitely generated subgroups of $\text{Aut}(A)$ and of Engel's theorem for subalgebras of $\text{Der}(A)$ for a finitely generated associative commutative algebra $A.$
We say that a group is \emph{virtually torsion free} if it has a subgroup of finite index that is torsion free.
\begin{theorem}\label{Theorem1} Let $A$ be a finitely generated associative commutative algebra over an associative commutative ring $\Phi$ with $1$. Suppose that $A$ does not have additive torsion. Then
\begin{enumerate}
\item[$(a)$] an arbitrary finitely generated subgroup of the group $\emph{\text{Aut}}(A)$ is virtually torsion free;
\item[$(b)$] if $A$ is a finitely generated ring (i.e. $\Phi$ is the ring of integers $ \mathbb{Z}$), then the group $\emph{\text{Aut}}(A)$ is virtually torsion free.
\end{enumerate} \end{theorem}
\begin{corollary}[\rm\textbf{An analog of the theorem of W.~Burnside and I.~Schur}; see \cite{Jacobson17,Jacobson18}]\label{Cor1} Under the assumptions of theorem $\ref{Theorem1}(a)$ every torsion subgroup of $\emph{\text{Aut}}(A)$ is locally finite.
\end{corollary}
\begin{corollary}\label{Corollary2} Every torsion subgroup of a polynomial Cremona group \\ $\text{Aut}(\mathbb{F}[x_1,\ldots,x_n]),$ where $\mathbb{F}$ is a field of characteristic zero, has an abelian normal subgroup of finite index.
\end{corollary}
Corollary \ref{Corollary2} immediately follows from corollary \ref{Cor1} and from the Jordan property of the group $\text{Aut}(\mathbb{F}[x_1,\ldots,x_n]);$ see \cite{Birkar8,Prokhorov_Shramov31}.
If the torsion subgroup in corollary \ref{Cor1} is of bounded exponent, then we don't need any assumptions on additive torsion. Indeed, in \cite{Bass_Lubotzky6}, it was shown that the group ${\rm Aut}(A)$ is locally residually finite. Hence, by the positive solution of the restricted Burnside problem (see \cite{Zelmanov41,Zelmanov42}), such a subgroup is locally finite.
Recall that a derivation $d$ of an algebra $A$ is called \emph{locally nilpotent} if for an arbitrary element $a\in A$ there exists an integer $n(a)\geq 1$ such that $d^{n(a)}(a)=0.$ For more information about locally nilpotent derivations see \cite{Freudenburg12}. An algebra is called \emph{locally nilpotent} if every finitely generated subalgebra is nilpotent.
Let $L\subseteq {\rm Der}(A)$ be a Lie algebra that consists of locally nilpotent derivations. The question of whether it implies that the Lie algebra $L$ is locally nilpotent was discussed in \cite{Freudenburg12,Petravchuk_Sysak26,Skutin39}. In particular, A.~Skutin \cite{Skutin39} proved local nilpotency of $L$ for a commutative domain $A$ of finite transcendence degree and characteristic zero.
\begin{theorem}\label{Theorem2}
Let $A$ be a finitely generated associative commutative algebra over an associative commutative ring, and let $L$ be a subalgebra of ${\rm Der}(A)$ that consists of locally nilpotent derivations. Then the Lie algebra $L$ is locally nilpotent.
\end{theorem}
The assumption of finite generation of the algebra $A$ is essential. If $A$ is the algebra of polynomials in countably many variables over a field, then there exists a non-locally nilpotent Lie subalgebra $L\subseteq {\rm Der}(A)$ that consists of locally nilpotent derivations. The following theorem, however, imposes a finiteness condition that is weaker than finite generation.
Let $A$ be a commutative domain. Let $K$ be the field of fractions of $A.$ An arbitrary derivation of the domain $A$ extends to a derivation of the field $K,$ ${\rm Der} (A)\subseteq {\rm Der} (K).$ We have $K{\rm Der} (K)\subseteq {\rm Der} (K),$ hence ${\rm Der} (K)$ can be viewed as a vector space over the field $K.$
\begin{theorem}\label{Theorem3} Under the assumptions above, let $L\subseteq {\rm Der}(A)$ be a Lie ring that consists of locally nilpotent derivations. Suppose that ${\rm dim}_{K}KL<\infty .$ Then the Lie ring $L$ is locally nilpotent.
\end{theorem}
A special case of this theorem was proved by A.~P.~Petravchuk and K.~Ya.~Sysak in \cite{Petravchuk_Sysak26}.
The proof of theorem \ref{Theorem3} is based on a stronger version of theorem \ref{Theorem2}, which is of independent interest.
Recall that a subalgebra $B$ of an associative commutative algebra $A$ is called an \emph{order} in $A$ if there exists a multiplicative semigroup $S\subset B$ such that
\begin{enumerate}
\item[(1)] every element from $S$ is invertible in $A,$
\item[(2)] an arbitrary element $a\in A$ can be represented as $a=s^{-1}b,$ where $s\in S$ and $b\in B.$
\end{enumerate}
Let $L \subseteq {\rm Der}(A)$ be a subalgebra. The subset
$A_L=\{ a\in A \ | \ \text{for an arbitrary} \ d\in L \ \text{there exists an integer} \ n(d)\geq 1 \ \text{such that} \ d^{n(d)}(a)=0\}$ is a subalgebra of the algebra $A.$
\begin{proposition}\label{Proposition1}
Let $A$ be a finitely generated commutative domain. Let $L$ be a subalgebra of ${\rm Der}(A).$ If the subalgebra $A_L$ is an order in $A$, then the Lie algebra $L$ is locally nilpotent.
\end{proposition}
To achieve a natural generality and to expand to noncommutative cases we extended theorems \ref{Theorem1} and \ref{Theorem2} to algebras with polynomial identities, i.e. $\text{PI}$-algebras; see \cite{Aljad_Gia_Procesi_Regev1,Belov_Rowen7,Rowen35}.
A $\text{PI}$-algebra is called \emph{representable} if it is embeddable in a matrix algebra over an associative commutative algebra. In \cite{Small40}, L. W. Small constructed an example of a finitely generated $\text{PI}$-algebra that is not representable.
\begin{theorem}\label{Theorem4}
Let $A$ be a finitely generated representable $\emph{\text{PI}}$-algebra over an associative commutative ring. Suppose that $A$ does not have additive torsion. Then
\begin{enumerate}
\item[(a)] an arbitrary finitely generated subgroup of the group ${\rm Aut}(A)$ is virtually torsion free;
\item[(b)] if $A$ is a finitely generated ring, then the group ${\rm Aut}(A)$ is virtually torsion free.
\end{enumerate}
\end{theorem}
\begin{theorem}\label{Theorem5}
Let $A$ be a finitely generated $\emph{\text{PI}}$-algebra over an associative commutative ring. Suppose that $A$ does not have additive torsion. Then an arbitrary torsion subgroup of ${\rm Aut}(A)$ is locally finite.
\end{theorem}
We remark that theorem \ref{Theorem5} does not contain assumptions on representability.
C.~Procesi \cite{Procesi30} proved local finiteness of torsion subgroups of multiplicative groups of $\text{PI}$-algebras.
\begin{theorem}\label{Theorem6}
Let $A$ be a finitely generated $\emph{\text{PI}}$-algebra over an associative commutative ring. Let $L\subseteq {\rm Der}(A)$ be a subalgebra that consists of locally nilpotent derivations. Then the Lie algebra $L$ is locally nilpotent.
\end{theorem}
\section{Preliminaries}
In this section, we review some facts that will be used in proofs.
\subsection{}\label{1.1} Theorems \ref{Theorem1}, \ref{Theorem2}, \ref{Theorem5} and \ref{Theorem6} were formulated for finitely generated associative commutative algebras over an associative commutative ring $\Phi .$ We will show that it is sufficient to assume $\Phi =\mathbb Z,$ that is to prove the theorems for finitely generated rings. In particular, theorems \ref{Theorem1}(b) and \ref{Theorem4}(b) imply theorems \ref{Theorem1}(a) and \ref{Theorem4}(a), respectively.
We will do it for theorem \ref{Theorem6}. The arguments for theorems \ref{Theorem1}, \ref{Theorem2} and \ref{Theorem5} are absolutely similar.
Let $\Phi$ be an associative commutative ring and let $A$ be an associative $\text{PI}$-algebra over $\Phi $ (see \textbf{\ref{1.2}}) generated by elements $a_1, \ldots , a_m;$ and $ A\ni 1.$ Let $L\subseteq \Der _{\Phi}(A)$ be a Lie subalgebra generated by derivations $d_1, \ldots , d_n.$ Suppose that every derivation from $L$ is locally nilpotent. Let $\Phi \langle x_1, \ldots , x_m\rangle$ be the free associative $\Phi$-algebra in free generators $x_1, \ldots , x_m.$ Then there exist elements $f_{ij}(x_1, \ldots , x_m), 1\leq i\leq n, 1\leq j\leq m,$ such that $d_i(a_j)=f_{ij}(a_1, \ldots , a_m).$
Let $A_1$ be the subring of $A$ generated by elements $1, a_1, \ldots ,a_m$ and by all coefficients of the elements $f_{ij}(x_1, \ldots ,x_m).$ It is straightforward that the subring $A_1$ is invariant under $d_1, \ldots , d_n.$ Assuming that theorem \ref{Theorem6} is true for $\Phi =\mathbb Z$, there exists an integer $r\geq 1$ such that $L^r(A_1)=(0).$ In particular, $L^r(a_i)=(0), 1\leq i\leq m.$ Since the elements $a_1, \ldots , a_m$ generate the $\Phi$-algebra $A$ we conclude that $L^r=(0).$
Let us review some basic definitions and facts about $\text{PI}$-algebras that can be found in the books \cite{Aljad_Gia_Procesi_Regev1,Belov_Rowen7,Rowen35}.
\subsection{}\label{1.2} An associative algebra $A$ over an associative commutative ring $\Phi \ni 1$ is said to be a \emph{$\text{PI}$-algebra} if there exists an element
$$ f(x_1, \ldots , x_n)=x_1\cdots x_n+\sum _{1\not =\sigma \in S_n}\alpha _{\sigma}x_{\sigma (1)}\cdots x_{\sigma (n)}$$
of the free associative algebra $\Phi \langle x_1, \ldots , x_n\rangle$ such that $f(a_1, \ldots , a_n)=0$ for arbitrary elements $a_1, \ldots , a_n\in A;$ hereafter $S_n$ is the group of permutations of the set $\{1,\ldots, n \}$. In this case we say that the algebra $A$ satisfies the identity $f(x_1, \ldots , x_n)=0.$
If $A$ is a $\text{PI}$-algebra, then it satisfies an identity with all the coefficients $\alpha _{\sigma}, 1\not =\sigma\in S_n,$ lying in $\mathbb Z.$ In other words, every $\text{PI}$-algebra is $\text{PI}$ over $\mathbb Z,$ i.e. $\text{PI}$ as a ring.
\subsection{}\label{1.6} A ring $A$ is called \emph{prime} if the product of any two nonzero ideals is different from zero. If $A$ is a prime $\text{PI}$-ring, then the center $$Z=\{ a\in A \ | \ ab=ba \ \text{for an arbitrary element} \ b\in A\}\not =(0)$$
and the ring of fractions $(Z\setminus ( 0))^{-1}A$
is a finite-dimensional central simple algebra over the field of fractions of the domain $Z$; see \cite{Markov22,Rowen34}.
\subsection{}\label{1.7} A ring $A$ is called \emph{semiprime} if it does not contain nonzero nilpotent ideals. Let $A$ be a finitely generated semiprime $\text{PI}$-ring. Let $Z$ be the center of $A$ and let $Z^*$ denote the set of elements from $Z$ that are not zero divisors. Then the ring of fractions ${(Z^{*})}^{-1}A$ is a finite direct sum of simple finite-dimensional (over their centers) algebras.
\subsection{}\label{1.8} An element $a\in L$ of a Lie algebra $L$ is called ad-\emph{nilpotent} if the operator $${\rm ad} (a) : L\to L, \quad {\rm ad} (a): x \mapsto [a, x],$$ is nilpotent.
Suppose that a Lie algebra $L$ is generated by elements $a_1, \ldots , a_m.$ Commutators in $a_1, \ldots , a_m$ are defined via the following rules:
\begin{enumerate}
\item[(i)] an arbitrary generator $a_i, 1\leq i\leq m,$ is a commutator in $a_1, \ldots , a_m;$
\item[(ii)] if $\rho '$ and $\rho ''$ are commutators in $a_1, \ldots , a_m$, then $\rho =[\rho ', \rho '']$ is a commutator in $a_1, \ldots , a_m.$
\end{enumerate}
An element $a\in L$ is called a \emph{commutator} in $a_1, \ldots , a_m$ if it is a commutator because of (i) and (ii).
A Lie algebra $L$ over an associative commutative ring $\Phi \ni 1$ is called $\text{PI}$ (\emph{satisfies a polynomial identity}) if there exists a multilinear element of the free Lie algebra
$$f(x_0, x_1, \ldots , x_n)=({\rm ad} (x_1)\cdots {\rm ad} (x_n) +\sum _{1\not =\sigma \in S_n}\alpha _{\sigma}{\rm ad} (x_{\sigma (1)})\cdots {\rm ad} (x_{\sigma (n)}))x_0, \alpha _{\sigma}\in \Phi,$$
such that $f(a_0, a_1, \ldots , a_n)=0$ for arbitrary elements $a_0, a_1, \ldots , a_n\in L.$
The following theorem was proved in \cite{Zelmanov42}.
\begin{theo*}[\cite{Zelmanov42}] Let $L$ be a Lie $\emph{\text{PI}}$-algebra over an associative commutative ring generated by elements $a_1, \ldots , a_m.$ Suppose that every commutator in $a_1, \ldots , a_m$ is $\emph{\text{ad}}$-nilpotent. Then the Lie algebra $L$ is nilpotent.\end{theo*}
\section{Groups of automorphisms}
\begin{lemma}\label{Lemma1}
Let $A$ be a finitely generated commutative domain without additive torsion. Then the group ${\rm Aut} (A)$ is virtually torsion free.
\end{lemma}
\begin{proof}
Let $I$ be a maximal ideal of the ring $A.$ The field $A/I$ is finitely generated, hence $A/I$ is a finite field, $A/I\simeq GF(p^l).$ Let $\mathcal P$ be the set of all ideals $P\vartriangleleft A$ such that $A/P\simeq GF(p^l).$ Let $P_0$ be the ideal of the ring $A$ generated by all elements $a^{p^l}-a, a\in A,$ and by the prime number $p.$ It is easy to see that the ring $A/P_0$ is finite, $P_0\subseteq \cap _{P\in \mathcal P}P.$
This implies that the set $\mathcal P$ is finite.
Automorphisms of the ring $A$ permute ideals from $\mathcal P.$ The ideal $I$ belongs to $\mathcal P.$ Hence, there exists a subgroup $ H_1 \leq {\rm Aut} (A), |{\rm Aut} (A): H_1|<\infty ,$ that leaves the ideal $I$ invariant. We have $|A: I^2|<\infty .$ Therefore, there exists a subgroup $H_2\leq H_1, |{\rm Aut} (A):H_2|<\infty ,$ such that
$$(1-h)(A)\subseteq I^2$$
for an arbitrary element $h\in H_2.$ Furthermore, if $a_1, \ldots , a_k\in I,$ then
$$(h-1)(a_1\cdots a_k)=(h(a_1)-a_1+a_1)\cdots (h(a_k)-a_k+a_k)-a_1\cdots a_k=\sum b_1\cdots b_k,$$
where each $b_i=(h-1)(a_i)$ or $a_i$ and in each summand at least one element $b_i$ is equal to $(h-1)(a_i).$ This implies that
$$(1-h)(I^k)\subseteq I^{k+1}.$$
By the Krull intersection theorem (see \cite{Atiyah_Macdonald4}), we have $$\bigcap _{k\geq 1}I^k=(0).$$ If an element from $H_2$ has finite order, then this order must be a power of the prime number~$p.$
Consider the ring $$\widetilde{A}=\langle 1/p , A\rangle \subseteq A\otimes _{\mathbb Z}\mathbb Q,$$ where $\mathbb Q$ is the field of rational numbers. If $\widetilde J$ is a maximal ideal of the ring $\widetilde A,$ then
$$\widetilde{A}/\widetilde{J}\simeq GF(q^t) \quad \text{for prime } \quad q, \quad q\not =p, \quad \text{and} \quad \bigcap _{k\geq 1}\widetilde{J}\,^k=(0).$$
Let $J=\widetilde{J}\cap A.$ Arguing as above, we find a subgroup $H_3\leq {\rm Aut} (A)$ of a finite index such that $(1-h)(J^k)\subseteq J^{k+1}, k\geq 0,$ for an arbitrary element $h\in H_3.$ Hence, if an element from $H_3$ has finite order, then this order must be a power of the prime number $q.$
Now, $H_2\cap H_3$ is a torsion free subgroup of ${\rm Aut} (A).$ This completes the proof of the lemma. \end{proof}
\begin{lemma}\label{Lemma2}
Let $A$ be a semiprime finitely generated associative commutative ring without additive torsion. Then the group ${\rm Aut} (A)$ is virtually torsion free.
\end{lemma}
\begin{proof}
Let $S\subset A$ be the set of all nonzero elements that are not zero divisors. Then the ring of fractions $S^{-1}A$ is a direct sum of fields, $S^{-1}A=\mathbb{F}_1\oplus \cdots \oplus \mathbb{F}_k.$ An arbitrary automorphism of the ring $A$ extends to an automorphism of $S^{-1}A.$
Hence, there exists a subgroup $H\leq {\rm Aut} (A)$ of finite index such that every automorphism from $H$ leaves the summands $\mathbb{F}_1, \ldots , \mathbb{F}_k$ invariant. For each $i, 1\leq i\leq k,$ the factor-ring
$$K=A/A\cap ( \mathbb{F}_1\oplus \cdots \oplus \mathbb{F}_{i-1} \oplus \mathbb{F}_{i+1}\oplus \cdots \oplus \mathbb{F}_k)$$
is a domain without additive torsion. By lemma \ref{Lemma1}, there exists a subgroup $H_i< H$ of finite index such that the image of $H_i$ in $ {\rm Aut} (K)$ is torsion free. This implies that the group $\cap _{i=1}^kH_i$ is torsion free. Indeed, if an element $h\in \cap _{i=1}^kH_i$ has finite order, then $h$ acts identically modulo $K,$ and we get
$$(1-h)(A)\subseteq \bigcap _{i=1}^k(\mathbb{F}_1\oplus \cdots \oplus \mathbb{F}_{i-1} \oplus \mathbb{F}_{i+1}\oplus \cdots \oplus \mathbb{F}_k)=(0).$$
This completes the proof of the lemma.
\end{proof}
\begin{proof}[Proof of theorem \emph{\ref{Theorem4}(b)}.] Let $A$ be a finitely generated representable $\text{PI}$-ring that does not have additive torsion. A.~I.~Malcev \cite{Malcev21} showed that the ring $A$ is embeddable in a matrix algebra over a field of characteristic zero, $A\hookrightarrow M_n(\mathbb{F}), \ {\rm char}\, \mathbb{F}=0.$ Let $a_1, \ldots , a_m$ be generators of the ring $A,$ and let $\mathbb Z\langle X\rangle$ be the free associative ring on free generators $x_1, \ldots , x_m.$ If $R\subseteq \mathbb Z\langle x_1, \ldots , x_m\rangle$ is a set of defining relations of the ring $A$ in the generators $a_1, \ldots ,a_m,$ then $A\simeq \langle x_1, \ldots , x_m \ | \ R=(0)\rangle .$
Let $n, m\geq 2.$ Consider $m$ generic $n\times n$ matrices $$X_k=(x_{ij}^{(k)})_{1\leq i, j\leq n}, 1\leq k\leq m.$$
These are $n\times n$ matrices over the polynomial ring $\mathbb Z[X],$ where $$ X=\{x_{ij}^{(k)}, 1\leq i, j\leq n, 1\leq k\leq m\}$$ is the set of variables. The ring $G(m,n)$ generated by generic matrices $ {X}_1, \ldots , {X}_m$ is a domain and it is $\text{PI}$; see \cite{Amitsur3}.
For a relation $r\in R$ let
$$r(X_1, \ldots , X_m)=\big(r_{ij}(X)\big)_{1\leq i, j\leq n},\quad r_{ij}(X)\in \mathbb Z[X].$$
Consider the associative commutative ring $U$ presented by generators $X$ and relations
$r_{ij}(X)=0, $ $r\in R, $ $ 1\leq i, j\leq n,$ i.e. $$ U=\mathbb Z[X]/I, \quad
I={\rm id}_{\mathbb Z[X]}\big(r_{ij}(X), \ r\in R, \ 1\leq i, j\leq n\big).$$
Since the ring $A$ is embeddable in $M_n(\mathbb{F})$ it follows that the homomorphism $$u:A\to M_{n}(U), \quad u(a_k)=X_k+I\in M_n(U),\quad 1\leq k\leq m,$$ is an embedding. Moreover, the ring $U$ has the following universal property: \\ if $C$ is an associative commutative ring and $\varphi :A\to M_n(C)$ is an embedding, then there exists a unique homomorphism $U\to C$ that makes the diagram
$$ \xymatrix{ {A}\ar [r] ^{u} \ar[rd]_{\varphi} & {M_n(U)} \ar[d] \\
& {M_n(C)} }$$
commutative.
This implies that every automorphism of the ring $A$ gives rise to an automorphism of the ring $U.$ Let
$$T(U)=\{ x\in U \ | \ \text{there exists an integer} \ k\geq 1 \ \text{such that} \ kx=0\}$$
be the torsion part of the ring $U.$ Let $J\big(U/T(U)\big)$ be the radical of the ring $U/T(U),$ $J\big(U/T(U)\big)=J/T(U),$ where
$$(0)\subseteq T(U)\subseteq J\vartriangleleft U, \quad \overline{U}=U/J.$$
The factor-ring $\overline{U}$ is semiprime and does not have additive torsion. An arbitrary automorphism of the ring $A$ gives rise to an automorphism of $\overline{U}.$
Since the ring $A$ is embeddable in $M_n(\mathbb{F})$, ${\rm char}\, \mathbb{F}=0,$ it follows that $A$ is embeddable in $M_n(\overline{U})$ and the group ${\rm Aut} (A)$ is embeddable in ${\rm Aut} (\overline{U}).$ By lemma \ref{Lemma2}, the group ${\rm Aut} (\overline{U})$ is virtually torsion free and so is ${\rm Aut} (A).$ This
completes the proof of theorem \ref{Theorem4}(b). \end{proof}
Recall that theorem \ref{Theorem4}(b) implies theorems \ref{Theorem1} and \ref{Theorem4}(a).
We will discuss the annoying representability assumption in theorem \ref{Theorem4}. Let $A$ be a finitely generated $\text{PI}$-algebra over the field of rational numbers $\mathbb Q,$ and let $J$ be the Jacobson radical of the algebra $A.$ By \cite{Braun9}, the Jacobson radical of a finitely generated $\text{PI}$-ring is nilpotent. So, the radical $J$ is nilpotent. The stabilizer of the descending chain
$A\supset J\supset J^2\supset \cdots \ $ in $ {\rm Aut} (A)$ is torsion free.
Indeed, let $\varphi \in \Aut (A)$ and $(1-\varphi )J^i\subseteq J^{i+1}, i\geq 0.$ We assume that $\varphi ^{n}=1.$ Then we have
$$\varphi ^{n}=(\varphi -1+1)^n=\sum _{i=2}^n\binom{n}{i}(\varphi -1)^i+n(\varphi -1)+1.$$
Hence, $$n(1-\varphi )=\sum _{i= 2}^n\binom{n}{i}(\varphi -1)^i.$$
Suppose that $a\in A$ and $(1-\varphi )a\not=0.$ Let $(1-\varphi )a\in J^k\setminus J^{k+1}.$ By the above, $n(1-\varphi )a\in (\varphi -1)J^k\subseteq J^{k+1},$ a contradiction.
If the group ${\rm Aut} (A/J^2)$ is virtually torsion free, then so is the group ${\rm Aut} (A).$ Indeed, let $H$ be a torsion free subgroup of finite index in ${\rm Aut} (A/J^2)$ and let $\widetilde H$ be the preimage of $H$ under the homomorphism ${\rm Aut} (A)\to {\rm Aut} (A/J^2).$ If $h\in \widetilde{H}$ is a torsion element, then $h$ acts identically modulo $J^2,$ hence $h$ stabilizes the chain $A\supset J\supset J^2\supset \cdots $ and $h=1.$ We proved that the subgroup $\widetilde H$ of ${\rm Aut} (A)$ is torsion free.
In all known examples of nonrepresentable finitely generated $\text{PI}$-algebras the Jacobson radical is nilpotent of degree $\geq 3.$
\begin{Conjecture*}
A finitely generated $\text{PI}$-algebra with $J^2=0$ is representable.
\end{Conjecture*}
If this conjecture is true, then the representability assumption in theorem \ref{Theorem4} can be dropped.
The analog of Selberg's theorem holds for automorphism groups of some algebras that are far from being $\text{PI}.$
\begin{proposition}\label{Proposition2}
Let $A=\mathbb Z\langle x_1, \ldots, x_m\rangle, m\geq 2,$ be the free associative ring on free generators $ x_1, \ldots, x_m.$ The group of automorphisms ${\rm Aut} (A)$ is virtually torsion free.
\end{proposition}
\begin{proof}
Let $p$ be a prime number. Let $I_p$ be the ideal of the algebra $A$ generated by $p$ and by all elements $a^p-a, a \in A.$ The ideal $I_p$ is invariant under all automorphisms, the factor-ring $A/I_p$ is finite and constant terms of all elements in $I_p$ are divisible by $p.$ Hence, $$\bigcap _{i\geq 1}I_{p}\,^{i}=(0).$$ The subgroup
$$H_1={\rm ker} \big({\rm Aut} (A)\to {\rm Aut} (A/I_p^{2})\big)$$ has finite index in ${\rm Aut} (A)$ and every element of finite order in $H_1$ has an order, which is a power of $p.$ Now, choose a prime number $q,$ $ p\not =q.$ The subgroup $$H_2={\rm ker} \big({\rm Aut} (A)\to {\rm Aut} (A/I_q^{2})\big)$$ also has finite index in ${\rm Aut} (A)$ and every element of finite order in $H_2$ has an order which is a power of $q.$ The subgroup $H_1\cap H_2$ is torsion free and has finite index in ${\rm Aut} (A).$ This completes the proof of the proposition.
\end{proof}
\begin{lemma}\label{Lemma3}
Let $A$ be a \emph{$\text{PI}$}-algebra. Let $\phantom{i}_{A}{M}$ be a finitely generated left $A$-module. Then the algebra of $A$-module endomorphisms of the module $\phantom{i}_{A}{M}$ is \emph{$\text{PI}.$}
\end{lemma}
\begin{proof}
Let $M=\sum _{i=1}^{n}Am_i.$ Consider the free $A$-module $V$ on free generators $x_1, \ldots ,x_n:$ $$ V=\sum _{i=1}^{n}Ax_i,$$ and the homomorphism
$$f: V \to M, \quad x_i \mapsto m_i,\quad 1\leq i\leq n.$$
Denote its kernel as $V_0.$ Let
$$E_1=\{ \varphi \in {\rm End} _{A}(V) \ | \ \varphi (V_0)\subseteq V_0\}, \quad E_2=\{ \varphi \in {\rm End} _{A}(V) \ | \ \varphi (V)\subseteq V_0\}.$$
Then
$$ {\rm End} _{A}(M)\simeq E_1/E_2.$$
The algebra ${\rm End}_A(V)$ is isomorphic to the algebra of $n\times n$ matrices over $A$. Hence, ${\rm End} _A(V)$ is a $\text{PI}$-algebra. This implies that $E_1$ and $E_1/E_2$ are $\text{PI}$-algebras.
\end{proof}
\begin{proof}[Proof of theorem \emph{\ref{Theorem5}}.] Let $A$ be a finitely generated $\text{PI}$-algebra over $\mathbb Q$, and let $G$ be a finitely generated torsion subgroup of ${\rm Aut} (A).$ Consider the Jacobson radical $J$ of the algebra $A.$ The semisimple algebra $\overline{A}=A/J$ is representable; see \cite{Herstein16}. Hence, by theorem \ref{Theorem4}(a), the group ${\rm Aut} (\overline{ A})$ has Selberg's property, and the image of the group $G$ in ${\rm Aut} (\overline{ A})$ is finite. In other words, the subgroup
$ H=\{\varphi \in G \ | \ (1-\varphi )(A)\subseteq J\}$ has finite index in $G.$
Consider the subgroup $$K=\{\varphi \in {\rm Aut} (A) \ | \ (1-\varphi )(A)\subseteq J^2\}.$$ We showed that this subgroup centralizes the descending chain $A\supset J\supset J^2\ldots ,$ hence $K$ is a torsion free group. Therefore, $G\cap K=(1)$, and the homomorphism $G\to {\rm Aut} (A/J^2)$ is an embedding. Without loss of generality, we will assume that $J^2=(0).$ The radical $J$ can be viewed as an $\overline A$-bimodule.
Let $a_1, \ldots , a_m$ be generators of the algebra $A,$ and let $h_1, \ldots , h_r$ be generators of the subgroup $H.$ We have $(1-h_i)(A)\subseteq J, J^2=0,$ hence $1-h_i$ is a derivation of the algebra $A.$ This implies that $(1-h_i)(A)$ lies in the $\overline A$-subbimodule of $J$ generated by elements ${(1-h_i)(a_1), \ldots , (1-h_i)(a_m).}$ Let $J'$ be the $\overline A$-subbimodule of $J$ generated by elements ${(1-h_i)(a_j), 1\leq i\leq r, 1\leq j\leq m.}$ The finitely generated subbimodule $J'$ is invariant with respect to the action of $H.$ For an automorphism $h\in H,$ consider the restriction ${\rm Res}(h)$ of $h$ to $J'.$ This restriction is a bimodule automorphism of the $\overline A$-bimodule $J'.$ The mapping $$\varphi : H\to GL(_{\overline A}J'_{\overline A}), \quad h\mapsto {\rm Res}(h),$$ is a homomorphism to the group of bimodule automorphisms $GL(_{\overline A}J'_{\overline A}).$
The $\overline A$-bimodule $J'$ is a left module over the algebra ${\overline A}\bigotimes _{\mathbb Q}{\overline A}^{op}$ and
$$ GL(_{\overline A}J'_{\overline A})=GL_{{\overline A}\bigotimes _{\mathbb Q}{\overline A}^{op}}(J').$$
The algebra ${\overline A}\bigotimes _{\mathbb Q}{\overline A}^{op}$ is $\text{PI}$; see \cite{Regev33}. By lemma \ref{Lemma3}, the algebra $${\rm End} _{{\overline A}\bigotimes _{\mathbb Q}{\overline A}^{op}}(J')$$ is $\text{PI}$ as well. Thus, $\varphi (H)$ is a finitely generated torsion subgroup of the multiplicative group of a $\text{PI}$-algebra. By the result of C.~Procesi \cite{Procesi30}, the group $\varphi (H)$ is finite. The kernel $H'={\rm ker}\, \varphi$ is a subgroup of finite index in $G$ and for an arbitrary element $h\in H'$ we have $(1-h)(A)\subseteq J', (1-h)(J')=(0).$ Let $h^k=1, k\geq 1.$ We have
$$1-h^k=k(1-h) \ {\rm mod} \ (1-h)^2.$$
This implies $k(1-h)(A)=0$ and, therefore, $h=1, H'=(1).$
Hence, $|G|<\infty .$ This completes the proof of the theorem.\end{proof}
\section{Lie rings of locally nilpotent derivations}
\begin{proposition}\label{Proposition3}
Let $A$ be a finitely generated $\emph{\text{PI}}$-ring. Then the Lie ring ${\rm Der} (A)$ is $\emph{\text{PI}}.$
\end{proposition}
\begin{proof}
For an integer $n\geq 2$ consider the following elements of the free Lie ring
$$P_n(x_0, x_1, \ldots, x_n)=\sum_{\sigma \in S_n}(-1)^{|\sigma |}\ad (x_{\sigma (1)})\cdots \ad (x_{\sigma (n)})x_0.$$
For an associative commutative ring $\Phi$ let $W_{\Phi}(n)$ denote the Lie $\Phi$-algebra of $\Phi$-linear derivations of the polynomial algebra $\Phi [x_1, \ldots ,x_n].$ In \cite{Razmyslov32}, Yu.P.Razmyslov proved that for a field $\mathbb{F}$ of characteristic zero the Lie algebra $W_{\mathbb{F}}(n)$ satisfies the identity $P_N=0,$ where $N=(n+1)^{2}.$ The Lie ring $W_{\mathbb Z}(n)$ is a subring of the $\mathbb Q$-algebra $W_{\mathbb Q}(n).$ Hence, $W_{\mathbb Z}(n)$ satisfies the identity $P_N=0.$ Let $A$ be a $\text{PI}$-ring generated by elements $a_1, \ldots , a_m.$ Since $A$ is a finitely generated $\text{PI}$-ring, it follows that $A$ is an epimorphic image of the ring of generic matrices $G(m, n)$ for some integers $m, n\geq 2;$ see \cite{Belov_Rowen7,Kemer19}. Let
$$ G(m, n)\to A, \quad X_k=\big(x_{ij}^{(k)}\big)_{1\leq i, j\leq n}\mapsto a_k, \quad 1\leq k\leq m,$$
be an epimorphism. Let $N=(n^2m+1)^2.$ We will show that the Lie ring ${\rm Der} (A)$ satisfies the identity $P_N=0.$
Denote $$X=\{\,x_{ij}^{(k)} \ | \ 1\leq i, j\leq n, \quad 1\leq k\leq m\, \}.$$ Choose derivations $d_0, d_1, \ldots , d_N\in {\rm Der} (A).$ There exist elements $f_{st}(x_1, \ldots , x_m)$ of the free associative ring $\mathbb Z\langle x_1, \ldots , x_m\rangle, $ $ 0\leq s\leq N,$ $ 1\leq t\leq m,$ such that
$$d_s(a_t)=f_{st}(a_1, \ldots , a_m).$$
Let $$f_{st}(X_1, \ldots , X_m)=\big(g_{ij}^{st}(X)\big)_{1\leq i, j\leq n},$$ where $g_{ij}^{st}(X)\in \mathbb Z[X]$ are entries of the matrix $f_{st}(X_1, \ldots, X_m).$
Consider derivations $ \widetilde{d}_{s}$ of the ring $\mathbb Z[X],$ $$\widetilde{d}_{s}(x_{ij}^{(t)})=g_{ij}^{st}(X), \quad 1\leq i, j\leq n, \quad 0\leq s\leq N, \quad 1\leq t\leq m.$$
Let $L$ be the Lie subring generated by the derivations $ \widetilde{d}_{s}, 0\leq s\leq N$ in ${\rm Der} (\mathbb Z[X]).$ The mapping $ \widetilde{d}_{s}\mapsto d_{s}, 0\leq s\leq N,$ extends to a homomorphism $L\to {\rm Der} (A).$ This implies $P_N(d_0, d_1, \ldots, d_N)=0$ and completes the proof of the proposition.
\end{proof}
Now, our aim is to prove theorem \ref{Theorem6}. In view of \textbf{\ref{1.1}}, we will assume that the finitely generated $\text{PI}$-algebra $A$ of theorem \ref{Theorem6} is a finitely generated ring.
Let's prove theorem \ref{Theorem6} and proposition \ref{Proposition1} for the case of prime characteristics.
Let $A$ be a finitely generated $\text{PI}$-ring and let $L\subseteq {\rm Der} (A)$ be a Lie ring that consists of locally nilpotent derivations. Suppose further that there exists a prime number $p\geq 2$ such that $pA=(0).$
Let $a_1, \ldots , a_m$ be generators of the ring $A.$ Let $d\in L.$ There exists a power $p^k$ of the prime number $p$ such that $$d^{p^k}(a_i)=0, \quad 1\leq i\leq m.$$
The power $d^{p^k}$ is again a derivation of the ring $A.$ Hence $d^{p^k}=0.$ This implies that ${\rm ad} (d)^{p^k}=0$ in the Lie ring $L.$ By proposition \ref{Proposition3}, the Lie ring $L$ is $\text{PI},$ and by results of \cite{Zelmanov43} (see \textbf{\ref{1.7}}), the Lie ring $L$ is locally nilpotent. Moreover, every finitely generated subalgebra $L_1$ of $L$ acts on $A$ nilpotently, i.e. there exists an integer $s\geq 1$ such that $$\underbrace{L_1\cdots L_1}_{s}A=(0).$$ This proves theorem \ref{Theorem6} in the case of a prime characteristic.
Now, let $A$ be an associative commutative ring generated by elements $a_1, \ldots , a_m,$ let $p$ be a prime number such that $pA=(0),$ and let $L\subseteq {\rm Der} (A)$ be a Lie subring of ${\rm Der} (A).$ Suppose that the subring $A_L$ is an order in $A.$ Then $a_i=b_{i}^{-1}c_i, 1\leq i\leq m,$ where $b_i, c_i\in A_L.$ For an arbitrary derivation $d\in L$ there exists a power $p^k$ such that $d^{p^k}(b_i)=d^{p^k}(c_i)=0, 1\leq i\leq m.$ Then $d^{p^k}(a_i)=0, 1\leq i\leq m, $ and, therefore, $d^{p^k}=0.$ Again, by \cite{Zelmanov43}, the ring $L$ is locally nilpotent. This proves proposition \ref{Proposition1} in the case of prime characteristic.
A Lie ring $L$ is called \textit{weakly Engel} if for arbitrary elements $a, b\in L$ there exists an integer $n(a, b)\geq 1$ such that
$${\rm ad} (a)^{n(a, b)}b=0.$$
B.~I.~Plotkin \cite{Plotkin27} proved that a weakly Engel Lie ring has a locally nilpotent radical. In other words, if $L$ is a weakly Engel Lie ring, then $L$ contains the largest locally nilpotent ideal $I$ such that the factor-ring $L/I$ does not contain nonzero locally nilpotent ideals. We denote $I=\text{Loc}(L).$
\begin{lemma}\label{Lemma4}
Let $A$ be a finitely generated ring and let a Lie ring $L\subseteq {\rm Der} (A)$ consist of locally nilpotent derivations. Then the Lie ring $L$ is weakly Engel.
\end{lemma}
\begin{proof}
Let the ring $A$ be generated by elements $a_1, \ldots , a_m.$ Let $d_1, d_2\in L.$ There exists an integer $n\geq 1$ such that $d_1^n(a_i)=0, 1\leq i\leq m.$ Since the set $$\{\, d_2d_1^i(a_j), \quad 0\leq i\leq n-1, \quad 1\leq j\leq m\,\}$$ is finite there exists an integer $k\geq 1$ such that $$d_1^kd_2d_1^i(a_j)=0, \quad 0\leq i\leq n-1, \quad 1\leq j\leq m.$$
We have $${\rm ad} (d_1)^sd_2=\sum_{i+j=s}(-1)^j\binom{s}{i}d_1^id_2d_1^j.$$ Hence
$$(\ad (d_1)^{n+k-1}d_2)(a_j)=0, 1\leq j\leq m.$$
This implies
${\rm ad} (d_1)^{n+k-1}d_2=0$ and completes the proof of the lemma.
\end{proof}
\begin{lemma}\label{Lemma5}
Let $A$ be a finitely generated associative commutative ring. Let $L \subseteq {\rm Der} (A)$ be a Lie ring of derivations such that the subring $A_L$ is an order in $A.$ Then the Lie ring $L$ is weakly Engel.
\end{lemma}
\begin{proof}
Let $a_1, \ldots ,a_m$ be generators of the ring $A,$ let $a_i=b_i^{-1}c_i, 1\leq i\leq m, $ where $b_i, c_i \in A_L.$ Choose derivations $d_1, d_2 \in L.$ In the proof of lemma \ref{Lemma4} we showed that there exists an integer $s\geq 1$ such that
$$({\rm ad} (d_1)^sd_2)(b_i)=({\rm ad} (d_1)^{s}d_2)(c_i)=0, 1\leq i\leq m.$$
Since $d'={\rm ad} (d_1)^sd_2$ is a derivation of the algebra $A$ it follows that $d'(a_i)=0, 1\leq i\leq m,$ and therefore $d'=0.$ This completes the proof of the lemma.
\end{proof}
\begin{lemma}\label{Lemma6}
Let $A$ be a finitely generated semiprime $\text{PI}$-ring. Then there exists a family of homomorphisms $A\to M_{n}(\mathbb Z/p\mathbb Z)$ into matrix rings over prime fields that approximates $A.$
\end{lemma}
\begin{proof}
The ring $A$ is representable \cite{Herstein16}, i.e. it is embeddable into a ring of matrices over a finitely generated associative commutative semiprime ring $C, \ A \hookrightarrow M_n(C).$ Hilbert's Nullstellensatz \cite{Atiyah_Macdonald4} implies that $C$ is a subdirect product of finite fields. Hence, there exists a family of homomorphisms $\varphi _i : A\to M_n(\mathbb{F}_i),$ where $ \mathbb{F}_i$ are finite fields such that $\cap _{i}{\rm ker}\, \varphi _i=(0).$ If ${\rm char}\, \mathbb{F}_i=p,$ then the field $\mathbb{F}_i$ is embeddable into a ring of matrices over $\mathbb Z/p\mathbb Z.$ This completes the proof of the lemma.
\end{proof}
\begin{lemma}\label{Lemma7}
Let $A$ be a finitely generated prime $\text{PI}$-ring. Let $Z$ be the center of $A$ and let $K$ be the field of fractions of the commutative domain $Z.$ Then ${\rm dim} _{K}K{\rm Der} (A)<\infty .$ \end{lemma}
\begin{proof}
Let $a_1, \ldots , a_m$ be generators of the ring $A.$ As we have remarked in \textbf{\ref{1.6}} the ring of fractions $\widetilde A=(Z\setminus ( 0))^{-1}A$ is a finite-dimensional central simple algebra over the field $K.$ Let ${\rm dim} _{K}\widetilde A=s.$ We will show that ${\rm dim}_KK{\rm Der} (A)\leq ms.$
Choose $ms+1$ derivations $d_1, \ldots , d_{ms+1}$ of the ring $A.$ Consider the vector space $$V=\underbrace{\widetilde A\oplus \cdots \oplus \widetilde A}_{m}$$ over the field $K$, ${\rm dim} _KV=ms,$ and vectors $v_i=(d_i(a_1), \ldots , d_i(a_m))\in V, 1\leq i\leq ms+1.$ There exist coefficients $k_1, \ldots k_{ms+1}\in K,$ not all equal to $0,$ such that $$\sum _{i=1}^{ms+1}k_iv_i=0.$$ This implies $d(a_i)=0, 1\leq i\leq m,$ where $d=\sum_{i=1}^{ms+1}k_id_i.$ Since $d$ is a derivation of the ring $\widetilde A$ and elements $a_1, \ldots , a_m$ generate $A$ as a ring it follows that $d(A)=(0).$ This implies that $d(K)=0$ and completes the proof of the lemma.
\end{proof}
Now, we will prove theorem \ref{Theorem6} and proposition \ref{Proposition1} in the case when the algebra $A$ is prime.
As above, let $A$ be a finitely generated prime $\text{PI}$-ring, let $Z=Z(A)$ be the center of the ring $A,$ and let $ K=(Z\setminus \{0\})^{-1}Z$ be the field of fractions of the domain $Z.$ Suppose that a Lie ring $L\subseteq {\rm Der} (A)$ consists of locally nilpotent derivations. For a derivation $d\in L$ let ${\rm id}_L(d)$ denote the ideal of the Lie ring $L$ generated by the element $d.$ Consider the descending chain of ideals
$$ I_1=L, \ \
I_{i+1}=\sum _{d\in I_{i}} \ [{\rm id}_L(d), {\rm id}_L(d)].$$
Since ${\rm dim} _KKL<\infty$, by lemma \ref{Lemma7}, it follows that the descending chain
$$ KI_1\supseteq KI_2\supseteq \cdots $$ stabilizes. Let $KI_l=KI_{l+1}= \cdots.$ We will show that $I_l=(0).$ Indeed, there exists a finite collection of derivations $d_1, \ldots , d_r\in I_l$ such that
$$KI_{l+1}=\sum _{i=1}^rK[{\rm id}_L(d_i), {\rm id}_L(d_i)].$$
Recall that
\begin{equation}\label{equation1} {\rm id}_L(d_i)=\mathbb{Z}d_i+\sum _{t\geq 1} [\ldots [d_i, \underbrace{ L], L], \ldots , L}_{t}] .
\end{equation}
Let
\begin{equation}\label{equation2} d\in [{\rm id}_L(d_i), {\rm id}_L(d_i)].
\end{equation}
Expanding the commutators on the right-hand sides of (\ref{equation1}) and (\ref{equation2}) we get
$$d=\sum \underset{(\star _1)}{ . . . }d_i\underset{(\star _2)}{ . . . }d_i\underset{(\star _3)}{ . . . },$$
where $(\star _{1})$ is a product of derivations from $L$ and, possibly, a multiplication by an element from $K,$
$(\star _{2})$ and $(\star _{3})$ are products, may be empty, of derivations from $L.$ Hence,
$d=\sum \cdots d_i\cdots ,$ where each summand has a nonempty product of derivations from $L$ to the right of $d_i.$
Since $d_1, \ldots , d_r\in \sum _{j}K[{\rm id}_L(d_j), {\rm id}_L(d_j)],$ we have
\begin{equation}\label{equation3} d_i=\sum k_{ijt}u_{ijt}d_jv_{ijt}, \quad 1\leq i\leq r,
\end{equation}
where $k_{ijt}\in K; u_{ijt}, v_{ijt}$ are products of derivations from $L;$ $v_{ijt}$ are nonempty products of derivations from $L.$
Let $b$ be a common denominator of all elements $k_{ijt},$ that is $k_{ijt}\in b^{-1}Z.$ Consider the finitely generated prime $\text{PI}$-ring $A_1=\langle b^{-1}, A\rangle.$ The ring $A_1$ is invariant under $\Der (A).$
Suppose that there exists an element $a\in A$ such that $d_i(a)\not =0.$ By lemma \ref{Lemma6}, there exists a family of homomorphisms $\varphi : A_1\to M_{n}(\mathbb Z/p\mathbb Z)$ that approximates the ring $A_1.$ Hence, there exists a prime number $p$ such that $d_i(a)\not \in pA_1.$
Consider the subring $L'$ of the Lie ring $L$ generated by all derivations that are involved in the products $v_{ijt}.$ Clearly, $L'$ is a finitely generated Lie ring.
We have shown above that theorem \ref{Theorem6} is true for rings of prime characteristics. Applying this result to the ring $A/pA$, we conclude that the ring $L'$ acts nilpotently on $A/pA$. In other words,
there exists an integer $s\geq 1$ such that
\begin{equation}\label{equation4} \underbrace{L'\cdots L'}_{s}(A)\subseteq pA .
\end{equation}
Iterating (\ref{equation3}) $s$ times, we get
$$d_i=\sum u_td_jv_{i_1j_1t_1}\cdots v_{i_sj_st_s},$$
where $u_t\in A_1.$ By (\ref{equation4}), we get
$$ v_{i_1j_1t_1}\cdots v_{i_sj_st_s}A\subseteq pA\subseteq pA_1$$
and, therefore, $d_i(a)\in pA_1, 1\leq i\leq r,$
a contradiction. We showed that $I_l=(0).$ Recall that, by B.~I.~Plotkin's theorem \cite{Plotkin27}, the ring $L$ has a locally nilpotent radical ${\rm Loc}(L).$ Let $i\geq 1$ be a minimal positive integer such that $I_i\subseteq {\rm Loc}(L), i\leq l.$ Suppose that $i\geq 2.$ For an arbitrary element $a\in I_{i-1}$ the ideal ${\rm id}_L(a)$ is abelian modulo $I_i.$ Since the factor-ring $L/{\rm Loc}(L)$ does not contain nonzero abelian ideals it follows that $a\in {\rm Loc}(L), I_{i-1}\subseteq {\rm Loc}(L),$ a contradiction.
We showed that $L=I_1\subseteq {\rm Loc}(L),$ in other words, the ring $L$ is locally nilpotent. This completes the proof of theorem \ref{Theorem6} in the case when the ring $A$ is prime.
To finish the proof of proposition \ref{Proposition1} we need just to repeat the arguments above. Let $A$ be a commutative domain, let $L\subseteq {\rm Der} (A),$ and suppose that $A_L$ is an order in $A.$ We see that the subring $(A_1)_L$ is an order in the ring $A_1$ and, therefore, for any prime number $p$ the subring $(A_1/pA_1)_L$ is an order in $A_1/pA_1.$ In the case of a prime characteristic proposition \ref{Proposition1} was proved for an arbitrary finitely generated associative commutative ring, not necessarily a domain. Hence, we can apply it to $A_1/pA_1,$ and finish the proof of proposition \ref{Proposition1} following the proof of theorem \ref{Theorem6} verbatim.
To tackle the semiprime case we will need the following lemma.
\begin{lemma}\label{Lemma8}
Let $A$ be a finitely generated semiprime ring. Then there exists a finite family of ideals $I_1, \ldots , I_n\vartriangleleft A$ such that each ideal $I_i, 1\leq i\leq n,$ is invariant under ${\rm Der} (A);$ each factor-ring $A/I_i$ is prime, and $$\bigcap _{j=1}^{n}I_j=(0).$$
\end{lemma}
\begin{proof} As we have mentioned in \textbf{\ref{1.7}}, the ring of fractions $\widetilde{A}=({Z^{\star}})^{-1}A,$ where $Z^{\star}$ is the set of all nonzero central elements of $A$ that are not zero divisors, is a direct sum $\widetilde{ A}=\widetilde{ A_1}\oplus \cdots \oplus \widetilde{ A_n}$ of simple finite-dimensional over their centers algebras. Let
$$ I_i=A\cap (\widetilde{ A}_1+\cdots +\widetilde{ A}_{i-1}+\widetilde{ A}_{i+1}+\cdots +\widetilde{ A}_{n}), 1\leq i\leq n.$$
All direct summands $\widetilde{A}_i$ are invariant under ${\rm Der} (\widetilde{A}).$ An arbitrary derivation of the ring $A$ extends to a derivation of $\widetilde{A}.$ This implies that each ideal $I_i$ is invariant under ${\rm Der} (A).$
Let us prove that each factor-ring $A/I_i$ is prime. Suppose that $a, b\in A$ and $aAb\subseteq I_i.$ We need to show that $a\in I_i$ or $b\in I_i.$ The inclusion above implies that
$$a{\widetilde A}b\subseteq \widetilde{ A}_1+\cdots +\widetilde{ A}_{i-1}+\widetilde{ A}_{i+1}+\cdots +\widetilde{ A}_{n}.$$
The factor-ring
$${\widetilde A}/(\widetilde{ A}_1+\cdots +\widetilde{ A}_{i-1}+\widetilde{ A}_{i+1}+\cdots +\widetilde{ A}_{n})\simeq {\widetilde A}_{i}$$ is simple. Hence, at least one of the elements $a, b$ lies in $I_i.$ It is straightforward that $I_1\cap \cdots \cap I_n=(0).$ This completes the proof of the lemma.\end{proof}
Now, we are ready to prove theorem \ref{Theorem6} in the case when the ring $A$ is semiprime.
Let $A$ be a finitely generated semiprime $\text{PI}$-ring. Let $L$ be a finitely generated Lie subring $L\subseteq {\rm Der} (A)$ that consists of locally nilpotent derivations. Let $I_1, \ldots , I_n$ be the ideals of lemma \ref{Lemma8}. We showed above that there exists $r\geq 1$ such that
$$L^r(A/I_i)=(0), 1\leq i\leq n.$$
Hence, $$L^r(A)\subseteq \bigcap _{i=1}^nI_i=(0) \quad \text{and} \quad L^r=(0).$$ This completes the proof of theorem \ref{Theorem6} for semiprime rings.
\begin{lemma}\label{Lemma9}
Let $A$ be a finitely generated $\text{PI}$-ring and let $L\subseteq {\rm Der} (A)$ be a Lie ring that consists of locally nilpotent derivations. Let $I\vartriangleleft A$ be a differentially invariant ideal such that $I^2=(0)$ and the image of the Lie ring $L$ in ${\rm Der} (A/I)$ is locally nilpotent. Then the Lie ring $L$ is locally nilpotent.
\end{lemma}
\begin{proof}
Choose derivations $d_1, \ldots, d_n\in L.$
We need to show that the Lie ring $L'$ generated by $d_1, \ldots, d_n$ is nilpotent. By the assumption of the lemma, the image of $L'$ in ${\rm Der} (A/I)$ is nilpotent, hence there exists $r\geq 1$ such that $L'^r(A)\subseteq I.$ Let $d\in L'^r$ and let $a_1, \ldots , a_m$ be generators of the ring $A.$ There exists an integer $l\geq 1$ such that $d^l(a_j)=0, 1\leq j\leq m.$ Let $v=a_{i_1}\cdots a_{i_s}$ be a product of generators in the ring $A.$ Since $d(a_i)Ad(a_j)\subseteq I^2=(0)$ it follows that
$$d^l(a_{i_1}\cdots a_{i_s}) =d^l(a_{i_1})a_{i_2}\cdots a_{i_s}+a_{i_1}d^l(a_{i_2})\cdots a_{i_s}+\cdots +a_{i_1}\cdots a_{i_{s-1}}d^l(a_{i_s})=0.$$
Hence $d^l=0.$
Since the ring $L$ is weakly Engel by lemma \ref{Lemma4}, B.~I.~Plotkin's theorem \cite{Plotkin27} implies that the Lie ring $L'^r$ is finitely generated. Hence, by \cite{Zelmanov43} (see also \textbf{\ref{1.7}}), the Lie ring $L'^r$ is nilpotent and the Lie ring $L'$ is solvable. Again by B.~I.~Plotkin's theorem, the Lie ring $L'$ is nilpotent. This completes the proof of the lemma.\end{proof}
Let us prove theorem \ref{Theorem6} in the case when the ring $A$ does not have additive torsion.
Let $J$ be the Jacobson radical of the ring $A.$ By \cite{Braun9}, the radical $J$ is nilpotent. Let $J^n=(0), J^{n-1}\not =(0), n\geq 2.$ It is well known that if the ring $A$ does not have additive torsion, then the radical $J$ is differentially invariant.
Let $$I=\{ a\in A \ | \ {\rm there \ exists \ an \ integer} \ s\geq 1 \ \ {\rm such \ that} \ sa\in J^{n-1}\}.$$
The ideal $I$ is differentially invariant. We claim that $I^2=(0).$ Indeed, let $a, b\in I.$ There exist integers $s_1, s_2 \geq 1$ such that $s_1a\in J^{n-1}, s_2b \in J^{n-1}.$ Hence $s_1s_2ab \in (J^{n-1})^2=(0).$ Since the ring $A$ does not have additive torsion it follows that $ab=0.$
The Jacobson radical of the ring $A/I$ is $J/I, (J/I)^{n-1}=(0).$ The ring $A/I$ obviously does not have additive torsion. Hence, by inductive assumption on $n$, the image of $L$ in ${\rm Der} (A/I)$ is locally nilpotent; and by lemma \ref{Lemma9}, the ring $L$ is locally nilpotent.
Now, we are ready to finish the proof of theorem \ref{Theorem6}.
Let $a_1, \ldots , a_m$ be generators of a $\text{PI}$-ring $A.$ Let $L\subseteq {\rm Der} (A)$ be a finitely generated Lie subring such that every derivation from $L$ is locally nilpotent. Let $T(A)$ be the ideal of $A$ that consists of elements of a finite additive order. Clearly, $T(A)$ is differentially invariant. The factor-ring $A/T(A)$ does not have an additive torsion. Hence, by the proof of theorem~\ref{Theorem6} in the case when the ring $A$ does not have additive torsion, the image of the ring $L$ in ${\rm Der} (A/T(A))$ is nilpotent. Therefore, there exists $r\geq 1$ such that for any derivation $d\in L^r$ we have $d(A)\subseteq T(A).$ Since the ring $L$ is finitely generated and weakly Engel by lemma \ref{Lemma4}, it follows from B.~I.~Plotkin's theorem \cite{Plotkin27} that the Lie ring $L^r$ is finitely generated.
We aim to show that the Lie ring $L^r$ is nilpotent. Let $d_1', \ldots , d_l'$ be generators of $L^r.$ There exists an integer $n\geq 1$ such that $$nd_i'(a_j)=0, \quad 1\leq i\leq l, \quad 1\leq j\leq m.$$ Hence,
$nL^r(A)=(0).$
For a prime number $p$, consider the ideal
$$I_p=\{ a\in A \ | \ {\rm there \ exists \ an \ integer } \ t\geq 1 \ {\rm such \ that } \ p^ta=0\}.$$ Let $a\in I_p,$ $ d\in L^r.$ Then $nd(a)=0$ and $p^td(a)=0$ for some $t\geq 1.$ Hence, for a prime number $p$ not dividing $n$, we have $L^rI_p=(0).$
This allows us to consider the factor-ring $A/\sum_{p\nmid n}I_p$ instead of $A.$ In other words, we will assume that for a prime number $p$ not dividing $n$ the ring $A$ does not have a $p$-torsion.
Let $p_1, \ldots , p_s$ be all distinct prime divisors of $n.$ Then $$T(A)=I_{p_1}\oplus \cdots \oplus I_{p_s}. $$ Let $s\geq 2.$ Inducting on the integer $n$ we can assume that the image of the Lie ring $L$ in each ${\rm Der} (A/I_{p_i})$ is nilpotent. In other words, there exists a number $r_i\geq 1$ such that $L^{r_i}(A)\subseteq I_{p_i}.$ This implies
$$L^{\max{(r_1, r_2)}}(A)\subseteq I_{p_1}\cap I_{p_2}=(0).$$
Therefore, we assume that $T(A)=I_p$ for some prime number $p.$ The ideal $pI_p$ lies in the Jacobson radical of $A$ and $pI_p$ is differentially invariant. Let $(pI_p)^q=(0), q\geq 1.$ If $q\geq 2,$ then inducting on $q$ we can assume that the image of the Lie ring $L$ in $\Der (A/(pI_p)^{q-1})$ is nilpotent. Hence, the ideal $(pI_p)^{q-1}$ satisfies the assumptions of lemma \ref{Lemma9}. Suppose, therefore, that $q=1,$ $ pI_p=(0),$ $ n=p.$ Now, we have $pL^r(A)=(0).$
This implies that for an arbitrary derivation $d\in L^r$ every $p$-power $d^{p^k}$ is again a derivation. Indeed,
$$ d^{p^k}(ab)=\sum _{i=0}^{p^k}\binom{p^k}{i}d^i(a)d^{p^k-i}(b)$$
for arbitrary elements $a, b\in A.$ If $0< i< p^k,$ then the binomial coefficient $\binom{p^k}{i}$ is divisible by $p,$ hence $$\binom{p^k}{i}d^i(a)=0, \quad \text{which implies} \quad d^{p^k}(ab)=d^{p^k}(a)b+ad^{p^k}(b).$$
Choosing $d\in L^r$ and arguing as above, we find $p^k$ such that $d^{p^k}(a_j)=0, 1\leq j\leq m, $ therefore, $d^{p^k}=0.$ The Lie ring $L^r$ is finitely generated, $\text{PI}$, and an arbitrary derivation from $L^r$ is nilpotent. By \cite{Zelmanov43}, the Lie ring $L^r$ is nilpotent. The ring $L$ is solvable, hence, by the result of B.~I.~Plotkin \cite{Plotkin27}, it is nilpotent. This completes the proof of theorem \ref{Theorem6}.
Now, our aim is to prove theorem \ref{Theorem3}. In the rest of this section, we assume that $A$ is a commutative domain; $L\subseteq {\rm Der} (A)$ is a Lie ring that consists of locally nilpotent derivations; $K$ is the field of fractions of the domain $A,$ and ${\rm dim} _KKL<\infty.$
Our aim is to prove that the Lie ring $L$ is locally nilpotent. Let
\begin{equation}\label{equation5} KL=\sum _{i=1}^{n}Kd_i, \quad d_i\in L, \quad \text{and} \quad [d_i, d_j]=\sum _{t=1}^nc_{ijt}d_t, \quad c_{ijt}=\frac{a_{ijt}}{b_{ijt}},
\end{equation}
where $a_{ijt},$ $ b_{ijt}\in A.$ Enlarging the set $\{ d_1, \ldots , d_n\}$ if necessary we will assume that the derivations $d_1, \ldots , d_n$ generate $L, $ that is, $L={\rm Lie}_{\mathbb Z}\langle d_1, \ldots , d_n\rangle .$ Let $ d_{i_1} \cdots d_{i_m}$ be a product in the associative ring of additive endomorphisms of the field $K.$ We call this product \textit{ordered} if $i_1\leq i_2\leq \cdots \leq i_m.$
Let $\mathcal P$ denote the set of all ordered products of derivations $d_1, \ldots , d_n$ including the empty product, i.e. the identity operator.
\begin{lemma}\label{Lemma10}
For an arbitrary element $a\in A$ the set of ordered products $v=d_{i_1}\cdots d_{i_m}\in \mathcal P$ such that $v(a)\not =0,$ is finite.
\end{lemma}
\begin{proof}
Let $$v=d_1^{k_1}d_{2}^{k_2}\cdots d_n^{k_n}, \quad \text{where} \ k_i \ \text{are nonnegative integers.}$$ There exists an integer $q_n\geq 1$ such that $d_{n}^{q_n}(a)=0.$ Hence, if $v(a)\not =0,$ then $k_n<q_n.$ Similarly, there exists $q_{n-1}\geq 1$ such that $$d_{n-1}^{q_{n-1}}d_n^i(a)=0 \quad \text{for all} \quad 0\leq i< q_{n}.$$ Hence, $v(a)\not =0$ implies $k_n<q_n, k_{n-1}<q_{n-1}$ and so on. This completes the proof of the lemma.
\end{proof}
Consider the set $C=\{ c_{ijt}\}_{i, j, t}\subset K$; see (\ref{equation5}).
\begin{lemma}\label{Lemma11}
An arbitrary product $d_{i_1}\cdots d_{i_r}$ can be represented as
$$d_{i_1}\cdots d_{i_r}=\sum \pm (v_1(c_1))\cdots (v_s(c_s))v_0, $$
where in each summand the operators $v_0,v_1, \ldots ,v_s$ lie in $\mathcal P$ and elements $c_1, \ldots , c_s$ lie in $C.$
\end{lemma}
\begin{proof}
For a product $v=d_{i_1}\cdots d_{i_r}$ let $l$ be the number of $1\leq k\leq r-1,$ such that $i_k>i_{k+1}.$ Let $\nu (v)=(r, l).$ We will compare pairs $(r, l)$ lexicographically and use induction on $\nu (v).$ Let $i=i_k>i_{k+1}=j.$ Then $$d_id_j=d_jd_i+\sum _{t}c_{ijt}d_t.$$ Clearly, $$\nu (d_{i_1}\cdots d_{i_{k-1}}d_jd_id_{i_{k+2}}\cdots d_{i_r})<\nu (v).$$ Consider the product $$d_{i_1}\cdots d_{i_{k-1}}c_{ijt}d_td_{i_{k+2}}\cdots d_{i_r}.$$ Commuting the element $c_{ijt}$ with derivations $d_{i_1}, \ldots , d_{i_{k-1}}$ we get $$d_{i_1}\cdots d_{i_{k-1}}c_{ijt}=\sum (v'(c_{ijt}))v'',$$ where $v', v''$ are products of derivations $d_{i_1}, \ldots , d_{i_{k-1}}$ of total length $k-1.$ Hence, $$d_{i_1}\cdots d_{i_{k-1}}c_{ijt}d_td_{i_{k+2}}\cdots d_{i_r}=\sum \pm (v'(c_{ijt}))v''d_td_{i_{k+2}}\cdots d_{i_r}.$$ In each summand the lengths of products $v'$ and $v''d_td_{i_{k+2}}\cdots d_{i_r}$ are less than $r.$ Applying the induction assumption to these products, we get the assertion of the lemma. \end{proof}
Consider the subring $\widetilde{A}$ of the field $K$ generated by the elements $$v(a_{ijt}), \quad v(b_{ijt}),\quad v(b_{ijt})^{-1};\quad v\in \mathcal P; \quad i,\ j,\ t\geq 1.$$ By lemma \ref{Lemma10}, the ring $\widetilde A$ is finitely generated.
\begin{lemma}\label{Lemma12}
The subring $\widetilde A$ is invariant under the action of $L.$
\end{lemma}
\begin{proof}
For an arbitrary ordered product of derivations $v\in \mathcal P$ we have
$$v(b_{ijt}^{-1})=\sum \frac{1}{b_{ijt}^m}(v_1b_{ijt})\cdots (v_sb_{ijt}),$$
where $m\geq 1; v_1, \ldots , v_s\in \mathcal P,$ and
$$ v(c_{ijt})=v(a_{ijt}\cdot b_{ijt}^{-1})=\sum _{v', v''\in \mathcal P}v'(a_{ijt})v''(b_{ijt}^{-1}).$$
These equalities imply $v(c_{ijt})\in \widetilde{A}.$ Now, by lemma \ref{Lemma11}, the ring $\widetilde A$ is invariant under the action of $L.$ \end{proof}
The ring $\widetilde A$ is generated by elements $v(a_{ijt}), v(b_{ijt})\in A\cap \widetilde{A}$ and elements $v(b_{ijt})^{-1}.$ Hence, an arbitrary element of the ring $\widetilde A$ can be represented as a ratio $a/b,$ where $a, b\in A\cap \widetilde{A}.$ Hence, $A\cap \widetilde{A}$ is an order in the ring $\widetilde A,$ with denominators lying in the multiplicative semigroup $S$ generated by the nonzero elements $v(b_{ijt}).$
By proposition \ref{Proposition1}, the image of the ring $L$ in $\End _{\mathbb Z}(\widetilde{A})$ is a nilpotent Lie ring. Hence, there exists an integer $r\geq 1$ such that $L^{r}(\widetilde{A})=(0).$ By lemma \ref{Lemma5} and Plotkin's theorem \cite{Plotkin27}, the Lie ring $L^r$ is finitely generated.
Consider the subfield
$$K_0=\big\{\, \alpha \in K \ | \ L^r(\alpha )=(0)\, \big\}.$$
The $K_0$-algebra $A'=K_0A\subseteq K$ is a domain. The field $K_0$ is invariant under the action of $L,$ hence the $K_0$-algebra $A'$ is invariant as well.
Let $L'$ be the image of the Lie ring $L^r$ in $\End _{\mathbb Z}(A').$ Since all the coefficients $c_{ijt}$ lie in $K_0$ the product $K_0L$ is a Lie ring and a finite-dimensional vector space over $K_0.$ This implies
that $K_0L'$ is a finite-dimensional $K_0$-algebra. Now, Petravchuk-Sysak theorem (see \cite{Petravchuk_Sysak26}) implies that $L'$ is a nilpotent Lie ring. Again, by lemma \ref{Lemma5} and B.~I.~Plotkin's theorem, the Lie ring $L$ is nilpotent. This completes the proof of theorem \ref{Theorem3}.
We will finish with examples showing that corollary \ref{Cor1} of theorem \ref{Theorem1} and theorem \ref{Theorem2} are wrong for countably generated algebras. Let $\mathbb{F}$ be an arbitrary field and let $A=\mathbb{F}[x_1, x_2, \ldots ]$ be the polynomial algebra on countably many generators. We will construct
(i) a Lie algebra $L\subset {\rm Der} (A)$ that consists of locally nilpotent derivations and is not locally nilpotent,
(ii) a torsion group $G<{\rm Aut} (A)$ that is not locally finite.
Consider the countable-dimensional vector space $V=\sum _{i\geq 1}\mathbb{F}x_i.$ There exists a countable finitely generated Lie algebra $L$ such that every operator ${\rm ad} (a), a\in L,$ is nilpotent, and the algebra $L$ has zero center (see \cite{Golod14,Lenagan_Smoctunowicz20}). The mapping $L\to \mathfrak{gl}(L),$ $ a\mapsto {\rm ad} (a),$ $ a\in L,$ is an embedding of the Lie algebra $L$ in $\mathfrak{gl}(L)$ and every linear transformation ${\rm ad} (a)$ from the image of $L$ is nilpotent. Therefore, we can suppose that $L\subseteq \mathfrak{gl}(V)$ and every linear transformation from $L$ is nilpotent. An arbitrary linear transformation on $V$ is a restriction of a derivation from $$\sum _{i\geq 1}V\frac{\partial}{\partial x_i}.$$ Hence, we assume that
$$L \subseteq \sum _{i\geq 1}V\frac{\partial}{\partial x_i}\subset {\rm Der} (A).$$
Since every derivation from $L$ acts nilpotently on $V$ it follows that it acts locally nilpotently on $A.$ Similarly, there exists a finitely generated torsion group $G< {\rm Aut} (V)$ that is not locally finite (see \cite{Grigorchuk15,Novikov_Adyan23,Novikov_Adyan24,Novikov_Adyan25}). Every linear transformation $\varphi \in GL(V)$ uniquely extends to an automorphism $\widetilde{\varphi}\in {\rm Aut} (A).$ Thus the mapping $GL(V)\to {\rm Aut} (A),$ $\varphi \mapsto \widetilde{\varphi}, $ is an embedding of groups. Hence, $G$ is a torsion not locally finite subgroup of ${\rm Aut} (A).$
\end{document} |
\begin{document}
\title[uniqueness along subsequences]{Cantor uniqueness and multiplicity along subsequences}
\author{Gady Kozma and Alexander Olevski\u\i}
\address{GK: Weizmann institute of Science, Rehovot, Israel.}
\email{[email protected]}
\address{AO: Tel Aviv University, Tel Aviv, Israel}
\email{[email protected]}
\begin{abstract}
We construct a sequence $c_{l}\to0$ such that the trigonometric series
$\sum c_{l}e^{ilx}$ converges to zero everywhere on a subsequence
$n_{k}$. We show that any such series must satisfy that the $n_{k}$
are very sparse, and that the support of the related distribution
is quite large.
\end{abstract}
\maketitle
\section{Introduction}
In 1870, Georg Cantor proved his famous uniqueness theorem for trigonometric
series: if a series $\sum c_{l}e^{ilx}$ converges to zero for every
$x\in[0,2\pi]$, then the $c_{l}$ are all zero \cite{C1870}. The
proof used important ideas from Riemann's \emph{Habilitationsschrift},
namely, that of taking the formal double integral $F(x)=\sum\frac{1}{l^{2}}c_{l}e^{ilx}$
and examining the second Schwarz derivative of $F$. Cantor's proof
is now classic and may be found in many books, e.g.\ \cite[\S IX]{Z}
or \cite[\S XIV]{B64}. A fascinating historical survey of these early
steps in uniqueness theory, including why Riemann defined $F$ in
the first place, may be found in \cite{C93}. (briefly, Riemann was
writing \emph{necessary} conditions for a function to be represented
by a trigonometric series in terms of its double integral).
Cantor's result may be extended in many directions, and probably the
most famous one was the direction taken by Cantor himself, that of
trying to see if the theorem still holds if the series is allowed
not to converge at a certain set, which led Cantor to develop set
theory, and led others to the beautiful theory of sets of uniqueness,
see \cite{KL87}. But in this paper we are interested in a different
kind of extension: does the theorem hold when the series $\sum c_{l}e^{ilx}$
is required to converge only on a subsequence?
This problem was first tackled in 1950, when Kozlov constructed a
nontrivial sequence $c_{l}$ and a second sequence $n_{k}$ such that
\begin{equation}
\lim_{k\to\infty}\sum_{l=-n_{k}}^{n_{k}}c_{l}e^{ilx}=0\qquad\forall x\in[0,2\pi].\label{eq:1}
\end{equation}
See \cite{K50} or \cite[\S XV.6]{B64}. A feature of Kozlov's construction
that was immediately apparent is that the coefficients $c_{l}$ are
(at least for some $l$), very large. Therefore it was natural to
ask if it is possible to have (\ref{eq:1}) together with $c_{l}\to0$.
The problem was first mentioned in the survey of Talalyan \cite{T60}
\textemdash{} this is problem 13 in \S10 (note that there is a mistake
in the English translation), and then repeated in \cite{AT64} where
the authors note, on page 1406, that the problem is ``very hard''.
In the same year, the survey of Ulyanov \cite[page 20 of the English version]{U64}
mentions the problem and conjectures that in fact, no such series
exists. Skvortsov constructed a counterexample for the Walsh system
\cite{S75}, but not for the Fourier system.
\subsection{Results}
In this paper we answer this question in the affirmative. Here is the
precise statement:
\begin{thm}
\label{thm:example}There exist coefficients $c_{l}\to0$, not all
zero, and $n_{k}\to\infty$ such that (\ref{eq:1}) holds.
\end{thm}
The existence of such an example raises many new questions about the
nature of the $c_{l}$, of the distribution $\sum c_{l}e^{ilx}$,
and of the numbers $n_{k}$. We have two results which show some restrictions
on these objects. The first states, roughly, that the $n_{k}$ must
increase at least doubly exponentially:
\begin{thm}
\label{thm:doubly exponentially}Let $c_{l}\to0$ and let $n_{k}$
be such that (\ref{eq:1}) holds. Assume further that $n_{k+1}=n_{k}^{1+o(1)}$.
Then $c_{l}\equiv0$.
\end{thm}
Our second result is a lower bound on the dimension of the support
of the distribution $\sum c_{l}e^{ilx}$. It is stated in terms of
the upper Minkowski dimension (see, e.g., \cite{F14} where it is
called the box counting dimension) which we denote by $\dim_{\textrm{Mink}}$.
\begin{thm}
\label{thm:dim}Let $c_{l}\to0$ and let $n_{k}$ be such that (\ref{eq:1})
holds. Let $K$ be the support of the distribution $\sum c_{l}e^{ilx}$,
and assume that
\[
\dim_{\Mink}(K)<\frac{1}{2}(\sqrt{17}-3)\approx0.561.
\]
Then $c_{l}\equiv0$.
\end{thm}
\subsection{\label{subsec:Comments}Comments and questions}
An immediate question is the sharpness of the double exponential bound
of theorem \ref{thm:doubly exponentially}. The proof of theorem \ref{thm:example}
which we will present is not quantitative, but it can be quantified
with only a modicum of effort, giving:
\begin{quote}
\emph{There exist $c_{l}\to0$ and $n_{k}=\exp(\exp(O(k)))$ such
that (\ref{eq:1}) holds.}
\end{quote}
(this quantitative version, and all other claims in this section,
\S \ref{subsec:Comments}, will not be proved in this paper). Thus
in this setting the main problem remaining is the constant in the
exponent. The reader might find it useful to think about the question
as follows: suppose $n_{k+1}=n_{k}^{\lambda}$. For which value of
$\lambda$ is it possible to construct a counterexample with this
$n_{k}$?
But an even more interesting question is: what happens when the condition
$c_{l}\to0$ is removed from theorem \ref{thm:doubly exponentially}?
The answer is no longer doubly exponential, in fact Nina Bary \cite{B60}
showed that one can take $n_{k}$ growing only slightly faster than
exponentially, and conjectured that this rate of growth is optimal.
Our techniques allow only modest progress towards Bary's conjecture:
we can show that if $n_{k+1}-n_{k}=o(\log k)$ then no such example
may exist.
A variation of the problem where our upper and lower bounds match
more closely is the following: suppose that we require $c_{l}=0$
for all $l<0$ (often this is called an ``analytic'' version of
the problem, because there is a naturally associated analytic function
in the disk, $\sum c_{l}z^{l}$). In this case, the following can
be proved. On the one hand, one can extend Bary's construction and
find an example of a $c_{l}$ and $n_{k}$ both growing slightly faster
than exponential such that
\[
\lim_{k\to\infty}\sum_{l=0}^{n_{k}}c_{l}e^{ilx}=0\qquad\forall x.
\]
On the other hand, it is not possible to have such an example if either
$n_{k+1}-n_{k}\le n_{k}/\log^{2}n_{k}$ or $|c_{l}|\le\exp(Cl/\log^{2}l)$.
This holds for any inverse of a non-quasianalytic sequence.
In a different direction, the condition $c_{l}\to0$ can be improved:
it is possible to require, in theorem \ref{thm:example}, that the
coefficients $c_{l}$ be inside $\ell^{2+\epsilon}$, for any $\epsilon>0$.
This, too, will not be shown in this paper, but the proof is a simple
variation on the proof of theorem \ref{thm:example} below.
Another interesting question is the sharpness of the dimension bound
in theorem \ref{thm:dim}. In our example the dimension of the support
is $1$ (even for the Hausdorff dimension, which is smaller than the
upper Minkowski dimension). It would be very interesting to construct
an example with dimension strictly smaller than $1$. In the opposite
direction, let us remark that in our example the support of the distribution
$\sum c_{l}e^{ilx}$ has measure zero, but it is not difficult to
modify the example so that the support would have positive measure.
We find this interesting because this distribution is so inherently
singular. The support must always be nowhere dense, see lemma \ref{lem:K}
below.
\subsection{Measures}
It is interesting to note that the proofs of theorem \ref{thm:doubly exponentially}
and \ref{thm:dim} do not use the Riemann function in any way. In
fact, the only element of classic uniqueness theory that appears in
the proof is the localisation principle, in the form of Rajchman (see
\S \ref{subsec:Rajchman}). Thus the proof of theorem \ref{thm:doubly exponentially}
is also a new proof of Cantor's classic result. In the 150 years that
passed since its original publication, the only other attempt we are
aware of is \cite{A89}, which gives a proof of Cantor's theorem using
one formal integration rather than two. To give the reader a taste
of the ideas in the proofs of theorems \ref{thm:doubly exponentially}
and \ref{thm:dim}, let us apply the same basic scheme to prove a
simpler result: that no such construction is possible with $c_{l}$
being the Fourier coefficients of a measure.
\begin{prop}
\label{prop:measure}Let $\mu$ be a measure on $[0,2\pi]$ with $\widehat{\mu}(l)\to0$
and let $n_{k}$ be a sequence such that
\[
\lim_{k\to\infty}\sum_{l=-n_{k}}^{n_{k}}\widehat{\mu}(l)e^{ilx}=0\qquad\forall x\in[0,2\pi].
\]
Then $\mu=0$.
\end{prop}
\begin{proof}
Denote $S_{n}(x)=\sum_{l=-n}^{n}\widehat{\mu}(l)e^{ilx}$. For every
$x\in\supp\mu$ there exists an $M(x)$ such that $|S_{n_{k}}(x)|\le M(x)$
for all $k$ (certainly $M$ exists also for $x\not\in\supp\mu$ but
we will not need it). By the Baire category theorem there is an interval
$I$ and a value $M$ such that the set $\{x:M(x)\le M\}$ is dense
in $I\cap\supp\mu$ (and $I\cap\supp\mu\ne\emptyset$). Note that
we are using the Baire category theorem on the support of $\mu$,
which is compact (here and below support will always mean in the distributional
sense, and in particular will be compact). By continuity, in fact
$M(x)\le M$ for all $x\in I\cap\supp\mu$. Let $\varphi$ be a smooth
function supported on $I$ (and $\varphi(x)\ne0$ for all $x\in I^{\circ}$).
We apply the localisation principle (see, e.g.\ \cite[theorem IX.4.9]{Z})
and get that the series
\[
\varphi(x)\sum\widehat{\mu}(l)e^{ilx}\qquad\text{and}\qquad\sum\widehat{\varphi\mu}(l)e^{ilx}
\]
are uniformly equiconvergent. Hence $\varphi\mu$ satisfies the same
property as $\mu$ i.e.
\[
\lim_{k\to\infty}\sum_{l=-n_{k}}^{n_{k}}\widehat{\varphi\mu}(l)e^{ilx}=0\qquad\forall x\in[0,2\pi]
\]
and further, this convergence is bounded on $\supp\varphi\mu$ (since
$\supp\varphi\mu=I\cap\supp\mu$). If $\mu\ne0$ then also $\varphi\mu\ne0$.
The conclusion of the previous paragraph is that we could have, without
loss of generality, assumed to start with that $S_{n_{k}}$ is bounded
on $\supp\mu$. Let us therefore make this assumption (so we do not
have to carry around the notation $\varphi$). We now argue as follows:
\[
\sum_{l=-n_{k}}^{n_{k}}|\widehat{\mu}(l)|^{2}=\sum_{l=-\infty}^{\infty}\overline{\widehat{S_{n_{k}}}(l)}\cdot\widehat{\mu}(l)=\int\overline{S_{n_{k}}}(x)\,d\mu(x)\le M||\mu||
\]
where the second equality is due to Parseval and where $M$ is again
the maximum of $|S_{n_{k}}|$ on $\supp\mu$. Since this holds for
all $k$, we get that $\sum|\widehat{\mu}(l)|^{2}<\infty$, so $\mu$
is in fact an $L^{2}$ function. But this is clearly impossible, since
the Fourier series of an $L^{2}$ function converges in measure to
it.
\end{proof}
The crux of the proof is that $S_{n_{k}}$ is small where $\mu$ is
supported. The proofs of theorems \ref{thm:doubly exponentially}
and \ref{thm:dim} replace $\mu$ with a different partial sum, $S_{s}$
for some carefully chosen $s$ (roughly, for $s\approx n_{k}^{3/2}$)
and show that $S_{n_{k}}$ is small where $S_{s}$ is essentially
supported. The details are below.
Let us remark that the only place where the condition $\widehat{\mu}(l)\to0$
was used in the proof of proposition \ref{prop:measure} is in the
application of the localisation principle. This can be circumvented,
with a slightly more involved argument. See details in \S \ref{sec:Localisation}.
Similarly theorems \ref{thm:doubly exponentially} and \ref{thm:dim}
may be generalised from $c_{l}\to0$ to $c_{l}$ bounded, at the expense
of a more involved use of the localisation principle.
\section{Construction}
It will be convenient to work in the interval $[0,1]$ and not carry
around $\pi$-s, so define
\[
e(x)=e^{2\pi ix}.
\]
For an integrable function $f$ we define the usual Fourier partial
sums,
\[
S_{n}(f;x)=\sum_{l=-n}^{n}\widehat{f}(l)e(lx).
\]
In this paper ``smooth'' means $C^{2}$, but the proofs work equally
well with higher smoothness (up to the quasianalytic threshold). We
use $C$ and $c$ to denote arbitrary constants, whose value might
change from line to line or even inside the same line. We use $C$
for constants which are large enough, and $c$ for constants which
are small enough. We use $||\cdot||$ for the $L^{2}$ (or $\ell^{2}$)
norm, other $L^{p}$ norms are denoted by $||\cdot||_{p}$ (except
one place in the introduction where we used $||\mu||$ for the norm
of the measure $\mu$). For a set $E\subset[0,1]$ we denote by $|E|$
the Lebesgue measure of $E$.
\subsection{The localisation principle\label{subsec:Rajchman}}
Let us recall Riemann's localisation principle: as formulated by Riemann,
it states that the convergence of a trigonometric series at a point
$x$ depends only on the behaviour of the Riemann function at a neighbourhood
of $x$. See \cite[\S IX.4]{Z}. Rajchman found a formulation of the
principle which does not use the Riemann function and has a simple
proof. It states that for any $c_{l}\to0$ and any smooth function
$\varphi$,
\begin{equation}
\varphi(x)\sum c_{l}e(lx)\text{ and }\sum(c*\widehat{\varphi})(l)e(lx)\text{ are uniformly equiconvergent}\label{eq:Rajchman}
\end{equation}
where $c*\widehat{\varphi}$ is a discrete convolution. See \cite[theorem IX.4.9]{Z},
or the proof of theorem \ref{thm:local} below, which follows Rajchman's
approach precisely. We will use Rajchman's theorem both on and off
the support of $\sum c_{l}e(lx)$ (denote this support by $K$). Off
$K$, it has the following nice formulation: if $c_{l}\to0$ then
\begin{equation}
\sum c_{l}e(lx)=0\qquad\forall x\not\in K.\label{eq:KS}
\end{equation}
and further, convergence is uniform on any closed interval disjoint
from $K$. To the best of our knowledge, this precise formulation
first appeared in \cite[Proposition 1, \S V.3, page 54]{KS94}.
\subsection{First estimates}
\begin{lem}
\label{lem:willthisbetheendofthisproblematlast}For every $\epsilon>0$
there exists a smooth function $u:[0,1]\to\mathbb{R}$ with $u(0)=u(1)=0$,
$u(x)\in[0,1]$, and $||\widehat{u-1}||_{\infty}<\epsilon$.
\end{lem}
When we say that $u$ is smooth we mean also when extended periodically
(or when extended by $0$, which is the same under the conditions
above).
\begin{proof}
Take any standard construction of a smooth function satisfying $u(0)=u(1)=0$,
$u(x)\in[0,1]$ and $u(x)=1$ for all $x\in[\frac{1}{2}\epsilon,1-\frac{1}{2}\epsilon]$.
The condition on the Fourier coefficients then follows by $||\widehat{u-1}||_{\infty}\le||u-1||_{1}$.
\end{proof}
\begin{lem}
\label{lem:vandermonde}For every $\epsilon>0$ there exists a smooth
function $h:[0,1]\to\mathbb{R}$ and an $n\in\mathbb{N}$ such that
\begin{enumerate}
\item $\widehat{h}(0)=1$
\item $\supp h\subset[0,\frac{1}{2}]$
\item For all $x\in[0,\frac{1}{2}]$, $|S_{n}(h;x)|<\epsilon$.
\end{enumerate}
\end{lem}
\begin{proof}
Let $P$ be an arbitrary trigonometric polynomial satisfying that
$\widehat{P}(0)=1$ and $|P(x)|<\epsilon$ for all $x\in[0,\frac{1}{2}]$.
Let $n=\deg P$, let $m=2n+1$ and let $q$ be a smooth function supported
on $[0,\nicefrac{1}{2m}]$ with $\widehat{q}(k)\ne0$ for all $|k|\le n$.
Examine a function $h$ of the type
\[
h(x)=\sum_{j=0}^{m-1}a_{j}q\Big(x-\frac{j}{2m}\Big).
\]
Then $h$ is smooth, supported on $[0,\frac{1}{2}]$, and its Fourier
coefficients are given by
\[
\widehat{h}(k)=\widehat{q}(k)\sum_{j=0}^{m-1}a_{j}e(-jk/2m).
\]
The matrix $\{e(-jk/2m):j\in\{0,\dotsc,m-1\},k\in\{-n,\dotsc,n\}\}$
is a Vandermonde matrix hence invertible, so one may find $a_{j}$
such that $\sum a_{j}e(-jk/2m)\linebreak[0]=\widehat{P}(k)/\widehat{q}(k)$
for all $k\in\{-n,\dotsc,n\}$. With these $a_{j}$ our $h$ satisfies
$\widehat{h}(k)=\widehat{P}(k)$ for all $k$ such that $|k|\le n$
so $S_{n}(h)=P$ which has the required properties.
\end{proof}
\begin{rem*}
The coefficients of the $h$ given by lemma \ref{lem:vandermonde}
are typically large. The reason is the Vandermonde matrix applied.
We need to invert the Vandermonde matrix and its inverse has a large
norm, exponential in $n$ (the inverse of a Vandermonde matrix has
an explicit formula). To counterbalance this last sentence a little,
let us remark that $n$, the degree of the polynomial $P$ used during
the proof can be taken to be logarithmic in $\epsilon$. This requires
to choose a good $P$. For this purpose we apply the following theorem
of Szeg\H{o}: for every compact $K\subset\mathbb{C}$ there exists
monic polynomials $Q_{n}$ with $\max_{x\in K}|Q_{n}(x)|=(\capa(K)+o(1))^{n}$.
See \cite[corollary 5.5.5]{R95}. We apply Szeg\H{o}'s theorem with
$K=\{e(x):x\in[0,\nicefrac{1}{2}]\}$ and then define $P_{n}(x)=\textrm{Re}(e(-nx)Q_{n}(e(x)))$.
We get that $\widehat{P_{n}}(0)=1$ and $\max_{x\in[0,1/2]}|P_{n}(x)|\le(\capa(K)+o(1))^{n}$.
The capacity of $K$ can be calculated by writing explicitly a Riemann
mapping between $\mathbb{C}\setminus K$ and $\{z:|z|>1\}$ and is
$\nicefrac{1}{\sqrt{2}}$, and in particular smaller than 1 (see \cite[theorem 5.2.3]{R95}
for the connection to Riemann mappings). Hence it is enough to take
$n=C\log\nicefrac{1}{\epsilon}$ to ensure that $P$ would satisfy
$|P(x)|\le\epsilon$ for all $x\in[0,\nicefrac{1}{2}]$. With this
$P$ the norm of $h$ would be polynomial in $\epsilon$.
\end{rem*}
\subsection{Reducing the coefficients}
In the next lemma we reduce the Fourier coefficients using a method
inspired by a proof of the Menshov representation theorem (see \cite{O85}).
We separate the interval $[0,1]$ into many small pieces and on each
put a copy of the $h$ above, scaled differently. Unlike in typical
applications of Menshov's approach, we do not have each copy of $h$
sit in a distinct ``spectral interval'' but they are rather intertwined.
The details are below. Still, like in other applications of Menshov's
technique, the resulting set is divided into many small intervals
in a way that pushes the dimension up. This is why we are unable to
construct an example supported on a set with dimension less than $1$.
\begin{lem}
\label{lem:no g}For every $\epsilon>0$ there exists a smooth function
$f:[0,1]\to\mathbb{R}$ and an $n\in\mathbb{N}$ with the following
properties:
\begin{enumerate}
\item $\widehat{f}(0)=1$.
\item For all $k\ne0$, $|\widehat{f}(k)|<\epsilon$.
\item For every $x\in\supp f,$ $|S_{n}(f;x)|<\epsilon$.
\end{enumerate}
\end{lem}
\begin{proof}
We may assume without loss of generality that $\epsilon<\frac{1}{2}$,
and it is enough to replace requirement (i) by the weaker requirement
$|\widehat{f}(0)-1|<\epsilon$ (and then normalise).
\emph{1. }Let $h$ be the function given by lemma \ref{lem:vandermonde}
with $\epsilon_{\text{lemma \ref{lem:vandermonde}}}=\epsilon/4$,
and denote $m=n_{\text{lemma \ref{lem:vandermonde}}}$. In other words,
$h$ satisfies
\begin{gather*}
\widehat{h}(0)=1\qquad\qquad\supp h\subset[0,\tfrac{1}{2}]\\
|S_{m}(h;x)|<\tfrac{1}{4}\epsilon\quad\forall x\in[0,\tfrac{1}{2}].
\end{gather*}
Let $a>2||h||_{1}/\epsilon$ be some integer. Let $u$ be the
function given by lemma \ref{lem:willthisbetheendofthisproblematlast}
with $\epsilon_{\textrm{lemma \ref{lem:willthisbetheendofthisproblematlast}}}=\epsilon/2$
i.e.\ $u$ is smooth from $[0,1]$ to $[0,1]$, $u(0)=u(1)=0$ and
$u$ satisfies
\[
||\widehat{u-1}||_{\infty}<{\textstyle \frac{1}{2}}\epsilon.
\]
Let $v(x)=u(xa)$ (extended to zero outside $[0,1/a]$). Let $r$
be a large integer parameter to be fixed later, depending on all previously
defined quantities ($\epsilon$, $h$, $m$, $a$ and $u$). Define
\[
f(x)=\sum_{j=0}^{a-1}v\Big(x-\frac{j}{a}\Big)h(x(r^{3}+jr)).
\]
The role of the quantities $r^{3}+jr$ will become evident later.
Let us see that $f$ satisfies all required properties. It will be
easier to consider trigonometric polynomials rather than smooth functions
so define
\begin{gather}
\begin{aligned}H & \mathrel{\mathop:}= S_{\lfloor r/2\rfloor-1}(h)\qquad\qquad\qquad & V & \mathrel{\mathop:}= S_{\lfloor r/2\rfloor-1}(v)\end{aligned}
\nonumber \\
F(x)\mathrel{\mathop:}=\sum_{j=0}^{a-1}V\Big(x-\frac{j}{a}\Big)H(x(r^{3}+jr)).\label{eq:def f'}
\end{gather}
The smoothness of $v$ and $h$ imply that $||\widehat{v-V}||_{1}$
and $||\widehat{h-H}||_{1}$ can be taken arbitrarily small as $r\to\infty$.
Since
\[
||\widehat{f-F}||_{1}\le\sum_{j=0}^{a-1}||\widehat{v-V}||_{1}||\widehat{h}||_{1}+||\widehat{V}||_{1}||\widehat{h-H}||_{1}
\]
we may take $r$ sufficiently large and get $||\widehat{f-F}||_{1}<\frac{1}{2}\epsilon$
(but do not fix the value of $r$ yet). Thus, with such an $r$, we
need only show
\begin{enumerate}
\item $||\widehat{F-1}||_{\infty}<\frac{1}{2}\epsilon$
\item For every $x\in\supp f$, $|S_{n}(F;x)|<\frac{1}{2}\epsilon$ (note
that we take $x$ in $\supp f$ and not in $\supp F$).
\end{enumerate}
\emph{2}. We start with the estimate of $\widehat{F-1}$. Examine
one summand in the definition of $F$, (\ref{eq:def f'}). Denoting
$G_{j}=V(x-j/a)H(x(r^{3}+jr))$ we have
\begin{equation}
\widehat{G_{j}}(l)=\begin{cases}
\widehat{V}(p)\widehat{H}(q)e(-pj/a) & l=p+q(r^{3}+jr),\;|p|,|q|<r/2\\
0 & \text{otherwise}.
\end{cases}\label{eq:gj hat}
\end{equation}
In particular, $l$ and $j$ determine $p$ and $q$ uniquely. An
immediate corollary is:
\begin{equation}
||\widehat{G_{j}}||_{\infty}=||\widehat{V}||_{\infty}||\widehat{H}||_{\infty}\le||v||_{1}||h||_{1}\le\frac{||h||_{1}}{a}<\frac{\epsilon}{2}\label{eq:gj hat infty}
\end{equation}
where the last inequality is from the definition of $a$. Assume now
that $r>a$. Then we can extract another corollary from (\ref{eq:gj hat}):
that the different $G_{j}$ have disjoint spectra, except at $(-r/2,r/2)$.
Hence
\begin{equation}
|\widehat{F}(l)|=\max_{j}|\widehat{G_{j}}(l)|\stackrel{\textrm{(\ref{eq:gj hat infty})}}{<}\frac{\epsilon}{2}\qquad\forall|l|\ge r/2.\label{eq:l>r/2}
\end{equation}
Finally, for $l\in(-r/2,r/2)$ we have that $F$ ``restricted spectrally
to $(-r/2,\linebreak[0]r/2)$'' is simply $\sum_{j}V(x-j/a)$ so
its Fourier spectrum is simply that of $u$ spread out. Since $||\widehat{u-1}||<\frac{\epsilon}{2}$
we get also in this case $|\widehat{F-1}(l)|<\frac{1}{2}\epsilon$.
For those who prefer formulas, just note in (\ref{eq:gj hat}) that
if $l\in(-r/2,r/2)$ then $q=0$ and since $\widehat{H}(0)=1$ we
get
\[
\widehat{F}(l)=\sum_{j=0}^{a-1}\widehat{V}(l)e(-lj/a)=\begin{cases}
a\widehat{V}(l) & l\equiv0\text{ mod }a\\
0 & \text{otherwise}.
\end{cases}
\]
Recall that $v(x)=u(xa)$ so for $l\equiv0$ mod $a$ we have
\[
|\widehat{aV-1}(l)|\le|\widehat{av-1}(l)|=|\widehat{u-1}(l/a)|<\tfrac{1}{2}\epsilon.
\]
With (\ref{eq:l>r/2}) we get $||\widehat{F-1}||_{\infty}<\frac{1}{2}\epsilon$,
as needed.
\emph{3}. Finally, we need to define $n$ and see that $S_{n}(F)$
is small on $\supp f$. Assume $r>m$ and define
\[
n=m(r^{3}+r^{2}).
\]
This value of $n$ has the property that
\begin{align*}
n & >m(r^{3}+jr)+r/2\\
n & <(m+1)(r^{3}+jr)-r/2
\end{align*}
for all $j\in\{0,\dotsc,a-1\}$. We now see why it was important to
choose the spacings of the arithmetic progressions to be $r^{3}+jr$:
these spacings need to be different to have separation of the spectra
of the different $G_{j}$ (and they must be different by at least
$r$, because the spectra of the $G_{j}$ are arranged in blocks of
size $r$), but they need to be sufficiently close that it would still
be possible to ``squeeze'' an $n$ between all the terms that correspond
to the $m^{\textrm{th}}$ block in all $G_{j}$ and all the terms
that correspond to the $m+1^{\textrm{st}}$ blocks. The $r^{3}$ in
the spacings ensures that.
Using (\ref{eq:gj hat}) gives that
\[
S_{n}(G_{j};x)=S_{m}\big(H;x(r^{3}+jr)\big)\cdot V\Big(x-\frac{j}{a}\Big).
\]
At this point it will be easier to compare to $v$ rather than to
$V$, so write
\[
S_{n}(G_{j};x)=S_{m}\big(H;x(r^{3}+jr)\big)\cdot v\Big(x-\frac{j}{a}\Big)+E_{j}
\]
and note that for $r$ sufficiently large $E_{j}$ can be taken to
be arbitrarily small. Take $r$ so large as to have
\begin{equation}
\bigg|S_{n}(F;x)-\sum_{j=0}^{a-1}S_{m}(H;x(r^{3}+jr))v\Big(x-\frac{j}{a}\Big)\bigg|<\tfrac{1}{4}\epsilon\qquad\forall x\in[0,1].\label{eq:h tag u notag}
\end{equation}
This is our last requirement from $r$ and we may fix its value now.
For every $x\in[0,1]$ there is at most one $j_{0}$ such that $v(x-j_{0}/a)\ne0$,
namely $j_{0}=\lfloor xa\rfloor$. If $x\in\supp f$ then it must
be the case that $x(r^{3}+j_{0}r)\in[0,\frac{1}{2}]$ mod 1. But in
this case, by our definition,
\[
|S_{m}(H;x(r^{3}+j_{0}r))|<\tfrac{1}{4}\epsilon.
\]
We get
\[
x\in\supp f\implies\bigg|\sum_{j=0}^{a-1}S_{m}(H;x(r^{3}+jr))\cdot v\Big(x-\frac{j}{a}\Big)\bigg|<\tfrac{1}{4}\epsilon,
\]
and with (\ref{eq:h tag u notag}) we get $|S_{n}(F;x)|<\frac{1}{2}\epsilon$,
as needed.
\end{proof}
\begin{lem}
\label{lem:yes g}Let $f:[0,1]\to\mathbb{R}$ be smooth, $\epsilon>0$
and $N\in\mathbb{N}$. Then there exists a smooth function $g:[0,1]\to\mathbb{R}$
satisfying
\begin{enumerate}
\item $\supp g\subseteq\supp f$.
\item \label{enu:g-f^}For all $n\in\mathbb{Z}$, $|\widehat{g}(n)-\widehat{f}(n)|<\epsilon$
\item \label{enu:Sng small}For some $n>N$ we have
\[
|S_{n}(g;x)|<\epsilon\qquad\forall x\in\supp g.
\]
\end{enumerate}
\end{lem}
\begin{proof}
Let $h$ be the function from lemma \ref{lem:no g} with $\epsilon_{\text{lemma \ref{lem:no g}}}=\epsilon/(2||\widehat{f}||_{1})$.
Denote by $m$ the integer output of lemma \ref{lem:no g} i.e.\ the
number such that $|S_{m}(h;x)|<\epsilon/(2||\widehat{f}||_{1})$ for
all $x\in\supp h$. Let $r$ be large enough so that
\[
\sum_{|k|\ge r/2}|\widehat{f}(k)|<\epsilon/(2||\widehat{h}||_{1})
\]
and such that $r(m+1/2)>N$ (let $r$ be even). Denote
\begin{align*}
g(x) & \mathrel{\mathop:}= f(x)h(rx)\qquad n\mathrel{\mathop:}= r(m+1/2)
\end{align*}
where $h$ is extended periodically to $\mathbb{R}$. Let us see that
$g$ and $n$ satisfy the requirements of the lemma. The smoothness
of $g$ follows from those of $f$ and $h$. Condition (\ref{enu:g-f^})
follows because
\[
\widehat{g}(k)-\widehat{f}(k)=\sum_{l}\widehat{h-1}(l)\widehat{f}(k-lr)
\]
and because $||\widehat{h-1}||_{\infty}\le\epsilon/(2||\widehat{f}||_{1})$.
Finally, to see condition (\ref{enu:Sng small}) write
\[
F\mathrel{\mathop:}= S_{r/2}(f)\qquad G(x)\mathrel{\mathop:}= F(x)h(rx)
\]
and note that $||\widehat{g-G}||_{1}\le||\widehat{f-F}||_{1}||\widehat{h}||_{1}<\frac{1}{2}\epsilon$.
To estimate $S_{n}(G)$, note that if $x\in\supp g$ then $rx\in\supp h$
mod 1 and hence $|S_{m}(h;rx)|<\epsilon/(2||\widehat{f}||_{1})$. But
\[
S_{n}(G;x)=F(x)S_{m}(h;rx)
\]
and since $|F(x)|\le||\widehat{F}||_{1}\le||\widehat{f}||_{1}$ we
get
\[
|S_{n}(G;x)|\le||\widehat{f}||_{1}\frac{\epsilon}{2||\widehat{f}||_{1}}=\frac{\epsilon}{2}
\]
finishing the lemma.
\end{proof}
\subsection{Proof of theorem \ref{thm:example}}
The coefficients $c_{l}$ will be constructed by inductively applying
lemma \ref{lem:yes g}. Define therefore $f_{1}=1$ and $n_{1}=2$,
and for all $k\ge1$ define $f_{k+1}=g_{\text{lemma \ref{lem:yes g}}}$
and $n_{k+1}=n_{\text{lemma \ref{lem:yes g}}}$ where lemma \ref{lem:yes g}
is applied with $f_{\text{lemma \ref{lem:yes g}}}=f_{k}$, $\epsilon_{\text{lemma \ref{lem:yes g}}}=2^{-k}/n_{k}$
and $N_{\text{lemma \ref{lem:yes g}}}=n_{k}+1$ (this last parameter
merely ensures that the $n_{k}$ are increasing). We now claim that
$\widehat{f_{k}}(l)$ converges as $k\to\infty$, and that the limit,
$c_{l}$, satisfies the requirements of the theorem.
The fact that $\lim_{k\to\infty}\widehat{f_{k}}(l)$ exists is clear,
because $|\widehat{f_{k+1}}(l)-\widehat{f_{k}}(l)|<2^{-k}/n_{k}$. Denote
\[
c_{l}=\lim_{k\to\infty}\widehat{f_{k}}(l).
\]
This also shows that $c_{l}\to0$.
Denote now $S_{n}=\sum_{l=-n}^{n}c_{l}e(lx).$ To see that $S_{n_{k}}(x)\to0$
for all $x$ we separate into $x\in\cap\supp f_{k}$ and the rest.
Note that $\cap\supp f_{k}$ contains the support of the distribution
$\delta:=\sum c_{l}e(lx)$. Indeed, if $\varphi$ is a Schwartz test
function supported outside $\cap\supp f_{k}$ then $\supp\varphi\cap\supp f_{k}$
is a sequence of compact sets decreasing to the empty set (recall
that $\supp f_{k+1}\subseteq\supp f_{k}$) so for some finite $k_{0}$
we already have $\supp\varphi\cap\supp f_{k}=\emptyset$ for all $k>k_{0}$.
This of course implies that $\langle\varphi,f_{k}\rangle=0$. Taking
the limit $k\to\infty$ we get $\langle\varphi,\delta\rangle=0$ (we
may take the limit since $||\widehat{f_{k}-\delta}||_{\infty}\to0$
while $\widehat{\varphi}\in l_{1}$). Since this holds for any $\varphi$
supported outside $\cap\supp f_{k}$ we get $\supp\delta\subset\cap\supp f_{k}$,
as claimed.
Now, for $x\not\in\cap\supp f_{k}$ we use the localisation principle
in the form (\ref{eq:KS}) and get
\begin{equation}
\lim_{n\to\infty}S_{n}(x)=0\qquad\forall x\not\in\bigcap\supp f_{k}\label{eq:outside support}
\end{equation}
i.e.\ outside the support it is not necessary to take a subsequence.
Finally, examine $x\in\supp f_{k}$. By clause (\ref{enu:Sng small})
of lemma \ref{lem:yes g}
\begin{equation}
|S_{n_{k}}(f_{k};x)|<\frac{1}{2^{k-1}n_{k-1}}.\label{eq:nk at k}
\end{equation}
For any $j\ge k$, the condition $|\widehat{f_{j+1}}(k)-\widehat{f_{j}}(k)|<2^{-j}/n_{j}\le2^{-j}/n_{k}$
means that
\[
|S_{n_{k}}(f_{j+1};x)-S_{n_{k}}(f_{j};x)|<3\cdot2^{-j}
\]
which we sum (also with (\ref{eq:nk at k})) to get
\[
|S_{n_{k}}(f_{j};x)|<8\cdot2^{-k}\qquad\forall j\ge k
\]
and taking limit as $j\to\infty$ gives
\[
\big|S_{n_{k}}(x)\big|<8\cdot2^{-k}\qquad\forall x\in\supp f_{k}.
\]
We conclude
\[
\lim_{k\to\infty}S_{n_{k}}(x)=0\qquad\forall x\in\bigcap\supp f_{k}.
\]
With (\ref{eq:outside support}), the theorem is proved.\qed
\begin{rem*}
The observant reader probably noticed that we use smooth functions
as our building blocks rather than trigonometric polynomials, and
hence our construction does not naturally have large spectral gaps,
unlike many constructions of null series. This is not a coincidence:
it is not possible to have many large spectral gaps in any series
that satisfies the requirements of Theorem \ref{thm:example}. Precisely,
a theorem of Beurling states that any tempered distribution $\sum c_{l}e^{ilt}$
whose support is not the whole interval (and our $c_{l}$ satisfy
that, see Lemma \ref{lem:K} below) cannot have $c_{l}=0$ on an increasing
sequence of intervals $[a_{k},b_{k}]$ satisfying $\sum(b_{k}-a_{k})^{2}/a_{k}^{2}=\infty$.
See e.g.~\cite[Theorem 4]{B84}.
\end{rem*}
\section{Proof of theorems \ref{thm:doubly exponentially} and \ref{thm:dim}}
The following lemma summarises some properties of the support of the
distribution.
\begin{lem}
\label{lem:K}Let $c_{l}\to0$ and $n_{k}\to\infty$ such that
\[
\lim_{k\to\infty}S_{n_{k}}(x)=0\qquad\forall x\qquad S_{n}(x)=\sum_{l=-n}^{n}c_{l}e(lx)
\]
Let $K$ be the support of the distribution $\sum c_{l}e(lx)$. Then
\begin{enumerate}
\item \label{enu:critical}$K=\{x:\forall\epsilon>0,S_{n_{k}}\text{ is unbounded in }(x-\epsilon,x+\epsilon)\}$.
\item \label{enu:nowhere dense}$K$ is nowhere dense.
\end{enumerate}
\end{lem}
\begin{proof}
We start with clause (\ref{enu:critical}). On the one hand, if $x\not\in K$
then the localisation principle (\ref{eq:KS}) tells us that $S_{n}\to0$
uniformly in some neighbourhood of $x$. On the other hand, if $S_{n_{k}}$
is bounded in some neighbourhood $I$ of $x$ then for any smooth
test function $\varphi$ supported on $I$ we have
\[
\langle\varphi,\sum c_{l}e(lx)\rangle=\sum_{l=-\infty}^{\infty}c_{l}\widehat{\varphi}(l)=\lim_{k\to\infty}\sum_{l=-n_{k}}^{n_{k}}c_{l}\widehat{\varphi}(l)=\lim_{k\to\infty}\int\varphi S_{n_{k}}
\]
but the integral on the right-hand side tends to zero from the bounded
convergence theorem. This shows (\ref{enu:critical}).
To see clause (\ref{enu:nowhere dense}) examine the function $N(x)=\sup_{k}|S_{n_{k}}(x)|$
and apply the Baire category theorem to the sets $\{x:N(x)\ge M\}$
for all integer $M$. We get, in every interval $I$, an open interval
$J\subset I$ and an $M$ such that $N(x)\le M$ on a dense subset
of $J$. Continuity shows that in fact $N(x)\le M$ on all of $J$
and hence $J\cap K=\emptyset$, as needed.
\end{proof}
\begin{rem*}
Without the condition $c_{l}\to0$ it still holds that
\[
K\subset\{x:\forall\epsilon>0,S_{n_{k}}\text{ is unbounded in }(x-\epsilon,x+\epsilon)\}
\]
and that $K$ is nowhere dense. The proof is the same.
\end{rem*}
We will now make a few assumptions that will make the proof less cumbersome.
First we assume that $c_{-l}=\overline{c_{l}}$ (or, equivalently,
that the $S_{n}$ are real). It is straightforward to check that this
assumption may be made without loss of generality in both theorems
\ref{thm:doubly exponentially} and \ref{thm:dim}. Our next assumption
is:
\begin{assumption}In the next lemma we assume that $S_{n_{k}}$ is
bounded on $K,$ the support of the distribution $\sum c_{l}e(lx)$.
Further, whenever we write ``$C$'', the constant is allowed to
depend on $\sup\{|S_{n_{k}}(x)|:x\in K,k\}$. \end{assumption}
As in the proof of proposition \ref{prop:measure}, we will eventually
remove this assumption by a simple localisation argument.
\begin{lem}
\label{lem:new riemann}Let $c_{l}$, $n_{k}$ and $S_{n}$ be as
in the previous lemma. Let $r$ be a sufficiently large number in
our sequence (i.e.\ $r=n_{k}$ for some $k$) and let $s>r^{3/2}\log^{4}r$
not necessarily in the sequence. Then
\begin{equation}
||S_{s}||\ge c||S_{r}||^{2}.\label{eq:no dim}
\end{equation}
\end{lem}
Lemma \ref{lem:new riemann} is used in the proof of theorem \ref{thm:doubly exponentially}.
We will also need a version of lemma \ref{lem:new riemann} for theorem
\ref{thm:dim} but that version is somewhat clumsy to state, so rather
than doing it now, we postpone it to the end of the proof of the lemma,
the impatient can jump to (\ref{eq:dim-2}) to see it. The only point
worthy of making now is that we will need a result that holds for
all $s>r$ so throughout the proof of lemma \ref{lem:new riemann}
we will note when we use the assumption $s>r^{3/2}\log^{4}r$ and
when $s>r$ is enough.
It might be tempting to think that lemma \ref{lem:new riemann} is
a lemma on trigonometric polynomials, i.e.\ that it would have been
possible to simply formulate it for $S_{r}$ being the Fourier partial
sum of $S_{s}$. However, as the proof will show, we need to have
the full distribution acting ``in the background'' restricting both
what $S_{r}$ and $S_{s}$ may do.
\begin{proof}
Fix $r$ and $s>r$. It will be convenient to assume $s/\log^{4}s\ge r$,
so let us make this assumption until further notice. Denote $K=\supp\sum c_{l}e(lx)$
and let $I$ be a component of $K^{c}$ with $|I|>(2\log^{3}s)/s$.
Let $\varphi_{I}$ be a function with the following properties:
\begin{enumerate}
\item If $I=(a,b)$ then $\varphi_{I}$ restricted to $[a+(\log^{3}s)/s,b-(\log^{3}s)/s]$
is identically $1$.
\item $\supp\varphi_{I}\subset I$ (note that $I$ is open, so this inclusion
must be strict).
\item $\varphi_{I}(x)\in[0,1]$ for all $x\in[0,1]$.
\item $|\widehat{\varphi_{I}}(l)|\le C\exp\Big(-c\sqrt{(|l|\log^{3}s)/s}\Big)$.
\end{enumerate}
It is easy to see that such a $\varphi_{I}$ exists \textemdash{}
take a standard construction of a $C^{\infty}$ function $\psi:\mathbb{R}\to[0,1]$
with $\psi|_{(-\infty,0)}\equiv0$, $\psi|_{[1,\infty)}\equiv1$ and
$||\psi^{(k)}||_{\infty}\le C(k!)^{2}$ (see e.g.\ \cite[\S V.2]{K04}),
define $\varphi$ by mapping $\psi$ (restricted to an appropriate
interval) linearly to each half of $I$ and estimate $\widehat{\varphi}(l)$
by writing $|\widehat{\varphi}(l)|\le l^{-k}\cdot||\varphi^{(k)}||_{\infty}$
and optimising over $k$. We skip any further details.
Let
\[
\varphi=\sum_{I}\varphi_{I}
\]
where the sum is taken over all $I$ as above, i.e.\ $I$ is a component
of $K^{c}$ with $|I|>(2\log^{3}s)/s$. Our lemma is based on the
following decomposition
\[
||S_{r}||^{2}=\int S_{s}\cdot S_{r}=\int S_{s}\cdot S_{r}\cdot\varphi+\int S_{s}\cdot S_{r}\cdot(1-\varphi).
\]
To estimate the first summand, first note that
\begin{align*}
|\widehat{S_{r}\cdot\varphi_{I}}(n)| & \le\sum_{l=-r}^{r}|c_{l}\widehat{\varphi_{I}}(n-l)|\le C\sum_{l=-r}^{r}\exp\left(-c\sqrt{\frac{|n-l|\log^{3}s}{s}}\right)\\
& \stackrel{\mathclap{{(*)}}}{\le}Cr\exp\Big(-c\sqrt{(|n|\log^{3}s)/s}\Big).
\end{align*}
The inequality marked by $(*)$ is a simple exercise, but let us remark
on it anyway. If $|n|<2s/\log^{3}s$ then both sides of $(*)$ are
$\approx r$ and it holds. If $|n|\ge2s/\log^{3}s$ then, because
we assumed $s/\log^{4}s>r$, we get that $\frac{1}{2}|n|>r\ge|l|$
so $|n-l|\ge\frac{1}{2}|n|$ and $(*)$ holds again.
Summing over $I$ gives
\[
|\widehat{S_{r}\cdot\varphi}(n)|\le Crs\exp\Big(-c\sqrt{(|n|\log^{3}s)/s}\Big).
\]
Next, because $S_{r}\cdot\varphi$ is supported outside $K$ we have
\[
\sum_{l=-\infty}^{\infty}c_{l}\widehat{S_{r}\cdot\varphi}(l)=0
\]
so
\[
\int S_{s}\cdot S_{r}\cdot\varphi=-\sum_{|l|>s}c_{l}\widehat{S_{r}\cdot\varphi}(l)
\]
and then
\begin{align}
\Big|\int S_{s}\cdot S_{r}\cdot\varphi\Big| & \le\sum_{|l|>s}|c_{l}|\cdot|\widehat{S_{r}\cdot\varphi}(l)|\le C\sum_{|l|>s}rs\exp\Big(-c\sqrt{(|l|\log^{3}s)/s}\Big)\nonumber \\
& \le C\exp(-c\log^{3/2}s)\label{eq:SrSsphi}
\end{align}
which is negligible (the last inequality can be seen, say, by dividing
into blocks of size $s$, getting the expression $Crs^{2}\sum_{k=1}^{\infty}\exp(-ck\log^{3/2}s)$
which is clearly comparable to its first term $\exp(-c\log^{3/2}s)$,
and finally noting that the term $rs^{2}\le s^{3}$ may be dropped
at the price of changing the constants $C$ and $c$).
We move to the main term, $\int S_{r}S_{s}(1-\varphi)$, which we
will estimate using Cauchy-Schwarz
\[
\Big|\int S_{s}\cdot S_{r}\cdot(1-\varphi)\Big|\le||S_{s}||\cdot||S_{r}(1-\varphi)||.
\]
Hence we need to estimate $||S_{r}(1-\varphi)||$. For this we do
not need the smoothness of $\varphi$ so define $E:=\supp(1-\varphi)$
and replace $1-\varphi$ with $\mathbbm{1}_{E}$. Thus the lemma will
be proved once we show
\begin{claim*}
If $s>r^{3/2}\log^{4}r$ then $||S_{r}\mathbbm{1}_{E}||\le C$.
\end{claim*}
To show the claim, we need the following definition. Let $I$ be a
component of $K^{c}$ (not necessarily large, any component) and denote,
for each such $I$ and for each $M$,
\[
A_{I,M}\mathrel{\mathop:}=|\{x\in I\cap E:|S_{r}'|\in[M,2M]\}|.
\]
We need a simple bound for the values of $M$ that interest us, and
we use that $|S_{r}'|\le Cr^{2}$ always (simply because the $c_{l}$
are bounded). For any $x\in I\cap E$ we may then estimate $S_{r}$
itself by integrating $S_{r}'$ from the closest point of $K$ up
to $x$. We get
\begin{equation}
|S_{r}(x)|\le C+\sum_{\substack{M=1\\
\text{scale}
}
}^{Cr^{2}}2MA_{I,M}\label{eq:SraIM-1}
\end{equation}
where the word ``scale'' below the $\Sigma$ means that $M$ runs
through powers of $2$ (i.e.\ it is equivalent to $\sum_{m=0}^{\lfloor\log_{2}Cr^{2}\rfloor}$
and $M=2^{m}$). Note that (\ref{eq:SraIM-1}) uses our assumption
that $\max_{x\in K}|S_{r}(x)|\le C$ for a constant $C$ independent
of $r$ (and the additive constant $C$ in (\ref{eq:SraIM-1}) is
the same $C$). Rewriting (\ref{eq:SraIM-1}) as
\[
|S_{r}\cdot\mathbbm{1}_{E}|\le\sum_{I}\mathbbm{1}_{I\cap E}\Big(C+\sum_{M\textrm{ scale}}^{Cr^{2}}2MA_{I,M}\Big)
\]
gives
\begin{align}
||S_{r}\mathbbm{1}_{E}|| & \le C\Big\Vert\sum_{I}\mathbbm{1}_{I\cap E}\Big\Vert+\sum_{M\textrm{ scale}}^{Cr^{2}}\Big\Vert\sum_{I}2MA_{I,M}\mathbbm{1}_{I\cap E}\Big\Vert\nonumber \\
& =C\sqrt{|E|}+\sum_{M\textrm{ scale}}^{Cr^{2}}2M\sqrt{\sum_{I}|I\cap E|A_{I,M}^{2}}.\label{eq:norm Sr}
\end{align}
To estimate the sum notice that $A_{I,M}\le|I\cap E|\le2(\log s)^{3}/s$
so
\begin{align*}
\sum_{I}|I\cap E|A_{I,M}^{2} & \le4\frac{\log^{6}s}{s^{2}}\sum_{I}A_{I,M}\le4\frac{\log^{6}s}{s^{2}}|\{x:|S_{r}'(x)|\ge M\}|\\
& \stackrel{\mathclap{{(*)}}}{\le}4\frac{\log^{6}s}{s^{2}}\frac{||S_{r}'||^{2}}{M^{2}}\le4\frac{\log^{6}s}{s^{2}}\frac{||S_{r}||^{2}r^{2}}{M^{2}}
\end{align*}
where the inequality marked by $(*)$ follows by Chebyshev's inequality.
The sum over scales in (\ref{eq:norm Sr}) has only $C\log r\le C\log s$
terms, so we get
\[
||S_{r}\mathbbm{1}_{E}||\le C\Big(\sqrt{|E|}+\frac{||S_{r}||r\log^{4}s}{s}\Big).
\]
This finishes the claim, since we assumed $s>r^{3/2}\log^{4}r$ and
since $||S_{r}||\le C\sqrt{r}$ because the coefficients $c_{l}$
are bounded. \qed
Let us recall how the claim implies the lemma: using Cauchy-Schwarz
and $(1-\varphi)\le\mathbbm{1}_{E}$ gives
\begin{equation}
\Big|\int S_{s}\cdot S_{r}\cdot(1-\varphi)\Big|\le C||S_{s}||\Big(\sqrt{|E|}+\frac{||S_{r}||r\log^{4}s}{s}\Big)\label{eq:norm Sr final}
\end{equation}
Recall that (\ref{eq:SrSsphi}) showed that the other term in $||S_{r}||^{2}$
is negligible, so we get the same kind of estimate for $||S_{r}||^{2}$:
\begin{equation}
||S_{r}||^{2}\le C||S_{s}||\bigg(\sqrt{|E|}+\frac{||S_{r}||r\log^{4}s}{s}\bigg).\label{eq:dim}
\end{equation}
With $s>r^{3/2}\log^{4}r$ and $||S_{r}||\le C\sqrt{r}$ equation
(\ref{eq:dim}) translates to $||S_{r}||^{2}\le C||S_{s}||$, as needed.
Before putting the q.e.d.\ tombstone, though, let us reformulate
(\ref{eq:dim}) in a way that will be useful in the proof of theorem
\ref{thm:dim}. We no longer assume $s>r^{3/2}\log^{4}r$ (though
we cannot yet remove the assumption $s/\log^{4}s>r$ from the beginning
of the proof, as it was used to reach (\ref{eq:dim})). Recall that
$E=\supp(1-\varphi)$, that $\varphi=\sum\varphi_{I}$ and that each
$\varphi_{I}$ is $1$ except in a $(\log^{3}s)/s$ neighbourhood
of $K$. Hence $E\subset K+[-(\log s)^{3}/s,(\log s)^{3}/s]$ (the
sum here is the Minkowski sum of two sets) and (\ref{eq:dim}) can
be written as
\begin{equation}
||S_{r}||^{2}\le C||S_{s}||\bigg(\sqrt{\bigg|K+\left[-\frac{\log^{3}s}{s},\frac{\log^{3}s}{s}\right]\bigg|}+\frac{||S_{r}||r\log^{4}s}{s}\bigg).\label{eq:dim-2}
\end{equation}
Finally, note that (\ref{eq:dim-2}) does not actually require the
assumption $s/\log^{4}s>r$ because in the other case it holds trivially.
Hence (\ref{eq:dim-2}) holds for all $s>r$. Now we can put the tombstone.
\end{proof}
\begin{proof}
[Proof of theorem \ref{thm:doubly exponentially}] Let $K$ be the
support of the distribution $\sum c_{l}e(lx)$. We first claim that
we can assume without loss of generality that $S_{n_{k}}$ is bounded
on $K$. This uses the localisation principle exactly like we did
in the proof of proposition \ref{prop:measure}, but let us do it
in detail nonetheless. Since $S_{n_{k}}(x)\to0$ everywhere, $\sup_{k}|S_{n_{k}}(x)|$
is finite everywhere. Applying the Baire category theorem to the function
$\sup_{k}|S_{n_{k}}(x)|$ on $K$ we see that there is an open interval
$I$ such that $S_{n_{k}}$ is bounded on a dense subset of $K\cap I$,
and $K\cap I\ne\emptyset$. Continuity of $S_{n_{k}}$ shows that
they are in fact bounded on the whole of $K\cap I$. By the definition
of support of a distribution, we can find a smooth test function $\varphi$
supported on $I$ such that $\sum\widehat{\varphi}(l)c_{l}$ is not
zero. Let $d_{l}=c_{l}*\widehat{\varphi}$ (and hence $d$ is not
zero either). Then by the localisation principle (\ref{eq:Rajchman}),
$\sum_{-n_{k}}^{n_{k}}d_{l}e(lx)$ converges everywhere to zero and
is bounded on $K\cap I$, which contains the support of $\sum d_{l}e(lx)$.
Hence we can rename $d_{l}$ to $c_{l}$ and simply assume that $S_{n_{k}}$
is bounded on $K$.
We now construct a series $r_{i}$ as follows: we take $r_{1}=n_{1}$
and for each $i\ge1$ let $r_{i+1}$ be the first element of the series
$n_{k}$ which is larger than $r_{i}^{7/4}$. Because $n_{k+1}=n_{k}^{1+o(1)}$
we will have in fact that $r_{i+1}=r_{i}^{7/4+o(1)}$ and hence
\begin{equation}
r_{i}=\exp((7/4+o(1))^{i}).\label{eq:ridoubly}
\end{equation}
We now apply lemma \ref{lem:new riemann} with $r_{\text{lemma \ref{lem:new riemann}}}=r_{i}$
and $s_{\text{lemma \ref{lem:new riemann}}}=r_{i+1}$. We get
\[
||S_{r_{i+1}}||\ge c||S_{r_{i}}||^{2}
\]
Denote this last constant by $\lambda$ for clarity (i.e.\ $||S_{r_{i+1}}||\ge\lambda||S_{r_{i}}||^{2}$).
Iterating the inequality $||S_{r_{i+1}}||\ge\lambda||S_{r_{i}}||^{2}$
starting from some $i_{0}$ such that $||S_{r_{i_{0}}}||>e/\lambda$
gives
\[
||S_{r_{i}}||\ge(\lambda||S_{r_{i_{0}}}||)^{2^{i-i_{0}}}>\exp(2^{i-i_{0}})
\]
Together with (\ref{eq:ridoubly}) we get
\[
||S_{r_{i}}||\ge\exp((\log r_{i})^{1.2386+o(1)})
\]
(the number is $\approx\log2/\log\nicefrac{7}{4}$) which certainly
contradicts the boundedness of the $c_{l}$.
\end{proof}
\begin{proof}
[Proof of theorem \ref{thm:dim}]Denote $d=\dim_{\Mink}(K)$ (recall
that this is the upper Minkowski dimension). Assume by contradiction
that $c_{l}\not\equiv0$ and without loss of generality assume that
$c_{0}=1$ (if $c_{0}=0$, shift the sequence $c_{l}$ and note that
the condition $c_{l}\to0$ ensures that $S_{n_{k}}(x)\to0$ even for
the shifted sequence).
Fix $s\in\mathbb{N}$ and let $\varphi$ be as in the proof of lemma
\ref{lem:new riemann}: let us remind the most important properties:
\begin{enumerate}
\item $\supp\varphi\cap K=\emptyset$;
\item $\supp(1-\varphi)\subset K+[-(\log^{3}s)/s,(\log^{3}s)/s]$;
\item $\varphi(x)\in[0,1]$ for all $x\in[0,1]$; and
\item $|\widehat{\varphi}(l)|\le Cs\exp\Big(-c\sqrt{(|l|\log^{3}s)/s}\Big)$.
\end{enumerate}
From this we can get a lower bound for $||S_{s}||$. From $\supp\varphi\cap K=\emptyset$
we get
\[
\sum_{l=-\infty}^{\infty}c_{l}\widehat{\varphi}(l)=0
\]
so
\[
\int S_{s}\varphi=\sum_{l=-s}^{s}c_{l}\widehat{\varphi}(l)=-\sum_{|l|>s}c_{l}\widehat{\varphi}(l)
\]
giving
\[
\Big|\int S_{s}\varphi\Big|\le\sum_{|l|>s}Cs\exp\Big(-c\sqrt{(|l|\log^{3}s)/s}\Big)\le C\exp(-c\log^{3/2}s).
\]
By assumption $\int S_{s}=c_{0}=1$ so for $s$ sufficiently large
\[
\Big|\int S_{s}(1-\varphi)\Big|=1-O(\exp(-c\log^{3/2}s))>\nicefrac{1}{2}.
\]
Using Cauchy-Schwarz gives
\begin{align*}
\nicefrac{1}{2} & <||S_{s}||\sqrt{|\supp(1-\varphi)|}\\
& \le||S_{s}||\sqrt{\left|K+\Big[-\frac{\log^{3}s}{s},\frac{\log^{3}s}{s}\Big]\right|}\le||S_{s}||\cdot\sqrt{s^{d-1+o(1)}}
\end{align*}
where in the last inequality we covered $K$ by intervals of size
$1/s$ \textemdash{} no more than $s^{d+o(1)}$ by the definition
of upper Minkowski dimension \textemdash{} and inflated each one by
$(\log^{3}s)/s$. We conclude that
\begin{equation}
||S_{s}||\ge s^{(1-d)/2+o(1)}\label{eq:lower bound}
\end{equation}
as $s\to\infty$.
In the other direction, fix some $r$ in our sequence and use (\ref{eq:dim-2})
to get:
\[
||S_{r}||^{2}\le C||S_{s}||\bigg(s^{(d-1)/2+o(1)}+\frac{||S_{r}||r\log^{4}s}{s}\bigg).
\]
Choose $s=(r||S_{r}||)^{2/(d+1)}$ (this makes the summands approximately
equal) and get
\begin{align}
\frac{||S_{s}||}{\sqrt{s}} & \ge||S_{r}||^{2}\cdot(r||S_{r}||)^{{\displaystyle \Big(-\frac{d}{d+1}+o(1)\Big)}}\nonumber \\
& \stackrel{\textrm{\ensuremath{\mathclap{{(*)}}}}}{\ge}r^{{\displaystyle \Big(-\frac{d}{d+1}+\frac{1-d}{2}\cdot\frac{d+2}{d+1}+o(1)\Big)}}\label{eq:power of r}
\end{align}
where the inequality marked by $(*)$ follows from $||S_{r}||\ge r^{(1-d)/2+o(1)}$,
which is (\ref{eq:lower bound}) with $s$ replaced by $r$. When
$d<\frac{1}{2}(\sqrt{17}-3)$ the power of the $r$ in (\ref{eq:power of r})
is positive. This means that $||S_{s}||/\sqrt{s}\to\infty$, contradicting
the boundedness of the coefficients $c_{l}$.
\end{proof}
\section{\label{sec:Localisation}Localisation with bounded coefficients}
Our last remark is that there is a version of the localisation principle
suitable even when the coefficients of the series do not converge
to zero, but are still bounded. Let us state it first
\begin{thm}
\label{thm:local}Let $c_{l}$ be bounded and $n_{k}$ some sequence
and let $\varphi$ be a smooth function. Then there exists a subsequence
$m_{k}$ and two functions $a$ and $b$ such that
\[
\varphi(x)\sum_{l=-m_{k}}^{m_{k}}c_{l}e(lx)-\sum_{l=-m_{k}}^{m_{k}}(c*\widehat{\varphi})(l)e(lx)+e^{im_{k}x}a(x)+e^{-im_{k}x}b(x)
\]
converges to zero uniformly.
Further, $a$ and $b$ have some smoothness that depends on $\varphi$
as follows:
\[
|\widehat{a}(l)|\le\sum_{|j|>|l|}|\widehat{\varphi}(j)|.
\]
and ditto for $b$.
\end{thm}
(recall that in the classic Rajchman formulation $a\equiv b\equiv0$
and $m_{k}$ can be taken to be $n_{k}$, one does not need to take
a subsequence).
\begin{proof}
Denote
\[
E_{n}(x)=\varphi(x)\sum_{l=-n}^{n}c_{l}e(lx)-\sum_{l=-n}^{n}(c*\widehat{\varphi})(l)e(lx).
\]
For $|j|>n$ only the first term appears in $\widehat{E_{n}}(j)$
and we get
\[
\widehat{E_{n}}(j)=\sum_{l=-\infty}^{\infty}c_{j-l}\widehat{\varphi}(l)\mathbbm{1}\{|j-l|\le n\}
\]
and in particular $|\widehat{E_{n}}(n+r)|\le C\sum_{s\ge r}|\widehat{\varphi}(s)|$,
and similarly for $\widehat{E_{n}}(-n-r)$. For $|l|\le n$ the second
term also appears, but since it is simply the sum without the restriction
$|j-l|\le n$ the difference takes the following simple form:
\[
\widehat{E_{n}}(j)=-\sum_{l=-\infty}^{\infty}c_{j-l}\widehat{\varphi}(l)\mathbbm{1}\{|j-l|>n\}.
\]
Again we get $|\widehat{E_{n}}(n-r)|\le C\sum_{|s|\ge r}|\widehat{\varphi}(s)|$
and similarly for $\widehat{E_{n}}(-n+r)$.
These uniform bounds for $|\widehat{E_{n_{k}}}(\pm n_{k}+r)|$ allow
us to use compactness to take a subsequence $m_{k}$ of $n_{k}$ such
that both $\widehat{E_{m_{k}}}(m_{k}+r)$ and $\widehat{E_{m_{k}}}(-m_{k}+r)$
converge for all $r$. Defining
\begin{align*}
a(x) & =-\sum_{r=-\infty}^{\infty}e(rx)\lim_{k\to\infty}\widehat{E_{m_{k}}}(m_{k}+r)\\
b(x) & =-\sum_{r=-\infty}^{\infty}e(rx)\lim_{k\to\infty}\widehat{E_{m_{k}}}(-m_{k}+r)
\end{align*}
the theorem is proved.
\end{proof}
Theorem \ref{thm:local} can be used to strengthen both theorems \ref{thm:doubly exponentially}
and \ref{thm:dim} to hold for bounded coefficients rather than for
coefficients tending to zero. But let us skip these applications and
show only how to use it to strengthen proposition \ref{prop:measure}.
\begin{thm}
Let $\mu$ be a measure and let $n_{k}$ be a series such that
\[
\lim_{k\to\infty}S_{n_{k}}(\mu;x)=0\qquad\forall x.
\]
Then $\mu=0$.
\end{thm}
\begin{proof}
Let $K$ be the support of $\mu$ and let, as in the proof of proposition
\ref{prop:measure}, $I$ be an interval such that $S_{n_{k}}(\mu)$
is bounded on $I$ and $I\cap K\ne\emptyset$. Let $\varphi$ be a
smooth function supported on all of $I$. We use theorem \ref{thm:local}
to find a subsequence $m_{k}$ of $n_{k}$ and an $a$ and a $b$
such that
\begin{equation}
\varphi S_{m_{k}}(\mu)-S_{m_{k}}(\varphi\mu)+e^{im_{k}x}a+e^{-im_{k}x}b\to0.\label{eq:a and b}
\end{equation}
This has two applications. First we conclude that $\varphi\mu\not\in L^{2}$.
Indeed, if we had that $\varphi\mu\in L^{2}$ then we would get that
$\varphi S_{m_{k}}(\mu)\to0$ pointwise while $S_{m_{k}}(\varphi\mu)\to\varphi\mu$
in measure, which can only hold if $\varphi\mu\equiv0$ (also $a$
and $b$ need to be zero, but we do not need this fact). This contradicts
our assumption that $I\cap K\ne\emptyset$ and that $\varphi$ is
supported on all of $I$.
Our second conclusion from (\ref{eq:a and b}) is that $S_{m_{k}}(\varphi\mu)$
is bounded on $I\cap K$, which is the support of $\varphi\mu$. From
here the proof continues as in the proof of proposition \ref{prop:measure}.
\end{proof}
\end{document} |
\begin{document}
\title[\tiny{Grothendieck ring of semialgebraic formulas }]{\rm Grothendieck ring of semialgebraic formulas and motivic real Milnor fibres}
\author{Georges COMTE}
\address{Laboratoire de Math\'ematiques de l'Universit\'e de Savoie, UMR CNRS 5127,
B\^atiment Chablais, Campus scientifique,
73376 Le Bourget-du-Lac cedex, France}
\email{[email protected]}
\urladdr{http://gc83.perso.sfr.fr/}
\author{Goulwen FICHOU}
\address{IRMAR, UMR 6625 du CNRS, Campus de Beaulieu, 35042 Rennes cedex, France}
\email{[email protected]}
\urladdr{http://perso.univ-rennes1.fr/goulwen.fichou/}
\begin{abstract}
We define a Grothendieck ring for basic
real semialgebraic formulas,
that is for systems of real algebraic equations and inequalities.
In this ring the class of a formula takes into consideration
the algebraic nature of the set of points satisfying this formula and this ring
contains as a subring the usual Grothendieck ring of real algebraic formulas.
We give a realization of our ring that allows us to express a class as a
${ \mathbb Z}[\frac{1}{2}]$-linear combination of
classes of real algebraic formulas, so this realization gives rise to a notion of
virtual Poincar\'e polynomial for basic semialgebraic
formulas. We then define zeta functions with coefficients in our ring, built on
semialgebraic
formulas in arc spaces. We show that they are rational
and relate them to the topology of real Milnor fibres.
\end{abstract}
\maketitle
\renewcommand{\partname}{}
\section*{Introduction}
Let us consider the category $SA({ \mathbb R})$ of real semialgebraic sets,
the morphisms being the semialgebraic maps. We denote by
$(K_0(SA({ \mathbb R})),+,\cdot)$, or simply $K_0(SA({ \mathbb R}))$,
the Grothendieck ring of $SA({ \mathbb R})$, that is to say the free ring
generated by all semialgebraic sets $A$, denoted by $[A]$ when viewed as
elements of
$K_0(SA({ \mathbb R}))$, in such a way that for all objects $A,B$ of $SA({ \mathbb R})$
one has:
$[A\times B]=[A]\cdot[B]$ and for all closed semialgebraic set $F$ in $A$
one has: $[A\setminus F]+[F]=[A]$ (this implies that for all
semialgebraic sets $A,B$, one has: $[A\cup B]=[A]+[B]-[A\cap B]$).
When furthermore an equivalence relation for semialgebraic
sets is previously considered for the definition of $K_0(SA({ \mathbb R}))$,
one has to be aware that the induced quotient ring,
still denoted for simplicity by $K_0(SA({ \mathbb R}))$, may dramatically collapse.
For instance let us consider the equivalence relation $A\sim B$ if and only if
there exists a semialgebraic bijection from $A$ to $B$.
In this case we simply say that $A$ and $B$ are isomorphic.
Then for the definition of $K_0(SA({ \mathbb R}))$, starting from
classes of isomorphic sets instead of simply sets, one obtains a quite
trivial Grothendieck ring, namely $K_0(SA({ \mathbb R}))={ \mathbb Z}$.
Indeed, denoting
$[{ \mathbb R}]$ by ${ \mathbb L}$ and $[\{*\}]$ by ${ \mathbb P}$, from the fact that
$\{*\}\times \{*\}\sim \{*\}$, one gets
$$ { \mathbb P}^k={ \mathbb P}, \ \forall k\in { \mathbb N}^*,$$
and from the fact that ${ \mathbb R}=
]-\infty, 0[ \cup \{0\} \cup ]0,+\infty[$ and that intervals
of the same type are isomorphic, one gets
$$ { \mathbb L}=-{ \mathbb P}. $$
On the other hand, by the semialgebraic cell decomposition theorem,
we obtain that a real semialgebraic set is a finite union of disjoint
open cells, each of which is isomorphic to ${ \mathbb R}^k$, with $k\in { \mathbb N}$
(with the convention that ${ \mathbb R}^0=\{*\}$). It follows that
$K_0(SA({ \mathbb R}))=<{ \mathbb P}>$, the ring generated by ${ \mathbb P}$.
At this point, the ring $<{ \mathbb P}>$ could be trivial.
But one knows that the Euler-Poincar\'e characteristic with compact
supports $\chi_c:SA({ \mathbb R})\to { \mathbb Z}$ is surjective. Let us recall that the Euler-Poincar\'e characteristic with compact
supports is a topological invariant defined on locally compact semialgebraic sets and uniquely extended to an additive
invariant on all semialgebraic sets (see for instance \cite{Coste}, Theorem 1.22). Since $\chi_c$ is additive,
multiplicative and invariant under isomorphisms, it factors through
$K_0(SA({ \mathbb R}))$, giving a surjective morphism of rings, and finally
an isomorphism of rings, still denoted for simplicity by
$\chi_c$ (cf also \cite{Q}):
\vskip0mm
$$ \shorthandoff{;:!?}
\xymatrix{
SA({ \mathbb R})\ar[d] \ar[r]^{\chi_c}& { \mathbb Z} \\
<{ \mathbb P}> =K_0(SA({ \mathbb R})) \ar[ru]_{\chi_c} & \\} $$
The characteristic $\chi_c(A)$ of a semialgebraic set $A$ is in fact
defined in the same way, that is from a specific cell decomposition of $A$,
where ${ \mathbb P}$ is replaced by $\chi_c(\{*\})=1$; this gives again
the equality $K_0(SA({ \mathbb R}))=<{ \mathbb P}>$. The difficulty in the definition of $\chi_c$ is
then to show that $\chi_c$ is
independent of the choice of the cell decomposition of $A$ (it technically
consists in showing that the definition of $\chi_c(A)$ does not depend on the
isomorphism class of $A$, see \cite{Dri} for instance).
When one starts from the category of real algebraic varieties ${\rm Var}_{ \mathbb R}$ or from
the category of
real algebraic sets ${ \mathbb R} {\rm Var}$,
as we do not have algebraic cell decompositions, we could expect
that the induced Grothendieck ring $K_0({\rm Var}_{ \mathbb R})$
is no longer trivial. This is indeed the case, since for instance
the virtual Poincar\'e polynomial morphism
factors through $K_0({\rm Var}_{ \mathbb R})$ and has image ${ \mathbb Z}[u]$ (see \cite{MCP}).
The first part of this article is devoted to the construction of
non-trivial
Gro\-then\-dieck ring $K_0(BSA_{{ \mathbb R}})$ associated to $SA({ \mathbb R})$, with a
canonical inclusion
$$K_0({\rm Var}_{ \mathbb R}) \hookrightarrow K_0(BSA_{{ \mathbb R}}),$$
that gives rise to a notion of virtual
Poincar\'e polynomial
for basic real semialgebraic formulas extending the virtual Poincar\'e polynomial
of real algebraic sets
and that allows factorization of the Euler-Poincar\'e
characteristic of real semialgebraic sets of points satisfying the formulas.
To be more precise, we first construct
$K_0(BSA_{{ \mathbb R}}))$, the Grothendieck ring of basic real semialgebraic
formulas (which are quantifier free real semialgebraic formulas
or simply systems of real algebraic equations and inequalities) where the class
of basic formulas without inequality is considered up to algebraic isomorphism
of the underlying real algebraic varieties.
In general a class in $K_0(BSA_{{ \mathbb R}})$
of a basic real semialgebraic
formula depends strongly on the formula itself rather than
only on the geometry of the real
semialgebraic set of points satisfying this formula. This construction is achieved in
Section $2$.
In order to make some computations more convenient we present a realization, denoted $\chi$,
of the ring
$K_0(BSA_{{ \mathbb R}})$ in the somewhat more simple ring $K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$,
that is a
morphism of rings
$\chi : K_0(BSA_{{ \mathbb R}}) \to K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}],$
that restricts to the identity map on $K_0({\rm Var}_{ \mathbb R})\hookrightarrow K_0(BSA_{{ \mathbb R}})$.
The morphism $\chi$ provides an explicit computation (see Proposition \ref{prop-alg})
presenting a class of
$K_0(BSA_{{ \mathbb R}})$ as a ${ \mathbb Z}[\frac{1}{2}]$-linear combination of classes of $K_0({\rm Var}_{ \mathbb R})$.
When one wants to further simplify the computation of a class of a basic real semialgebraic
formula,
one can shrink the original ring $K_0(BSA_{{ \mathbb R}})$
a little bit more from
$K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$ to $K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$, where
for instance algebraic formulas with empty set of real points have trivial class.
However as noted in point \ref{nontrivial} of Remark \ref{rmks}
the class of a basic real semialgebraic formula
with empty set of real points may be not trivial in
$K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$.
The ring $K_0(BSA_{{ \mathbb R}})$ is not defined with a prior notion of isomorphism relation
contrary to the ring $K_0({\rm Var}_{ \mathbb R})$ where algebraic isomorphism classes of varieties
are generators. Nevertheless we indicate a notion of isomorphism for basic semialgebraic
formulas that factors through $K_0(BSA_{{ \mathbb R}})$ (see Proposition \ref{iso}).
This is done in Section $2$.
The realization $ \chi : K_0(BSA_{{ \mathbb R}}) \to K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$ naturally
allows us to define in Section $4$ a notion of virtual Poincar\'e polynomial for basic real semialgebraic
formulas: for a class $[F]$ in $K_0(BSA_{{ \mathbb R}})$ that is written as a
${ \mathbb Z}[\frac{1}{2}]$-linear combination $ \sum_{i=1}^q a_i[A_i]$
of classes $[A_i] \in K_0({\rm Var}_{ \mathbb R})$ of real algebraic varieties $A_i$,
we simply define
the virtual Poincar\'e polynomial of $F$ as the corresponding
${ \mathbb Z}[\mathbb F_2rac{1}{2}]$-linear combination $ \sum_{i=1}^q a_i\beta(A_i)$ of virtual Poincar\'e
polynomials $\beta(A_i)$ of the varieties $A_i$. The virtual Poincar\'e polynomial of
$F$ is thus a polynomial $\beta(F)$ in ${ \mathbb Z}[\mathbb F_2rac{1}{2}][u]$.
It is then shown that the evaluation at $-1$ of $\beta(F)$ is the Euler-Poincar\'e
characteristic of the real semialgebraic set of points satisfying the basic formula $F$
(Proposition \ref{eval}).
These constructions are summed up in the following commutative diagram
\vskip-3mm
$$ \shorthandoff{;:!?}
\xymatrix{
Var_{ \mathbb R} \ar[d] \ar[rd] \ar@{^{(}->}[rrr]& & &BSA_{{ \mathbb R}} \ar[ddd]^{\chi_c} \ar[lld] \\
K_0({\rm Var}_{ \mathbb R}) \ar[dd]_\beta \ar@{^{(}->}[r] \hskip2mm\ar@{^{(}->}[rd]
& K_0(BSA_{{ \mathbb R}}) \ar[d]^\chi \ar[rd]^\chi& &\\
& K_0({\rm Var}_{ \mathbb R}) \otimes { \mathbb Z}[\frac{1}{2}] \ar[d]^\beta \ar[r] & K_0({ \mathbb R}{\rm Var}) \otimes { \mathbb Z}[\frac{1}{2}]
\ar[ld]^\beta& \\
{ \mathbb Z}[u] \ar@{^{(}->}[r] & { \mathbb Z}[\frac{1}{2}] [u] \ar[rr]^{u=-1}& &{ \mathbb Z} \\}
$$
\vskip0mm
The second and last part of this article concerns the real Milnor fibres of a given
polynomial function $f\in { \mathbb R}[x_1,\cdots, x_d]$.
As geometrical objects, we consider real semialgebraic Milnor fibres of the following
types: $f^{-1}(\pm c)\cap \bar B(0,\alpha) $,
$f^{-1}(]0,\pm c[)\cap \bar B(0,\alpha) $,
$f^{-1}(]0,\pm\infty[)\cap S(0,\alpha) $, for
$0<\vert c \vert \ll\alpha\ll 1$, $ \bar B(0,\alpha)$ the closed ball of
${ \mathbb R}^d$ of centre $0$ and radius $\alpha$ and $ S(0,\alpha)$ the sphere of centre $0$
and radius $\alpha$. The topological types of these fibres are easily comparable, and
in order to present a motivic version of these real semialgebraic Milnor fibres we define appropriate
zeta functions
with coefficients in $(K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}])[{ \mathbb L}^{-1}]$
(the localization of the ring $K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$ with respect to
the multiplicative set generated by ${ \mathbb L}$). As
in the complex context (see \cite{DL1}, \cite{DL2}), we prove that these zeta functions
are rational functions expressed in terms of an embedded resolution of $f$ (see Theorem
\ref{Zeta function}).
For a complex hypersurface $f$, the rationality of the corresponding zeta function
allows
the
definition of the motivic Milnor fibre $S_f$, defined as the negative of the limit at
infinity
of the rational expression of the zeta function. In the real semialgebraic case, the same
definition makes sense
but we obtain a class $S_f$ in $K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$ having a realization under
the Euler-Poincar\'e characteristic of greater
combinatorial complexity in terms of the data of the resolution of $f$ than in the
complex case. Indeed, all the strata
of the natural stratification of the exceptional divisor of the resolution of $f$ appear in
the expression of $\chi_c(S_f)$ in the real case.
Nevertheless we show that the motivic real semialgebraic Milnor fibres have
for value under the Euler-Poincar\'e characteristic morphism the Euler-Poincar\'e
characteristic
of the corresponding set-theoretic real semialgebraic Milnor fibres
(Theorem \ref{Milnor}).
In what follows we sometimes simply say {\sl measure} for
the class of an object in a given Grothendieck ring. The term {\sl inequation} refers to the symbol $\not=$,
and the term {\sl inequality} refers to the symbol $>$.
\tableofcontents
\section{The Grothendieck ring of basic semialgebraic formulas.}
\subsection{Affine real algebraic varieties.}
By an affine algebraic variety over ${ \mathbb R}$ we mean an affine reduced and
separated scheme of finite type over ${ \mathbb R}$. The category of
affine algebraic varieties over ${ \mathbb R}$ is denoted by ${\rm Var}_{ \mathbb R}$.
An affine real algebraic variety $X$ is then defined by a subset of $\mathbb A^n$ together
with a finite number of polynomial equations. Namely, there exist $P_i
\in { \mathbb R}[X_1,\ldots,X_n]$, for $i=1,\ldots,r$, such that the real points $X({ \mathbb R})$
of $X$ are given by
$$X({ \mathbb R})=\{x\in \mathbb A^n | P_i(x)=0,~i=1,\ldots,r\}.$$
A Zariski-constructible subvariety $Z$ of $\mathbb A^n$ is similarly defined by real
polynomial equations and inequations. Namely there exist $P_i, Q_j
\in { \mathbb R}[X_1,\ldots,X_n]$, for $i=1,\ldots,p$ and $j=1,\ldots,q$, such
that the real points $Z({ \mathbb R})$ of $Z$ are given by
$$Z({ \mathbb R})=\{x\in \mathbb A^n | P_i(x)=0, Q_j(x) \neq 0,~i=1,\ldots,p,~j=1,\ldots,q
\}.$$
As an abelian group, the Grothendieck ring $K_0({\rm Var}_{{ \mathbb R}})$ of affine real algebraic
varieties is formally generated by isomorphism classes $[X]$ of
Zariski-constructible real algebraic varieties,
subject to the additivity relation
$$[X]=[Y]+[X\setminus Y],$$
in case $Y\subset X$ is a closed subvariety of $X$. Here $X\setminus Y$
is the Zariski-constructible variety defined by combining
the equations and inequations that define $X$ together with the
equations and inequations obtained by
reversing the equations and inequations that define $Y$.
The product of
constructible sets induces a ring structure on $K_0({\rm Var}_{{ \mathbb R}})$. We
denote by ${ \mathbb L}$ the class in $K_0({\rm Var}_{{ \mathbb R}})$ of $\mathbb A^1$.
\subsection{Real algebraic sets.}
The real points $X({ \mathbb R})$ of an affine algebraic variety $X$ over ${ \mathbb R}$ form a real
algebraic set (in the sense of \cite{BCR}). The Grothendieck
ring $K_0({ \mathbb R}{\rm Var})$ of affine real algebraic sets
\cite{MCP} is defined in a similar way to that of real algebraic
varieties over ${ \mathbb R}$. Taking the real points of an affine real
algebraic variety over ${ \mathbb R}$ gives a ring morphism from $K_0({\rm Var}_{{ \mathbb R}})$
to $K_0({ \mathbb R}{\rm Var})$. A great advantage of $K_0({ \mathbb R}{\rm Var})$ from a geometrical point of view is that the additivity property implies that the measure of
an algebraic set without real point is zero in $K_0({ \mathbb R}{\rm Var})$.
We already know some realizations of $K_0({ \mathbb R}{\rm Var})$ in simpler rings,
such as the Euler characteristics with compact supports in ${ \mathbb Z}$ or the virtual
Poincar\'e polynomial in ${ \mathbb Z}[u]$ (cf. \cite{MCP}). We obtain therefore similar
realizations for $K_0({\rm Var}_{{ \mathbb R}})$ by composition with the realizations of
$K_0({\rm Var}_{{ \mathbb R}})$ in $K_0({ \mathbb R}{\rm Var})$.
\subsection{Basic semialgebraic formulas.}
Let us now specify the definition of the Grothendieck ring
$K_0(BSA_{{ \mathbb R}})$
of basic semialgebraic formulas. This definition is inspired by \cite{DL3}.
The ring $K_0(BSA_{{ \mathbb R}})$ will
contain $K_0({\rm Var}_{{ \mathbb R}})$ as a subring (Proposition \ref{incl}) and will be projected
on the ring $K_0({\rm Var}_{{ \mathbb R}})\otimes { \mathbb Z}[\frac{1}{2}]$
(Theorem \ref{thm-prin}) by
an explicit computational process.
A basic semialgebraic formula $A$ in $n$ variables is defined as a
finite
number of equations, inequations and inequalities, namely there exist
$P_i, Q_j, R_k
\in { \mathbb R}[X_1,\ldots,X_n]$, for $i=1,\ldots,p$, $j=1,\ldots,q$ and
$k=1,\ldots,r$, such that $A({ \mathbb R})$ is equal to the set of points
$x\in \mathbb A^n$ such that
$$P_i(x)=0, Q_j(x) \neq
0,R_k(x)>0,~i=1,\ldots,p,~j=1,\ldots,q, ~k=1,\ldots,r.$$
The relations $Q_j(x) \neq 0$ are called inequations and the relations
$R_k(x)>0$ are called inequalities.
We will simply denote a basic semialgebraic formula by
$$A=\{P_i=0, Q_j\neq 0, R_k>0,~i=1,\ldots,p,~j=1,\ldots,q, ~k=1,\ldots,r \}.$$
In particular $A$ is not characterized by its real points $A({ \mathbb R})$, that is
by the real
solutions of these equations, inequations and inequalities, but by these
equations, inequations and inequalities themselves.
We will consider basic semialgebraic formulas up to algebraic
isomorphisms, when the basic semialgebraic formulas are defined without
inequality.
\begin{remark} In the sequel, we will allow ourselves to use the notation
$\{P<0\}$ for the basic semialgebraic formula $\{-P>0\}$ and similarly $\{P>1\}$ instead of
$\{P-1>0\}$, where $P$ denotes a polynomial with real coefficients. Furthermore given two basic semialgebraic formulas $A$ and $B$, the notation $\{A,B\}$ will denote the basic formula
with equations, inequations and inequalities coming from $A$
and $B$ together.
\end{remark}
We define the Grothendieck ring $K_0(BSA_{{ \mathbb R}})$ of basic semialgebraic
formulas as the free abelian ring generated by basic semialgebraic formulas $[A]$,
up to algebraic isomorphism when the formula $A$ has no
inequality, and subject to the three following relations
\begin{enumerate}
\item (\textit{algebraic additivity}) $$[A]=[A,S=0]+[A,
\{S\neq 0\}]$$ where $A$ is a basic semialgebraic formula in $n$ variables and
$S\in { \mathbb R}[X_1,\ldots,X_n]$.
\item (\textit{semialgebraic additivity}) $$[A,R\neq 0]=[A, R>0]+
[A,-R>0]$$ where $A$ is a basic semialgebraic
formula in $n$ variables and $R\in { \mathbb R}[X_1,\ldots,X_n]$.
\item (\textit{product}) The product of basic semialgebraic formulas, defined by taking the
conjunction of the formulas with disjoint sets of free variables, induces the ring
product on
$K_0(BSA_{{ \mathbb R}})$. In other words we consider the relation
$$ [A,B]=[A]\cdot[B], $$
for $A$ and $B$ basic real semialgebraic formulas with disjoint set of variables.
\end{enumerate}
\begin{remark}\label{rmk-iso}
\begin{enumerate}
\item Contrary to the Grothendieck ring of algebraic varieties or algebraic sets,
we do not consider isomorphism classes of basic real semialgebraic formulas in
the definition of $K_0(BSA_{{ \mathbb R}})$. As a consequence the realization
we are interested in does depend in a crucial way on the
description of the basic semialgebraic set as a basic semialgebraic formula.
For instance $\{X-1>0\}$ and $\{X>0,X-1>0\}$ will have different measures.
\item One may decide to enlarge the basic semialgebraic formulas with non-strict inequalities
by imposing, by convention, that the measure of $\{ A,R \geq 0\}$, for $A$ a basic
semialgebraic
formula in $n$ variables and $R\in { \mathbb R}[X_1,\ldots,X_n]$, is the sum of the measures of
$\{A, R > 0\}$ and of $\{A, R = 0\}$.
\end{enumerate}
\end{remark}
\begin{prop}\label{incl} The natural map $i$ from $K_0({\rm Var}_{{ \mathbb R}})$ that associates to
an affine real algebraic variety its value in the Grothendieck ring $K_0(BSA_{{ \mathbb R}})$
of basic real semialgebraic formulas is an injective morphism
$$i:K_0({\rm Var}_{{ \mathbb R}}) \longrightarrow K_0(BSA_{{ \mathbb R}}).$$
\end{prop}
We therefore identify $K_0({\rm Var}_{{ \mathbb R}})$ with a subring of $K_0(BSA_{{ \mathbb R}}).$
\begin{proof} We construct a left inverse $j$ of $i$ as follows. Let
$a \in K_0(BSA_{{ \mathbb R}})$ be a sum of products of measures of
basic semialgebraic formulas. If there exist Zariski
constructible real algebraic sets $Z_1,\ldots,Z_m$ such that
$[Z_1]+\cdots+[Z_m]$ is equal to $a$ in
$K_0(BSA_{{ \mathbb R}})$, then we define the image of $a$ by
$j$ to be
$$j(a)=[Z_1]+\cdots+[Z_m] \in K_0({\rm Var}_{{ \mathbb R}}).$$
Otherwise, the image of $a$ by
$j$ is defined to be zero in $K_0({\rm Var}_{{ \mathbb R}})$.
The map $j$ is well-defined. Indeed, if $Y_1,\ldots, Y_l$ are other
Zariski constructible sets such that $[Y_1]+\cdots+[Y_l]$ is equal to
$a$ in $K_0(BSA_{{ \mathbb R}})$, then
$$[Y_1]+\cdots+[Y_l]=[Z_1]+\cdots+[Z_m]$$
in $K_0(BSA_{{ \mathbb R}})$. This equality still holds in $K_0({\rm Var}_{{ \mathbb R}})$ by
definition of the ring structure of $K_0({\rm Var}_{{ \mathbb R}})$ and the fact that $j$ defines a left inverse of $i$ is immediate.
\end{proof}
\begin{remark} Note however that the map $j$ constructed in the proof
of Proposition \ref{incl} is not a group
morphism. For instance $j([X>0])=j([X<0])=0$ whereas
$j([X\neq 0])={ \mathbb L}-1$.
\end{remark}
\section{A realization of $K_0(BSA_{{ \mathbb R}})$}
An example of a ring morphism from $K_0(BSA_{{ \mathbb R}})$ to $\mathbb Z$ is given by
the Euler characteristic with compact supports $\chi_c$. We construct in this section a
realization for elements in $K_0(BSA_{{ \mathbb R}})$ with values in the ring of
polynomials with coefficient in ${ \mathbb Z}[\frac{1}{2}]$. This realization specializes to the
Euler characteristic with compact supports. To
this aim, we construct a ring morphism from
$K_0(BSA_{{ \mathbb R}})$ to the tensor product of $K_0({\rm Var}_{{ \mathbb R}})$ with
${ \mathbb Z}[\frac{1}{2}]$.
\subsection{The realization.}
We define a morphism $\chi$ from the ring $K_0(BSA_{{ \mathbb R}})$ to the ring $K_0({\rm Var}_{{ \mathbb R}})\otimes { \mathbb Z}[\frac{1}{2}]$ as follows. Let
$A$ be a basic semialgebraic formula without inequality. We assign to $A$ its value $\chi(A)=[A]$ in $K_0({\rm Var}_{{ \mathbb R}})$ as a
constructible set. We proceed now by induction on the number of
inequalities in the description of the basic semialgebraic formulas.
Assuming that we have defined $\chi$ for basic
semialgebraic formulas with at most $k$ inequalities, $k\in { \mathbb N}$, let $A$
be a basic real semialgebraic formula with $n$ variables and at most $k$ inequalities and let us consider $R\in
{ \mathbb R}[X_1,\ldots,X_n]$. Define $\chi([A, R>0])$ by
$$\chi([A, R>0]):=\frac{1}{4}\big(\chi([A, Y^2=R])-\chi([A,
Y^2=-R])\big)+\frac{1}{2} \chi([A, R\neq 0]),$$
where $\{A, Y^2=\pm R\}$ is a basic real semialgebraic formula with $n+1$ variables,
with at most $k$ inequalities and $\{A, R\neq 0\}$ is a
basic semialgebraic formula with $n$ variables with at most $k$ inequalities.
\begin{remark}\label{rmk-ori}
The way to define $\chi$ may be seen as an average of two different
natural ways of understanding a basic semialgebraic formula as a quotient
of algebraic varieties. Namely, for a basic semialgebraic formula in
$n$ variables of the form $\{R>0\}$, we may see its set of real
points as the projection, with fibre two points, of
$\{Y^2=R\}$ minus the zero set of $R$, or as the complement
of the projection of $Y^2=-R$. The algebraic
average of these two possible points of view is
$$\frac{1}{2}\Big(\big(\frac{1}{2}[Y^2=R]-[R=0]\big)+ \big( { \mathbb L}^n-\frac{1}{2}[Y^2=-R]\big) \Big),$$
which, considering that $ { \mathbb L}^n-[R=0]= [R\not=0]$, gives for $\chi(R>0)$ the expression
just defined above.
\end{remark}
We give below the general formula that computes the measure of a basic
semialgebraic formula in terms of the measure of real algebraic varieties.
\begin{prop}\label{prop-alg} Let $Z$ be a constructible set in ${ \mathbb R}^n$ and take $R_k\in
{ \mathbb R}[X_1,\ldots,X_n]$, with $k=1,\ldots,r$. For $I\subset
\{1,\ldots,r\}$ a subset of cardinality $\sharp I=i$ and $\varepsilon \in \{\pm
1\}^i$, we denote by $R_{I,\varepsilon}$ the real constructible set defined by
$$R_{I,\varepsilon}=\{Y_j^2=\varepsilon_jR_j(X),j\in I;~~R_k(X)\neq 0, k\notin I\}.$$
Then
$\chi([Z,R_k>0,~k=1,\ldots,r])$ is equal to
$$\sum_{i=0}^r \frac{1}{2^{r+i}}\sum_{I\subset
\{1,\ldots,r\},\sharp I=i}\sum_{\varepsilon \in \{\pm 1\}^i}(\prod_{j\in I}\varepsilon_j)
[Z,R_{I,\varepsilon}]$$
\end{prop}
\begin{proof} If $r=1$ it follows from the
definition of $\chi$. We prove the general result by induction on
$r\in { \mathbb N}$. Assume $Z={ \mathbb R}^n$ to simplify notation. Take $R_k\in { \mathbb R}[X_1,\ldots,X_n]$, with
$k=1,\ldots,r+1$.
Denote by $A$ the formula $R_1>0,\ldots, R_r>0$. By definition of $\chi$ we obtain
$$\chi([A,R_{r+1}>0])=\frac{1}{4}(\chi([A, Y^2=R_{r+1}])-\chi([A, Y^2=-R_{r+1}]))+\frac{1}{2}\chi([A, R_{r+1}\neq 0]).$$
Now we can use the induction assumption to express the terms in the right-hand side of the formula above as
$$\sum_{i=0}^r \frac{1}{2^{r+i}}\sum_{I\subset
\{1,\ldots,r\},\sharp I=i}\sum_{\varepsilon \in \{\pm 1\}^i}(\prod_{j\in I}\varepsilon_j) \big(\frac{1}{4}([R_{I,\varepsilon}, Y^2=R_{r+1}]-[R_{I,\varepsilon}, Y^2=-R_{r+1}])$$
$$+\frac{1}{2}[R_{I,\varepsilon}, R_{r+1}\neq 0] \big) $$
Choose $I\subset
\{1,\ldots,r\}$ a subset of cardinality $\sharp I=i$ and $\varepsilon \in \{\pm
1\}^i$. Then, we obtain from the definition of $\chi$ that
$$\frac{1}{4}([R_{I,\varepsilon}, Y^2=R_{r+1}]-[R_{I,\varepsilon}, Y^2=-R_{r+1}])+\frac{1}{2}[R_{I,\varepsilon}, R_{r+1}\neq 0]$$
is equal to
$$\frac{1}{4}([R_{I\cup \{r+1\},\varepsilon^+}]-[R_{I\cup \{r+1\},\varepsilon^-}])+\frac{1}{2}[R_{\tilde I,\varepsilon}]$$
where $\varepsilon^+=(\varepsilon_1,\ldots,\varepsilon_r,1)$,
$\varepsilon^-=(\varepsilon_1,\ldots,\varepsilon_r,-1)$ and $\tilde I$ denotes $I$ as a
subset of $\{1,\ldots,r+1\}$. Therefore
$$\frac{1}{2^{r+i}}(\prod_{j\in I}\varepsilon_j)[R_{r+1}>0,
R_{I,\varepsilon}]$$
is equal to
$$\frac{1}{2^{(r+1)+(i+1)}}(\prod_{j\in I}\varepsilon_j)([R_{I\cup
\{r+1\},\varepsilon^+}]-[R_{I\cup
\{r+1\},\varepsilon^-}])+\frac{1}{2^{(r+1)+i}}(\prod_{j\in
I}\varepsilon_j)[R_{\tilde I,\varepsilon}]$$
which gives the result.
\end{proof}
The morphism $\chi$ is then defined on $K_0(BSA_{{ \mathbb R}})$.
\begin{thm}\label{thm-prin} The map $$\chi: K_0(BSA_{{ \mathbb R}}) \longrightarrow
K_0({\rm Var}_{{ \mathbb R}})\otimes { \mathbb Z}[\frac{1}{2}]$$ is a ring morphism that is
the identity on $K_0({\rm Var}_{{ \mathbb R}})\subset K_0(BSA_{{ \mathbb R}})$.
\end{thm}
\begin{proof} We must prove that the given definition of $\chi$ is
compatible with the algebraic and semialgebraic additivities. However the semialgebraic additivity follows directly from the definition of
$\chi$. Indeed, if $A$ is a basic semialgebraic formula and $R$ a real
polynomial, then the sum of $\chi([A ,R>0])$ and $\chi([A ,-R>0])$ is equal to
$$\frac{1}{4}\big(\chi([A,Y^2=R])-\chi([A,Y^2=-R])\big)+\frac{1}{2}\chi([A,R\neq 0])$$
$$+\frac{1}{4}\big(\chi([A,Y^2=-R])-\chi([A,Y^2=R])\big)+\frac{1}{2}
\chi([A,-R\neq 0])$$
$$=\chi([A,R\neq 0]).$$
The algebraic additivity as well as the multiplicativity follow from Proposition \ref{prop-alg} that
enables to express the measure of a basic semialgebraic formula in terms
of algebraic varieties for which additivity and multiplicativity hold.
We conclude by noting that we may construct a left inverse to $\chi$
restricted to $K_0({\rm Var}_{{ \mathbb R}})$ in the same way as in the proof of
Proposition \ref{incl}.
\end{proof}
\begin{ex}\label{ex1}
\begin{enumerate}
\item A half-line defined by $X>0$ has measure in $K_0({\rm Var}_{{ \mathbb R}})\otimes
{ \mathbb Z}[\frac{1}{2}]$ half of the value of the line minus one point, as expected, since
by definition
$$\chi([X>0])=\frac{1}{4}({ \mathbb L}-{ \mathbb L})+\frac{1}{2}({ \mathbb L}-1)=\frac{1}{2}({ \mathbb L}-1).$$
However, if we add one more inequality, like $\{X>0,X>-1\}$, then
the measure has more complexity. We will see in section \ref{sect-virt} that, evaluated in the polynomial ring ${ \mathbb Z}[\frac{1}{2}][u]$ we
obtain in that case
$$\beta([X>0,X>-1])=\frac{5u-11}{16}.$$
\item Using the multiplicativity, we find the measure of the
half-plane and the measure of the quarter
plane as expected
$$\chi([X_1>0])=\frac{1}{2}({ \mathbb L}^2-{ \mathbb L})$$
and
$$\chi([X_1>0,X_2>0])=\frac{1}{4}({ \mathbb L}-1)^2.$$
\end{enumerate}
\end{ex}
\begin{remark}\label{rmks}
\begin{enumerate}
\item Let $R\in { \mathbb R}[X_1,\ldots,X_n]$ be odd. Then
$$\chi([R>0])=\chi([R<0])=\frac{[R\neq 0]}{2}.$$
Indeed, the varieties $Y^2=R(X)$ and $Y^2=-R(X)$
are isomorphic via $X\mapsto -X$, and the result follows from the
definition of $\chi$.
\item\label{nontrivial} The ring morphism from $K_0({\rm Var}_{{ \mathbb R}})$ to
$K_0({ \mathbb R}{\rm Var})$ gives a realization from the ring $K_0(BSA_{{ \mathbb R}})$ to the ring
$K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$
for which the measure of a
real algebraic variety without real point is zero, this is why it is often
convenient to push the computations to the ring
$K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$ rather than staying at the higher level
of $K_0({\rm Var}_{{ \mathbb R}})\otimes { \mathbb Z}[\frac{1}{2}]$. However we have to notice that the
measure of a basic real semialgebraic formula without real point is not
necessarily zero in $K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$.
For instance, let us compute the measure of $X^2+1>0$ in
$K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$.
By definition of $\chi$ we obtain that $\chi([X^2+1>0])$ is equal to
$$\frac{1}{4}\big(\chi([Y^2=X^2+1])-\chi([Y^2=-X^2-1])\big)+\frac{1}{2}\chi([X^2+1
\neq 0])$$
$$=\frac{1}{4}({ \mathbb L}-1)+\frac{1}{2}{ \mathbb L}=\frac{1}{4}(3{ \mathbb L}-1).$$
By additivity we have
$$ \chi([X^2+1<0])=\chi([X^2+1\not=0])-\chi([X^2+1>0])$$
$$ ={ \mathbb L}-\chi([X^2+1=0])-\chi([X^2+1>0]). $$
But since $\chi([X^2+1=0])=0$ in $K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$,
we obtain that the measure of $\{X^2+1<0\}$ in $K_0({ \mathbb R}{\rm Var})\otimes { \mathbb Z}[\frac{1}{2}]$, whose real points set is empty, is
$$\chi([X^2+1<0])=\frac{1}{4}({ \mathbb L}+1).$$
\item In a similar way, the basic semialgebraic formula $\{P>0,-P>0\}$ with
$P(X)=1+X^2$, whose set of real points is empty, has measure
$$\chi([P>0,-P>0])=\frac{1}{8}({ \mathbb L}+1).$$
\end{enumerate}
\end{remark}
\subsection{Isomorphism between basic semialgebraic formulas}
In this section we give a condition for two basic semialgebraic formulas to have the same realization by $\chi$. It deals with the
complexification of the algebraic liftings of the basic semialgebraic
formulas.
Let $X$ be a
real algebraic subvariety of $\mathbb R^n$ defined by $P_i
\in { \mathbb R}[X_1,\ldots,X_n]$, for $i=1,\ldots,r$. The complexification
$X_{{ \mathbb C}}$ of $X$ is defined to be the complex algebraic subvariety of
${ \mathbb C}^n$ defined by the same polynomials $P_1,\ldots,P_r$. We define
similarly the complexification of a real algebraic map.
Let $Y\subset { \mathbb R}^n$ be a Zariski
constructible subset of ${ \mathbb R}^n$ and take $R_1,\ldots,R_r
\in { \mathbb R}[X_1,\ldots,X_n]$. Let $A$ denote the basic
semialgebraic formula of ${ \mathbb R}^n$ defined by $Y$ together with the inequalities
$R_1>0,\ldots,R_r>0$, and $V$ denote the Zariski constructible subset
of ${ \mathbb R}^{n+r}$ defined by
$$V=\{Y,Y_1^2=R_1,\ldots,Y_r^2=R_r\}.$$
Note that $V$ is endowed with an action
of $\{\pm 1\}^r$ defined by multiplication by $-1$ on the
indeterminates $Y_1,\ldots,Y_r$.
Let $Z\subset { \mathbb R}^n$ be a Zariski constructible subset of ${ \mathbb R}^n$ and
take similarly $S_1,\ldots,S_r
\in { \mathbb R}[X_1,\ldots,X_n]$. Let $B$ denote the basic
semialgebraic formula of ${ \mathbb R}^n$ defined by $Z$ together with the inequalities
$S_1>0,\ldots,S_r>0$, and $W$ denote the Zariski constructible subset
of ${ \mathbb R}^{n+r}$ defined by
$$W=\{Z,Y_1^2=S_1,\ldots,Y_r^2=S_r\}.$$
\begin{definition}
We say that the basic semialgebraic formulas $A$ and $B$ are isomorphic
if there exists a real algebraic isomorphism $\phi:V \longrightarrow
W$ between $V$ and $W$ which is equivariant with respect to the action
of $\{\pm 1\}^r$ on $V$ and $W$, and whose complexification
$\phi_{{ \mathbb C}}$ induces a complex algebraic isomorphism between the
complexifications $V_{{ \mathbb C}}$ and $W_{{ \mathbb C}}$ of $V$ and $W$.
\end{definition}
\begin{remark}\label{rmk-1} Let us consider first the particular case $Y={ \mathbb R}^n$, $Z={ \mathbb R}^n$ and $r=1$.
Change moreover the notation as
follows. Put $V^+=V$ and $W^+=W$, and define $V^-=\{y^2=-R(x)\}$ and
$W^-=\{y^2=-S(x)\}$.
Then the complex points $V_{\mathbb C}^+$ and $V_{\mathbb C}^-$ of
$V^+$ and $V^-$ are isomorphic via the complex (and not real)
isomorphism $(x,y)\mapsto (x,iy)$. Now, suppose that the basic
semialgebraic formula $\{R>0\}$ is isomorphic to $\{S>0\}$. Let $\phi=(f,g):(x,y)\mapsto (f(x,y),g(x,y))$ be the real isomorphism involved in the definition (that is $f$ and $g$ are defined by real equations,
and moreover $f(x,-y)=f(x,y)$ and $g(x,-y)=-g(x,y)$). Then the following diagram
$$ \begin{matrix}
V^+_{\mathbb C} &\buildrel{(f,g)}\over \longrightarrow & W^+_{\mathbb C}\\
\buildrel{(x,y)\mapsto (x,iy)}\over \downarrow & & \buildrel{(x,y)\mapsto (x,iy)}\over \downarrow \\
V^-_{\mathbb C} & & W^-_{\mathbb C}\\
\end{matrix} $$
induces a complex isomorphism $(F,G)$ between $ V^-_{\mathbb C}$ and $W^-_{\mathbb C}$ given by
$$(x,y) \mapsto (f(x,-iy),ig(x,-iy)).$$
In fact, this isomorphism is defined over $\mathbb R$ since
$$\overline {F(x,y)}=\overline {f(x,-iy)}=f(\overline x, \overline {-iy})=f(\overline x,i \overline y)=f(\overline x,-i \overline y)=F(\overline x,\overline y)$$ and
$$\overline {G(x,y)}=\overline {ig(x,-iy)}=-ig(\overline x, \overline
{-iy})=-ig(\overline x,i \overline y)=ig(\overline x,-i \overline
y)=G(\overline x,\overline y),$$
where the bar denotes complex conjugation. Therefore it induces a real
algebraic isomorphism between $V^-$ and $W^-$.
Moreover $g(x,0)=-g(x,0)$ so $g(x,0)=0$ and then the real algebraic sets $\{R=0\}$ and $\{S=0\}$ are also isomorphic.
\end{remark}
\begin{prop}\label{iso} If the basic semialgebraic formulas $A$ and $B$ are
isomorphic, then $\chi([A])=\chi([B])$.
\end{prop}
\begin{proof} Thanks to Proposition \ref{prop-alg}, we only need to prove that
the real algebraic varieties $R_{I,\varepsilon}$ corresponding to $A$ and
$B$ are isomorphic two by two, which is a direct generalization of
Remark \ref{rmk-1}.
\end{proof}
\section{Virtual Poincar\'e polynomial}
\subsection{Polynomial realization}\label{sect-virt}
The best realization known (with respect to the highest
algebraic complexity of the realization ring) of the Grothendieck ring of real algebraic varieties is
given by the virtual Poincar\'e polynomial \cite{MCP}. This
polynomial, whose coefficients coincide with the Betti numbers with
coefficients in $\frac{{ \mathbb Z}}{2{ \mathbb Z}}$ when sets are compact and nonsingular,
has coefficient in ${ \mathbb Z}$. As a corollary of Theorem \ref{thm-prin} we
obtain the following realization of $K_0(BSA_{{ \mathbb R}})$ in ${ \mathbb Z}[\frac{1}{2}][u]$.
\begin{prop} There exists a ring morphism
$$\beta:K_0(BSA_{{ \mathbb R}}) \longrightarrow { \mathbb Z}[\frac{1}{2}][u]$$
whose restriction to $K_0({\rm Var}_{{ \mathbb R}})\subset K_0(BSA_{{ \mathbb R}})$ coincides with
the virtual Poincar\'e polynomial.
\end{prop}
The interest of such a realization is that it enables to make concrete computations.
\begin{ex}
\begin{enumerate}
\item The virtual Poincar\'e polynomial of the open disc $X_1^2+X_2^2<1$ is equal to
$$\frac{1}{4}\big(\beta([Y^2=1-(X_1^2+X_2^2)])-\beta([Y^2=X_1^2+X_2^2-1])\big)+
\frac{1}{2}\beta([X_1^2+X_2^2\neq 1])$$
$$=\frac{1}{4}(u^2+1-u(u+1))+\frac{1}{2}(u^2-u-1)=\frac{1}{4}(2u^2-3u-1).$$
\item Let us compute the measure of the formula $X>a,X>b$ with $a\neq b \in \mathbb R$. By Proposition \ref{prop-alg}, we are led to compute the virtual Poincar\'e polynomial of the real algebraic subsets of $\mathbb R^3$ defined by $\{y^2=\pm (x - a),~~z^2=\pm (x - b)\}$. These sets are isomorphic to $\{y^2 \pm z^2 = \pm (a - b)\}$, and we recognise either a circle, a hyperbola or the empty set.
In particular, using the formula in Proposition \ref{prop-alg}, we obtain
$$\beta([X>a,X>b])=\frac{1}{16}(2(u-1)-(u+1))+\frac{1}{8}(2u-2u)+\frac{1}{8}(2-2)+\frac{1}{4}(u-2)=\frac{5u-11}{16}$$
\end{enumerate}
\end{ex}
\begin{remark} In case the set of real points of a basic semialgebraic
formula is a real algebraic set (or even an arc symmetric set \cite{KK,F}), its
virtual Poincar\'e polynomial does not coincide in general with the
virtual Poincar\'e polynomial of the real algebraic set. For
instance, the basic semialgebraic formula $X^2+1>0$, considered in
Remark \ref{rmks}, has virtual Poincar\'e
polynomial equal to
$\frac{1}{4}(3u-1)$ whereas its set of points is a real line, whose
virtual Poincar\'e polynomial as a real algebraic set equals $u$.
\end{remark}
Evaluating $u$ at an integer gives another realization, with
coefficient in ${ \mathbb Z}[\frac{1}{2}]$.
The virtual Poincar\'e polynomial of a real algebraic variety, evaluated
at $u=-1$, coincides with its Euler characteristic with compact
supports \cite{MCP}. Indeed, evaluating the virtual Poincar\'e
polynomial of a basic semialgebraic formula gives also the Euler characteristic with compact
supports of its set of real points, and therefore has its values in ${ \mathbb Z}$.
\begin{prop}\label{eval} The virtual Poincar\'e
polynomial $\beta(A)$ of a basic semialgebraic formula $A$ is equal to
the Euler characteristic with compact supports of its set of real
points $A({ \mathbb R})$
when evaluated at $u=-1$. In other words
$$\beta(A)(-1)=\chi_c(A({ \mathbb R})).$$
\end{prop}
\begin{proof} We recall that in Proposition \ref{prop-alg} we explain how to express the class of $A$ as a
linear combination
of classes of real algebraic varieties for which the virtual Poincar\'e polynomial evaluated
at $u=-1$ coincides with the Euler characteristic with compact
supports. At each step of our inductive process to obtain such a linear combination, we introduce a new
variable and a double covering of the set of points satisfying one less inequality. The inductive formula
$$\chi([B, R>0]):=\frac{1}{4}\big(\chi([B, Y^2=R])-\chi([B,
Y^2=-R])\big)+\frac{1}{2} \chi([B, R\neq 0]),$$
used at this step to eliminate one inequality by replacing the system $\{B,R>0\}$ by other systems $\{B, Y^2=R\}, \{B, Y^2=-R\},
\{B, R\neq 0\}$ is compatible with the Euler characteristic of the underlying sets of points, that is to say that
our induction formula is true for $\chi=\chi_c$. The geometric reason for this fact is explained in Remark \ref{rmk-ori}, and is
the intuitive motivation to define the realization $\chi$ by induction precisely as it is defined.
\end{proof}
\subsection{Homogeneous case}
We propose some computations of the virtual Poin\-ca\-r\'e polynomial of
basic real semialgebraic formulas of the form $\{R>0\}$ where $R$ is
homogeneous. Looking at Euler characteristic with compact supports, it
is equal to the product of the Euler characteristics with compact
supports of $\{X>0\}$ with
$\{R=1\}$. We investigate the case of virtual Poincar\'e polynomial. A
key point in the proofs will be the invariance of the virtual Poincar\'e polynomial
of constructible sets under regular homeomorphisms (see \cite{MCP2}, Proposition 4.3).
\begin{prop}\label{homo-odd} Let $R\in { \mathbb R}[X_1,\ldots,X_n]$ be a homogeneous polynomial
of degree $d$. Assume $d$ is odd. Then
$$\beta([R>0])=\beta([X>0])\beta([R=1]).$$
\end{prop}
\begin{proof} The algebraic varieties defined by $Y^2=R(X)$ and
$Y^2=-R(X)$ are isomorphic since $R(-X)=-R(X)$, therefore
$$\beta([R>0])=\frac{\beta([R\neq 0])}{2}.$$
The map $(\lambda,x)\mapsto \lambda x$ from $\mathbb R^* \times
\{R=1\}$ to $\{R\neq 0\}$ is a regular homeomorphism with inverse $y
\mapsto (R(y)^{1/d}, \frac{y}{R(y)^{1/d}})$ therefore
$$\beta([R\neq 0])=\beta(\mathbb R^*)\beta([R=1]),$$
so that
$$\beta([R>0])=\mathbb F_2rac{\beta(\mathbb R^*)}{2}\beta([R=1])=\beta([X>0])\beta([R=1]).$$
\end{proof}
The result is no longer true when the degree is even. However, in the
particular case of the square of a homogeneous polynomial of odd
degree, the relation of Proposition \ref{homo-odd} remains valid.
\begin{prop} Let $P\in { \mathbb R}[X_1,\ldots,X_n]$ be a homogeneous polynomial
of degree $k$. Assume $k$ is odd, and define $R\in
{ \mathbb R}[X_1,\ldots,X_n]$ by $R=P^2$. Then
$$\beta([R>0])=\beta([X>0])\beta([R=1]).$$
\end{prop}
\begin{proof} Note first that $\{Y^2-R\}$ can be factorized as
$(Y-P)(Y+P)$ therefore the virtual Poincar\'e polynomial of
$Y^2-R$ is equal to
$$\beta(Y-P=0 )+\beta(Y+P=0)-\beta(P=0).$$
However the algebraic varieties $Y-P=0$ and $Y+P=0$ are
isomorphic to a $n$-dimensional affine space, whereas $Y^2+R=0$
is isomorphic to $P=0$ since $R=P^2$ is positive, so
that the virtual Poincar\'e polynomial of $R>0$ is equal to
$$\frac{1}{4}(2\beta(\mathbb
R^n)-2\beta([P=0]))+\frac{1}{2}\beta([P\neq 0])=\beta([P\neq 0]).$$
To compute $\beta([P\neq 0])$, note that the map
$(\lambda,x)\mapsto \lambda x$ from
$\mathbb R^* \times \{P=1\}$ to $\{P\neq 0\}$ is a regular homeomorphism with inverse $y
\mapsto (P(y)^{1/k}, \frac{y}{P(y)^{1/k}})$ therefore
$$\beta([P\neq 0])=\beta(\mathbb R^*)\beta([P=1]).$$
We achieve the proof by noticing that $R-1=(P-1)(P+1)$ so that
$\beta([P=1])=\frac{\beta([R=1])}{2}$ because the degree of the
homogeneous polynomial $P$ is odd.
Finally
$$\beta([R>0])=\frac{\beta(\mathbb R^*)}{2}\beta([R=1])$$
and the proof is achieved.
\end{proof}
More generally, for a homogeneous polynomial $R$ of degree twice an odd number, we can
express the
virtual Poincar\'e polynomial of $[R>0]$ in terms of that of
$[R=1]$, $[R=-1]$ and $[R\neq 0]$ as follows.
\begin{prop}\label{homo-even} Let $k\in \mathbb N$ be odd and put $d=2k$.
Let $R\in { \mathbb R}[X_1,\ldots,X_n]$ be a homogeneous polynomial
of degree $d$. Then
$$\beta([R>0])=\frac{1}{4}\beta(\mathbb R^*)(\beta([R=1])-\beta([R=-1]))+\frac{1}{2}\beta([R\neq 0]).$$
\end{prop}
\begin{ex} We cannot do better in general as illustrated by the
following examples. For $R_1=X_1^2+X_2^2$ one obtains
$$\beta([R_1>0])=\frac{3}{2}\beta([X>0])\beta([R_1=1])$$
whereas for $R_2=X_1^2-X_2^2$ one has
$$\beta([R_2>0])=\beta([X>0])\beta([R_2=1]).$$
\end{ex}
The proof of Proposition \ref{homo-even} is a direct consequence of the next lemma.
\begin{lemma} Let $k\in \mathbb N$ be odd and put $d=2k$. Let $R\in { \mathbb R}[X_1,\ldots,X_n]$ be a homogeneous polynomial
of degree $d$. Then
$$\beta([Y^2=R])=\beta([R= 0])+\beta(\mathbb R^*)\beta([R=1]).$$
\end{lemma}
\begin{proof} Note first that the algebraic varieties $Y^2=R$ and
$Y^d=R$ have the same virtual Poincar\'e polynomial. Indeed the
map $(x,y)\mapsto (x,y^k)$ realizes a regular homeomorphism between
$Y^2=R$ and
$Y^d=R$, whose inverse is given by $(x,y)\mapsto (x,y^{1/k})$.
However the polynomial $Y^d-R$ being homogeneous, we obtain a regular homeomorphism
$$\mathbb R^* \times (\{R=1\}\cap \{Y^d=R\}) \longrightarrow \{R\neq 0\}\cap \{Y^d=R\}$$
defined by $(\lambda,x,y) \mapsto (\lambda x,\lambda y)$. As a consequence
$$\beta([Y^d-R=0])=\beta([R= 0])+\beta(\mathbb R^*)\beta([R=1]).$$
\end{proof}
\section{Zeta functions and Motivic real Milnor fibres}
We apply in this section the preceding construction of
$\chi : K_0(BSA_{{ \mathbb R}})\to K_0({\rm Var}_{ \mathbb R})\otimes
{ \mathbb Z}[\frac{1}{2}]$ in defining, for a given polynomial $f\in { \mathbb R}[X_1,\cdots, X_d]$,
zeta functions whose coefficients are classes
in
$(K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}])[{ \mathbb L}^{-1}]$
of real semialgebraic formulas in truncated arc spaces.
We then show that these
zeta functions are deeply related to the topology of some
corresponding set-theoretic real semialgebraic Milnor fibres of~$f$.
\subsection{Semialgebraic zeta functions and real Denef-Loeser formulas.}
Let $f:{ \mathbb R}^d \to { \mathbb R}$ be a polynomial function with coefficients in ${ \mathbb R}$ sending $0$
to $0$.
We denote by ${\cal L}$ or $\cal L({ \mathbb R}^d,0)$ the space of formal arcs
$\gamma(t)=(\gamma_1(t), \cdots, \gamma_d(t))$ on ${ \mathbb R}^d$, with $\gamma_j(0)=0$ for all
$j\in \{1, \cdots, d\}$, by $\cal L_n$ or $\cal L_n({ \mathbb R}^d,0)$ the space of
truncated arcs ${\cal L}/(t^{n+1})$ and by $\pi_n : \cal L \to \cal L_n$
the truncation map. More generally, for $M$ a variety and $W$ a closed subset of $M$,
$\cal L(M,W)$ (resp. $\cal L_n(M,W)$) will denote the
space of arcs on $M$ (resp. the $n$-th jet-space on $M$)
with endpoints in $W$.
Let $\varepsilon$ be one of the symbols in the set
$\{ \hbox{\sl naive}, -1, 1, >, < \}$.
For such a symbol $\varepsilon$, via the
realization of $K_0(BSA_{{ \mathbb R}})$ in
$K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$, we
define a zeta function $Z_f^\varepsilon(T)\in (K_0({\rm Var}_{ \mathbb R})\otimes
{ \mathbb Z}[\frac{1}{2}])[{ \mathbb L}^{-1}][[T]]$ by
$$ Z_f^\varepsilon(T):=\sum_{n\ge 1}\ [X_{n,f}^\varepsilon]{ \mathbb L}^{-nd}T^n,$$
where $X_{n,f}^\varepsilon$ is defined in the following way:
\vskip0,3cm
- $X_{n,f}^{naive}=\{\gamma \in {\cal L}_n;
\ f(\gamma(t))=at^n+\cdots, a\not=0\}$,
\vskip0,1cm
- $X_{n,f}^{-1}=\{\gamma \in {\cal L}_n;
\ f(\gamma(t))=at^n+\cdots, a=-1\}$,
\vskip0,1cm
- $X_{n,f}^{1}=\{\gamma \in {\cal L}_n;
\ f(\gamma(t))=at^n+\cdots, a=1\}$,
\vskip0,1cm
- $X_{n,f}^>=\{\gamma \in {\cal L}_n;
\ f(\gamma(t))=at^n+\cdots, a >0\}$,
\vskip0,1cm
- $X_{n,f}^<=\{\gamma \in {\cal L}_n;
\ f(\gamma(t))=at^n+\cdots, a<0\}$.
\vskip3mm
Note that $X_{n,f}^\varepsilon$ is a real algebraic variety for $\varepsilon = -1$ or $1$,
a real algebraic constructible set for $\varepsilon =naive$
and a semialgebraic set, given by an explicit description involving one inequality, for $\varepsilon$ being the symbol $ >$ or the symbol $<$.
Consequently,
$Z_f^\varepsilon (T)\in K_0({\rm Var}_{ \mathbb R})[{ \mathbb L}^{-1}][[T]]$ for $\varepsilon \in \{naive, -1, 1\}$ and
$Z_f^\varepsilon (T)\in (K_0({\rm Var}_{ \mathbb R})\otimes
{ \mathbb Z}[\frac{1}{2}])[{ \mathbb L}^{-1}][[T]]$ for $\varepsilon \in \{ > , <\}$.
We show in this section that $Z_f^\varepsilon(T)$ is a rational function expressed in terms
of the combinatorial data of a resolution of $f$. To define those data let us consider
$\sigma : (M,\sigma^{-1}(0))\to
({ \mathbb R}^d,0)$ a proper birational map which is an isomorphism over the complement
of $\{f=0\}$ in $({ \mathbb R}^d,0)$, such that $f\circ \sigma$ and the jacobian determinant
$\jac\ \sigma$ are normal crossings and $\sigma^{-1}(0)$ is a union of components of the exceptional divisor. We denote by
$ E_j$, for $ j\in \cal J$, the irreducible components of
$(f\circ \sigma)^{-1}(0)$ and
assume that $E_k$ are the irreducible components of
$\sigma^{-1}(0)$ for $k\in \cal K \subset \cal J$. For $j\in \cal J$
we denote by $N_j$ the multiplicity
$mult_{E_j}f\circ \sigma $ of
$f\circ \sigma$ along $E_j$ and for $k\in \cal K$ by $\nu_k$ the number $\nu_k=
1+mult_{E_k}\jac \ \sigma$. For any $I\subset \cal J$, we put
$E^0_I=(\bigcap_{i\in I} E_i)\setminus (\bigcup_{j\in \cal J\setminus I}E_j)$.
These sets $E^0_I$ are constructible sets and the collection $(E_I^0)_{I\subset \cal J}$ gives a canonical stratification of the divisor
$f\circ \sigma=0$, compatible with $\sigma=0$ such that in some affine open subvariety $U$
in $M$ we have $f\circ \sigma (x)=u(x) \prod_{i\in I} x_i^{N_i}$, where
$u$ is a unit, that is to say a rational function which does not vanish on $U$, and
$x=(x',(x_i)_{i\in I})$ are local coordinates.
Finally for $\varepsilon \in \{-1, 1, >,<\}$ and $I\subset \cal J$,
we define $\widetilde E^{0,\varepsilon}_I$ as the gluing along $E^0_I$ of the sets
$$ R_U^\varepsilon=\{ (x,t)\in (E^0_I\cap U)\times { \mathbb R}; \ t^m \cdot u(x)\ ?_\varepsilon \ \},$$
where $?_\varepsilon$ is $=-1$, $=1$, $>0$ or $<0$ in case $\varepsilon$ is $-1,1, >$ or
$< $ and $m=gcd_{i\in I}(N_i)$.
\begin{remark}\label{gluing} The definition of the $R_U^\varepsilon$'s is
independent of the choice of the coordinates, as well as
the gluing of the $R^\varepsilon_U$ is allowed, up to isomorphism, since when in some Zariski neighborhood of
$E^0_I$ one has in another coordinate system $z=z(x)=(z', (z_i)_{i\in I})$ the expression $f\circ \sigma (z)= v(z)\prod_{i\in I}
z_i^{N_i}$, there exist non-vanishing functions $\alpha_i$
so that $z_i=\alpha_i(z)\cdot x_i$. We thus obtain
$v(z)\prod_{i\in I}\alpha_i^{N_i}(z)=u(x)$, and the transformation
$$ \begin{matrix}
& \{(x,t)\in (E_I^0\cap U)\times { \mathbb R}; t^m \cdot u(x)\ ?_{\varepsilon} \} & \to &
\{(z,s)\in (E_I^0\cap U)\times { \mathbb R}; s^m\cdot v(z) \ ?_{\varepsilon} \} \\
&
(x,t) & \mapsto & (z,s=t\prod_{i\in I}\alpha_i(z)^{N_i/m})
\\
\end{matrix} $$
is an isomorphism in case $?_\varepsilon$ is $=1$ or $=-1$, and induces an isomorphism between the associated double covers
$\cal R^\varepsilon_U =\{(x,t,y)\in (E^0_I\cap U)\times { \mathbb R}\times { \mathbb R}; t^m\cdot u(x)\cdot y^2= \eta(\varepsilon) \} $ and
$\cal R^{'\varepsilon}_U=\{(z,s,w)\in (E_I^0\cap U)\times { \mathbb R}\times { \mathbb R}; s^m\cdot v(z)\cdot w^2=\eta(\varepsilon)\} $, with $\eta(\varepsilon)=1$ when $\varepsilon$ is the symbol $>$
and $\eta(\varepsilon)=-1$ when $\varepsilon$ is the symbol $<$, the induced isomorphism simply being
$$ \begin{matrix}
&
\cal R^\varepsilon_U & \to &
\cal R^{'\varepsilon}_U
\\
&
(x,t,y) & \mapsto & (z,s, w=y ).
\\
\end{matrix}$$
Also notice that $\widetilde E_I^{0,\varepsilon}$ is a constructible set when
$\varepsilon$ is $-1$ or $1$ and a semialgebraic set with explicit description
over the constructible set $E_I^0$ when $\varepsilon$ is $<$ or $>$.
\end{remark}
We can thus
define the class $[\widetilde E_I^{0,\varepsilon}]\in \chi(K_0(BSA_{{ \mathbb R}}))$ as follows. Choosing a finite covering $(U_l)_{l\in L}$ of $M$ by affine open subvarieties $U_l$, for $l\in L$, we set
$$[\widetilde E_I^{0,\varepsilon}]=\sum _{S\subset L} (-1)^{|S|+1}[R^\varepsilon_{\cap_{s\in S}U_{s}}].$$
The class $[\widetilde E_I^{0,\varepsilon}]$ does not depend on the choice of the covering thanks to Remark \ref{gluing} and the algebraic additivity in $K_0(BSA_{{ \mathbb R}})$.
With this notation one can give the expression of $Z_f^\varepsilon(T)$ in terms of
$[\widetilde E^{0,\varepsilon}_I]$, as,
for instance, in \cite{DL1}, \cite{DL2}, \cite{DL4}, \cite{Loo}, essentially
using the Kontsevitch change of variables formula in motivic integration (\cite{Kon},
\cite{DL2} for instance).
\begin{theorem}\label{Zeta function}
With the notation above, one has
$$Z_f^\varepsilon(T)= \sum_{I\cap \cal K \not= \emptyset}({ \mathbb L}-1)^{\vert I\vert -1 }[\widetilde E^{0,\varepsilon}_I ]\prod_{i\in I}
\frac{{ \mathbb L}^{-\nu_i}T^{N_i}}{1-{ \mathbb L}^{-\nu_i}T^{N_i}}$$
for $\varepsilon$ being $-1,1,>$ or $<$.
\end{theorem}
\begin{remark} Classically, the right hand side of the equality
of Theorem
\ref{Zeta function} does not depend, as a formal series
in $(K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}])[{ \mathbb L}^{-1}][[T]]$, on the
choice of the resolution $\sigma$,
as the definition of $Z_f^\varepsilon(T)$ does not itself depend on
any choice of resolution.
\end{remark}
To prove this theorem, we first start with a lemma that needs the following notation. We denote by
$$\sigma_*: \cal L(M,\sigma^{-1}(0))\to \cal L({ \mathbb R}^d,0),$$ and for $n\in { \mathbb N}$, by
$$\sigma_{n,*} : \cal L_n(M,\sigma^{-1}(0))\to \cal L_n({ \mathbb R}^d,0)$$
the natural mappings induced by $\sigma : (M,\sigma^{-1}(0)) \to ({ \mathbb R}^d,0)$. Let
$$Y_{n,f}^\varepsilon=\pi_n^{-1}(X_{n,f}^\varepsilon).$$
Then $Y_{n,f\circ \sigma}^\varepsilon= \{\gamma\in \cal L(M,\sigma^{-1}(0)); \
f(\sigma(\pi_n(\gamma)))(t)=at^n+\cdots, \ a \ ?_\varepsilon \}$, where $?_\varepsilon$ is $=-1$, $=1$, $>0$ or $<0$ in case $\varepsilon$ is $-1,1, >$ or
$< $, and note also that $Y_{n,f\circ \sigma}^\varepsilon = \sigma_*^{-1}(Y_{n,f}^\varepsilon)$.
Finally for $e\ge 1$, let
$$\Delta_e=\{ \gamma\in \cal L (M,\sigma^{-1}(0)); \ mult_t \ (\jac \ \sigma)(\gamma(t))=e \}
\hbox{ and } Y_{e,n,f\circ \sigma}^\varepsilon=Y_{n,f\circ \sigma}^\varepsilon\cap \Delta_e.$$
\begin{lem}\label{lemme calculatoire}
With the notation above, there exists $c\in { \mathbb N}$ such that
$$ Z_f^\varepsilon(T)
= \displaystyle { \mathbb L}^d\sum_{n\ge 1} T^n
\sum_{e\le cn} { \mathbb L}^{-e}\sum_{I\not=\emptyset}{ \mathbb L}^{-(n+1)d} [{\cal L}_n(M,E_I^0\cap \sigma^{-1}(0)) \cap \pi_n(\Delta_e)
\cap X_{n,f\circ \sigma}^\varepsilon].$$
\end{lem}
\end{lem}
\begin{proof}
As usual in motivic integration, the class of the cylinder $Y_{n,f}^\varepsilon=\pi_n^{-1}(X_{n,f}^\varepsilon)$, $n\ge 1$, is
an element of $(K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}])[{ \mathbb L}^{-1}]$,
the localization of the ring $K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$ with respect to
the multiplicative set generated by ${ \mathbb L}$, and defined
by $[Y_{n,f}^\varepsilon]:={ \mathbb L}^{-(n+1)d}[X_{n,f}^\varepsilon]$, since the truncation morphisms $\pi_{k+1,k}:\cal L_{k+1}({ \mathbb R}^d,0)\to \cal L_k({ \mathbb R}^d,0)$, $k\ge 1$, are locally trivial
fibrations with fibre ${ \mathbb R}^d$. Hence $Z_f^\varepsilon(T)= \displaystyle { \mathbb L}^d\sum_{n\ge 1} [Y_{n,f}^\varepsilon]T^n$.
Take now $\gamma \in \sigma_*^{-1}(Y_{n,f}^\varepsilon)$,
and let $I\subset \cal J$ such that $\gamma(0)\in E_I^0$. In some neighbourhood of
$E_I^0$, one has coordinates such that $f\circ \sigma (x)=u(x)\prod_{i\in I}x_i^{N_i}$ and $\jac(\sigma)(x)=v(x)\prod_{i\in I}x_i^{\nu_i-1}$, with $u$ and
$v$ units.
If one denotes $\gamma=(\gamma_1, \cdots, \gamma_d)$ in these coordinates, with $k_i$ the multiplicity of $\gamma_i$ at $0$ for $i\in I$,
then we have
$mult_t(f\circ \sigma\circ \gamma(t))=\sum_{i\in I} k_i N_i=n$. Now
$$mult_t(\jac \sigma)(\gamma(t))=\sum_{i\in I}k_i(\nu_i-1)\le \max_{i\in I} (\frac{\nu_i-1}{N_i})\sum_{i\in I}N_ik_i
=\max_{i\in I} (\frac{\nu_i-1}{N_i})n.$$
Therefore if one sets $c= \max_{i\in I} (\frac{\nu_i-1}{N_i})$, one has
$$Y_{n,f\circ \sigma}^\varepsilon=\bigcup_{e\ge 1}Y^\varepsilon_{e,n,f\circ \sigma} =\bigcup_{1\le e\le cn} Y^\varepsilon_{e,n,f\circ \sigma}, $$
as disjoint unions. Now we can apply the change of variables theorem (see \cite{DL2}, \cite{Kon}) to compute $[Y^\varepsilon_{n,f}]$ in terms of
$[Y^\varepsilon_{e,n,f\circ \sigma}]$:
$$ [Y^\varepsilon_{n,f}]= \sum_{e\le cn} { \mathbb L}^{-e}[Y^\varepsilon_{e,n,f\circ \sigma}], $$
and summing over the subsets $I$ of $\cal J$, as $Y^\varepsilon_{e,n,f\circ \sigma}$ is the disjoint union
$$\bigcup_{I\not =\emptyset} Y^\varepsilon_{e,n,f\circ \sigma}
\cap \pi_0^{-1}(E_I^0\cap \sigma^{-1}(0)),$$
we obtain
$$Z_f^\varepsilon(T)= \displaystyle { \mathbb L}^d\sum_{n\ge 1} [Y_{n,f}^\varepsilon]T^n=\displaystyle { \mathbb L}^d\sum_{n\ge 1} T^n
\sum_{e\le cn} { \mathbb L}^{-e}\sum_{I\not=\emptyset}
[Y^\varepsilon_{e,n,f\circ \sigma} \cap \pi_0^{-1}
(E_I^0\cap \sigma^{-1}(0))]$$
$$=\displaystyle { \mathbb L}^d\sum_{n\ge 1} T^n
\sum_{e\le cn} { \mathbb L}^{-e}\sum_{I\not=\emptyset} { \mathbb L}^{-(n+1)d}
[\pi_n(Y^\varepsilon_{e,n,f\circ \sigma} \cap \pi_0^{-1}
(E_I^0\cap \sigma^{-1}(0)))]= $$
$$=\displaystyle { \mathbb L}^d\sum_{n\ge 1} T^n
\sum_{e\le cn} { \mathbb L}^{-e}\sum_{I\not=\emptyset}{ \mathbb L}^{-(n+1)d} [{\cal L}_n(M,E_I^0\cap \sigma^{-1}(0)) \cap \pi_n(\Delta_e)
\cap X_{n,f\circ \sigma}^\varepsilon].$$
\end{proof}
\vskip5mm
\begin{proof}[Proof of Theorem \ref{Zeta function} ]
Considering the expression of $Z_f^\varepsilon(T)$ given by Lemma \ref{lemme calculatoire},
we have to compute the class of $[{\cal L}_n(M,E_I^0\cap \sigma^{-1}(0)) \cap \pi_n(\Delta_e)
\cap X_{n,f\circ \sigma}^\varepsilon]$. For this we notice that on
some neighbourhood $U$ of the end point
$\gamma(0)\in E_I^0\cap \sigma^{-1}(0)$,
one has coordinates such that
$$ f\circ \sigma (x)=u(x)\prod_{i\in I}x_i^{N_i} \hbox{ and }
\jac(\sigma)(x)=v(x)\prod_{i\in I}x_i^{\nu_i-1},$$
with $u$ and $v$ units.
As a consequence ${\cal L}_n(M,E_I^0\cap U\cap \sigma^{-1}(0)) \cap \pi_n(\Delta_e) \cap X_{n,f\circ \sigma}^\varepsilon$ is isomorphic to
$$\{\gamma\in {\cal L}_n(M,\sigma^{-1}(0)); \gamma(0)\in E_I^0\cap U\cap \sigma^{-1}(0), \sum_{i\in I}N_i k_i=n, \sum_{i\in I}
k_i(\nu_i-1)=e,$$
$$ f\circ \sigma (\gamma(t))=at^n+\cdots, a \ ?_\varepsilon \},$$
where $?_\varepsilon$ is $=-1$, $=1$, $>0$ or $<0$ in case $\varepsilon$ is $-1,1, >$ or
$< $ and $k_i$ is the multiplicity of $\gamma_i$ for $i\in I$.
Now denoting by $A(I,n,e)$ the set
$$A(I,n,e):=\{k=(k_1, \cdots,k_d)\in { \mathbb N}^d ; \sum_{i\in I}N_i k_i=n, \sum_{i\in I} k_i(\nu_i-1)=e \},$$
and identifying for simplicity
$x$ and $((x_i)_{i\not\in I},(x_i)_{i\in I})$,
the set
$${\cal L}_n(M,E_I^0\cap U\cap \sigma^{-1}(0)) \cap \pi_n(\Delta_e) \cap X_{n,f\circ \sigma}^\varepsilon$$
is isomorphic to the product
$$({ \mathbb R}^n)^{d-\vert I\vert}\times \bigcup_{k\in A(I,n,e)} \{ x\in (E_I^0\cap U \cap \sigma^{-1}(0))\times ({ \mathbb R}^*)^{\vert I\vert };
u((x_i)_{i\not\in I},0) \prod_{i\in I} x_i^{N_i}\ ?_\varepsilon\}
\times \prod_{i\in I}({ \mathbb R}^{n-k_i})$$
Indeed, denoting $\gamma=(\gamma_1, \dots, \gamma_d)$ by
$\gamma_i(t)=a_{i,0}+\cdots + a_{i,n}t^n$ for $i\not \in I$
and $\gamma_i(t)=a_{i,k_i}t^{k_i}+\cdots + a_{i,n} t^n$ for $i\in I$, an arc of $\cal L_n(M,E_I^0\cap U \cap \sigma^{-1}(0))$, the first
factor of the product comes from the free choice of the coefficients $a_{i,j}$, $i\not\in I$, $j=1, \cdots, n$, the last factor of the product
comes from the free choice of the coefficients $a_{i,j}$, $i\in I$, $j= k_i+1, \dots, n $
and the middle factor of the product comes from the choice of the coefficients $a_{i,0}\in E_I^0\cap U \cap \sigma^{-1}(0)$, $i\not\in I$ and from
the choice of the coefficients $a_{i,k_i}$, $i\in I$, subject to $f\circ \sigma (\gamma(t))=u(\gamma(t))\prod_{i\in I}\gamma^{N_i}_i(t)=
u((a_{i,0})_{i\not\in I},0) (\prod_{i\in I}a_{i,k_i}^{N_i})t^n+\cdots=at^n+\cdots, a\ ?_\varepsilon$.
\vskip2mm
We now choose $n_i\in { \mathbb Z}$ such that $\sum_{i\in I}n_iN_i=m=gcd_{i\in I}(N_i)$ and consider the two semialgebraic sets
$$W_U^\varepsilon = \{ x\in (E_I^0\cap U \cap \sigma^{-1}(0))\times ({ \mathbb R}^*)^{\vert I\vert };
u((x_i)_{i\not\in I},0) \prod_{i\in I} x_i^{N_i}\ ?_\varepsilon\}$$
and
$$W_U^{'\varepsilon}= \{ (x',t)\in (E_I^0\cap U \cap \sigma^{-1}(0))\times ({ \mathbb R}^*)^{\vert I\vert }\times { \mathbb R}^*; u((x'_i)_{i\not\in I},0)t^m\ ?_\varepsilon, \
\prod_{i\in I}x_i'^{N_i/m}=1 \}, $$
where $?_\varepsilon$ is $=-1$, $=1$, $>0$ or $<0$ in case $\varepsilon$ is $-1,1, >$ or
$< $.
In case $?_\varepsilon=1$ or $?_\varepsilon=-1$, the mapping
$$ \begin{matrix} &W^{'\varepsilon}_U &\to &W^\varepsilon_U
\\
&(x',t) &\mapsto &x=((x'_i)_{i\not\in I}, (t^{n_i}x'_i)_{i\in I} )\\
\end{matrix}$$
is an isomorphism of inverse
$$ \begin{matrix} &W_U^\varepsilon&\to &W_U^{'\varepsilon}
\\
&x &\mapsto &(x'=((x_i)_{i\not\in I},
((\prod_{\ell\in I} x_\ell^{N_\ell/m})^{-n_i}x_i)_{i\in I}),
t=\prod_{\ell\in I} x_\ell^{N_\ell/m} ). \\
\end{matrix}$$
In the semialgebraic case, this isomorphism induces a natural isomorphism
on the double-covers $\cal W_U^\varepsilon$ and $\cal W_U^{'\varepsilon}$
associated to $ W_U^\varepsilon$ and $W_U^{'\varepsilon}$ and defined by
$$\cal W_U^\varepsilon=\{(x,y)\in (E_I^0\cap U \cap \sigma^{-1}(0))\times
({ \mathbb R}^*)^{\vert I\vert }\times { \mathbb R};
y^2u((x'_i)_{i\not\in I},0)\prod_{i\in I} x_i^{N_i}
=\eta(\varepsilon) \}$$ and
$$\cal W_U^{'\varepsilon}=\{ (x,t,w)\in (E_I^0\cap U \cap \sigma^{-1}(0))\times ({ \mathbb R}^*)^{\vert I\vert }\times { \mathbb R}^*\times { \mathbb R};$$
$$w^2u((x'_i)_{i\not\in I},0)t^m =\eta(\varepsilon), \
\prod_{i\in I}x_i'^{N_i/m}=1\},$$
where $\eta(\varepsilon)=1$ when $\varepsilon$ is the symbol $>$ and
$\eta(\varepsilon)=-1$ when $\varepsilon$ is the symbol $<$. In consequence, $[W_U^\varepsilon]=[W_U^{'\varepsilon}]$ in the algebraic
case ($\varepsilon=-1$ or $1$) as well as in the semialgebraic case ($\varepsilon=<$ or $>$) considering our realization formula for basic semialgebraic
formulas in
$ K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$.
Now we observe in the case where $\varepsilon$ is $-1$ or $1$ that $W_U^{'\varepsilon}$ is isomorphic to
$R^\varepsilon_U \times ({ \mathbb R}^*)^{\vert I\vert -1}$ (see \cite{DL4}, Lemma 2.5) whereas in the case where
$\varepsilon$ is $<$ or $>$, we obtain that the class of $W_U^{'\varepsilon}$ is equal to the class of
$R^\varepsilon_U \times ({ \mathbb R}^*)^{\vert I\vert -1}$, considering again the double coverings associated to the
basic semialgebraic formulas defining these two sets.
We finally obtain
$$[{\cal L}_n(M,E_I^0\cap \sigma^{-1}(0)) \cap \pi_n(\Delta_e)
\cap X_{n,f\circ \sigma}^\varepsilon]=\displaystyle\sum_{k\in A(I,n,e)}
{ \mathbb L}^{nd-\sum_{i\in I}k_i}[W_U^{'\varepsilon}]=$$
$$\displaystyle\sum_{k\in A(I,n,e)}
{ \mathbb L}^{nd-\sum_{i\in I}k_i}\times[R_U^{\varepsilon}]\times
({ \mathbb L}-1)^{\vert I\vert -1}.$$
Summing over the charts $U$, the expression of $Z_f^\varepsilon(T)$ given by Lemma \ref{lemme calculatoire} is now
$$ Z_f^\varepsilon(T)=\displaystyle \sum_{I\cap \cal K \not= \emptyset} { \mathbb L}^d\sum_{n\ge 1} T^n
\sum_{e\le cn} { \mathbb L}^{-e}({ \mathbb L}-1)^{\vert I \vert-1 }{ \mathbb L}^{-(n+1)d}
[{\widetilde E}_I^{0,\varepsilon}]\displaystyle\sum_{k\in A(I,n,e)} { \mathbb L}^{nd-\sum_{i\in I}k_i}$$
$$=\displaystyle \sum_{I\cap \cal K \not= \emptyset} ({ \mathbb L}-1)^{\vert I \vert-1 }
[{\widetilde E}_I^{0,\varepsilon} ]
\sum_{n\ge 1} T^n
\sum_{e\le cn}\sum_{k\in A(I,n,e)} { \mathbb L}^{-e-\sum_{i\in I}k_i}$$
Noticing that the $(k_i)_{i\in I}$'s such that $k=((k_i)_{i\not\in I},
(k_i)_{i\in I})\in \displaystyle \bigcup_{e\le cn, n\ge 1} A(I,n,e)$ are
in bijection with ${ \mathbb N}^{*\vert I \vert }$, we have
$$Z_f^\varepsilon(T)=
\displaystyle \sum_{I\cap \cal K\not= \emptyset} ({ \mathbb L}-1)^{\vert I \vert-1 }
[{\widetilde E}_I^{0,\varepsilon}]
\sum_{(k_i)_{i\in I}\in ({ \mathbb N}^*)^{\vert I \vert }}
\prod_{i\in I}({ \mathbb L}^{-\nu_i}T^{N_i})^{k_i}$$
$$= \displaystyle \sum_{I\cap \cal K\not= \emptyset} ({ \mathbb L}-1)^{\vert I \vert-1 }
[{\widetilde E}_I^{0,\varepsilon}]
\prod_{i\in I}\frac{{ \mathbb L}^{-\nu_i}T^{N_i}}{1-{ \mathbb L}^{-\nu_i}T^{N_i}}. $$
\end{proof}
\end{proof}
\subsection{Motivic real Milnor fibres and their realizations.}
We can now define a motivic real Milnor fibre by taking the constant term
of the rational function $Z_f^\varepsilon(T)$ viewed as a power series in $T^{-1}$.
This process formally consists in letting $T$ go to $\infty$ in the rational
expression of $Z_f^\varepsilon(T)$ given by Theorem \ref{Zeta function} and using
the usual computation rules as in the convergent case (see for instance \cite{DL1}, \cite{DL4}).
\begin{definition}\label{real D-L}
Let $f:{ \mathbb R}^d\to { \mathbb R}$ be a polynomial function and $\varepsilon$ be one of the symbols $naive, 1, -1, >$ or $<$.
Consider a resolution of $f$ as above and let us adopt the same notation $(E_I^0)_I$ for the stratification of the exceptional
divisor of this resolution, leading to the notation $\widetilde E_I^{0,\varepsilon}$.
The real motivic
Milnor $\varepsilon$-fibre $S^\varepsilon_f$ of $f$ is defined as (see \cite{DL4} for the complex case)
$$ S^\varepsilon_f:=-\lim_{T\to \infty} Z^\varepsilon_f(T):=\displaystyle
-\sum_{I\cap \cal K\not= \emptyset} (-1)^{\vert I \vert}
[{\widetilde E}_I^{0,\varepsilon}]({ \mathbb L}-1)^{\vert I \vert-1 }
\in K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}].$$
It does not depend on the choice of the resolution $\sigma$.
\end{definition}
For $\varepsilon$ being the symbol $1$ for instance, we have $S_f^1\in K_0({\rm Var}_{ \mathbb R})$.
We can consider, first in the complex case, the realization
of $S_f^1$ via the Euler-Poincar\'e characteristic ring morphism $\chi_c : K_0({\rm Var}_{ \mathbb C})\to { \mathbb Z}$. Note that in the complex case, the Euler characteristics with and without compact supports are equal. For $f:{ \mathbb C}^d\to { \mathbb C}$, since $\chi_c({ \mathbb L}-1)=0$, we obtain
$$\chi_c(S_f^1)=\displaystyle\sum_{\vert I\vert=1, I\subset \cal K}
\chi_c({\widetilde E}_I^{0,1}) =\displaystyle\sum_{\vert I\vert=1, I\subset \cal K}
N_I\cdot \chi_c(E_I^0\cap \sigma^{-1}(0)).$$
Now denoting by
$F$ the set-theoretic Milnor fibre of the fibration $f_{\vert B(0,\alpha)\cap f^{-1}(D^\times_\eta)}:
B(0,\alpha)\cap f^{-1}(D^\times_\eta)
\to D_\eta^\times $, with $B(0,\alpha)$ the open ball in ${ \mathbb C}^d$ of radius $\alpha$
centred at
$0$, $D_\eta$ the disc in ${ \mathbb C}$
of radius $\eta$ centred at $0$ and $D_\eta^\times=D_\eta\setminus\{0\} $, with
$0<\eta\ll \alpha\ll 1$,
comparing the above expression $\chi_c(S_f^1)=\displaystyle
\sum_{\vert I\vert=1, I\subset \cal K}
N_I\cdot \chi_c(E_I^0)$ with the following A'Campo formula of \cite{ACA}
for the first Lefschetz number of the iterates of the monodromy
$M:H^*(F,{ \mathbb C})\to H^*(F,{ \mathbb C})$ of $f$,
that is for the Euler-Poincar\'e characteristic of the fibre $F$:
$$ \chi_c(F)=\displaystyle\sum_{\vert I\vert=1, I\subset \cal K} N_I\cdot
\chi_c(E_I^0\cap \sigma^{-1}(0)) $$
we simply observe that
$$\chi_c(S_f^1)= \chi_c(F).$$
The closure $f^{-1}(c)\cap \bar B(0,\alpha)$, $0<\vert c\vert \ll \alpha \ll 1$, of the
Milnor fibre $F$ being denoted by $\bar F$ and the boundary of $\bar F$
being the odd dimensional compact
manifold $f^{-1}(c)\cap S(0,\alpha)$,
$\chi_c(f^{-1}(c)\cap S(0,\alpha) )=0$ and we finally have
$$\chi_c(S_f^1)=\chi_c(F)=\chi_c(\bar F).$$
\begin{remark} There is {\sl a priori} no hint in the definition of $Z_f^\varepsilon(T)$
that the opposite of the constant term $S_f^1$ of the power series in $T^{-1}$ induced by the
rationality of $Z_f^\varepsilon(T)$
could be the motivic version of the Milnor fibre of $f$ (as well
as, for instance, there is no evident hint that the expression of $Z_f^\varepsilon$ in Theorem \ref{Zeta function}
does not depend on the resolution $\sigma$).
As mentioned above, in the complex case, we just observe that the
expression of $\chi_c(S_f^1)$ is the expression of $\chi_c(F)$ provided
by the A'Campo formula. Exactly in the same way there is no
{\sl a priori} reason for $\chi_c(S_f^\varepsilon)$, regarding the definition of $Z_f^\varepsilon$, to be so accurately related to
the topology of $f^{-1}(\varepsilon \vert c \vert)\cap B(0,\alpha)$. Nevertheless we prove
that it is actually the case (Theorem \ref{Milnor}).
\end{remark}
In order to establish this result we start hereafter by a
geometrical proof of the formula in the complex
case (compare with \cite{ACA} where only $\Lambda(M^0)$ is considered, $M^k$ being
the $k$th iterate of the monodromy $M:H^*(F,{ \mathbb C})\to H^*(F,{ \mathbb C})$ of $f$).
We will then extend to the reals this computational proof in
the proof of Theorem \ref{Milnor}, allowing us to interpret the complex proof
as the first complexity level of its real extension.
\begin{remark} Note that in the complex case a proof of the fact that $\Lambda(M^k)=\chi_c(X^1_{k,f})$,
for $k\ge 1$, is given in \cite{HL}
without the help of resolution of singularities, that is to say without the help of
A'Campo's formulas (see Theorem 1.1.1 of \cite{HL}). As a direct corollary it is thus proved that $\chi_c(S^1_f)=\chi_c(F)$
in the complex case, without using
A'Campo formulas.
\end{remark}
\vskip5mm
\noindent
{\bf Realization of the complex motivic Milnor fibre under $\chi_c$. }
The fibre $F=\{ f=c\}\cap B(0,\alpha)$ is homeomorphic to the fibre
$\cal F=\{f\circ \sigma = c\}\cap \sigma^{-1}(B(0,\alpha))$, with $\sigma^{-1}(S(0,\alpha))$ viewed as the boundary of a
tubular neighbourhood
of $\sigma^{-1}(0)=\bigcup_{E_J^0\subset \sigma^{-1}(0)}E_J^0$,
keeping the same notation $(E_J^0)_J$ as before for the natural stratification of the strict transform $\sigma^{-1}(\{f=0\})$ of $f=0$.
Now the formula may be established for
$\cal F$ in some chart of $M\cap \sigma^{-1}(B(0,\alpha))$, by additivity.
In such a chart, where $f\circ \sigma$ is normal crossing, we consider
\begin{enumerate}
\item[-] the set $E_J=\bigcap_{i\in J}E_i \subset \sigma^{-1}(0)$,
given by $x_i=0$, $ i\in J$,
\item[-] a closed small enough tubular neighbourhood $V_J$ in $M$ of
$\bigcup_{J\subset K, K\not= J} E^0_K$, that is
a tubular neighbourhood of all the $E^0_K$'s bounding $E^0_J$, such that
$E_J^0\setminus V_J$ is homeomorphic to $E_J^0$,
\item[-] and $\pi_J$ the projection onto $E_J$ along the
$x_j$'s coordinates, for $j\in J$.
\item[-] an open neighbourhood $\cal E_J$
of $E_J^0\setminus V_J$ in $\sigma^{-1}(B(0,\alpha)) $ given by
$\pi_J^{-1}(E_J^0\setminus V_J), \vert x_j\vert \le \eta_J$,
$j\in J $, with $\eta_J>0$ small enough,
\end{enumerate}
\begin{remark}\label{Thom}
For $I=\{i \}$, we remark that $\cal F \cap \cal E_I$ is homeomorphic
to $N_i$ copies of $E_I^0\cap \cal E_I$, and thus to $N_i$ copies of
$E_I^0$. Indeed, assuming $f\circ \sigma = u(x)x_i^{N_i}$
in $\cal E_I$, we observe that the family $(f_t)_{t\in [0,1]}$, with
$f_t=u((x_j)_{j\not\in I}, t\cdot x_i) x_i^{N_i}-c$,
has homeomorphic fibres $\{f_t=0\}\cap \cal E_J$,
$t\in [0,1]$, by Thom's isotopy lemma, since
$$\frac{\partial f_t}{\partial x_i}(x)=t\frac{\partial u}{\partial x_i}(x)
x_i^{N_i}+
u(x) x_i^{N_i-1}=0, $$
would imply $\displaystyle t\frac{\partial u}{\partial x_i}(x)x_i+
u(x)=0$. But the first term in this sum goes to $0$ as $x_i$ goes to $0$, since the
derivatives of $u$ are
bounded on the compact $adh(\cal E_I)$, although the norm of the
second term is bounded from below on $\cal E_I$ by a non zero constant, since $u$ is a unit.
Finally, as
$\{ f_1=0\}\cap \cal E_I$ is homeomorphic to $\{ f_0=0\}\cap \cal E_I$ and
$\{ f_0=0\}\cap \cal E_I$
is a $N_i$-graph
over $E_I^0\cap \cal E_I$, $\cal F\cap \cal E_I$ is homeomorphic to $N_i$ copies of $E_I^0$.
\end{remark}
By this remark, $\cal F$ covers maximal dimensional stratum $E_I^0$,
$\vert I \vert=1$, $I \subset \cal K$, with $N_i$ copies of a leaf $\cal F_I$ of $\cal F$.
To be more accurate, with the notation introduced above,
$\cal F_I$ covers the neighbourhood $E_I^0 \cap \cal E_I$ of $E_I^0\setminus V_I$.
Moreover the $\cal F_I$'s overlap in $\cal F$
over the open set $E_J^0 \cap \cal E_J$ of the strata $E^0_J$ that bound
the $E^0_I$'s, for $\vert I \vert =1$, $\vert J\vert =2$
and $I \subset J$, in bundles over the $E_J^0 \cap \cal E_J$'s of fibre ${ \mathbb C}^*$.
Those sub-leaves
$\cal F_J$ of $\cal F$
overlap in turn over the open $E_Q^0 \cap \cal E_Q$ of the strata $E_Q^0$,
$\vert Q\vert=3, J\subset Q $,
that bound the $E_J^0$'s, in bundles over the $E_Q^0 \cap \cal E_Q$'s of fibres
$({ \mathbb C}^*)^2$ and so forth...
For instance when $f\circ \sigma = u(x)\prod_{i\in I}x_i^{N_i}$ in
$\cal E_I$, $I=\{i\}$,
and
$f\circ \sigma = v(x) x_i^{N_i}x_j^{N_j}$ in $\cal E_J$, $J=\{i,j\}$, the $N_i$ leaves
$\cal F_I$, homeomorphic to the $N_i$ copies $x_i^{N_i}=c /u(x)$ of $E_I^0$, overlap
over $E_J^0 \cap \cal E_J$
in sub-leaves $\cal F_J$ of $\cal F_I$, given by $v(x)x_i^{N_i}x_j^{N_j}=c$, fibering over $E_J^0$ with
fibre $GCD(\{N_i,N_j\})$ copies of $({ \mathbb C}^*)^{\vert J\vert -1}$ and so forth... (see figure 1).
\vskip1,0cm
\hskip1,5cm \includegraphics[height=8cm]{fig1.eps}
\vskip-7,7cm
\hskip11,4cm$f\circ \sigma=c$
\vskip-0,4cm
\hskip1,0cm $\cal F_{I'}$
\vskip0,8cm
\hskip0,9cm $E^0_K$
\vskip0,5cm
\hskip1,0cm $\cal F_K$
\vskip0,8cm
\hskip1,1cm $\cal F_J$
\vskip-0,5cm
\hskip11,7cm $E^0_I$
\vskip0,7cm
\hskip10,9cm $\cal F_I$
\vskip-0,2cm
\hskip0,9cm $E^0_J $
\vskip3,0cm
\centerline{figure 1}
\vskip1cm
\begin{remark}\label{retract}
Note that the topology of $\cal F=\{f\circ \sigma =c\} \cap \sigma^{-1}(B(0,\alpha))$
is the same as the topology of
$\bigcup_{J\cap \cal K \not=0}\cal F_J$ (that is the topology of $\cal F$ above the strata
$E^0_J$ of $\sigma^{-1}(0)$) since
the retraction of $\cal F$ onto $\bigcup_{J\cap \cal K \not=\emptyset} \cal F_J$,
as $\alpha$ goes to $0$, induces
a homeomorphism from $\cal F$ to $\bigcup_{J\cap \cal K \not=\emptyset} \cal F_J$.
\end{remark}
From Remark \ref{retract}, by additivity, it follows that the Euler-Poincar\'e characteristic of $\cal F$ (in our chart) is the sum
$$\displaystyle\sum_{\vert I\vert=1, I\subset \cal K}
N_I\cdot \chi_c(E_I^0\cap \sigma^{-1}(0)) + L,\eqno(*)$$ where $L$ is some ${ \mathbb Z}$-linear
combination of
Euler-Poincar\'e characteristics of bundles
over the open sets $E_J\cap \cal E_J^0$, $\vert J \vert>1 $, of fibre a power of tori ${ \mathbb C}^*$. Now the A'Campo formula
$$ \chi_c(F)=\displaystyle\sum_{\vert I\vert=1, I\subset \cal K}
N_I\cdot \chi_c(E_I^0\cap \sigma^{-1}(0))$$
for
the Milnor number follows from the fact that $\chi_c({ \mathbb C}^*)=0$ implies $L=0$.
\vskip5mm
\noindent
{\bf Realization of the real motivic Milnor fibres under $\chi_c$. }
The partial covering of $\cal F$ by the pieces $\cal F_J$, for
$J\cap \cal K \not= \emptyset$,
over the strata of the stratification $(E^0_J)_{J\cap \cal K \not= \emptyset}$ of
$\sigma^{-1}(0)$ allows us to compute the Euler-Poincar\'e characteristic
of the Milnor fibre $\cal F$ in terms of the Euler-Poincar\'e characteristic of
the strata $E_J^0$, in the complex as well as in the real case. In the complex case, as noted above, for $J$ with $\vert J\vert >1$,
one has $\chi_c(\cal F_J)=0$. This cancellation provides a quite simple formula for
$\chi_c(F)$: only the strata of the maximal
dimension of the divisor $\sigma^{-1}(0)$ appear in this formula, as expected
from the A'Campo formula.
In the real case one does not have such cancellations: on the one hand
the expression of $\chi_c(F)$
in terms of $\chi_c(\widetilde E_J^{0,\varepsilon})$ is no longer trivial
(the remaining term $L$ of equation $(*)$ is not zero and consequently terms
$\chi_c(\widetilde E_J^{0,\varepsilon})$, for $\vert J\vert >1 $ and
$E_J \cap \sigma^{-1}(0)\not=\emptyset$, appear), and on the other hand the
expression
of $\chi_c(S_f^\varepsilon)$ given by the real Denef-Loeser formula in Definition
\ref{real D-L} has terms $2^{\vert J \vert -1}\chi_c(\widetilde E_J^{0,\varepsilon})$,
for $\vert J\vert >1 $ and $J\cap \cal K \not=\emptyset $
(since $\chi_c({ \mathbb L}-1) =-2$ in the real case).
Nevertheless, in the real case we show that
$\chi_c(S_f^\varepsilon)$ is again $\chi_c(\bar F)$, justifying the terminology of
motivic real semialgebraic Milnor fibre of $f$ at $0$ for $S_f^\varepsilon$. The formula
stated in Theorem \ref{Milnor} below
is the real analogue of the A'Campo-Denef-Loeser formula for complex hypersurface
singularities and thus appears as the extension to the
reals of this complex formula, or, in other words, the complex formula is notably the
first level
of complexity of the more general real formula.
\begin{notation}\label{Khim}
Let $f: { \mathbb R}^d\to { \mathbb R}$ be a polynomial function such that $f(0)=0$ and with isolated
singularity at $0$, that is $\grad f(x)=0$ only for $x=0$ in some open neighbourhood of
$0$.
Let $0<\eta \ll \alpha$ be such that the topological type of
$f^{-1}(c)\cap B(0,\alpha)$ does
not depend on $c$ and $\alpha$, for $0<c<\eta$ or for $-\eta<c<0$.
- Let us denote, for $\varepsilon\in \{-1,1\}$ and $\varepsilon\cdot c>0$, this topological type by $F_\varepsilon$, by $\bar F_\varepsilon$ the topological type of the closure of the Milnor fibre $F_\varepsilon$ and by
$Lk(f)$ the link $f^{-1}(0)\cap S(0,\alpha)$ of $f$ at the origin.
We recall that the topology of $Lk(f)$ is the same as the topology of the boundary
$f^{-1}(c)\cap S(0,\alpha)$ of the Milnor fibre $\bar F_\varepsilon$, when $f$ has an
isolated singularity at $0$.
- Let us denote, for $\varepsilon\in \{<,>\}$, the topological type
of $f^{-1}(]0,c_\varepsilon[)\cap B(0,\alpha)$ by $F_\varepsilon$, and
the topological type of $f^{-1}(]0,c_\varepsilon[)\cap \bar B(0,\alpha)$
by $\bar F_\varepsilon$,
where $c_<\in ]-\eta, 0[$ and $c_>\in ]0,\eta[$.
- Let us denote, for $\varepsilon\in \{<,>\}$, the topological type
of $\{f \ \bar\varepsilon \ 0\}\cap S(0,\alpha)$ by $G_\varepsilon$, where
$\bar\varepsilon$ is $\le$ when $\varepsilon$ is $<$ and $\bar\varepsilon$ is $\ge$
when $\varepsilon$ is $>$.
\end{notation}
\begin{remark}\label{bord}
When $d$ is odd, $Lk(f)$ is a smooth odd-dimensional submanifold of ${ \mathbb R}^d$ and
consequently $\chi_c(Lk(f))=0$. For
$\varepsilon \in \{-1, 1, <, >\}$, we thus have in this situation,
$\chi_c(F_\varepsilon)=\chi_c(\bar F_\varepsilon)$. This is the situation in the complex
setting. When $d$ is even and for $\varepsilon \in \{-1, 1\}$, since
$\bar F_\varepsilon$ is a compact manifold with boundary
$Lk(f)$, one knows that
$$\chi_c(\bar F_\varepsilon)=-\chi_c (F_\varepsilon)=\frac{1}{2}\chi_c(Lk(f)).$$
For general $d\in { \mathbb N}$ and for $\varepsilon \in \{-1, 1, <, >\}$, we thus have
$$ \chi_c(\bar F_\varepsilon)=(-1)^{d+1}\chi_c (F_\varepsilon).$$
On the other hand we recall that for $\varepsilon \in \{ <, >\}$
$$ \chi_c(G_\varepsilon )=\chi_c(\bar F_{\delta_\varepsilon}),$$
where $\delta_>$ is $1$ and $\delta_<$ is $-1$
(see \cite{Ar}, \cite{Wa}).
\end{remark}
\begin{theorem}\label{Milnor}
With notation \ref{Khim}, we have, for $\varepsilon\in \{-1, 1, <, >\}$
$$ \chi_c(S_f^\varepsilon)=\chi_c(\bar F_\varepsilon)=(-1)^{d+1}\chi_c (F_\varepsilon), $$
and for $\varepsilon\in \{<, >\}$
$$ \chi_c(S_f^\varepsilon)=-\chi_c(G_\varepsilon).$$
\end{theorem}
\begin{proof} Assume first that $\varepsilon\in \{-1, 1\}$. We denote
by $\cal F$ the fibre
$\sigma^{-1}(F_\varepsilon)$ and recall that $\cal F$ and $F_\varepsilon$ have the same
topological type.
Let us denote $\bar{\cal K}$ the set of multi-indices
$J\subset \cal I$ such that
$\bar E_J \cap \sigma^{-1}(0)\not=\emptyset$, with
$\bar E_J$ the closure of $E_J=\bigcap_{i\in J} E_i$. In what follows
only $J\in\bar{\cal K}$ are concerned,
since we study the
local Milnor fibre at $0$.
The proof consists in the computation
of the Euler-Poincar\'e characteristic of $\cal F$ using the decomposition of $\cal F$ by the overlapping components $\cal F_I$
introduced just before figure 1 and illustrated on figure 1. We simply count the number of these overlapping components in the decomposition of $\cal F$ they provide.
Note that a connected component of $E^0_J$ (still denoted $E_J^0$ for simplicity in the sequel), for $J\subset \cal J$, is covered by
$n_J:=M_J\cdot 2^{\vert J\vert -1}$ connected components $\cal G$
of $\cal F$, where $M_J$ is
$0$, $1$ or $2$ depending on whether the multiplicity $m_J=\gcd_{j\in J}(N_j)$ defining $\widetilde E^{0,\varepsilon}_J$ is
odd or even, and on the sign condition on $c$ (remember from figure 1 how $E^0_J$ is covered by $\cal F_J$. Here the term covered simply means that one can naturally project the component $\cal F_J$ onto $E^0_J$).
Note furthermore that $M_J$ is the degree of the covering $\widetilde E_J^{0,\varepsilon}$ of $E_J^0$.
Now expressing a connected component $\cal G$ of $\cal F$ as the union
$\displaystyle\bigcup_{\vert I \vert=1, \cal F_I\subset \cal G} \cal F_I$, where the
(connected) leaves $\cal F_I$
cover (the open subset $E^0_I\cap \cal E_I^0$ of $E^0_I$ homeomorphic to)
$E^0_I$,
and using the additivity of $\chi_c$, one has
that $\chi_c(\cal G)$ is expressed as a sum of characteristics of the overlapping
connected sub-leaves
$\cal F_J$ of the $\cal F_I$'s, each of them with sign coefficient
$s_J:=(-1)^{\vert J \vert -1 }$ .
Note that (a connected component of)
$E_J^0$ is covered by $n_J$ copies of such
a $\cal F_J$, coming from the $n_J$ connected components of $\cal F$ above $E_J^0 \cap \cal E_J^0$,
and that a connected sub-leaf $\cal F_J$ has the topology of
$(E_J^0\cap \cal E_J^0)\times { \mathbb R}^{\vert J\vert -1}$. We denote by $t_J$ the characteristic
$t_J:=\chi_c({ \mathbb R}^{\vert J\vert -1})=(-1)^{\vert J\vert -1}$.
With this notation, summing over all the connected components $\cal G$ of $\cal F$, one
gets
$$\chi_c(\cal F)=\sum_{J\in \bar{\cal K}}
s_J \times n_J \times \chi_c(E^0_J)\times t_J$$
$$=\sum_{J\in \bar{\cal K}}
(-1)^{\vert J \vert -1} \times 2^{\vert J\vert -1}M_J \times \chi_c(E^0_J)
\times (-1)^{\vert J \vert -1}$$
$$=
\sum_{J\in \bar{\cal K}} 2^{\vert J\vert-1}
\chi_c(\widetilde E_J^{0,\varepsilon}) $$
$$=\sum_{J\cap\cal K\not=\emptyset} 2^{\vert J\vert-1} \chi_c(\widetilde E_J^{0,\varepsilon})
+ \sum_{J\cap\cal K=\emptyset, J\in \bar{\cal K}} 2^{\vert J\vert-1} \chi_c(\widetilde E_J^{0,\varepsilon}) $$
$$ = \chi_c(S_f^\varepsilon)
+\sum_{J\cap\cal K=\emptyset,J\in \bar{\cal K}} 2^{\vert J\vert-1}
\chi_c(\widetilde E_J^{0,\varepsilon})$$
$$ = \chi_c(S_f^\varepsilon)
+\chi_c(\bigcup_{J\cap\cal K=\emptyset,J\in \bar{\cal K}} \cal F_J). $$
Note that the sub-leaves $\cal F_J$ for $J\cap \cal K=\emptyset$ and $J\in \bar{\cal K}$ cover
the set $\{f\circ \sigma = c\}\cap \hat S(0,\alpha)$, for $\varepsilon\cdot c >0$,
where $\hat S(0,\alpha)$
is a neighbourhood $\sigma^{-1}(S(0,\alpha)\times ]0,\beta[)$ of
$\sigma^{-1}(S(0,\alpha))$, with $0<\beta \ll \alpha$.
It follows that
$$\chi_c(\bigcup_{J\cap\cal K=\emptyset,J\in \bar{\cal K}} \cal F_J)=
\chi_c(F_\varepsilon\cap (S(0,\alpha)\times ]0,\beta[))=\chi_c(Lk(f)\times ]0,\beta[)=
-\chi_c(Lk(f)). $$
We finally obtain
$$ \chi_c(F_\varepsilon)=\chi_c(S_f^\varepsilon)-\chi_c(Lk(f)),$$
and
$$ \chi_c(\bar F_\varepsilon)= \chi_c(F_\varepsilon)+\chi_c(Lk(f))=\chi_c(S_f^\varepsilon).$$
This proves the first equality of our statement, the equality
$\chi_c(\bar F_\varepsilon)=(-1)^{d+1}\chi_c (F_\varepsilon)$ being proved
in Remark \ref{bord}.
Assume now that $\varepsilon \in \{<,>\}$, and denote $\delta_<:=-1$ and $\delta_>:=1$, like
in Remark \ref{bord}.
With this notation
$\bar F_\varepsilon=\bar F_{\delta_\varepsilon}\times ]0,1[$,
and by the formula proved above in the case $\varepsilon\in \{-1,1\}$, we obtain
$$ \chi_c(\bar F_\varepsilon)=\chi_c(\bar F_{\delta_\varepsilon})\chi_c(]0,1[)
=-\chi_c(\bar F_{\delta_\varepsilon})=-\chi_c(S_f^{\delta_\varepsilon})=-
\sum_{J\cap \cal K \not=\emptyset } 2^{\vert J\vert-1}
\chi_c(\widetilde E_J^{0,\delta_\varepsilon}).$$
But since $\widetilde E_J^{0,\varepsilon}=
\widetilde E_J^{0,\delta_\varepsilon}\times { \mathbb R}_+$, it follows that
$$\chi_c( \bar F_\varepsilon)= \sum_{J\cap \cal K \not=\emptyset } 2^{\vert J\vert-1}
\chi_c(\widetilde E_J^{0,\delta_\varepsilon})\chi_c({ \mathbb R}_+)=
\sum_{J\cap \cal K \not=\emptyset} 2^{\vert J\vert-1} \chi_c(\widetilde E_J^{0,\varepsilon})
=\chi_c(S_f^\varepsilon).$$
This proves the first equality of our statement. The equality
$\chi_c(\bar F_\varepsilon)=(-1)^{d+1}\chi_c (F_\varepsilon)$
is the consequence of $\chi_c(\bar F_\varepsilon)=\chi_c(\bar F_{\delta_\varepsilon})\chi_c(]0,1[)$,
$\chi_c( F_\varepsilon)=\chi_c( F_{\delta_\varepsilon})\chi_c(]0,1[)$
and
$\chi_c(\bar F_{\delta_\varepsilon})=(-1)^{d+1}\chi_c (F_{\delta_\varepsilon})$.
To finish, the equality $\chi_c(S_f^\varepsilon)=-\chi_c(G_\varepsilon)$ comes from the
equality $ \chi_c(G_\varepsilon )=\chi_c(\bar F_{\delta_\varepsilon})$
recalled in Remark \ref{bord} and from
$\chi_c(\bar F_\varepsilon)=-\chi_c(\bar F_{\delta_\varepsilon})$,
$\chi_c(S_f^\varepsilon)=\chi_c(\bar F_\varepsilon)$.
\end{proof}
\begin{remark}
As stated in Theorem \ref{Milnor}, the realization via $\chi_c$
of the motivic Milnor fibre $S_f^\varepsilon$, for $\varepsilon\in \{-1,1,<,>\}$, gives the
Euler-Poincar\'e characteristic of the corresponding set theoretic semialgebraic
closed Milnor fibre $\bar F_\varepsilon$.
Nevertheless it is worth noting that this equality is in general not true at the higher
level of
$\chi(K_0[BSA_{ \mathbb R}])$. Even computed in $K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$, we may have
$S_f^\varepsilon\not=[A_{f,\varepsilon}]$, for a given semialgebraic formula
$A_{f,\varepsilon}$ with real points $\bar F_\varepsilon$.
Let us illustrate this remark by the following quite trivial example.
\end{remark}
\begin{example}
Let us consider the simple case where $f:{ \mathbb R}^2\to { \mathbb R}$ is given by $f(x,y)=xy$.
After one blowing-up $\sigma: M\to { \mathbb R}^2$ of the origin of ${ \mathbb R}^2$, the situation is as required by Theorem \ref{Zeta function}.
We denote by $E_1$ the exceptional divisor $\sigma^{-1}(0)$ (which is isomorphic to ${ \mathbb P}_1$) and by
$E_2,E_3$ the irreducible components of the strict transform $\sigma^{-1}(\{f=0\})$.
The induced
stratification of $E_1$ is given by $E_{1,2}^0=E_1\cap E_2$, $E_{1,3}^0=E_1\cap E_3$, and
the two connected components ${E}_1^{'0}, {E}_1^{''0}$ of $E_1\setminus (E_2\cup E_3)$.
We consider a chart $(X,Y)$ of $M$ such that $\sigma(X,Y)=(x=Y,y=XY)$. In this chart
$(f\circ \sigma)(X,Y)=XY^2$. The multiplicity of $f\circ \sigma$ along $E_1$ is $N_1=2$,
and the multiplicity of $\jac\sigma$ along $E_1$ is $1$, thus $\nu_1=2$. Assuming that
$ E_1^{'0}$ corresponds to $X>0$ and $ E_1^{''0}$ corresponds to $X<0$,
it follows that
$$\widetilde E^{'0,\varepsilon}_1=\{(X,t); X\in E_1^{'0}, t\in { \mathbb R}, Xt^2?_\varepsilon\}
\hbox{ and } \widetilde E^{''0,\varepsilon}_1=\{(X,t); X\in E_1^{''0}, t\in { \mathbb R}, Xt^2?_\varepsilon\},$$
where $?_\varepsilon$ is
$=1$, $=-1$, $>0$ or $<0$ in case $\varepsilon$ is $1$, $-1$, $>$ or $<$.
In case $\varepsilon=1$ we obtain
$$[\widetilde E^{'0,1}_1]={ \mathbb L}-1 \hbox{ and } [\widetilde E^{''0,1}_1]=0$$
since $\widetilde E^{'0,1}_1$ has a one-to-one projection onto
$\{(X,Y); X=0, Y\not=0\}$ and $\widetilde E^{''0,1}_1$ is
empty.
Now in a neighbourhood of $E^0_{1,2}$, $f\circ \sigma (X,Y)=XY^2$,
giving $N_1=2$, $N_2=1$ and $m=\gcd(N_1,N_2)=1$. We also have $\nu_1=2$ and $\nu_2=1$.
It follows that
$$\widetilde E^{0,1}_{1,2}=\{(0,t); t\in { \mathbb R}, t=1\} \hbox{ thus } [\widetilde E^{0,1}_{1,2}]=1.$$
In the same way, using another chart, one finds
$$ \ [\widetilde E^{0,1}_{1,3}]=1.$$
By Theorem \ref{Zeta function} we then have
$$ Z_f^1(T)=({ \mathbb L}-1)^{1-1}({ \mathbb L}-1)\Big(\frac{{ \mathbb L}^{-2}T^2}{1-{ \mathbb L}^{-2}T^2}\Big)
+
2({ \mathbb L}-1)^{2-1}\Big(\frac{{ \mathbb L}^{-2}T^2}{1-{ \mathbb L}^{-2}T^2}\Big)
\Big(\frac{{ \mathbb L}^{-1}T}{1-{ \mathbb L}^{-1}T}\Big),$$
$$Z^1_f(T)=\frac{{ \mathbb L}-1}{({ \mathbb L} T^{-1}-1)^2} \hbox{ and } S_f^1=-({ \mathbb L}-1).$$
Of course we find that $\chi_c(S_f^1)=\chi_c(\{ f=c \}\cap \bar B(0,1))=2$,
$0<c\ll 1$.
Now let us for instance choose $\{ xy=c,1-x^2-y^2>0\}$, for $0<c\ll 1$, as a basic semialgebraic formula
to represent the open Milnor fibre of $f=0$ and let us compute
$\beta([xy=c,1-x^2-y^2>0])$ (rather than $[xy=c,1-x^2-y^2>0]$ itself, since we use regular homeomorphims in our computations).
By definition of the realization $\beta:K_0(BSA_{ \mathbb R})\to { \mathbb Z}[\frac{1}{2}][u]$,
we have
$$\beta([xy=c,1-x^2-y^2>0])=\frac{1}{4}\beta([xy=c,z^2=1-x^2-y^2])$$
$$
-\frac{1}{4}\beta([xy=c,z^2=x^2+y^2-1])+
\frac{1}{2}\beta([xy=c,1-x^2-y^2\not=0]).$$
Projecting the algebraic set $ \{ xy=c,z^2=1-x^2-y^2\}$ orthogonally to the plane $x=-y$ with
coordinates $(X=1/\sqrt 2(x-y),z)$ one finds
twice the quadric $z^2+2X^2=1-2c$ that is, up to regular homeomorphism, two circles. A circle having
class $u+1$, we have
$$\beta([xy=c,z^2=1-x^2-y^2])=2(u+1). $$
Projecting the algebraic set $ \{ xy=c,z^2=x^2+y^2-1\}$ to the plane $x=-y$ with
coordinates $(X=1/\sqrt 2(x-y),z)$ one finds
twice the hyperbola $2X^2-z^2=1-2c$. Projecting orthogonally again the hyperbola onto one of its asymptotic
axes we see that this hyperbola has class $u-1$. It gives
$$\beta([xy=c,z^2=x^2+y^2-1])=2(u-1).$$
Finally the constructible set $\{xy=c,1-x^2-y^2\not=0\}$ being the hyperbola without
$4$ points, we have
$$\beta([xy=c,1-x^2-y^2>0])=\frac{1}{2}(u+1)-\frac{1}{2}(u-1)+\frac{1}{2}(u-1)-2=
\frac{u-3}{2}. $$
Of course $\chi_c(\chi([xy=c,1-x^2-y^2>0]))=\chi_c(\{f=c\}\cap B(0,1)) =-2$.
The simple semialgebraic formula representing the set theoretic closed Milnor fibre is
$\{xy=c,1-x^2-y^2\ge0\}$, it has class $\displaystyle \beta([xy=c,1-x^2-y^2>0])+4\beta([\{*\}])=\frac{u+5}{2}$
in $ { \mathbb Z}[\frac{1}{2}][u]$. But although
$$\chi_c(\chi([xy=c,1-x^2-y^2\ge0]))=\chi_c(S_f^1)=\chi_c(\{f=c\}\cap \bar B(0,1))=2$$
as expected from Theorem \ref{Milnor},
we observe that
$$\frac{u+5}{2}=\beta([xy=c,1-x^2-y^2\ge0]) \not=\beta(S_f^1)=-(u-1).$$
As a final consequence, we certainly cannot have
this equality between $\chi([xy=c,1-x^2-y^2\ge0])$ and $S_f^1 $ at the level of $K_0({\rm Var}_{ \mathbb R})\otimes { \mathbb Z}[\frac{1}{2}]$.
\end{example}
\vskip2cm
\end{document} |
\begin{document}
\begin{abstract}
In this note we make a few remarks about the geometry of the
holomorphic symplectic manifold $Z$ constructed in
\cite{LLSvS} as a two-step contraction of the variety of twisted cubic curves
on a cubic fourfold $Y\subset \mathbb{P}^5$.
We show that $Z$
is birational to a component of the moduli space of stable sheaves
in the Calabi-Yau subcategory of the derived category of $Y$.
Using this description we deduce that the twisted
cubics contained in a hyperplane section $Y_H = Y \cap H$ of $Y$ give rise to a
Lagrangian subvariety $Z_H \subset Z$. For a generic choice of the hyperplane,
$Z_H$ is birational to the theta-divisor in the intermediate Jacobian $\IJac{Y_H}$.
\end{abstract}
\title{On the geometry of the Lehn--Lehn--Sorger--van Straten eightfold}
\tableofcontents
\section{Introduction}
We work over the field of complex numbers. Throughout the paper $Y\subset \mathbb{P}^5$ is
a smooth cubic fourfold not containing a plane. In \cite{LLSvS} the variety $M_3(Y)$ of generalized
twisted cubic curves on $Y$ was studied. It was shown that $M_3(Y)$ is 10-dimensional,
smooth and irreducible. Starting from this variety an 8-dimensional irreducible holomorphic symplectic
(IHS) manifold $Z$ was constructed. More precisely, it was shown that there exist morphisms
\begin{equation}\label{eqn_contractions}
M_3(Y)\stackrel{a}{\longrightarrow} Z'\stackrel{\sigma}{\longrightarrow} Z,
\end{equation}
and
\begin{equation}\label{eqn_embedding}
\mu\colon Y\hookrightarrow Z,
\end{equation}
where $a$ is a $\mathbb{P}^2$-fibre bundle and $\sigma$ is the blow-up along the image
of $\mu$.
It was later shown in \cite{AL} that $Z$ is birational --- and hence deformation
equivalent --- to a Hilbert scheme of four points on a K3 surface.
In this paper we present another point of view on $Z$. We show that an open
subset of $Z$ can be described as a moduli space of Gieseker stable torsion-free sheaves of rank $3$ on $Y$.
Kuznetsov and Markushevich \cite{KM} have constructed a closed two-form on any moduli space of sheaves on $Y$.
Properties of the Kuznetsov-Markushevich form are known to be closely related to the structure of the
derived category of $Y$.
The bounded derived category $\EuScript{D}^b(Y)$ of coherent sheaves on $Y$
has an exceptional collection $\mathcal O_Y$, $\mathcal O_Y(1)$, $\mathcal O_Y(2)$ with right orthogonal
$\mathcal A_Y$, so that $\EuScript{D}^b(Y)=\langle\mathcal A_Y, \mathcal O_Y, \mathcal O_Y(1), \mathcal O_Y(2)\rangle$. The category
$\mathcal A_Y$ is a Calabi-Yau category of dimension two, meaning that its Serre functor is the shift by
$2$ \cite[Section 4]{K}.
It was shown in \cite{KM} that
the two-form on moduli spaces of sheaves on $Y$ is non-degenerate if the sheaves lie in $\mathcal A_Y$.
The torsion-free sheaves mentioned above lie in $\mathcal A_Y$.
This gives an alternative description of the symplectic form on $Z$:
{\bf Theorem \ref{thm_MMF}.}
{\it The component $\mathcal M_F$ of the moduli space of Gieseker stable rank 3 sheaves on $Y$
with Hilbert polynomial $\frac38 n^4 + \frac94 n^3 + \frac{33}{8} n^2 + \frac94 n$
is birational to the IHS manifold $Z$. Under this birational equivalence
the symplectic form on $Z$ defined in \cite{LLSvS} corresponds
to the Kuznetsov-Markushevich form on $\mathcal M_F$.}
A similar approach relying on the description of an open part of $Z$ as a moduli space
was used by Addington and Lehn in \cite{AL} to prove that the variety $Z$ is a deformation
of a Hilbert scheme of four points on a K3 surface. In \cite{O} Ouchi considered the
case of cubic fourfolds containing a plane. He proved that one can describe (a birational model of)
the LLSVS variety as a moduli space of Bridgeland-stable objects in the derived category
of a twisted K3 surface. Moreover, in this situation one also has a Lagrangian embedding
of the cubic fourfold into the LLSVS variety as in (\ref{eqn_embedding}).
Another similar construction has been proposed in \cite{LMS}, where it was proved that $Z$ is birational
to a component of the moduli space of stable vector bundles of rank $6$ on $Y$.
Using the birational equivalence between $Z$ and the moduli space of sheaves on $Y$
we show that twisted cubics lying in hyperplane sections $Y_H$ of $Y$ give rise
to Lagrangian subvarieties in $Z$ and discuss the geometry of these subvarieties:
{\bf Theorem.}
{\it Denote by $Z_H$ the image in $Z$ of twisted cubics lying in a hyperplane section $Y_H = Y\cap H$
under the map $a$ from (\ref{eqn_contractions}). If $Y$ and $H$ are generic, then $Z_H$ is a Lagrangian subvariety of $Z$
which is birational to the theta-divisor of the intermediate Jacobian of $Y_H$.}
\begin{proof} See Proposition \ref{prop_Lagr} and Theorem \ref{thm_AJ}.
\end{proof}
This is analogous to the case of lines on $Y$: it is well-known that lines on $Y$ form
an IHS fourfold, and lines contained in hyperplane sections of $Y$ form Lagrangian
surfaces in this fourfold, see for example \cite{V}.
{\bf Acknowledgements} The authors would like to thank Alexander Kuznetsov, Daniel Huybrechts, Christoph Sorger,
Manfred Lehn and Yukinobu Toda for useful discussions and remarks.
\section{Twisted cubics and sheaves on a cubic fourfold}
\subsection{Twisted cubics on cubic surfaces and determinantal representations}
Let us recall the structure of the general fibre of the map $a\colon M_3(Y)\to Z'$
in (\ref{eqn_contractions}). We follow \cite{LLSvS} in notation and
terminology and we refer to \cite{EPS, LLSvS} for all details about the geometry
of twisted cubics.
Consider a cubic surface $S=Y\cap \mathbb{P}^3$ where $\mathbb{P}^3$
is a general linear subspace in $\mathbb{P}^5$. There exist several families of generalized
twisted cubics on $S$. Each of the families is isomorphic to $\mathbb{P}^2$ and these
are the fibres of the map $a$. The number of families depends on $S$. If the surface
is smooth there are 72 families, corresponding to 72 ways to represent $S$ as a
blow-up of $\mathbb{P}^2$ (and to the 72 roots in the lattice $E_6$). Each of the families
is a linear system which gives a map to $\mathbb{P}^2$. If $S$
is singular, generalized twisted cubics on it can be of two different types. Curves of
the first type are arithmetically Cohen-Macaulay (aCM), and those
of the second type are non-CM. The detailed description of their geometry on surfaces with different
singularity types can be found in \cite{LLSvS}, \S 2. For our purposes it is enough
to recall that the image in $Z'$ of non-CM curves under the map $a$ is exactly the
exceptional divisor of the blow-up $\sigma\colon Z'\to Z$ in (\ref{eqn_contractions}),
see \cite{LLSvS}, Proposition 4.1.
In this section we deal only with aCM curves and we also assume that
the surface $S$ has only ADE singularities. In this case every aCM curve belongs
to a two-dimensional linear system with smooth general member, just as in the case of smooth $S$ \cite[Theorem 2.1]{LLSvS}.
Moreover, these linear systems are in one-to-one correspondence with the determinantal
representations of $S$. Let us explain this in detail.
Let $S$ be a cubic surface in $\mathbb{P}^3$ with at most ADE singularities. Let $\alpha \colon S\hookrightarrow \mathbb{P}^3$
denote the embedding and let $p\colon \tilde{S}\to S$ be the minimal resolution of singularities.
Take a general aCM twisted cubic $C$ on $S$ and let $\tilde{C} \subset \tilde{S}$ be its proper preimage.
Let $\tilde{L}=\mathcal O_{\tilde{S}}(\tilde{C})$ be the corresponding line bundle and let $L=p_*\tilde{L}$ be its direct image.
\begin{lemma}\label{lem_L} The sheaf $L$ has the following properties:
(1) $H^0(S,L)=\mathbb{C}^3$, $H^k(S,L)=0$ for $k \geqslant 1$; $H^{k}(S, L(-1)) = H^k(S, L(-2)) = 0$ for $k\geqslant 0$;
(2) We have the following resolution:
\begin{equation}\label{eqn_determinantal}
0\longrightarrow\mathcal O_{\mathbb{P}^3}(-1)^{\oplus 3}\stackrel{A}{\longrightarrow}\mathcal O_{\mathbb{P}^3}^{\oplus 3}\longrightarrow \alpha_*L\longrightarrow 0,
\end{equation}
where $A$ is given by a $3\times 3$ matrix of linear forms on $\mathbb{P}^3$, and the surface
$S$ is the vanishing locus of $\det A$;
(3) $\EuScript{E}xt^k(L,L) = 0$ for $k\geqslant 1$.
\end{lemma}
\begin{proof}
We note that the map $\alpha\circ p\colon \tilde{S}\to \mathbb{P}^3$ is given by the anticanonical
linear system on $\tilde{S}$, so we will use the notation $K_{\tilde{S}} = \mathcal O_{\tilde{S}}(-1)$.
{\it (1)} First we observe that $\mathrm{R}^mp_*\tilde{L} = 0$ for $m\geqslant 1$. This follows from the long
exact sequence of higher direct images for the triple
\begin{equation}\label{eqn_triple}
0\longrightarrow\mathcal O_{\tilde{S}}\longrightarrow\tilde{L}\longrightarrow \mathcal O_{\tilde{C}}\otimes \tilde{L}\longrightarrow 0,
\end{equation}
because the singularities of $S$ are rational, so that $\mathrm{R}^m p_*\mathcal O_{\tilde{S}} = 0$ for $m \geqslant 1$ and the map $p$ induces an embedding of
$\tilde{C}$ into $S$, so that $\mathrm{R}^m p_*$ vanishes on sheaves supported on $\tilde{C}$ for $m \geqslant 1$.
Analogously, $\mathrm{R}^mp_*\tilde{L}(-1) = \mathrm{R}^mp_*\tilde{L}(-2) = 0$ for $m\geqslant 1$.
Hence it is enough to verify the cohomology vanishing for $\tilde{L}$.
The linear system $|\tilde{L}|$ is two-dimensional and base point free (we refer
to \S 2 of \cite{LLSvS}, in particular Proposition 2.5). We also know the intersection products
$\tilde{L}\cdot \tilde{L} = 1$, $\tilde{L}\cdot K_{\tilde{S}} = -3$ and $K_{\tilde{S}}\cdot K_{\tilde{S}} = 3$. Using Riemann-Roch
we find $\chi(\tilde{L}) = 3$ and $\chi(\tilde{L}(-1)) = \chi(\tilde{L}(-2)) = 0$.
We have $H^0(\tilde{S},\tilde{L}(-1)) = H^0(\tilde{S},\tilde{L}(-2)) = 0$
which is clear from (\ref{eqn_triple}) since $\tilde{L}|_{\tilde{C}} = \mathcal O_{\mathbb{P}^1}(1)$ and
$\mathcal O_{\tilde{S}}(1)|_{\tilde{C}}=\mathcal O_{\mathbb{P}^1}(3)$. By Serre duality we have $H^2(\tilde{S},\tilde{L}) =
H^0(\tilde{S},\tilde{L}^\vee(-1))^* = 0$, $H^2(\tilde{S},\tilde{L}(-1)) = H^0(\tilde{S},\tilde{L}^\vee)^* = 0$
because $\tilde{L}^\vee$ is the ideal sheaf of $\tilde{C}$,
and $H^2(\tilde{S},\tilde{L}(-2)) = H^0(\tilde{S},\tilde{L}^\vee(1))^* = 0$. The last vanishing
follows from the fact that $C$ is not contained in any hyperplane in $\mathbb{P}^3$.
It follows that $H^1(\tilde{S},\tilde{L}) = H^1(\tilde{S},\tilde{L}(-1)) = H^1(\tilde{S},\tilde{L}(-2)) = 0$.
{\it (2)} We decompose the sheaf $\alpha_*L$ with respect to the full exceptional collection
$\EuScript{D}^b(\mathbb{P}^3) = \langle \mathcal O_{\mathbb{P}^3}(-1),\mathcal O_{\mathbb{P}^3}, \mathcal O_{\mathbb{P}^3}(1), \mathcal O_{\mathbb{P}^3}(2)\rangle$.
From part {\it (1)} it follows that $\alpha_*L$ is right-orthogonal to $\mathcal O_{\mathbb{P}^3}(2)$ and $\mathcal O_{\mathbb{P}^3}(1)$.
The left mutation of $\alpha_*L$ through $\mathcal O_{\mathbb{P}^3}$ is given by a cone of the morphism
$\mathcal O_{\mathbb{P}^3}^{\oplus 3}\to \alpha_*L$ induced by the global sections of $L$.
This cone is contained in the subcategory generated by the exceptional object $\mathcal O_{\mathbb{P}^3}(-1)$.
Hence it must be equal to $\mathcal O_{\mathbb{P}^3}(-1)^{\oplus 3}[1]$,
and we obtain the resolution (\ref{eqn_determinantal}) for $\alpha_*L$.
{\it (3)} Since $L$ is a vector bundle outside of the singular points of $S$,
the sheaves $\EuScript{E}xt^k(L,L)$ for $k\geqslant 1$ must have zero-dimensional support.
It follows that it will be sufficient to prove that $\mathrm{Ext}^k(L,L)=0$ for $k\geqslant 1$.
We first compute $\mathrm{Ext}^k(\alpha_*L,\alpha_*L)$. Applying $\mathrm{Hom}(-,\alpha_*L)$ to (\ref{eqn_determinantal})
we get the exact sequence
$$
0\longrightarrow\mathrm{Hom}(\alpha_*L,\alpha_*L)\longrightarrow H^0(\mathbb{P}^3,\alpha_*L)^{\oplus 3}\longrightarrow H^0(\mathbb{P}^3,\alpha_*L(1))^{\oplus 3}\longrightarrow \mathrm{Ext}^1(\alpha_*L,\alpha_*L)\longrightarrow 0,
$$
where we use that $H^k(\mathbb{P}^3,\alpha_*L(m)) = 0$ for $k\geqslant 1$, $m\geqslant 0$ which is clear from (\ref{eqn_determinantal}).
This also shows that $\mathrm{Ext}^k(\alpha_*L,\alpha_*L) = 0$ for $k\geqslant 2$. We have $\dim\mathrm{Hom}(\alpha_*L,\alpha_*L)=1$ and
from the sequence above and (\ref{eqn_determinantal}) we compute $\dim\mathrm{Ext}^1(\alpha_*L,\alpha_*L)=19$.
The object $\mathrm{L} \alpha^*\alpha_*L$ is included into the triangle $\mathrm{L} \alpha^*\alpha_*L\to L\to L(-3)[2]\to \mathrm{L} \alpha^*\alpha_*L[1]$,
see \cite{KM}, Lemma 1.3.1. Applying $\mathrm{Hom}(-,L)$ to this triangle and using
$\mathrm{Ext}^k(\mathrm{L} \alpha^*\alpha_*L,L) = \mathrm{Ext}^k(\alpha_*L,\alpha_*L)$ we get the exact sequence
$$
0\longrightarrow \mathrm{Ext}^1(L,L)\longrightarrow \mathrm{Ext}^1(\alpha_*L,\alpha_*L)\longrightarrow \mathrm{Hom}(L,L(3))\longrightarrow \mathrm{Ext}^2(L,L)\longrightarrow 0.
$$
The arrow in the middle is an isomorphism. To see this note that
$\mathrm{Hom}(L,L(3))=H^0(S,N_{S/\mathbb{P}^3})=\mathbb{C}^{19}$ and that all the deformations of $\alpha_*L$
are induced by the deformations of its support $S$. It follows that $\mathrm{Ext}^1(L,L)=\mathrm{Ext}^2(L,L)=0$.
As we have mentioned above the sheaves $\EuScript{E}xt^k(L,L)$ have zero-dimensional support for $k\geqslant 1$,
and from the local-to-global spectral sequence we see that $\mathrm{Ext}^k(L,L)=H^0(S,\EuScript{E}xt^k(L,L))$ for $k\geqslant 1$.
It follows that $\EuScript{E}xt^1(L,L)=\EuScript{E}xt^2(L,L)=0$. To prove the vanishing of higher $\EuScript{E}xt$'s we
construct a quasi-periodic free resolution for $L$. From (\ref{eqn_determinantal}) we see
that the restriction of the complex $\mathcal O_{\mathbb{P}^3}(-1)^{\oplus 3}\stackrel{A}{\longrightarrow}\mathcal O_{\mathbb{P}^3}^{\oplus 3}$
to $S$ will have cohomology $L$ in degree $0$ and $L(-3)$ in degree $-1$. Hence $L$ is
quasi-isomorphic to the complex of the form
$$
\ldots\longrightarrow\mathcal O_S(-7)^{\oplus 3}\longrightarrow\mathcal O_S(-6)^{\oplus 3}\longrightarrow\mathcal O_S(-4)^{\oplus 3}\longrightarrow\mathcal O_S(-3)^{\oplus 3}\longrightarrow\mathcal O_S(-1)^{\oplus 3}\longrightarrow\mathcal O_S^{\oplus 3}\longrightarrow 0.
$$
This complex is quasi-periodic of period two, with subsequent entries obtained by tensoring by $\mathcal O_S(-3)$.
Applying $\EuScript{H}om(-,L)$ to this complex we see that $\EuScript{E}xt^k(L,L)$ are also quasi-periodic, and
vanishing of the first two of these sheaves implies vanishing of the rest.
\end{proof}
Starting from $L$, we have constructed the determinantal representation of $S$.
Conversely, given a sequence (\ref{eqn_determinantal}),
generalized twisted cubics corresponding to this determinantal representation can
be recovered as vanishing loci of sections of $L$.
More detailed discussion of determinantal representations of cubic surfaces
with different singularity types can be found in \cite{LLSvS}, \S 3.
\subsection{Moduli spaces of sheaves on a cubic fourfold}
Let $S=Y\cap \mathbb{P}^3$ be a linear
section of $Y$ with ADE singularities and $L$ a sheaf which gives a determinantal
representation of $S$ as in (\ref{eqn_determinantal}). Denote by $i\colon S\hookrightarrow Y$
the embedding. We consider the moduli space of torsion sheaves on $Y$ of the form $i_*L$
to get a description of an open subset of $Z$.
\begin{lemma}\label{lem-unobs}
For any $u \in \mathrm{Ext}^1(i_*L, i_*L)$ its Yoneda square $u \circ u \in \mathrm{Ext}^2(i_*L, i_*L)$ is zero,
so that the deformations of $i_*L$ are unobstructed.
\end{lemma}
\begin{proof}
Recall that $L$ is a rank one sheaf on $S$.
The unobstructedness is clear when
$S$ is smooth, because $L$ is a line bundle in this case. Then the local $\EuScript{E}xt$'s are
given by $\EuScript{E}xt^k(i_*L,i_*L) = i_*\Lambda^kN_{S/Y}$ (see \cite{KM}, Lemma 1.3.2 for the proof of this).
In the case when $S$ is singular and $L$ is not locally free we can use the same argument
as in Lemma 1.3.2 of \cite{KM} to obtain a spectral sequence
$E_2^{p,q}=i_*(\EuScript{E}xt^p(L,L)\otimes \Lambda^qN_{S/Y}) \Rightarrow \EuScript{E}xt^{p+q}(i_*L,i_*L)$.
Now we can use the second part of Lemma \ref{lem_L} to conclude that in this case
$\EuScript{E}xt^k(i_*L,i_*L) = i_*\Lambda^kN_{S/Y}$ as well.
We have $N_{S/Y}=\mathcal O_S(1)^{\oplus 2}$ and $H^m(S,\mathcal O_S(k)) = 0$ for $k\geqslant 0$,
$m\geqslant 1$ and from the local-to-global spectral sequence we deduce that $\mathrm{Ext}^k(i_*L,i_*L)=H^0(S,\Lambda^kN_{S/Y})$.
The algebra structure is induced
by exterior product $\Lambda^kN_{S/Y}\otimes \Lambda^mN_{S/Y}\to \Lambda^{k+m}N_{S/Y}$ (see \cite{KM}, Lemma 1.3.3).
The exterior square of any section of
$N_{S/Y}$ is zero and unobstructedness follows.
\end{proof}
The sheaf $i_*L$ has Hilbert polynomial $P(i_*L,n)=\frac{3}{2} n^2+\frac{9}{2} n +3$ which
is easy to compute from (\ref{eqn_determinantal}).
Denote by $\mathcal M_L$ the irreducible component of the moduli space of semistable sheaves with this
Hilbert polynomial containing $i_*L$.
Let us denote by $V$ a 6-dimensional vector space, so that $Y\subset \mathbb{P}(V)=\mathbb{P}^5$.
Denote by $G$ the Grassmannian $\mathrm{Gr}(4,V)$.
Recall from \cite{LLSvS} that we have a closed embedding $\mu\colon Y\hookrightarrow Z$,
and the open subset $Z\backslash \mu(Y)$ corresponds to aCM twisted cubics.
There exists a map $\pi: Z\backslash \mu(Y)\to G$ which sends a twisted cubic
to its linear span in $\mathbb{P}^5$.
If we consider linear sections $S = Y\cap \mathbb{P}^3$, then
$S$ can have non-ADE singularities, but the codimension in $G$ of such linear subspaces
is at least 4 by Proposition 4.2 and Proposition 4.3 in \cite{LLSvS}.
Denote by $G^\circ\subset G$ the open subset consisting of $U\in G$, such that $Y\cap \mathbb{P}(U)$ has
only ADE singularities.
Let $Z^\circ = \pi^{-1}(G^\circ)$ be the corresponding open subset in $Z\backslash \mu(Y)$.
This open subset has complement of codimension 4.
\begin{lemma}\label{lem_MML}
There exists an open subset $\mathcal M_L^\circ\hookrightarrow \mathcal M_L$ isomorphic to $Z^\circ$.
The sheaves on $Y$ corresponding to points of $\mathcal M_L^\circ$ are of the form $i_*L$,
where $L$ gives a determinantal representation for a linear section $S=Y\cap\mathbb{P}^3$ with ADE
singularities.
\end{lemma}
\begin{proof}
Denote by $\mathcal U$ the universal subbundle of $\mathcal O_G\otimes V$.
Let $p: \mathbb{P}(\mathcal U)\to G$ be the projection and $\mathcal H=\mathcal{H}om_{p}(\mathcal O_{\mathbb{P}(\mathcal U)}(-1)^{\oplus 3},\mathcal O_{\mathbb{P}(\mathcal U)}^{\oplus 3})$.
We have $\mathcal H\simeq(\mathcal U^\vee)^{\oplus 9}$. We will denote by the same letter $\mathcal H$ the total space
of the bundle $\mathcal H$. By construction, over $\mathcal H\times_G\mathbb{P}(\mathcal U)$ we have
the universal morphism
$$
\mathcal O_{\mathbb{P}(\mathcal U)}(-1)^{\oplus 3}\stackrel{\mathcal A}{\longrightarrow}\mathcal O_{\mathbb{P}(\mathcal U)}^{\oplus 3}.
$$
Denote by $\mathcal H^\circ$ the open subset in the total space of $\mathcal H$ where $\mathrm{det}(\mathcal A)\neq 0$.
Consider the closed embedding $j: \mathcal H^\circ\times_G\mathbb{P}(\mathcal U) \hookrightarrow \mathcal H^\circ\times \mathbb{P}(V)$
and the sheaf $\mathcal M = \mathrm{coker}(j_*\mathcal A)$ on $\mathcal H^\circ\times \mathbb{P}(V)$.
Let $q: \mathcal H^\circ\times \mathbb{P}(V)\to \mathcal H^\circ$ be the projection.
For a point $A\in \mathcal H^\circ$ the restriction $\mathcal M|_{q^{-1}(A)}$ is a sheaf that defines a determinantal representation
of a cubic surface in $\mathbb{P}(U)\subset \mathbb{P}(V)$. The condition that this surface is contained in $Y$
defines a closed subvariety $\mathcal W\subset \mathcal H^\circ$.
Let $\beta: \mathcal W\times Y\hookrightarrow \mathcal H^\circ\times \mathbb{P}(V)$ be the closed embedding. Define $\mathcal L=\mathcal M|_{\mathcal W\times Y}$
and consider the open subset $G^\circ\subset G$ of such subspaces $U\subset V$ that $\mathbb{P}(U)\cap Y$ has ADE singularities.
Let $\mathcal W^\circ$ be the preimage of $G^\circ$ under the natural map $\mathcal W\to G$.
The sheaf $\mathcal L$ on $\mathcal W^\circ\times Y$ is flat over $\mathcal W^\circ$ since Hilbert polynomials
of its restrictions to the fibres are the same (see \cite{H}, chapter III, Theorem 9.9). We obtain a morphism
$\psi: \mathcal W^\circ\to \mathcal M_L$. Denote its image by $\mathcal M_L^\circ$.
Consider the fibre $\mathcal W_U$ of the map $\mathcal W^\circ\to G^\circ$ over a point $U\in G$ and the restriction
of $\mathcal L$ to $\mathcal W_U\times Y$. Over a point $w\in \mathcal W_U$ the sheaf $\mathcal L$ defines a determinantal representation
of the surface $Y\cap \mathbb{P}(U)$. The general structure of determinantal representations (see \cite{LLSvS} \S 3)
implies that each connected component of the fibre $\mathcal W_U$ is a single $(\mathrm{GL}_3\times \mathrm{GL}_3)/\mathbb{C}^*$ orbit
(\cite{LLSvS} Corollary 3.7).
Connected components of $\mathcal W_U$ are in one-to-one correspondence with non-isomorphic determinantal representations
of $Y\cap \mathbb{P}(U)$. The restriction of $\mathcal L$ to each connected component of $\mathcal W_U\times Y$ is a constant family of sheaves,
so the map $\psi$ contracts connected components of the fibre $\mathcal W_U$. From the explicit description of $Z^\circ$ given
above, we see that $\mathcal M_L^\circ$ is isomorphic to $Z^\circ$.
The properties stated in the lemma
are clear from construction. We also see that $\mathcal W^\circ$ is a $(\mathrm{GL}_3\times \mathrm{GL}_3)/\mathbb{C}^*$-fibre bundle
over $Z^\circ$.
\end{proof}
The sheaves $i_*L$ are not contained in the subcategory $\mathcal A_Y$.
In order to show that the
closed 2-form described in \cite{KM} is a symplectic form on $\mathcal M_L^\circ$, we are going to project the
sheaves $i_*L$ to $\mathcal A_Y$, and then show that this projection
induces an isomorphism of open subsets of moduli spaces respecting the 2-forms (up to a sign).
\begin{lemma}\label{lem_proj}
The sheaves $i_*L$ are globally generated and lie in the
subcategory $\langle\mathcal A_Y,\mathcal O_Y\rangle$.
The space of global sections $H^0(Y, i_*L)$ is three-dimensional, and the sheaf $F_L$, defined by the exact triple
\begin{equation}\label{eqn_F}
0\longrightarrow F_L\longrightarrow \mathcal O_Y^{\oplus 3}\longrightarrow i_*L\longrightarrow 0
\end{equation}
lies in $\mathcal A_Y$.
\end{lemma}
\begin{proof}
From Lemma \ref{lem_L} we deduce that $i_*L$ is right orthogonal to $\mathcal O_Y(1)$, $\mathcal O_Y(2)$, so that $i_*L$ lies in $\langle\mathcal A_Y,\mathcal O_Y\rangle$.
It also follows from Lemma \ref{lem_L} that $i_*L$ is globally generated, the global sections are three-dimensional and that
the higher cohomology groups of $L$ vanish. Thus $F_L$ is (up to a shift)
the left mutation of $i_*L$ through the exceptional bundle $\mathcal O_Y$, and in particular it lies in $\mathcal A_Y$.
\end{proof}
\begin{lemma}\label{lem_F}
Consider the exact triple (\ref{eqn_F}) where $i_*L$ is in $\mathcal M_L^\circ$. Then
$F_L$ is a Gieseker-stable rank 3 sheaf contained in $\mathcal A_Y$ with Hilbert polynomial $P(F_L, n) = \frac38 n^4 + \frac94 n^3 + \frac{33}{8} n^2 + \frac94 n$.
\end{lemma}
\begin{proof}
By Lemma \ref{lem_MML} the sheaf $i_*L$ is right-orthogonal to $\mathcal O_Y(2)$ and $\mathcal O_Y(1)$.
The sheaf $F_L$ is a shift of the left mutation of $i_*L$ through $\mathcal O_Y$, hence it is
contained in $\mathcal A_Y$. The Hilbert polynomial can be computed using the Hirzebruch-Riemann-Roch formula.
It remains to check the stability of $F_L$.
The sheaf $F_L$ is a subsheaf of $\mathcal O_Y^{\oplus 3}$, hence it has no torsion. In order
to check the stability we consider all proper saturated subsheaves $\mathcal G\subset F_L$.
We have to make sure that $p(\mathcal G, n)< p(F_L, n)$ where $p$ is the reduced Hilbert polynomial
(see \cite{HL} for all the relevant definitions). We use the convention
that the inequalities between polynomials are supposed to hold for $n \gg 0$.
We denote by $P$ the non-reduced Hilbert polynomial. We have $P(\mathcal O_Y, n) = a_0 n^4 + a_1 n^3 +\ldots + a_4$,
with the leading coefficient $a_0 = \frac{3}{4!}$. From the exact sequence
(\ref{eqn_F}) we see that $P(F_L, n) = 3P(\mathcal O_Y,n) - P(i_*L,n)$.
Since $i_*L$ has two-dimensional support, the degree of $P(i_*L,n)$ is two, and hence
the leading coefficient of $P(F_L, n)$ equals $3a_0$. So we have
\begin{equation}\label{pFF_C}
p(F_L, n) = p(\mathcal O_Y, n) - \frac{1}{3a_0}P(i_*L, n).
\end{equation}
Let $\tilde{\mathcal G}$ be the saturation of $\mathcal G$ inside $\mathcal O_Y^{\oplus 3}$.
Then $\tilde{\mathcal G}$ is a reflexive sheaf and we have a diagram:
$$
\begin{tikzcd}[]
0\rar & \mathcal G\dar\rar & \tilde{\mathcal G}\dar\rar & \mathcal H\dar\rar & 0\\
0\rar & F_L\rar & \mathcal O_Y^{\oplus 3}\rar & i_*L\rar & 0
\end{tikzcd}
$$
In this diagram $\mathcal H$ is a torsion sheaf which injects into $i_*L$ because $F_L/\mathcal G$ is torsion-free.
Note that $\mathcal O_Y^{\oplus 3}$ is Mumford-polystable, so $c_1(\mathcal G)\leqslant c_1(\tilde{\mathcal G})\leqslant 0$. If $c_1(\mathcal G)< 0$
then $\mathcal G$ is not destabilizing in $F_L$ because $c_1(F_L) = 0$.
Next we consider the case $c_1(\mathcal G)= c_1(\tilde{\mathcal G})= 0$. In this case $\tilde{\mathcal G}=\mathcal O_Y^{\oplus m}$ where $m=1$ or $m=2$.
This is clear if $\mathrm{rk}\,{\tilde{\mathcal G}}=1$ since a reflexive sheaf of rank one is a line bundle.
If $\mathrm{rk}\,{\tilde{\mathcal G}}=2$ we can consider the quotient $\mathcal O_Y^{\oplus 3}/\tilde{\mathcal G}$ which is torsion-free,
globally generated, of rank one and has zero first Chern class. It follows that the quotient is
isomorphic to $\mathcal O_Y$ and then $\tilde{\mathcal G}=\mathcal O_Y^{\oplus 2}$.
We have an exact triple $0\longrightarrow\mathcal G\longrightarrow\mathcal O_Y^{\oplus m}\longrightarrow\mathcal H\longrightarrow 0$ with $m$ equal to $1$ or $2$.
We see that $p(\mathcal G,n) = p(\mathcal O_Y,n) - \frac{1}{ma_0}P(\mathcal H, n)$. Note that $\mathcal H$ is a non-zero sheaf
which injects into $i_*L$, and the sheaf $L$ on the surface $S$ is torsion-free of rank one. Hence the
leading coefficient of $P(\mathcal H, n)$ is the same as for $P(i_*L, n)$ and this implies
$\frac{1}{ma_0}P(\mathcal H, n)> \frac{1}{3a_0}P(i_*L, n)$. From this and (\ref{pFF_C}) we conclude that
$p(\mathcal G,n)<p(F_L,n)$, hence $\mathcal G$ is not destabilizing. This completes the proof.
\iffalse
We consider two cases depending on the rank of $\mathcal G\subset F_L$.
{\it The case $\mathrm{rk}(\mathcal G) = 1$.} Denote by $\mathcal A$ the saturation of $\mathcal G$ inside $\mathcal O_Y^{\oplus 3}$.
Then $\mathcal A$ is of rank one and we have $0\to \mathcal G\to \mathcal A\to \mathcal H\to 0$ with $\mathcal H$ a torsion sheaf.
We have a diagram:
$$
\begin{tikzcd}[]
& 0\dar & 0\dar & 0\dar\\
0\rar & \mathcal G\dar\rar & \mathcal A\dar\rar & \mathcal H\dar\rar & 0\\
0\rar & F_L\dar\rar & \mathcal O_Y^{\oplus 3}\dar\rar & i_*L\dar\rar & 0\\
0\rar & \mathcal G'\dar\rar & \mathcal A'\dar\rar & \mathcal H'\dar\rar & 0\\
& 0 & 0 & 0
\end{tikzcd}
$$
The morphism $\mathcal H\to i_*L$ in the right column is injective since the sheaf $\mathcal G' = F_L/\mathcal G$ is torsion-free.
The sheaf $\mathcal A$ has rank one and it is saturated in the reflexive sheaf $\mathcal O_Y^{\oplus 3}$, hence it is
a line bundle (see \cite{OSS} Lemma 1.1.15 and Lemma 1.1.16). The Hilbert polynomial is
$P(\mathcal A, n) = \int e^{c_1(\mathcal A(n))} td(Y) = a_0 n^4 + a_1' n^3 + \ldots + a_4'$,
where the leading coefficient is the same as for $P(\mathcal O_Y, n)$. We have $p(\mathcal G, n) = p(\mathcal A, n) - \frac{1}{a_0} P(\mathcal H, n)$,
because $\mathcal H$ has at most two-dimensional support. For the same reason, if $\mathcal A$ is not destabilizing in
$\mathcal O_Y^{\oplus 3}$, then $p(\mathcal G, n) < p(\mathcal O_Y, n)$ and these polynomials differ in degree 3 terms.
Thus, we also have that $p(\mathcal G, n) < p(F_L, n)$ (see \ref{pFF_C}) and $\mathcal G$ is not destabilizing in $F_L$.
So it remains to consider the case when $\mathcal A$ is destabilizing in $\mathcal O_Y^{\oplus 3}$, that is
$\mathcal A = \mathcal O_Y$. Then $\mathcal G$ is an ideal sheaf which can not be trivial, since $\mathrm{Hom}(\mathcal O_Y,F_L) = 0$.
It follows that $\mathcal H = i_*\mathcal O_S$. We have an equality
$$
p(\mathcal G, n) = p(F_L,n) + \frac{1}{a_0} \left( \frac{1}{3} P(i_*L,n) - P(i_*\mathcal O_S,n) \right).
$$
The polynomials $P(i_*L,n)$ and $P(i_*\mathcal O_S,n)$ have the same positive leading coefficient. Hence the expression
in brackets is negative and $\mathcal G$ is not destabilizing.
{\it The case $\mathrm{rk}(\mathcal G) = 2$.} We can use the same diagram as before. In this case $\mathcal G'$ is of rank one,
$\mathcal A$ is again the saturation of $\mathcal G$ in $\mathcal O_Y^{\oplus 3}$ and $\mathcal H$ a torsion sheaf. We can check stability
by looking at the bottom row of the diagram and proving that $p(\mathcal G', n) > p(F_L, n)$.
The sheaf $(\mathcal A')^\vee$ injects into $\mathcal O_Y^{\oplus 3}$ with torsion-free quotient, hence it is a line bundle.
Then $\mathcal A'$ is a rank one subsheaf of the line bundle $\mathcal L' = (\mathcal A')^{\vee\vee}$. It follows that $\mathcal A'$ has
the same first Chern class as $\mathcal L'$. Suppose that $c_1(\mathcal L') \neq 0$, then the sheaf $\mathcal A'$ is not destabilizing
for $\mathcal O_Y^{\oplus 3}$, so $p(\mathcal A',n) > p(\mathcal O_Y, n)$ and these polynomials differ in degree 3 term.
We have $p(\mathcal G', n) = p(\mathcal A', n) - \frac{1}{a_0}P(\mathcal H', n)$. But $\mathcal H'$ has at most two-dimensional support and
we again conclude that in this case $p(\mathcal G', n) > p(F_L, n)$.
It remains to consider the case when $\mathcal A'$ is destabilizing, that is $\mathcal A' = \mathcal O_Y$ and $\mathcal A = \mathcal O_Y^{\oplus 2}$.
Note that in this case we should have $\mathcal H \neq 0$ (because $\mathrm{Hom}(\mathcal O_Y^{\oplus 2}, F_L) = 0$), consequently
$\mathcal H = i_* \mathcal E$ for some rank one sheaf $\mathcal E$ on $S$. Then we have $\mathcal H' = i_*\mathcal E'$ and this sheaf has at most
one-dimensional support. We use the equality
$$
p(\mathcal G', n) = p(F_L,n) + \frac{1}{a_0} \left( \frac{1}{3} P(i_*L,n) - P(i_*\mathcal E',n) \right),
$$
and observe that the degree of $P(i_*\mathcal E',n)$ is at most one, but $P(i_*L,n)$ has degree two with positive
leading coefficient, so we have $p(\mathcal G', n) > p(F_L, n)$. This completes the proof.
\fi
\end{proof}
Let us consider the moduli space of rank 3 semistable sheaves on $Y$ with Hilbert polynomial $P(F_L,n)$.
Denote by $\mathcal M_F$ its irreducible component which contains the sheaves $F_L$ from (\ref{eqn_F}).
\begin{lemma}\label{lem_mutation}
The left mutation of $i_*L$ through $\mathcal O_Y$ gives an open embedding $\mathcal M_L^\circ\to \mathcal M_F$.
\end{lemma}
\begin{proof}
Recall from the proof of Lemma \ref{lem_MML} that $\mathcal M_L^\circ$ was defined as the image of a map $\mathcal W^\circ\to \mathcal M_L$
where $\mathcal W^\circ$ was a fibre bundle over $Z^\circ$. On $X = \mathcal W^\circ\times Y$ a universal
sheaf $\mathcal L$ flat over $\mathcal W^\circ$ was constructed. Denote by $\pi\colon X\to \mathcal W^\circ$ the projection.
By definition of $\mathcal M_L^\circ$ and from Lemma \ref{lem_L} it follows that $\pi_*\mathcal L$
is a rank 3 vector bundle and we have an exact sequence $0\to\mathcal F_\mathcal L\to\pi^*\pi_*\mathcal L\to \mathcal L\to 0$.
The family of sheaves $\mathcal F_\mathcal L$ defines a map $\mathcal W^\circ\to \mathcal M_F$ which factors
through $\mathcal M_L^\circ\to \mathcal M_F$. We will show
that the differential of the latter map is an isomorphism.
For a sheaf $i_*L$ corresponding to a point of $\mathcal M_L^\circ$ and any tangent vector
$u\in\mathrm{Ext}^1(i_*L,i_*L)$ we have a unique morphism of triangles
\begin{equation}\label{eqn_mutation}
\begin{tikzcd}[]
F_L \dar{u'}\rar & \mathcal O_Y^{\oplus 3} \dar{0}\rar & i_*L \dar{u}\rar & F_L[1] \dar{u'[1]} \\
F_L[1] \rar & \mathcal O_Y^{\oplus 3}[1] \rar & i_*L[1]\rar & F_L[2]
\end{tikzcd}
\end{equation}
Uniqueness of $u'$ follows from $\mathrm{Ext}^1(\mathcal O_Y,F_L) = 0$. Moreover, $u$ is uniquely
determined by $u'$ because $\mathrm{Ext}^1(i_*L,\mathcal O_Y) = \mathrm{Ext}^3(\mathcal O_Y,i_*L(-3))^* = 0$. This
shows that the mutation induces an isomorphism of $\mathrm{Ext}^1(i_*L,i_*L)$ and $\mathrm{Ext}^1(F_L,F_L)$.
Finally, let us prove that the map $\mathcal M_L^\circ\to \mathcal M_F$ is injective. It follows from
Grothendieck-Verdier duality that $\EuScript{E}xt^2(i_*L,\mathcal O_Y)=i_*L^\vee(2)$.
Then from (\ref{eqn_F}) we see that $\EuScript{E}xt^1(F_L,\mathcal O_Y)=i_*L^\vee(2)$ and hence
$L$ can be reconstructed from $F_L$.
\end{proof}
\subsection{The symplectic form and Lagrangian subvarieties}
Let us recall the description of the two-form on the moduli spaces of sheaves on $Y$ from \cite{KM}.
Given a coherent sheaf $\mathcal F$ on $Y$ we can define its Atiyah class $\Ati{\mathcal F}\in\mathrm{Ext}^1(\mathcal F,\mathcal F\otimes\Omega_Y)$.
The Atiyah class is functorial, meaning that for any morphism of sheaves $\alpha\colon \mathcal F\to \mathcal G$
we have $\Ati{\mathcal G}\circ\alpha=(\alpha\otimes \mathrm{id})\circ\Ati{\mathcal F}$.
We define a bilinear form $\sigma$ on the vector space $\mathrm{Ext}^1(\mathcal F,\mathcal F)$. Given two elements $u,v\in \mathrm{Ext}^1(\mathcal F,\mathcal F)$
we consider the composition $\Ati{\mathcal F}\circ u\circ v\in \mathrm{Ext}^3(\mathcal F,\mathcal F\otimes\Omega_Y)$ and apply the
trace map $\mathrm{Tr}\colon\mathrm{Ext}^3(\mathcal F,\mathcal F\otimes\Omega_Y)\to \mathrm{Ext}^3(\mathcal O_Y,\Omega_Y)= H^{1,3}(Y)= \mathbb{C}$ to it:
\begin{equation}\label{eqn_sigma}
\sigma(u,v)=\mathrm{Tr}(\Ati{\mathcal F}\circ u\circ v).
\end{equation}
Note that when the Kuranishi space of $\mathcal F$ is smooth then for any $u\in \mathrm{Ext}^1(\mathcal F,\mathcal F)$ we have
$u\circ u=0$ and then $\sigma(u,u)=0$. In this case $\sigma$ is antisymmetric. Hence the formula
(\ref{eqn_sigma}) defines a two-form at smooth points of moduli spaces of sheaves on $Y$. This form
is closed by \cite{KM}, Theorem 2.2.
\begin{lemma}\label{lem_symplectic}
The formula (\ref{eqn_sigma}) defines a symplectic form on $\mathcal M_L^\circ$ which
coincides up to a non-zero constant with the restriction of the symplectic form on $Z$ under the isomorphism $\mathcal M_L^\circ\simeq Z^\circ$.
\end{lemma}
\begin{proof}
By Lemma \ref{lem-unobs} the sheaves $i_*L$ from $\mathcal M_L^\circ$ have unobstructed deformations,
so that (\ref{eqn_sigma}) indeed defines a two-form.
Recall from Lemma \ref{lem_mutation} that we have an open embedding $\mathcal M_L^\circ\hookrightarrow \mathcal M_F$.
Let us show that this embedding respects (up to a sign) symplectic forms on $\mathcal M_L$ and $\mathcal M_F$ given by (\ref{eqn_sigma}).
Note that by functoriality of Atiyah classes the following diagram gives a morphism
of triangles:
$$
\begin{tikzcd}[]
F_L \dar{\Ati{F_L}}\rar & \mathcal O_Y^{\oplus 3} \dar{\Ati{\mathcal O_Y^{\oplus 3}} = 0}\rar & i_*L \dar{\Ati{i_*L}}\rar & F_L[1] \dar{\Ati{F_L}[1]} \\
F_L\otimes\Omega_Y[1] \rar & \Omega_Y^{\oplus 3}[1] \rar & i_*L\otimes\Omega_Y[1]\rar & F_L\otimes\Omega_Y[2]
\end{tikzcd}
$$
For any pair of tangent vectors $u,v\in \mathrm{Ext}^1(i_*L,i_*L)$ we have two morphisms of triangles as in (\ref{eqn_mutation}).
If we compose these two morphisms of triangles with the one induced by Atiyah classes then we get the following:
$$
\begin{tikzcd}[]
F_L \dar{\Ati{F_L}\circ u'\circ v'}\rar & \mathcal O_Y^{\oplus 3} \dar{0}\rar &
i_*L \dar{\Ati{i_*L}\circ u\circ v}\rar & F_L[1] \dar{\Ati{F_L}\circ u'\circ v'[1]} \\
F_L\otimes\Omega_Y[3] \rar & \Omega_Y^{\oplus 3}[3] \rar & i_*L\otimes\Omega_Y[3]\rar & F_L\otimes\Omega_Y[4]
\end{tikzcd}
$$
This diagram is a morphism of triangles and the additivity of traces implies that $\sigma(u,v)=-\sigma(u',v')$.
By Theorem 4.3 from \cite{KM} the form $\sigma$ on $\mathcal M_F$ is symplectic, because
the sheaves $F_L$ are contained in $\mathcal A_Y$. Hence $\sigma$ is a symplectic form
on $\mathcal M_L^\circ$. But $\mathcal M_L^\circ$ is embedded into $Z$ as an open subset with
complement of codimension four. This implies that the symplectic form on
$\mathcal M_L^\circ$ is unique up to a constant, because $Z$ is IHS. This completes the
proof.
\end{proof}
\begin{thm}\label{thm_MMF}
The component $\mathcal M_F$ of the moduli space of Gieseker stable sheaves with Hilbert polynomial $P(F_L, n)$
is birational to the IHS manifold $Z$. Under this birational equivalence
the symplectic form on $Z$ defined in \cite{LLSvS} corresponds
to the Kuznetsov-Markushevich form on $\mathcal M_F$.
\end{thm}
\begin{proof}
Follows from Lemmas \ref{lem_MML}, \ref{lem_F}, \ref{lem_mutation}, \ref{lem_symplectic}.
\end{proof}
Now we explain how hyperplane sections of $Y$ give rise to Lagrangian
subvarieties of $Z$.
Let $H\subset \mathbb{P}^5$ be a generic hyperplane, so that $Y_H=Y\cap H$ is a smooth
cubic threefold. Twisted cubics contained in $Y_H$ form a subvariety $M_3(Y)_H\subset M_3(Y)$
whose image in $Z$ we denote by $Z_H$. Its open subset $Z_H^\circ = Z_H\cap Z^\circ$
consists of sheaves $i_*L$ whose support is contained in $H$.
\begin{prop}\label{prop_Lagr}
$Z_H$ is a Lagrangian subvariety of $Z$.
\end{prop}
\begin{proof}
It is clear that $Z_H$ has dimension four since the Grassmannian of three-dimensional
subspaces in $H$ is $\mathbb{P}^4$.
Consider a sheaf $i_*L$ whose support $S$ is smooth and contained in $Y_H$.
Since $L$ is a locally free sheaf on $S$ we have $\EuScript{E}xt^k(i_*L,i_*L)=i_*\Lambda^kN_{S/Y}$
(see for example \cite{KM}, Lemma 1.3.2). The higher cohomologies of the sheaves $\EuScript{E}xt^k(i_*L,i_*L)$
vanish for $k\geqslant 0$, because $N_{S/Y}=\mathcal O_S(1)^{\oplus 2}$ and the sheaves $\mathcal O_S(k)$
have no higher cohomologies for $k\geqslant 0$. Hence from the local-to-global spectral
sequence we find that $T_{i_*L}\mathcal M_L=\mathrm{Ext}^1(i_*L, i_*L)=H^0(S,N_{S/Y})$. Moreover, the Yoneda
multiplication on $\mathrm{Ext}$'s is given by the map $H^0(S,N_{S/Y})\times H^0(S,N_{S/Y})\to H^0(S,\Lambda^2N_{S/Y})$
which is induced from the exterior product morphism $N_{S/Y}\otimes N_{S/Y}\to \Lambda^2N_{S/Y}$
(see \cite{KM}, Lemma 1.3.3).
Now, the tangent space to $Z_H$ at $i_*L$ is $H^0(S,N_{S/Y_H})$. But the exterior product
$N_{S/Y_H}\otimes N_{S/Y_H}\to \Lambda^2N_{S/Y_H}=0$ vanishes because $N_{S/Y_H}$ is of rank one.
So the Yoneda product vanishes on the corresponding subspace of $\mathrm{Ext}^1(i_*L, i_*L)$
and from the definition of the symplectic form (\ref{eqn_sigma}) we conclude that the
tangent subspace to $Z_H$ is Lagrangian. This holds on an open subset of
$Z_H$, so $Z_H$ is a Lagrangian subvariety.
\end{proof}
In the next section we give a description of the subvarieties $Z_H$ in
terms of intermediate Jacobians of the threefolds $Y_H$.
\section{Twisted cubics on a cubic threefold}
In this section we assume that the cubic fourfold $Y$ and its hyperplane section $Y_H$ are
chosen generically, so that $Y_H$ is smooth and all the surfaces obtained by intersecting $Y_H$ with
three-dimensional subspaces have at worst ADE singularities. For general $Y$ and $H$ this indeed
will be the case, because for a general cubic threefold in $\mathbb{P}^4$ its hyperplane sections
have only ADE singularities. One can see this from dimension count by considering the codimensions
of loci of cubic surfaces with different singularity types (see for example \cite{LLSvS}, sections 2.2 and 2.3).
The cubic threefold $Y_H$ has an intermediate Jacobian $\IJac{Y_H}$ which is a principally polarized
abelian variety.
We will show that if we choose a general hyperplane $H$ then the Abel-Jacobi map
\[
\mathrm{AJ}\colon Z_H\to \IJac{Y_H}
\]
defines
a closed embedding on an open subset $Z_H^\circ$ and the complement $Z_H\backslash Z_H^\circ$ is contracted
to a point.
The image of $\mathrm{AJ}$ is the theta-divisor $\Theta \subset \IJac{Y_H}$.
Recall from the description of $Z$ that we have an embedding $\mu\colon Y\hookrightarrow Z$.
We have $Z_H^\circ \simeq Z_H\setminus \mu(Y)$ and $Z_H\cap \mu(Y)\simeq Y_H$.
Hence the Abel-Jacobi map $\mathrm{AJ}\colon Z_H\to \IJac{Y_H}$
gives a resolution of the unique singular point of the theta-divisor and the exceptional
divisor of this map is isomorphic to $Y_H$. This explicit description of the
singularity of the theta-divisor first obtained in \cite{B2} implies
the Torelli theorem for cubic threefolds.
The fact that $Z_H$ is birational to the theta-divisor in $\IJac{Y_H}$ also
follows from \cite{I} (see also \cite[Proposition 4.2]{B1}).
\subsection{Differential of the Abel-Jacobi map}
As before, we will identify the open subset $Z_H^\circ$ with an open subset in the moduli space
of sheaves of the form $i_*L$, where $i\colon S\hookrightarrow Y_H$ is a hyperplane
section and $L$ is a sheaf which gives a determinantal representation (\ref{eqn_determinantal}) of this section.
The Abel-Jacobi map $\mathrm{AJ}\colon Z_H^\circ\to \IJac{Y_H}$ can be described as follows.
We use the Chern classes with values in the Chow ring $\mathrm{CH}(Y_H)$.
The second Chern class $c_2(i_*L) \in \mathrm{CH}^2(Y_H)$ is a cycle class of degree $3$.
Let $h\in \mathrm{CH}^1(Y_H)$ denote the class of a hyperplane section, then
$c_2(i_*L)-h^2$ is a cycle class homologous to zero, and it defines an element in the intermediate Jacobian.
Since $c_2(i_* L)$ can be represented by corresponding twisted cubics, the map above extends to $\mathrm{AJ}\colon Z_H \to \IJac{Y_H}$.
\begin{lemma}\label{lem_dAJ}
The differential of the Abel-Jacobi map $d\mathrm{AJ}_{i_*L}\colon \mathrm{Ext}^1(i_*L,i_*L)\to H^{1,2}(Y_H)$
at the point corresponding to the sheaf $i_*L$ is given by
\begin{equation}\label{eqn_dAJ}
d\mathrm{AJ}_{i_*L}(u) = \frac12 \mathrm{Tr}(\Ati{i_*L}\circ u),
\end{equation}
for any $u\in \mathrm{Ext}^1(i_*L,i_*L)$.
\end{lemma}
\begin{proof}
We apply the general formula for the derivative of the Abel-Jacobi map, see Appendix \ref{appendix}, Proposition \ref{prop_dAJ_general}.
We have $c_1(i_*L) = 0$, so that $s_2(i_*L) = -2c_2(i_*L)$, which yields the $\frac12$ factor in the statement.
\end{proof}
\iffalse
We will reduce the question to the case when the Abel-Jacobi map is applied
to a structure sheaf of a smooth curve $C\subset Y_H$, a section of $L$.
In this case it is known (see \cite{BF}, Proposition 8.7) that the differential
is given by an analogous formula: for any $v\in\mathrm{Ext}^1(i_*\mathcal O_{C}, i_*\mathcal O_{C})$
we have $d\mathrm{AJ}_{i_*\mathcal O_{C}}(v) = \mathrm{Tr}(\Ati{i_*\mathcal O_{C}}\circ v)$. Next note, that the
same will be true if we replace $O_{C}$ by any line bundle on the curve $C$ because
the second Chern class will stay the same.
On $S$ we have an exact triple $0\longrightarrow\mathcal O_S\longrightarrow L\longrightarrow j_*\mathcal O_{\mathbb{P}^1}(2)\longrightarrow 0$
where $C$ is a smooth curve given by a section of $L$ and $j\colon C\hookrightarrow S$ denotes the embedding
(recall from \cite{LLSvS} that any generalized aCM twisted cubic belongs to the unique
two-dimensional linear system and is smooth generically). Let us use the notation
$L_C = j_*\mathcal O_{\mathbb{P}^1}(2)$.
Local deformations of the sheaf $i_*L$ are in one-to-one correspondence with local
deformations of $i_*\mathcal O_S$, so given any $u\in \mathrm{Ext}^1(i_*L,i_*L)$ we can find unique
$u'\in \mathrm{Ext}^1(i_*\mathcal O_S,i_*\mathcal O_S)$ and some $v\in\mathrm{Ext}^1(i_*L_C, i_*L_C)$ which give a morphism
of triangles
$$
\begin{tikzcd}[]
i_*\mathcal O_S \dar{u'}\rar & i_*L \dar{u}\rar & i_*L_C \dar{v}\rar & i_*\mathcal O_S[1] \dar{u'[1]} \\
i_*\mathcal O_S[1] \rar & i_*L[1] \rar & i_*L_C[1]\rar & i_*\mathcal O_S[2]
\end{tikzcd}
$$
Composing $u$, $u'$ and $v$ with Atiyah classes for $i_*L$, $i_*\mathcal O_S$ and $i_*L_C$ gives
us a morphism of triangles
$$
\begin{tikzcd}[]
i_*\mathcal O_S \dar{\Ati{i_*\mathcal O_S}\circ u'}\rar & i_*L \dar{\Ati{i_*L}\circ u}\rar
& i_*L_C \dar{\Ati{i_*L_C}\circ v}\rar & i_*\mathcal O_S[1] \dar{\Ati{i_*\mathcal O_S}\circ u'[1]} \\
\Omega_{Y_H}\otimes i_*\mathcal O_S[2] \rar & \Omega_{Y_H}\otimes i_*L[2] \rar & \Omega_{Y_H}\otimes i_*L_C[2] \rar & \Omega_{Y_H}\otimes i_*\mathcal O_S[3]
\end{tikzcd}
$$
where we have used the functoriality of Atiyah classes. Now we note that $\mathrm{Tr}(\Ati{i_*\mathcal O_S}\circ u') = 0$.
The reason is that the morphism $u'\colon i_*\mathcal O_S\to i_*\mathcal O_S[1]$ is included into the following
morphism of triangles
$$
\begin{tikzcd}[]
\mathcal O_{Y_H} \dar{0}\rar & i_*\mathcal O_S \dar{u'}\rar & \mathcal O_{Y_H}(-1)[1] \dar{0}\rar & \mathcal O_{Y_H}[1] \dar{0} \\
\mathcal O_{Y_H}[1] \rar & i_*\mathcal O_S[1] \rar & \mathcal O_{Y_H}(-1)[2] \rar & \mathcal O_{Y_H}[2]
\end{tikzcd}
$$
The left and the central squares in this diagram commute because of the identities $\mathrm{Ext}^1(\mathcal O_{Y_H},i_*\mathcal O_S)=H^1(S,\mathcal O_S)=0$ and
$\mathrm{Ext}^2(i_*\mathcal O_S,\mathcal O_{Y_H}(-1))=H^1(S,\mathcal O_S(-1))=0$. If we take the composition with the Atiyah classes,
the additivity of traces will show that $\mathrm{Tr}(\Ati{i_*\mathcal O_S}\circ u') = 0$.
From the discussion above we conclude that $\mathrm{Tr}(\Ati{i_*L}\circ u) = - \mathrm{Tr}(\Ati{i_*L_C}\circ v)$.
But $\mathrm{AJ}(i_*L) = -\mathrm{AJ}(i_*L_C)$ and we know that $d\mathrm{AJ}_{i_*L_C} = \mathrm{Tr}(\Ati{i_*L_C}\circ v)$,
hence the claim of the lemma.
\fi
It will be convenient for us to rewrite (\ref{eqn_dAJ}) in terms of the linkage class of a sheaf, see \cite{KM}.
We recall its definition in our particular case of the embedding $j\colon Y_H\hookrightarrow \mathbb{P}^4$.
If $\mathcal F$ is a sheaf on $Y_H$ then the object $j^*j_*\mathcal F\in \EuScript{D}^b(Y_H)$ has non-zero cohomologies only in degrees
$-1$ and $0$. They are equal to $\mathcal F\otimes N_{Y/\mathbb{P}^4}^\vee = \mathcal F(-3)$ and $\mathcal F$ respectively.
Hence we have the triangle
\[
\mathcal F(-3)[1]\longrightarrow \mathrm{L} j^*j_*\mathcal F\longrightarrow \mathcal F\longrightarrow \mathcal F(-3)[2].
\]
The last morphism in
this triangle is called the linkage class of $\mathcal F$ and will be denoted by $\epsilon_\mathcal F\colon \mathcal F\to \mathcal F(-3)[2]$.
The linkage class can also be described as follows (see \cite{KM}, Theorem 3.2):
let us denote by $\kappa\in \mathrm{Ext}^1(\Omega_{Y_H},\mathcal O_{Y_H}(-3))$
the extension class of the conormal sequence $0\to\mathcal O_{Y_H}(-3)\to\Omega_{\mathbb{P}^4}|_{Y_H}\to \Omega_{Y_H}\to 0$; then
we have $\epsilon_{\mathcal F} = (\mathrm{id}_{\mathcal F}\otimes \kappa)\circ\Ati{\mathcal F}$.
Note that composition with $\kappa$ gives an isomorphism of vector spaces $H^{1,2}(Y_H)=\mathrm{Ext}^2(\mathcal O_{Y_H},\Omega_{Y_H})$
and $\mathrm{Ext}^3(\mathcal O_{Y_H}, \mathcal O_{Y_H}(-3)) = H^0(Y_H,\mathcal O_{Y_H}(1))^*$. Composing the right hand side
of (\ref{eqn_dAJ}) with $\kappa$ and using the fact that taking traces commutes with compositions,
we obtain the following expression for $d\mathrm{AJ}(u)$ where $u\in\mathrm{Ext}^1(i_*L,i_*L)$:
\begin{equation}\label{eqn_dAJ2}
\kappa \circ d\mathrm{AJ}_{i_*L}(u) = \frac12 \mathrm{Tr}(\epsilon_{i_*L}\circ u) \in H^0(Y_H, \mathcal O_{Y_H}(1))^*
\end{equation}
\begin{prop}\label{prop_dAJ}
The differential of the Abel-Jacobi map (\ref{eqn_dAJ}) is injective.
\end{prop}
\begin{proof}
As before, we will denote by $i\colon S\hookrightarrow Y_H$ and $j\colon Y_H\hookrightarrow \mathbb{P}^4$ the
embeddings. A point of $Z_H^\circ$ is represented by a sheaf $i_*L$.
Let us also use the notation $\mathcal F = i_*L$.
It suffices to show that the map $u \mapsto \kappa \circ d\mathrm{AJ}_{i_*L}(u)$ is injective.
The proof is done in three steps.
{\it Step 1.} Let us construct a locally free resolution of $j_*\mathcal F$.
We decompose $j_*\mathcal F$ with respect to the exceptional collection
$\mathcal O_{\mathbb{P}^4}(-2)$, $\mathcal O_{\mathbb{P}^4}(-1)$, $\mathcal O_{\mathbb{P}^4}$, $\mathcal O_{\mathbb{P}^4}(1)$, $\mathcal O_{\mathbb{P}^4}(2)$.
The sheaf $j_*\mathcal F$ is
already left-orthogonal to $\mathcal O_{\mathbb{P}^4}(2)$ and $\mathcal O_{\mathbb{P}^4}(1)$ (see Lemma \ref{lem_L}).
It is globally generated by (\ref{eqn_determinantal}) and its left
mutation is the shift of the sheaf $\mathcal K$ from the exact triple
$0\longrightarrow\mathcal K\longrightarrow\mathcal O_{\mathbb{P}^4}^{\oplus 3}\longrightarrow j_*\mathcal F\longrightarrow 0$.
From the cohomology exact sequence we see that $H^0(\mathbb{P}^4,\mathcal K(1)) = \mathbb{C}^6$ and $H^k(\mathbb{P}^4,\mathcal K(1))=0$
for $k\geqslant 1$. We can also check that $\mathcal K(1)$ is globally generated (it is in fact
Castelnuovo-Mumford $0$-regular, as one can see using (\ref{eqn_determinantal})).
The left mutation of $\mathcal K$ through $\mathcal O_{\mathbb{P}^4}(-1)$ is the cone of
the surjection $\mathcal O_{\mathbb{P}^4}(-1)^{\oplus 6}\to \mathcal K$, and it lies in the subcategory generated
by $\mathcal O_{\mathbb{P}^4}(-2)$. Since it has rank 3, this completes the construction of the
resolution for $j_*\mathcal F$.
The resulting resolution is:
\begin{equation}\label{eqn_resjF}
0\longrightarrow\mathcal O_{\mathbb{P}^4}(-2)^{\oplus 3}\longrightarrow \mathcal O_{\mathbb{P}^4}(-1)^{\oplus 6}\longrightarrow \mathcal O_{\mathbb{P}^4}^{\oplus 3}\longrightarrow j_*\mathcal F\longrightarrow 0.
\end{equation}
{\it Step 2.} Let us show that the linkage class $\epsilon_\mathcal F$ induces an isomorphism
\[
\mathrm{Ext}^1(\mathcal F,\mathcal F) \to \mathrm{Ext}^3(\mathcal F,\mathcal F(-3)).
\]
The object $\mathrm{L} j^*j_*\mathcal F$ is included into the triangle
$$
\mathrm{L} j^*j_*\mathcal F\longrightarrow \mathcal F\stackrel{\epsilon_\mathcal F}{\longrightarrow} \mathcal F(-3)[2]\longrightarrow \mathrm{L} j^*j_*\mathcal F[1].
$$
Applying $\mathrm{Hom}(\mathcal F,-)$ to this triangle we find the following exact sequence:
$$
\mathrm{Ext}^1(\mathcal F,\mathrm{L} j^*j_*\mathcal F)\longrightarrow \mathrm{Ext}^1(\mathcal F,\mathcal F)\stackrel{\epsilon_\mathcal F\circ-}{\longrightarrow} \mathrm{Ext}^3(\mathcal F,\mathcal F(-3))
\longrightarrow \mathrm{Ext}^2(\mathcal F,\mathrm{L} j^*j_*\mathcal F).
$$
Note that by (\ref{eqn_resjF}) the object $\mathrm{L} j^*j_*\mathcal F$ is represented by a complex of the form
$0\to \mathcal O_{Y_H}(-2)^{\oplus 3}\to \mathcal O_{Y_H}(-1)^{\oplus 6}\to \mathcal O_{Y_H}^{\oplus 3}\to 0$.
Let us check that $\mathrm{Ext}^2(\mathcal F,\mathrm{L} j^*j_*\mathcal F) = 0$. By Serre duality $\mathrm{Ext}^q(\mathcal F,\mathcal O_{Y_H}(-p))
=\mathrm{Ext}^{3-q}(\mathcal O_{Y_H}(-p),\mathcal F(-2))^* = H^{3-q}(Y_H,\mathcal F(p-2))^*$ and from (\ref{eqn_determinantal})
we see that for $p=0$ and $1$ these cohomology groups vanish, and for $p=2$ the only
non-vanishing group corresponds to $q=3$. The spectral sequence computing
$\mathrm{Ext}^k(\mathcal F,\mathrm{L} j^*j_*\mathcal F)$, obtained from the complex representing $\mathrm{L} j^*j_*\mathcal F$, implies
that $\mathrm{Ext}^k(\mathcal F,\mathrm{L} j^*j_*\mathcal F)=0$ for $k\neq 1$ and $\mathrm{Ext}^1(\mathcal F,\mathrm{L} j^*j_*\mathcal F)=H^0(Y_H,\mathcal F)^*=\mathbb{C}^3$.
We conclude that the map
$\mathrm{Ext}^1(\mathcal F,\mathcal F)\stackrel{\epsilon_\mathcal F\circ-}{\longrightarrow} \mathrm{Ext}^3(\mathcal F,\mathcal F(-3))$ is surjective.
It is actually an isomorphism, because the vector spaces are of the same dimension.
The dimensions can be computed in the same way as in the proof of Lemma \ref{lem_symplectic}.
{\it Step 3.}
Let us show that $\mathrm{Tr}: \mathrm{Ext}^3(\mathcal F, \mathcal F(-3)) \to H^3(Y_H, \mathcal O_{Y_H}(-3))$ is injective.
Using Serre duality we identify the dual to the trace map with
\[
\mathrm{Tr}^*: H^0(Y_H, \mathcal O_{Y_H}(1)) \to \mathrm{Hom}(\mathcal F, \mathcal F(1)).
\]
One can show as in the proof of Lemma \ref{lem-unobs} that $\mathrm{Hom}(\mathcal F, \mathcal F(1)) = H^0(S, \mathcal O(1))$ and postcomposing
$\mathrm{Tr}^*$ with this isomorphism gives the restriction map
\[
H^0(Y_H,\mathcal O_{Y_H}(1))\to H^0(S,\mathcal O_S(1))
\]
which is surjective.
We see that the composition
\[
\mathrm{Ext}^1(\mathcal F, \mathcal F) \to \mathrm{Ext}^3(\mathcal F, \mathcal F(-3)) \to H^3(Y_H, \mathcal O_{Y_H}(-3))
\]
is injective and the proof is finished by means of formula (\ref{eqn_dAJ2}).
\end{proof}
\subsection{Image of the Abel-Jacobi map}
\begin{thm} \label{thm_AJ}
Assume that $Y_H$ is smooth and all its hyperplane sections have at worst ADE singularities.
Then the image of the Abel-Jacobi map $\mathrm{AJ}\colon Z_H\to \IJac{Y_H}$ is the theta-divisor $\Theta \subset \IJac{Y_H}$.
The map $\mathrm{AJ}$ is an embedding on $Z_H^\circ$ and contracts the divisor $Y_H = Z_H \backslash Z_H^\circ$
to the unique singular point of $\Theta$.
\end{thm}
\begin{proof}
The divisor $Y_H$ is contracted by the Abel-Jacobi map
to a point because $Y_H$ is a cubic threefold which has no global one-forms.
To identify the image of $\mathrm{AJ}$ it is enough to check that a general point of $Z_H$ is mapped to a point of $\Theta$.
A general point $z\in Z_H$ is represented by a smooth twisted cubic $C$ on a smooth hyperplane section $S\subset Y_H$.
Denote by $C_0\subset S$ a hyperplane section of $S$. Then $C-C_0$ is a degree zero cycle on $Y_H$ and $z$ is
mapped to the corresponding element of the intermediate Jacobian. The cohomology class $[C-C_0]\in H^2(S,\mathbb{Z})$ is
orthogonal to the class of the canonical bundle $K_S$ and has square $-2$. Hence it is a root in the $E_6$ lattice.
All such cohomology classes can be represented by differences of pairs of lines $l_1-l_2$ in 6 different
ways.
Recall that the Fano variety of lines on the
cubic threefold $Y_H$ is a surface which we will denote by $X$. It was shown in \cite{CG} that
the theta divisor $\Theta\subset \IJac{Y_H}$ can be described as the image of the map $X\times X\to \IJac{Y_H}$
which sends a pair of lines $(l_1,l_2)$ to the point in $\IJac{Y_H}$ corresponding to degree zero
cycle $l_1-l_2$.
The map $X \times X \to \Theta$ has degree $6$.
We get a commutative diagram:
$$
\begin{tikzcd}[]
X \times X \arrow[dashed]{d}[swap]{6:1} \arrow{r}{6:1} & \Theta \\
Z_H \arrow{ur}[swap]{\mathrm{AJ}} &
\end{tikzcd}
$$
It follows from the diagram above that $\mathrm{AJ}$ is generically of degree one.
Since $\mathrm{AJ}$ is \'etale on $Z_H^\circ$ by Proposition \ref{prop_dAJ} and the theta-divisor $\Theta$ is a normal variety \cite[Proposition 2, \S3]{B2}
we deduce that $\mathrm{AJ}: Z_H^\circ \to \Theta$ is an open embedding. This completes the proof.
\end{proof}
\appendix
\section{Differential of the Abel-Jacobi map}\label{appendix}
Let $X$ be a smooth complex projective variety of dimension $n$.
Recall that the
$p$-th intermediate Jacobian of $X$ is the complex torus
$$
\mathrm{J}^p(X) = H^{2p-1}(X,\mathbb{C})/(F^pH^{2p-1}(X,\mathbb{C})+H^{2p-1}(X,\mathbb{Z})),
$$
where $F^{\sdot}$ denotes the Hodge filtration.
We use the Abel-Jacobi map \cite[Appendix A]{G2}
\[
\mathrm{AJ}^p\colon \mathrm{CH}^p(X,\mathbb{Z})_{h}\to \mathrm{J}^p(X)
\]
where $\mathrm{CH}^p(X,\mathbb{Z})_{h}$ is the group of homologically trivial codimension $p$ algebraic cycles on $X$
up to rational equivalence.
For a coherent sheaf $\mathcal F_0$ on $X$ we consider integral characteristic classes
\[
s_p(\mathcal F_0) = p! \cdot ch_p(\mathcal F_0) \in \mathrm{CH}^p(X,\mathbb{Z})
\]
where $ch_p(\mathcal F_0)$ is the $p$'th component of the Chern character $ch(\mathcal F_0)$.
These classes can be expressed in terms of the Chern classes using Newton's formula \cite[\S16]{MS}.
Let us consider a deformation of $\mathcal F_0$ over a smooth base $B$ with base point $0\in B$,
that is a coherent sheaf $\mathcal F$ on $X\times B$ flat over $B$ and with $\mathcal F_0 \simeq \mathcal F|_{\pi_B^{-1}(0)}$.
We will denote by $\pi_B$ and $\pi_X$ the two projections from $X\times B$
and by $\mathcal F_t$ the restriction of $\mathcal F$ to $\pi_B^{-1}(t)$, $t\in B$.
In this setting the difference $s_p(\mathcal F_t)-s_p(\mathcal F_0)$ is contained in $\mathrm{CH}^p(X, \mathbb{Z})_{h}$
and we get an induced Abel-Jacobi map
\[
\mathrm{AJ}^p_{\mathcal F}: B \to \mathrm{J}^p(X).
\]
Since the classes $s_p$ are additive, it follows that
if $0 \to \mathcal F' \to \mathcal F \to \mathcal F'' \to 0$ is a short exact sequence of sheaves on $X \times B$ flat over $B$, then
\begin{equation}\label{AJ-add}
\mathrm{AJ}^p_{\mathcal F} = \mathrm{AJ}^p_{\mathcal F'} + \mathrm{AJ}^p_{\mathcal F''}.
\end{equation}
Recall that a coherent sheaf $\mathcal F_0$ has an Atiyah class $\Ati{\mathcal F_0}\in \mathrm{Ext}^1(\mathcal F_0,\mathcal F_0\otimes\Omega_X)$ \cite[1.6]{KM}.
The vector space $\bigoplus_{p,q\geqslant 0}\mathrm{Ext}^q(\mathcal F_0,\mathcal F_0\otimes\Omega_X^p)$ has the structure of a
bi-graded algebra with multiplication induced by Yoneda product of $\mathrm{Ext}$'s and exterior
product of differential forms and this defines the $p$'th power of the Atiyah class
\[
\Ati{\mathcal F_0}^p \in \mathrm{Ext}^p(\mathcal F_0,\mathcal F_0\otimes\Omega^p_X).
\]
Given any tangent vector $v\in T_{0}B$ we shall denote its Kodaira-Spencer class
by $\mathrm{KS}_{\mathcal F_0}(v)\in \mathrm{Ext}^1(\mathcal F_0,\mathcal F_0)$
and we consider the composition $\Ati{\mathcal F_0}^p \circ \mathrm{KS}_{\mathcal F_0}(v) \in \mathrm{Ext}^{p+1}(\mathcal F_0, \mathcal F_0 \otimes \Omega_X^p)$.
We will also use the trace maps \cite[1.2]{KM}
$$\mathrm{Tr}\colon \mathrm{Ext}^q(\mathcal F_0,\mathcal F_0\otimes\Omega_X^p)\to \mathrm{Ext}^q(\mathcal O_X,\Omega_X^p)=H^{p,q}(X).$$
\begin{prop}\label{prop_dAJ_general}
In the above setting the differential of the Abel-Jacobi map $\mathrm{AJ}_{\mathcal F}^p: B \to \mathrm{J}^p(X)$, $p \geqslant 2$ at $0 \in B$ is given by
\begin{equation}\label{eqn_dAJ_general}
d\mathrm{AJ}^p_{\mathcal F,0}(v)=\mathrm{Tr}\bigl( (-1)^{p-1}\Ati{\mathcal F_0}^{p-1} \circ \mathrm{KS}_{\mathcal F_0}(v)\bigr),
\end{equation}
for any $v\in T_{0}B$. The right hand side is an element of
$H^{p-1,p}(X) \subset H^{2p-1}(X,\mathbb{C})/F^pH^{2p-1}(X,\mathbb{C})$.
\end{prop}
\begin{proof}
We argue by induction on the length of a locally free resolution of $\mathcal F$.
The base of induction is the case when $\mathcal F_0$ is a vector bundle. Then the result is essentially contained
in the paper of Griffiths \cite{G1} (in particular formula 6.8). We will show how to do
the induction step. We note that the statement is local, so we may replace the base
$B$ by an open neighborhood of $0\in B$ every time it is necessary. In particular we assume that $B$ is affine.
By our assumptions $X$ is projective and we denote by $\mathcal O_X(1)$ an ample line bundle.
Then we can find $k$ big enough, so that $\mathcal F(k)$ is generated by global sections
and has no higher cohomology. We define
a sheaf $\mathcal G$ on $X\times B$ as the kernel of the natural map:
$$
0\longrightarrow\mathcal G\longrightarrow\pi_B^*{\pi_B}_*(\mathcal F(k))\otimes\mathcal O_X(-k)\longrightarrow \mathcal F\longrightarrow 0.
$$
Since $\mathcal F$ is flat over $B$ and ${\pi_B}_*(\mathcal F_0(k))$
is a vector bundle on $B$ for $k$ large enough \cite[Proof of Theorem 9.9]{H},
the sheaf $\mathcal G$ is flat over $B$.
It follows from (\ref{AJ-add}) that
$\mathrm{AJ}_{\mathcal G}^p = -\mathrm{AJ}_{\mathcal F}^p$.
Since the homological dimension of $\mathcal G$ has dropped by one,
the induction hypothesis yields the formula (\ref{eqn_dAJ_general}) for $\mathcal G$. It remains to relate the right hand side
of (\ref{eqn_dAJ_general}) for $\mathcal G_0$ and for $\mathcal F_0$.
Using functoriality of the Kodaira-Spencer classes
we obtain the following morphism of triangles:
$$
\begin{tikzcd}[]
\mathcal G_0 \dar{u'}\rar & H^0(X,\mathcal F_0(k))\otimes\mathcal O_X(-k) \dar{0}\rar & \mathcal F_0 \dar{u}\rar & \mathcal G_0[1] \dar{u'[1]} \\
\mathcal G_0[1]\rar & H^0(X,\mathcal F_0(k))\otimes\mathcal O_X(-k)[1]\rar & \mathcal F_0[1]\rar & \mathcal G_0[2]
\end{tikzcd}
$$
where $u = \mathrm{KS}_{\mathcal F_0}(v)\in \mathrm{Ext}^1(\mathcal F_0,\mathcal F_0)$ and $u' = \mathrm{KS}_{\mathcal G_0}(v)\in \mathrm{Ext}^1(\mathcal G_0,\mathcal G_0)$.
Composing the vertical arrows with $\Ati{\mathcal F_0}^{p-1}$, $\Ati{\mathcal O_X(-k)}^{p-1}$ and $\Ati{\mathcal F_0}^{p-1}$ respectively
and using the additivity of traces we get
$\mathrm{Tr}(\Ati{\mathcal F_0}^{p-1}\circ \mathrm{KS}_{\mathcal F_0}(v)) = -\mathrm{Tr}(\Ati{\mathcal G_0}^{p-1}\circ \mathrm{KS}_{\mathcal G_0}(v))$
because the map in the middle is zero. This completes the induction step.
\end{proof}
\end{document} |